Diffstat (limited to 'drivers/infiniband')
120 files changed, 3388 insertions, 1754 deletions
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 6ebd9ad95010..e3cdafff8ece 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -10,7 +10,8 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ - multicast.o mad.o smi.o agent.o mad_rmpp.o + multicast.o mad.o smi.o agent.o mad_rmpp.o \ + security.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 02971e239a18..437522ca97b4 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -61,6 +61,7 @@ struct addr_req { void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context); unsigned long timeout; + struct delayed_work work; int status; u32 seq; }; @@ -179,8 +180,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr, } /* Construct the family header first */ - header = (struct rdma_ls_ip_resolve_header *) - skb_put(skb, NLMSG_ALIGN(sizeof(*header))); + header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); header->ifindex = dev_addr->bound_dev_if; nla_put(skb, attrtype, size, daddr); @@ -269,6 +269,7 @@ int rdma_translate_ip(const struct sockaddr *addr, return ret; ret = rdma_copy_addr(dev_addr, dev, NULL); + dev_addr->bound_dev_if = dev->ifindex; if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); dev_put(dev); @@ -281,6 +282,7 @@ int rdma_translate_ip(const struct sockaddr *addr, &((const struct sockaddr_in6 *)addr)->sin6_addr, dev, 1)) { ret = rdma_copy_addr(dev_addr, dev, NULL); + dev_addr->bound_dev_if = dev->ifindex; if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); break; @@ -294,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr, } EXPORT_SYMBOL(rdma_translate_ip); -static void set_timeout(unsigned long time) +static void set_timeout(struct delayed_work *delayed_work, unsigned long time) { unsigned long delay; @@ -302,7 +304,7 @@ static void set_timeout(unsigned long time) if ((long)delay < 0) delay = 0; - mod_delayed_work(addr_wq, &work, delay); + mod_delayed_work(addr_wq, delayed_work, delay); } static void queue_req(struct addr_req *req) @@ -317,8 +319,7 @@ static void queue_req(struct addr_req *req) list_add(&req->list, &temp_req->list); - if (req_list.next == &req->list) - set_timeout(req->timeout); + set_timeout(&req->work, req->timeout); mutex_unlock(&lock); } @@ -406,10 +407,10 @@ static int addr4_resolve(struct sockaddr_in *src_in, fl4.saddr = src_ip; fl4.flowi4_oif = addr->bound_dev_if; rt = ip_route_output_key(addr->net, &fl4); - if (IS_ERR(rt)) { - ret = PTR_ERR(rt); - goto out; - } + ret = PTR_ERR_OR_ZERO(rt); + if (ret) + return ret; + src_in->sin_family = AF_INET; src_in->sin_addr.s_addr = fl4.saddr; @@ -424,8 +425,6 @@ static int addr4_resolve(struct sockaddr_in *src_in, *prt = rt; return 0; -out: - return ret; } #if IS_ENABLED(CONFIG_IPV6) @@ -449,12 +448,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, return ret; rt = (struct rt6_info *)dst; - if (ipv6_addr_any(&fl6.saddr)) { - ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, - &fl6.daddr, 0, &fl6.saddr); - if (ret) - goto put; - + if (ipv6_addr_any(&src_in->sin6_addr)) { src_in->sin6_family = AF_INET6; src_in->sin6_addr = fl6.saddr; } 
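The addr4_resolve() hunk above trades the IS_ERR()/goto pattern for PTR_ERR_OR_ZERO(), which folds the error-pointer test and conversion into a single call so the error path becomes a direct return. A minimal sketch of the idiom, with struct my_route and resolve_route() as hypothetical stand-ins for struct rtable and ip_route_output_key():

#include <linux/err.h>

struct my_route;                                /* hypothetical route type */
struct my_route *resolve_route(int key);        /* returns ptr or ERR_PTR() */

static int lookup_route(int key, struct my_route **prt)
{
        struct my_route *rt = resolve_route(key);
        int ret = PTR_ERR_OR_ZERO(rt);  /* PTR_ERR(rt) if IS_ERR(rt), else 0 */

        if (ret)
                return ret;             /* direct return; no goto/label */

        *prt = rt;
        return 0;
}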
@@ -471,9 +465,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, *pdst = dst; return 0; -put: - dst_release(dst); - return ret; } #else static int addr6_resolve(struct sockaddr_in6 *src_in, @@ -518,6 +509,11 @@ static int addr_resolve(struct sockaddr *src_in, struct dst_entry *dst; int ret; + if (!addr->net) { + pr_warn_ratelimited("%s: missing namespace\n", __func__); + return -EINVAL; + } + if (src_in->sa_family == AF_INET) { struct rtable *rt = NULL; const struct sockaddr_in *dst_in4 = @@ -531,8 +527,12 @@ static int addr_resolve(struct sockaddr *src_in, if (resolve_neigh) ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq); - ndev = rt->dst.dev; - dev_hold(ndev); + if (addr->bound_dev_if) { + ndev = dev_get_by_index(addr->net, addr->bound_dev_if); + } else { + ndev = rt->dst.dev; + dev_hold(ndev); + } ip_rt_put(rt); } else { @@ -548,19 +548,63 @@ static int addr_resolve(struct sockaddr *src_in, if (resolve_neigh) ret = addr_resolve_neigh(dst, dst_in, addr, seq); - ndev = dst->dev; - dev_hold(ndev); + if (addr->bound_dev_if) { + ndev = dev_get_by_index(addr->net, addr->bound_dev_if); + } else { + ndev = dst->dev; + dev_hold(ndev); + } dst_release(dst); } - addr->bound_dev_if = ndev->ifindex; - addr->net = dev_net(ndev); + if (ndev->flags & IFF_LOOPBACK) { + ret = rdma_translate_ip(dst_in, addr, NULL); + /* + * Put the loopback device and get the translated + * device instead. + */ + dev_put(ndev); + ndev = dev_get_by_index(addr->net, addr->bound_dev_if); + } else { + addr->bound_dev_if = ndev->ifindex; + } dev_put(ndev); return ret; } +static void process_one_req(struct work_struct *_work) +{ + struct addr_req *req; + struct sockaddr *src_in, *dst_in; + + mutex_lock(&lock); + req = container_of(_work, struct addr_req, work.work); + + if (req->status == -ENODATA) { + src_in = (struct sockaddr *)&req->src_addr; + dst_in = (struct sockaddr *)&req->dst_addr; + req->status = addr_resolve(src_in, dst_in, req->addr, + true, req->seq); + if (req->status && time_after_eq(jiffies, req->timeout)) { + req->status = -ETIMEDOUT; + } else if (req->status == -ENODATA) { + /* requeue the work for retrying again */ + set_timeout(&req->work, req->timeout); + mutex_unlock(&lock); + return; + } + } + list_del(&req->list); + mutex_unlock(&lock); + + req->callback(req->status, (struct sockaddr *)&req->src_addr, + req->addr, req->context); + put_client(req->client); + kfree(req); +} + static void process_req(struct work_struct *work) { struct addr_req *req, *temp_req; @@ -578,20 +622,23 @@ static void process_req(struct work_struct *work) true, req->seq); if (req->status && time_after_eq(jiffies, req->timeout)) req->status = -ETIMEDOUT; - else if (req->status == -ENODATA) + else if (req->status == -ENODATA) { + set_timeout(&req->work, req->timeout); continue; + } } list_move_tail(&req->list, &done_list); } - if (!list_empty(&req_list)) { - req = list_entry(req_list.next, struct addr_req, list); - set_timeout(req->timeout); - } mutex_unlock(&lock); list_for_each_entry_safe(req, temp_req, &done_list, list) { list_del(&req->list); + /* It is safe to cancel other work items from this work item + * because at a time there can be only one work item running + * with this single threaded work queue. 
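The heart of the addr.c rework is process_one_req() above: instead of a single global delayed work scanning req_list, each struct addr_req now embeds its own struct delayed_work, so one request can be retried or cancelled without touching the others. A condensed sketch of the pattern, with my_wq, try_resolve() and finish_req() as hypothetical placeholders:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;          /* hypothetical queue */

struct my_req {
        struct delayed_work work;               /* one work item per request */
        unsigned long timeout;                  /* absolute jiffies deadline */
        int status;
};

int try_resolve(struct my_req *req);            /* hypothetical helpers */
void finish_req(struct my_req *req);

static void process_one(struct work_struct *_work)
{
        struct my_req *req = container_of(_work, struct my_req, work.work);

        req->status = try_resolve(req);
        if (req->status == -ENODATA &&
            !time_after_eq(jiffies, req->timeout)) {
                /* Requeue only this request; the rest are untouched. */
                mod_delayed_work(my_wq, &req->work, msecs_to_jiffies(50));
                return;
        }
        finish_req(req);
}

static void submit_req(struct my_req *req, unsigned long timeout_ms)
{
        req->timeout = jiffies + msecs_to_jiffies(timeout_ms);
        INIT_DELAYED_WORK(&req->work, process_one);
        queue_delayed_work(my_wq, &req->work, 0);
}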
+ */ + cancel_delayed_work(&req->work); req->callback(req->status, (struct sockaddr *) &req->src_addr, req->addr, req->context); put_client(req->client); @@ -634,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client, req->context = context; req->client = client; atomic_inc(&client->refcount); + INIT_DELAYED_WORK(&req->work, process_one_req); req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); @@ -688,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) req->status = -ECANCELED; req->timeout = jiffies; list_move(&req->list, &req_list); - set_timeout(req->timeout); + set_timeout(&req->work, req->timeout); break; } } @@ -794,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event, if (event == NETEVENT_NEIGH_UPDATE) { struct neighbour *neigh = ctx; - if (neigh->nud_state & NUD_VALID) { - set_timeout(jiffies); - } + if (neigh->nud_state & NUD_VALID) + set_timeout(&work, jiffies); } return 0; } @@ -807,7 +854,7 @@ static struct notifier_block nb = { int addr_init(void) { - addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0); + addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); if (!addr_wq) return -ENOMEM; diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index b1371eb9f46c..efc94304dee3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -53,6 +53,7 @@ struct ib_update_work { struct work_struct work; struct ib_device *device; u8 port_num; + bool enforce_security; }; union ib_gid zgid; @@ -911,6 +912,26 @@ int ib_get_cached_pkey(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_pkey); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx) +{ + unsigned long flags; + int p; + + if (port_num < rdma_start_port(device) || + port_num > rdma_end_port(device)) + return -EINVAL; + + p = port_num - rdma_start_port(device); + read_lock_irqsave(&device->cache.lock, flags); + *sn_pfx = device->cache.ports[p].subnet_prefix; + read_unlock_irqrestore(&device->cache.lock, flags); + + return 0; +} +EXPORT_SYMBOL(ib_get_cached_subnet_prefix); + int ib_find_cached_pkey(struct ib_device *device, u8 port_num, u16 pkey, @@ -1022,7 +1043,8 @@ int ib_get_cached_port_state(struct ib_device *device, EXPORT_SYMBOL(ib_get_cached_port_state); static void ib_cache_update(struct ib_device *device, - u8 port) + u8 port, + bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; @@ -1108,8 +1130,15 @@ static void ib_cache_update(struct ib_device *device, device->cache.ports[port - rdma_start_port(device)].port_state = tprops->state; + device->cache.ports[port - rdma_start_port(device)].subnet_prefix = + tprops->subnet_prefix; write_unlock_irq(&device->cache.lock); + if (enforce_security) + ib_security_cache_change(device, + port, + tprops->subnet_prefix); + kfree(gid_cache); kfree(old_pkey_cache); kfree(tprops); @@ -1126,7 +1155,9 @@ static void ib_cache_task(struct work_struct *_work) struct ib_update_work *work = container_of(_work, struct ib_update_work, work); - ib_cache_update(work->device, work->port_num); + ib_cache_update(work->device, + work->port_num, + work->enforce_security); kfree(work); } @@ -1147,6 +1178,12 @@ static void ib_cache_event(struct ib_event_handler *handler, INIT_WORK(&work->work, ib_cache_task); work->device = event->device; work->port_num = event->element.port_num; + if (event->event == 
IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_GID_CHANGE) + work->enforce_security = true; + else + work->enforce_security = false; + queue_work(ib_wq, &work->work); } } @@ -1172,7 +1209,7 @@ int ib_cache_setup_one(struct ib_device *device) goto out; for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) - ib_cache_update(device, p + rdma_start_port(device)); + ib_cache_update(device, p + rdma_start_port(device), true); INIT_IB_EVENT_HANDLER(&device->cache.event_handler, device, ib_cache_event); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1844770f3ae8..2b4d613a3474 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, primary_path->packet_life_time = cm_req_get_primary_local_ack_timeout(req_msg); primary_path->packet_life_time -= (primary_path->packet_life_time > 0); - sa_path_set_service_id(primary_path, req_msg->service_id); + primary_path->service_id = req_msg->service_id; if (req_msg->alt_local_lid) { alt_path->dgid = req_msg->alt_local_gid; @@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, alt_path->packet_life_time = cm_req_get_alt_local_ack_timeout(req_msg); alt_path->packet_life_time -= (alt_path->packet_life_time > 0); - sa_path_set_service_id(alt_path, req_msg->service_id); + alt_path->service_id = req_msg->service_id; } } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 91b7a2fe5a55..0eb393237ba2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -623,22 +623,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port, if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) return ret; - if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { + if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) ndev = dev_get_by_index(&init_net, bound_if_index); - if (ndev && ndev->flags & IFF_LOOPBACK) { - pr_info("detected loopback device\n"); - dev_put(ndev); - - if (!device->get_netdev) - return -EOPNOTSUPP; - - ndev = device->get_netdev(device, port); - if (!ndev) - return -ENODEV; - } - } else { + else gid_type = IB_GID_TYPE_IB; - } + ret = ib_find_cached_gid_by_port(device, gid, gid_type, port, ndev, NULL); @@ -1044,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); + qp_attr->port_num = id_priv->id.port_num; + *qp_attr_mask |= IB_QP_PORT; } else ret = -ENOSYS; @@ -1140,7 +1131,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr, ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->sgid, 16); - ib->sib_sid = sa_path_get_service_id(path); + ib->sib_sid = path->service_id; ib->sib_scope_id = 0; } else { ib->sib_pkey = listen_ib->sib_pkey; @@ -1274,8 +1265,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event, memcpy(&req->local_gid, &req_param->primary_path->sgid, sizeof(req->local_gid)); req->has_gid = true; - req->service_id = - sa_path_get_service_id(req_param->primary_path); + req->service_id = req_param->primary_path->service_id; req->pkey = be16_to_cpu(req_param->primary_path->pkey); if (req->pkey != req_param->bth_pkey) pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" @@ -1827,7 +1817,8 @@ static struct rdma_id_private *cma_new_conn_id(struct 
rdma_cm_id *listen_id, struct rdma_route *rt; const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; - const __be64 service_id = sa_path_get_service_id(path); + const __be64 service_id = + ib_event->param.req_rcvd.primary_path->service_id; int ret; id = rdma_create_id(listen_id->route.addr.dev_addr.net, @@ -2345,9 +2336,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); path_rec.numb_path = 1; path_rec.reversible = 1; - sa_path_set_service_id(&path_rec, - rdma_get_service_id(&id_priv->id, - cma_dst_addr(id_priv))); + path_rec.service_id = rdma_get_service_id(&id_priv->id, + cma_dst_addr(id_priv)); comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | @@ -2570,21 +2560,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) goto err2; } - if (ndev->flags & IFF_LOOPBACK) { - dev_put(ndev); - if (!id_priv->id.device->get_netdev) { - ret = -EOPNOTSUPP; - goto err2; - } - - ndev = id_priv->id.device->get_netdev(id_priv->id.device, - id_priv->id.port_num); - if (!ndev) { - ret = -ENODEV; - goto err2; - } - } - supported_gids = roce_gid_type_mask_support(id_priv->id.device, id_priv->id.port_num); gid_type = cma_route_gid_type(addr->dev_addr.network, diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index cb7d372e4bdf..11ae67514e13 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -38,6 +38,16 @@ #include <linux/cgroup_rdma.h> #include <rdma/ib_verbs.h> +#include <rdma/ib_mad.h> +#include "mad_priv.h" + +struct pkey_index_qp_list { + struct list_head pkey_index_list; + u16 pkey_index; + /* Lock to hold while iterating the qp_list. */ + spinlock_t qp_list_lock; + struct list_head qp_list; +}; #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) int cma_configfs_init(void); @@ -169,6 +179,16 @@ void ib_mad_cleanup(void); int ib_sa_init(void); void ib_sa_cleanup(void); +int ibnl_init(void); +void ibnl_cleanup(void); + +/** + * Check if there are any listeners to the netlink group + * @group: the netlink group ID + * Returns 0 on success or a negative for no listeners. 
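The security entry points added to core_priv.h just below follow the standard Kconfig stub idiom: with CONFIG_SECURITY_INFINIBAND disabled, every hook collapses to a static inline no-op (and ib_security_modify_qp() degenerates to a direct driver call), so callers never need #ifdef guards. The shape of the idiom, with hypothetical names:

/* foo.h -- illustrative only */
struct foo_ctx;

#ifdef CONFIG_FOO_SECURITY
int foo_check_access(struct foo_ctx *ctx);      /* real version in foo.c */
#else
static inline int foo_check_access(struct foo_ctx *ctx)
{
        return 0;       /* feature off: permit everything, zero overhead */
}
#endif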
+ */ +int ibnl_chk_listeners(unsigned int group); + int ib_nl_handle_resolve_resp(struct sk_buff *skb, struct netlink_callback *cb); int ib_nl_handle_set_timeout(struct sk_buff *skb, @@ -176,4 +196,109 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, int ib_nl_handle_ip_res_resp(struct sk_buff *skb, struct netlink_callback *cb); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx); + +#ifdef CONFIG_SECURITY_INFINIBAND +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec); + +void ib_security_destroy_port_pkey_list(struct ib_device *device); + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix); + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata); + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_destroy_qp_security_begin(struct ib_qp_security *sec); +void ib_destroy_qp_security_abort(struct ib_qp_security *sec); +void ib_destroy_qp_security_end(struct ib_qp_security *sec); +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_close_shared_qp_security(struct ib_qp_security *sec); +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type); +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent); +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index); +#else +static inline int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + return 0; +} + +static inline void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ +} + +static inline void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ +} + +static inline int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + return qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); +} + +static inline int ib_create_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ +} + +static inline int ib_open_shared_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ +} + +static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + return 0; +} + +static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ +} + +static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, + u16 pkey_index) +{ + return 0; +} +#endif #endif /* _CORE_PRIV_H */ diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 81d447da0048..221468f77128 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -39,6 +39,8 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/security.h> +#include <linux/notifier.h> #include <rdma/rdma_netlink.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -82,6 +84,14 @@ static LIST_HEAD(client_list); static DEFINE_MUTEX(device_mutex); static DECLARE_RWSEM(lists_rwsem); +static int ib_security_change(struct 
notifier_block *nb, unsigned long event, + void *lsm_data); +static void ib_policy_change_task(struct work_struct *work); +static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); + +static struct notifier_block ibdev_lsm_nb = { + .notifier_call = ib_security_change, +}; static int ib_device_check_mandatory(struct ib_device *device) { @@ -325,6 +335,65 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len) } EXPORT_SYMBOL(ib_get_device_fw_str); +static int setup_port_pkey_list(struct ib_device *device) +{ + int i; + + /** + * device->port_pkey_list is indexed directly by the port number, + * Therefore it is declared as a 1 based array with potential empty + * slots at the beginning. + */ + device->port_pkey_list = kcalloc(rdma_end_port(device) + 1, + sizeof(*device->port_pkey_list), + GFP_KERNEL); + + if (!device->port_pkey_list) + return -ENOMEM; + + for (i = 0; i < (rdma_end_port(device) + 1); i++) { + spin_lock_init(&device->port_pkey_list[i].list_lock); + INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list); + } + + return 0; +} + +static void ib_policy_change_task(struct work_struct *work) +{ + struct ib_device *dev; + + down_read(&lists_rwsem); + list_for_each_entry(dev, &device_list, core_list) { + int i; + + for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) { + u64 sp; + int ret = ib_get_cached_subnet_prefix(dev, + i, + &sp); + + WARN_ONCE(ret, + "ib_get_cached_subnet_prefix err: %d, this should never happen here\n", + ret); + if (!ret) + ib_security_cache_change(dev, i, sp); + } + } + up_read(&lists_rwsem); +} + +static int ib_security_change(struct notifier_block *nb, unsigned long event, + void *lsm_data) +{ + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + schedule_work(&ib_policy_change_work); + + return NOTIFY_OK; +} + /** * ib_register_device - Register an IB device with IB core * @device:Device to register @@ -385,6 +454,12 @@ int ib_register_device(struct ib_device *device, goto out; } + ret = setup_port_pkey_list(device); + if (ret) { + pr_warn("Couldn't create per port_pkey_list\n"); + goto out; + } + ret = ib_cache_setup_one(device); if (ret) { pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n"); @@ -462,12 +537,16 @@ void ib_unregister_device(struct ib_device *device) } up_read(&lists_rwsem); - mutex_unlock(&device_mutex); - ib_device_unregister_rdmacg(device); ib_device_unregister_sysfs(device); + + mutex_unlock(&device_mutex); + ib_cache_cleanup_one(device); + ib_security_destroy_port_pkey_list(device); + kfree(device->port_pkey_list); + down_write(&lists_rwsem); spin_lock_irqsave(&device->client_data_lock, flags); list_for_each_entry_safe(context, tmp, &device->client_data_list, list) @@ -1082,10 +1161,18 @@ static int __init ib_core_init(void) goto err_sa; } + ret = register_lsm_notifier(&ibdev_lsm_nb); + if (ret) { + pr_warn("Couldn't register LSM notifier. 
ret %d\n", ret); + goto err_ibnl_clients; + } + ib_cache_setup(); return 0; +err_ibnl_clients: + ib_remove_ibnl_clients(); err_sa: ib_sa_cleanup(); err_mad: @@ -1105,6 +1192,7 @@ err: static void __exit ib_core_cleanup(void) { + unregister_lsm_notifier(&ibdev_lsm_nb); ib_cache_cleanup(); ib_remove_ibnl_clients(); ib_sa_cleanup(); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 192ee3dafb80..f8f53bb90837 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -40,9 +40,11 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/security.h> #include <rdma/ib_cache.h> #include "mad_priv.h" +#include "core_priv.h" #include "mad_rmpp.h" #include "smi.h" #include "opa_smi.h" @@ -369,6 +371,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, atomic_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); + ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); + if (ret2) { + ret = ERR_PTR(ret2); + goto error4; + } + spin_lock_irqsave(&port_priv->reg_lock, flags); mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; @@ -386,7 +394,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (method) { if (method_in_use(&method, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, @@ -402,14 +410,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (is_vendor_method_in_use( vendor_class, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); } if (ret2) { ret = ERR_PTR(ret2); - goto error4; + goto error5; } } @@ -418,9 +426,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, spin_unlock_irqrestore(&port_priv->reg_lock, flags); return &mad_agent_priv->agent; - -error4: +error5: spin_unlock_irqrestore(&port_priv->reg_lock, flags); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); +error4: kfree(reg_req); error3: kfree(mad_agent_priv); @@ -491,6 +500,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, struct ib_mad_agent *ret; struct ib_mad_snoop_private *mad_snoop_priv; int qpn; + int err; /* Validate parameters */ if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || @@ -525,17 +535,25 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, mad_snoop_priv->agent.port_num = port_num; mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; init_completion(&mad_snoop_priv->comp); + + err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type); + if (err) { + ret = ERR_PTR(err); + goto error2; + } + mad_snoop_priv->snoop_index = register_snoop_agent( &port_priv->qp_info[qpn], mad_snoop_priv); if (mad_snoop_priv->snoop_index < 0) { ret = ERR_PTR(mad_snoop_priv->snoop_index); - goto error2; + goto error3; } atomic_set(&mad_snoop_priv->refcount, 1); return &mad_snoop_priv->agent; - +error3: + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); error2: kfree(mad_snoop_priv); error1: @@ -581,6 +599,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); + kfree(mad_agent_priv->reg_req); kfree(mad_agent_priv); } @@ -599,6 +619,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) deref_snoop_agent(mad_snoop_priv); 
wait_for_completion(&mad_snoop_priv->comp); + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); + kfree(mad_snoop_priv); } @@ -1215,12 +1237,16 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, /* Walk list of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) { - mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_agent_priv = mad_send_wr->mad_agent_priv; + ret = ib_mad_enforce_security(mad_agent_priv, + mad_send_wr->send_wr.pkey_index); + if (ret) + goto error; + if (!send_buf->mad_agent->send_handler || (send_buf->timeout_ms && !send_buf->mad_agent->recv_handler)) { @@ -1946,6 +1972,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; + int ret; + + ret = ib_mad_enforce_security(mad_agent_priv, + mad_recv_wc->wc->pkey_index); + if (ret) { + ib_free_recv_mad(mad_recv_wc); + deref_mad_agent(mad_agent_priv); + } INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); @@ -2003,6 +2037,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, mad_recv_wc); deref_mad_agent(mad_agent_priv); } + + return; } static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index b784055423c8..94931c474d41 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -37,6 +37,7 @@ #include <net/net_namespace.h> #include <net/sock.h> #include <rdma/rdma_netlink.h> +#include "core_priv.h" struct ibnl_client { struct list_head list; @@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group) return -1; return 0; } -EXPORT_SYMBOL(ibnl_chk_listeners); int ibnl_add_client(int index, int nops, const struct ibnl_client_cbs cb_table[]) diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index db958d3207ef..94a9eefb3cfc 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -42,6 +42,8 @@ #include <rdma/ib_cache.h> #include <rdma/ib_addr.h> +static struct workqueue_struct *gid_cache_wq; + enum gid_op_type { GID_DEL = 0, GID_ADD @@ -560,7 +562,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds, } INIT_WORK(&ndev_work->work, netdevice_event_work_handler); - queue_work(ib_wq, &ndev_work->work); + queue_work(gid_cache_wq, &ndev_work->work); return NOTIFY_DONE; } @@ -693,7 +695,7 @@ static int addr_event(struct notifier_block *this, unsigned long event, dev_hold(ndev); work->gid_attr.ndev = ndev; - queue_work(ib_wq, &work->work); + queue_work(gid_cache_wq, &work->work); return NOTIFY_DONE; } @@ -740,6 +742,10 @@ static struct notifier_block nb_inet6addr = { int __init roce_gid_mgmt_init(void) { + gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0); + if (!gid_cache_wq) + return -ENOMEM; + register_inetaddr_notifier(&nb_inetaddr); if (IS_ENABLED(CONFIG_IPV6)) register_inet6addr_notifier(&nb_inet6addr); @@ -764,4 +770,5 @@ void __exit roce_gid_mgmt_cleanup(void) * ib-core is removed, all physical devices have been removed, * so no issue with remaining hardware contexts. 
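Two related changes land in roce_gid_mgmt.c here: GID cache work moves off the shared ib_wq onto a private queue, and that queue is created with alloc_ordered_workqueue(), which guarantees at most one item executes at a time, in queueing order, so GID add/del events cannot race each other. A sketch of the lifecycle (my_gid_wq is a hypothetical name; teardown with destroy_workqueue() follows just below):

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *my_gid_wq;

static int my_init(void)
{
        /* Ordered queue: strict FIFO, max one item in flight. */
        my_gid_wq = alloc_ordered_workqueue("my-gid-wq", 0);
        if (!my_gid_wq)
                return -ENOMEM;
        return 0;
}

static void my_exit(void)
{
        /* Drains every queued item before tearing the queue down. */
        destroy_workqueue(my_gid_wq);
}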
*/ + destroy_workqueue(gid_cache_wq); } diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index e335b09c022e..70fa4cabe48e 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -194,7 +194,7 @@ static u32 tid; .field_name = "sa_path_rec:" #field static const struct ib_field path_rec_table[] = { - { PATH_REC_FIELD(ib.service_id), + { PATH_REC_FIELD(service_id), .offset_words = 0, .offset_bits = 0, .size_bits = 64 }, @@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = { .field_name = "sa_path_rec:" #field static const struct ib_field opa_path_rec_table[] = { - { OPA_PATH_REC_FIELD(opa.service_id), + { OPA_PATH_REC_FIELD(service_id), .offset_words = 0, .offset_bits = 0, .size_bits = 64 }, @@ -759,8 +759,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, query->mad_buf->context[1] = NULL; /* Construct the family header first */ - header = (struct rdma_ls_resolve_header *) - skb_put(skb, NLMSG_ALIGN(sizeof(*header))); + header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); memcpy(header->device_name, query->port->agent->device->name, LS_DEVICE_NAME_MAX); header->port_num = query->port->port_num; @@ -774,7 +773,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, /* Now build the attributes */ if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { - val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); + val64 = be64_to_cpu(sa_rec->service_id); nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, sizeof(val64), &val64); } diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c new file mode 100644 index 000000000000..70ad19c4c73e --- /dev/null +++ b/drivers/infiniband/core/security.c @@ -0,0 +1,709 @@ +/* + * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifdef CONFIG_SECURITY_INFINIBAND + +#include <linux/security.h> +#include <linux/completion.h> +#include <linux/list.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_cache.h> +#include "core_priv.h" +#include "mad_priv.h" + +static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey = NULL; + struct pkey_index_qp_list *tmp_pkey; + struct ib_device *dev = pp->sec->dev; + + spin_lock(&dev->port_pkey_list[pp->port_num].list_lock); + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[pp->port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + pkey = tmp_pkey; + break; + } + } + spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock); + return pkey; +} + +static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp, + u16 *pkey, + u64 *subnet_prefix) +{ + struct ib_device *dev = pp->sec->dev; + int ret; + + ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix); + + return ret; +} + +static int enforce_qp_pkey_security(u16 pkey, + u64 subnet_prefix, + struct ib_qp_security *qp_sec) +{ + struct ib_qp_security *shared_qp_sec; + int ret; + + ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey); + if (ret) + return ret; + + if (qp_sec->qp == qp_sec->qp->real_qp) { + list_for_each_entry(shared_qp_sec, + &qp_sec->shared_qp_list, + shared_qp_list) { + ret = security_ib_pkey_access(shared_qp_sec->security, + subnet_prefix, + pkey); + if (ret) + return ret; + } + } + return 0; +} + +/* The caller of this function must hold the QP security + * mutex of the QP of the security structure in *pps. + * + * It takes separate ports_pkeys and security structure + * because in some cases the pps will be for a new settings + * or the pps will be for the real QP and security structure + * will be for a shared QP. + */ +static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps, + struct ib_qp_security *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret = 0; + + if (!pps) + return 0; + + if (pps->main.state != IB_PORT_PKEY_NOT_VALID) { + ret = get_pkey_and_subnet_prefix(&pps->main, + &pkey, + &subnet_prefix); + if (ret) + return ret; + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + if (ret) + return ret; + } + + if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) { + ret = get_pkey_and_subnet_prefix(&pps->alt, + &pkey, + &subnet_prefix); + if (ret) + return ret; + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + } + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void qp_to_error(struct ib_qp_security *sec) +{ + struct ib_qp_security *shared_qp_sec; + struct ib_qp_attr attr = { + .qp_state = IB_QPS_ERR + }; + struct ib_event event = { + .event = IB_EVENT_QP_FATAL + }; + + /* If the QP is in the process of being destroyed + * the qp pointer in the security structure is + * undefined. It cannot be modified now. 
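enforce_qp_pkey_security() above is the core of the new model: a (subnet prefix, PKey) pair read from the device cache is handed to the LSM, and for a real QP the same check is repeated for every shared handle on shared_qp_list. A condensed sketch of a single check, mirroring ib_security_pkey_access() further down; ib_get_cached_subnet_prefix() is the accessor this patch adds to core_priv.h:

#include <linux/security.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"          /* ib_get_cached_subnet_prefix() */

static int check_one_pkey(struct ib_device *dev, u8 port, u16 pkey_index,
                          void *lsm_sec)
{
        u64 prefix;
        u16 pkey;
        int ret;

        ret = ib_get_cached_pkey(dev, port, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port, &prefix);
        if (ret)
                return ret;

        /* The LSM (e.g. SELinux) grants or denies the pairing. */
        return security_ib_pkey_access(lsm_sec, prefix, pkey);
}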
+ */ + if (sec->destroying) + return; + + ib_modify_qp(sec->qp, + &attr, + IB_QP_STATE); + + if (sec->qp->event_handler && sec->qp->qp_context) { + event.element.qp = sec->qp; + sec->qp->event_handler(&event, + sec->qp->qp_context); + } + + list_for_each_entry(shared_qp_sec, + &sec->shared_qp_list, + shared_qp_list) { + struct ib_qp *qp = shared_qp_sec->qp; + + if (qp->event_handler && qp->qp_context) { + event.element.qp = qp; + event.device = qp->device; + qp->event_handler(&event, + qp->qp_context); + } + } +} + +static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, + struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct ib_port_pkey *pp, *tmp_pp; + bool comp; + LIST_HEAD(to_error_list); + u16 pkey_val; + + if (!ib_get_cached_pkey(device, + port_num, + pkey->pkey_index, + &pkey_val)) { + spin_lock(&pkey->qp_list_lock); + list_for_each_entry(pp, &pkey->qp_list, qp_list) { + if (atomic_read(&pp->sec->error_list_count)) + continue; + + if (enforce_qp_pkey_security(pkey_val, + subnet_prefix, + pp->sec)) { + atomic_inc(&pp->sec->error_list_count); + list_add(&pp->to_error_list, + &to_error_list); + } + } + spin_unlock(&pkey->qp_list_lock); + } + + list_for_each_entry_safe(pp, + tmp_pp, + &to_error_list, + to_error_list) { + mutex_lock(&pp->sec->mutex); + qp_to_error(pp->sec); + list_del(&pp->to_error_list); + atomic_dec(&pp->sec->error_list_count); + comp = pp->sec->destroying; + mutex_unlock(&pp->sec->mutex); + + if (comp) + complete(&pp->sec->error_complete); + } +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static int port_pkey_list_insert(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *tmp_pkey; + struct pkey_index_qp_list *pkey; + struct ib_device *dev; + u8 port_num = pp->port_num; + int ret = 0; + + if (pp->state != IB_PORT_PKEY_VALID) + return 0; + + dev = pp->sec->dev; + + pkey = get_pkey_idx_qp_list(pp); + + if (!pkey) { + bool found = false; + + pkey = kzalloc(sizeof(*pkey), GFP_KERNEL); + if (!pkey) + return -ENOMEM; + + spin_lock(&dev->port_pkey_list[port_num].list_lock); + /* Check for the PKey again. A racing process may + * have created it. + */ + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + kfree(pkey); + pkey = tmp_pkey; + found = true; + break; + } + } + + if (!found) { + pkey->pkey_index = pp->pkey_index; + spin_lock_init(&pkey->qp_list_lock); + INIT_LIST_HEAD(&pkey->qp_list); + list_add(&pkey->pkey_index_list, + &dev->port_pkey_list[port_num].pkey_list); + } + spin_unlock(&dev->port_pkey_list[port_num].list_lock); + } + + spin_lock(&pkey->qp_list_lock); + list_add(&pp->qp_list, &pkey->qp_list); + spin_unlock(&pkey->qp_list_lock); + + pp->state = IB_PORT_PKEY_LISTED; + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void port_pkey_list_remove(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey; + + if (pp->state != IB_PORT_PKEY_LISTED) + return; + + pkey = get_pkey_idx_qp_list(pp); + + spin_lock(&pkey->qp_list_lock); + list_del(&pp->qp_list); + spin_unlock(&pkey->qp_list_lock); + + /* The setting may still be valid, i.e. after + * a destroy has failed for example. + */ + pp->state = IB_PORT_PKEY_VALID; +} + +static void destroy_qp_security(struct ib_qp_security *sec) +{ + security_ib_free_security(sec->security); + kfree(sec->ports_pkeys); + kfree(sec); +} + +/* The caller of this function must hold the QP security + * mutex. 
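port_pkey_list_insert() above uses the classic unlock-allocate-recheck dance: the candidate node is allocated with GFP_KERNEL outside the spinlock (the allocation may sleep), then the list is re-scanned under the lock and the allocation freed if a racing thread already added the entry. Stripped to its bones, with a hypothetical node type:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct node {                           /* hypothetical list element */
        struct list_head list;
        u16 key;
};

static struct node *get_or_create(struct list_head *head, spinlock_t *lock,
                                  u16 key)
{
        struct node *n, *tmp;

        n = kzalloc(sizeof(*n), GFP_KERNEL);    /* may sleep: do it unlocked */
        if (!n)
                return NULL;

        spin_lock(lock);
        list_for_each_entry(tmp, head, list) {
                if (tmp->key == key) {          /* lost the race: reuse */
                        spin_unlock(lock);
                        kfree(n);
                        return tmp;
                }
        }
        n->key = key;
        list_add(&n->list, head);
        spin_unlock(lock);
        return n;
}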
+ */ +static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, + const struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + struct ib_ports_pkeys *new_pps; + struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys; + + new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL); + if (!new_pps) + return NULL; + + if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) { + if (!qp_pps) { + new_pps->main.port_num = qp_attr->port_num; + new_pps->main.pkey_index = qp_attr->pkey_index; + } else { + new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ? + qp_attr->port_num : + qp_pps->main.port_num; + + new_pps->main.pkey_index = + (qp_attr_mask & IB_QP_PKEY_INDEX) ? + qp_attr->pkey_index : + qp_pps->main.pkey_index; + } + new_pps->main.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->main.port_num = qp_pps->main.port_num; + new_pps->main.pkey_index = qp_pps->main.pkey_index; + if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) + new_pps->main.state = IB_PORT_PKEY_VALID; + } + + if (qp_attr_mask & IB_QP_ALT_PATH) { + new_pps->alt.port_num = qp_attr->alt_port_num; + new_pps->alt.pkey_index = qp_attr->alt_pkey_index; + new_pps->alt.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->alt.port_num = qp_pps->alt.port_num; + new_pps->alt.pkey_index = qp_pps->alt.pkey_index; + if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID) + new_pps->alt.state = IB_PORT_PKEY_VALID; + } + + new_pps->main.sec = qp->qp_sec; + new_pps->alt.sec = qp->qp_sec; + return new_pps; +} + +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + struct ib_qp *real_qp = qp->real_qp; + int ret; + + ret = ib_create_qp_security(qp, dev); + + if (ret) + return ret; + + mutex_lock(&real_qp->qp_sec->mutex); + ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, + qp->qp_sec); + + if (ret) + goto ret; + + if (qp != real_qp) + list_add(&qp->qp_sec->shared_qp_list, + &real_qp->qp_sec->shared_qp_list); +ret: + mutex_unlock(&real_qp->qp_sec->mutex); + if (ret) + destroy_qp_security(qp->qp_sec); + + return ret; +} + +void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ + struct ib_qp *real_qp = sec->qp->real_qp; + + mutex_lock(&real_qp->qp_sec->mutex); + list_del(&sec->shared_qp_list); + mutex_unlock(&real_qp->qp_sec->mutex); + + destroy_qp_security(sec); +} + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + int ret; + + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); + if (!qp->qp_sec) + return -ENOMEM; + + qp->qp_sec->qp = qp; + qp->qp_sec->dev = dev; + mutex_init(&qp->qp_sec->mutex); + INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list); + atomic_set(&qp->qp_sec->error_list_count, 0); + init_completion(&qp->qp_sec->error_complete); + ret = security_ib_alloc_security(&qp->qp_sec->security); + if (ret) + kfree(qp->qp_sec); + + return ret; +} +EXPORT_SYMBOL(ib_create_qp_security); + +void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ + mutex_lock(&sec->mutex); + + /* Remove the QP from the lists so it won't get added to + * a to_error_list during the destroy process. + */ + if (sec->ports_pkeys) { + port_pkey_list_remove(&sec->ports_pkeys->main); + port_pkey_list_remove(&sec->ports_pkeys->alt); + } + + /* If the QP is already in one or more of those lists + * the destroying flag will ensure the to error flow + * doesn't operate on an undefined QP. + */ + sec->destroying = true; + + /* Record the error list count to know how many completions + * to wait for. 
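The destroy handshake that begins here is completion-counted: ib_destroy_qp_security_begin() sets destroying and snapshots error_list_count into error_comps_pending under the mutex, and the end/abort paths later wait_for_completion() exactly that many times, so the structure cannot be freed while a concurrent cache update still has it on a to_error_list. The skeleton of that handshake, using a condensed stand-in struct:

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/atomic.h>

struct sec_ctx {                        /* condensed stand-in */
        struct mutex mutex;
        bool destroying;
        int error_comps_pending;
        atomic_t error_list_count;
        struct completion error_complete;
};

static void teardown_begin(struct sec_ctx *sec)
{
        mutex_lock(&sec->mutex);
        sec->destroying = true;         /* error flow must leave the qp alone */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);
        mutex_unlock(&sec->mutex);
}

static void teardown_end(struct sec_ctx *sec)
{
        int i;

        /* Wait out every error-flow user counted at begin time. */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);
        /* only now is it safe to free sec */
}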
+ */ + sec->error_comps_pending = atomic_read(&sec->error_list_count); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ + int ret; + int i; + + /* If a concurrent cache update is in progress this + * QP security could be marked for an error state + * transition. Wait for this to complete. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + mutex_lock(&sec->mutex); + sec->destroying = false; + + /* Restore the position in the lists and verify + * access is still allowed in case a cache update + * occurred while attempting to destroy. + * + * Because these setting were listed already + * and removed during ib_destroy_qp_security_begin + * we know the pkey_index_qp_list for the PKey + * already exists so port_pkey_list_insert won't fail. + */ + if (sec->ports_pkeys) { + port_pkey_list_insert(&sec->ports_pkeys->main); + port_pkey_list_insert(&sec->ports_pkeys->alt); + } + + ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec); + if (ret) + qp_to_error(sec); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ + int i; + + /* If a concurrent cache update is occurring we must + * wait until this QP security structure is processed + * in the QP to error flow before destroying it because + * the to_error_list is in use. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + destroy_qp_security(sec); +} + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct pkey_index_qp_list *pkey; + + list_for_each_entry(pkey, + &device->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + check_pkey_qps(pkey, + device, + port_num, + subnet_prefix); + } +} + +void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ + struct pkey_index_qp_list *pkey, *tmp_pkey; + int i; + + for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { + spin_lock(&device->port_pkey_list[i].list_lock); + list_for_each_entry_safe(pkey, + tmp_pkey, + &device->port_pkey_list[i].pkey_list, + pkey_index_list) { + list_del(&pkey->pkey_index_list); + kfree(pkey); + } + spin_unlock(&device->port_pkey_list[i].list_lock); + } +} + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + int ret = 0; + struct ib_ports_pkeys *tmp_pps; + struct ib_ports_pkeys *new_pps; + bool special_qp = (qp->qp_type == IB_QPT_SMI || + qp->qp_type == IB_QPT_GSI || + qp->qp_type >= IB_QPT_RESERVED1); + bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || + (qp_attr_mask & IB_QP_ALT_PATH)); + + if (pps_change && !special_qp) { + mutex_lock(&qp->qp_sec->mutex); + new_pps = get_new_pps(qp, + qp_attr, + qp_attr_mask); + + /* Add this QP to the lists for the new port + * and pkey settings before checking for permission + * in case there is a concurrent cache update + * occurring. Walking the list for a cache change + * doesn't acquire the security mutex unless it's + * sending the QP to error. + */ + ret = port_pkey_list_insert(&new_pps->main); + + if (!ret) + ret = port_pkey_list_insert(&new_pps->alt); + + if (!ret) + ret = check_qp_port_pkey_settings(new_pps, + qp->qp_sec); + } + + if (!ret) + ret = qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); + + if (pps_change && !special_qp) { + /* Clean up the lists and free the appropriate + * ports_pkeys structure. 
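ib_security_modify_qp() (continuing below) orders its steps deliberately: list the QP under the new (port, PKey) settings first, then run the LSM check, then call the driver, and only afterwards retire either the new or the old ports_pkeys depending on the outcome. A condensed restatement using the helpers this file defines; the special-QP fast path is omitted, and a NULL check on the allocation is added for the sketch's benefit:

static int modify_qp_checked(struct ib_qp *qp, struct ib_qp_attr *attr,
                             int mask, struct ib_udata *udata)
{
        struct ib_ports_pkeys *new_pps, *tmp;
        int ret;

        mutex_lock(&qp->qp_sec->mutex);
        new_pps = get_new_pps(qp, attr, mask);          /* 1. build new view */
        if (!new_pps) {
                mutex_unlock(&qp->qp_sec->mutex);
                return -ENOMEM;
        }

        ret = port_pkey_list_insert(&new_pps->main);    /* 2. list QP first */
        if (!ret)
                ret = port_pkey_list_insert(&new_pps->alt);
        if (!ret)                                       /* 3. ask the LSM */
                ret = check_qp_port_pkey_settings(new_pps, qp->qp_sec);
        if (!ret)                                       /* 4. then the driver */
                ret = qp->device->modify_qp(qp->real_qp, attr, mask, udata);

        /* 5. unwind new settings on failure, retire old ones on success */
        tmp = ret ? new_pps : qp->qp_sec->ports_pkeys;
        if (!ret)
                qp->qp_sec->ports_pkeys = new_pps;
        if (tmp) {
                port_pkey_list_remove(&tmp->main);
                port_pkey_list_remove(&tmp->alt);
        }
        kfree(tmp);
        mutex_unlock(&qp->qp_sec->mutex);
        return ret;
}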
+ */ + if (ret) { + tmp_pps = new_pps; + } else { + tmp_pps = qp->qp_sec->ports_pkeys; + qp->qp_sec->ports_pkeys = new_pps; + } + + if (tmp_pps) { + port_pkey_list_remove(&tmp_pps->main); + port_pkey_list_remove(&tmp_pps->alt); + } + kfree(tmp_pps); + mutex_unlock(&qp->qp_sec->mutex); + } + return ret; +} +EXPORT_SYMBOL(ib_security_modify_qp); + +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret; + + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix); + + if (ret) + return ret; + + return security_ib_pkey_access(sec, subnet_prefix, pkey); +} +EXPORT_SYMBOL(ib_security_pkey_access); + +static int ib_mad_agent_security_change(struct notifier_block *nb, + unsigned long event, + void *data) +{ + struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb); + + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security, + ag->device->name, + ag->port_num); + + return NOTIFY_OK; +} + +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + int ret; + + ret = security_ib_alloc_security(&agent->security); + if (ret) + return ret; + + if (qp_type != IB_QPT_SMI) + return 0; + + ret = security_ib_endport_manage_subnet(agent->security, + agent->device->name, + agent->port_num); + if (ret) + return ret; + + agent->lsm_nb.notifier_call = ib_mad_agent_security_change; + ret = register_lsm_notifier(&agent->lsm_nb); + if (ret) + return ret; + + agent->smp_allowed = true; + agent->lsm_nb_reg = true; + return 0; +} + +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ + security_ib_free_security(agent->security); + if (agent->lsm_nb_reg) + unregister_lsm_notifier(&agent->lsm_nb); +} + +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) +{ + int ret; + + if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) + return -EACCES; + + ret = ib_security_pkey_access(map->agent.device, + map->agent.port_num, + pkey_index, + map->agent.security); + + if (ret) + return ret; + + return 0; +} + +#endif /* CONFIG_SECURITY_INFINIBAND */ diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 3dbf811d3c51..21e60b1e2ff4 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { page = sg_page(sg); - if (umem->writable && dirty) + if (!PageDirty(page) && umem->writable && dirty) set_page_dirty_lock(page); put_page(page); } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 0780b1afefa9..8c4ec564e495 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem, struct vm_area_struct *vma; struct hstate *h; + down_read(&mm->mmap_sem); vma = find_vma(mm, ib_umem_start(umem)); - if (!vma || !is_vm_hugetlb_page(vma)) + if (!vma || !is_vm_hugetlb_page(vma)) { + up_read(&mm->mmap_sem); return -EINVAL; + } h = hstate_vma(vma); umem->page_shift = huge_page_shift(h); + up_read(&mm->mmap_sem); umem->hugetlb = 1; } else { umem->hugetlb = 0; diff --git a/drivers/infiniband/core/uverbs_cmd.c 
b/drivers/infiniband/core/uverbs_cmd.c index 70b7fb156414..739bd69ef1d4 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, cq->uobject = &obj->uobject; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = ib_uverbs_cq_event_handler; - cq->cq_context = &ev_file->ev_queue; + cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; atomic_set(&cq->usecnt, 0); obj->uobject.object = cq; @@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, int out_len) { struct ib_uverbs_resize_cq cmd; - struct ib_uverbs_resize_cq_resp resp; + struct ib_uverbs_resize_cq_resp resp = {}; struct ib_udata udata; struct ib_cq *cq; int ret = -EINVAL; @@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, struct ib_uobject *uobj; struct ib_cq *cq; struct ib_ucq_object *obj; - struct ib_uverbs_event_queue *ev_queue; int ret = -EINVAL; if (copy_from_user(&cmd, buf, sizeof cmd)) @@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, */ uverbs_uobject_get(uobj); cq = uobj->object; - ev_queue = cq->cq_context; obj = container_of(cq->uobject, struct ib_ucq_object, uobject); memset(&resp, 0, sizeof(resp)); @@ -1508,6 +1506,10 @@ static int create_qp(struct ib_uverbs_file *file, } if (cmd->qp_type != IB_QPT_XRC_TGT) { + ret = ib_create_qp_security(qp, device); + if (ret) + goto err_cb; + qp->real_qp = qp; qp->device = device; qp->pd = pd; @@ -1520,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file, qp->qp_type = attr.qp_type; atomic_set(&qp->usecnt, 0); atomic_inc(&pd->usecnt); + qp->port = 0; if (attr.send_cq) atomic_inc(&attr.send_cq->usecnt); if (attr.recv_cq) @@ -1931,6 +1934,12 @@ static int modify_qp(struct ib_uverbs_file *file, goto out; } + if ((cmd->base.attr_mask & IB_QP_PORT) && + !rdma_is_port_valid(qp->device, cmd->base.port_num)) { + ret = -EINVAL; + goto release_qp; + } + attr->qp_state = cmd->base.qp_state; attr->cur_qp_state = cmd->base.cur_qp_state; attr->path_mtu = cmd->base.path_mtu; @@ -1954,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file, attr->alt_timeout = cmd->base.alt_timeout; attr->rate_limit = cmd->rate_limit; - attr->ah_attr.type = rdma_ah_find_type(qp->device, - cmd->base.dest.port_num); + if (cmd->base.attr_mask & IB_QP_AV) + attr->ah_attr.type = rdma_ah_find_type(qp->device, + cmd->base.dest.port_num); if (cmd->base.dest.is_global) { rdma_ah_set_grh(&attr->ah_attr, NULL, cmd->base.dest.flow_label, @@ -1973,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file, rdma_ah_set_port_num(&attr->ah_attr, cmd->base.dest.port_num); - attr->alt_ah_attr.type = rdma_ah_find_type(qp->device, - cmd->base.dest.port_num); + if (cmd->base.attr_mask & IB_QP_ALT_PATH) + attr->alt_ah_attr.type = + rdma_ah_find_type(qp->device, cmd->base.dest.port_num); if (cmd->base.alt_dest.is_global) { rdma_ah_set_grh(&attr->alt_ah_attr, NULL, cmd->base.alt_dest.flow_label, @@ -1996,25 +2007,13 @@ static int modify_qp(struct ib_uverbs_file *file, rdma_ah_set_port_num(&attr->alt_ah_attr, cmd->base.alt_dest.port_num); - if (qp->real_qp == qp) { - if (cmd->base.attr_mask & IB_QP_AV) { - ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); - if (ret) - goto release_qp; - } - ret = qp->device->modify_qp(qp, attr, - modify_qp_mask(qp->qp_type, - cmd->base.attr_mask), - udata); - } else { - ret = ib_modify_qp(qp, attr, - modify_qp_mask(qp->qp_type, - cmd->base.attr_mask)); - } + ret = 
ib_modify_qp_with_udata(qp, attr, + modify_qp_mask(qp->qp_type, + cmd->base.attr_mask), + udata); release_qp: uobj_put_obj_read(qp); - out: kfree(attr); @@ -2091,7 +2090,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, struct ib_uverbs_destroy_qp cmd; struct ib_uverbs_destroy_qp_resp resp; struct ib_uobject *uobj; - struct ib_qp *qp; struct ib_uqp_object *obj; int ret = -EINVAL; @@ -2105,7 +2103,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, if (IS_ERR(uobj)) return PTR_ERR(uobj); - qp = uobj->object; obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); /* * Make sure we don't free the memory in remove_commit as we still @@ -2541,6 +2538,9 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) + return -EINVAL; + INIT_UDATA(&udata, buf + sizeof(cmd), (unsigned long)cmd.response + sizeof(resp), in_len - sizeof(cmd), out_len - sizeof(resp)); @@ -3004,7 +3004,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, { struct ib_uverbs_ex_destroy_wq cmd = {}; struct ib_uverbs_ex_destroy_wq_resp resp = {}; - struct ib_wq *wq; struct ib_uobject *uobj; struct ib_uwq_object *obj; size_t required_cmd_sz; @@ -3038,7 +3037,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, if (IS_ERR(uobj)) return PTR_ERR(uobj); - wq = uobj->object; obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); /* * Make sure we don't free the memory in remove_commit as we still @@ -3728,10 +3726,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, struct ib_uverbs_destroy_srq cmd; struct ib_uverbs_destroy_srq_resp resp; struct ib_uobject *uobj; - struct ib_srq *srq; struct ib_uevent_object *obj; int ret = -EINVAL; - enum ib_srq_type srq_type; if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; @@ -3741,9 +3737,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, if (IS_ERR(uobj)) return PTR_ERR(uobj); - srq = uobj->object; obj = container_of(uobj, struct ib_uevent_object, uobject); - srq_type = srq->srq_type; /* * Make sure we don't free the memory in remove_commit as we still * needs the uobject memory to create the response. diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 3d2609608f58..5e530d2bee44 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref) if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); + kobject_put(&file->device->kobj); kfree(file); } @@ -917,7 +918,6 @@ err: static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; - struct ib_uverbs_device *dev = file->device; mutex_lock(&file->cleanup_mutex); if (file->ucontext) { @@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) ib_uverbs_release_async_event_file); kref_put(&file->ref, ib_uverbs_release_file); - kobject_put(&dev->kobj); return 0; } @@ -1154,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, kref_get(&file->ref); mutex_unlock(&uverbs_dev->lists_mutex); - ib_uverbs_event_handler(&file->event_handler, &event); mutex_lock(&file->cleanup_mutex); ucontext = file->ucontext; @@ -1171,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, * for example due to freeing the resources * (e.g mmput). 
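The uverbs_main.c hunks in this stretch move the kobject_put() of the uverbs device from ib_uverbs_close() into ib_uverbs_release_file(): the device reference is now dropped only when the last kref to the file dies, not when the fd closes, so an async event file can no longer outlive the device kobject it points at. The general "drop the parent ref in the kref release callback" shape, with hypothetical types:

#include <linux/kref.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct my_dev {                          /* hypothetical device wrapper */
        struct kobject kobj;
};

struct my_file {
        struct kref ref;
        struct my_dev *dev;              /* pinned via kobject_get() at open */
};

static void my_file_release(struct kref *ref)
{
        struct my_file *file = container_of(ref, struct my_file, ref);

        /* Drop the parent ref only when the *last* user disappears. */
        kobject_put(&file->dev->kobj);
        kfree(file);
}

static void my_file_close(struct my_file *file)
{
        kref_put(&file->ref, my_file_release);  /* close() is just one ref */
}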
*/ + ib_uverbs_event_handler(&file->event_handler, &event); ib_dev->disassociate_ucontext(ucontext); mutex_lock(&file->cleanup_mutex); ib_uverbs_cleanup_ucontext(file, ucontext, true); diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index 8b9587fe2303..94fd989c9060 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, } EXPORT_SYMBOL(ib_copy_qp_attr_to_user); -void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, - struct sa_path_rec *src) +static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, + struct sa_path_rec *src) { - memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid); - memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid); + memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid)); + memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid)); dst->dlid = htons(ntohl(sa_path_get_dlid(src))); dst->slid = htons(ntohl(sa_path_get_slid(src))); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 4792f5209ac2..b456e3ca1876 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -44,6 +44,7 @@ #include <linux/in.h> #include <linux/in6.h> #include <net/addrconf.h> +#include <linux/security.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> @@ -451,6 +452,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, } EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr); +/* + * This function creates ah from the incoming packet. + * Incoming packet has dgid of the receiver node on which this code is + * getting executed and, sgid contains the GID of the sender. + * + * When resolving mac address of destination, the arrived dgid is used + * as sgid and, sgid is used as dgid because sgid contains destinations + * GID whom to respond to. + * + * This is why when calling rdma_addr_find_l2_eth_by_grh() function, the + * position of arguments dgid and sgid do not match the order of the + * parameters. 
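+ * + * A hedged sketch of the resulting call (argument names follow this + * function's locals; the exact signature may differ): + * + * rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, ah_attr->roce.dmac, + * &vlan_id, &if_index, &hoplimit); + * + * i.e. the packet's dgid (our own GID) goes in the source slot and + * the packet's sgid (the sender's GID) in the destination slot.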
+ */ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, const struct ib_wc *wc, const struct ib_grh *grh, struct rdma_ah_attr *ah_attr) @@ -506,11 +520,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, } resolved_dev = dev_get_by_index(&init_net, if_index); - if (resolved_dev->flags & IFF_LOOPBACK) { - dev_put(resolved_dev); - resolved_dev = idev; - dev_hold(resolved_dev); - } rcu_read_lock(); if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev, resolved_dev)) @@ -713,12 +722,20 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, { struct ib_qp *qp; unsigned long flags; + int err; qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->real_qp = real_qp; + err = ib_open_shared_qp_security(qp, real_qp->device); + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + qp->real_qp = real_qp; atomic_inc(&real_qp->usecnt); qp->device = real_qp->device; qp->event_handler = event_handler; @@ -804,6 +821,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, if (IS_ERR(qp)) return qp; + ret = ib_create_qp_security(qp, device); + if (ret) { + ib_destroy_qp(qp); + return ERR_PTR(ret); + } + qp->device = device; qp->real_qp = qp; qp->uobject = NULL; @@ -815,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, spin_lock_init(&qp->mr_lock); INIT_LIST_HEAD(&qp->rdma_mrs); INIT_LIST_HEAD(&qp->sig_mrs); + qp->port = 0; if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) return ib_create_xrc_qp(qp, qp_init_attr); @@ -1253,20 +1277,40 @@ out: } EXPORT_SYMBOL(ib_resolve_eth_dmac); -int ib_modify_qp(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask) +/** + * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. + * @qp: The QP to modify. + * @attr: On input, specifies the QP attributes to modify. On output, + * the current values of selected QP attributes are returned. + * @attr_mask: A bit-mask used to specify which attributes of the QP + * are being modified. + * @udata: pointer to the user's input/output buffer information; + * NULL for in-kernel callers. + * It returns 0 on success and an appropriate error code on error.
+ */ +int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) { + int ret; - if (qp_attr_mask & IB_QP_AV) { - int ret; - - ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr); + if (attr_mask & IB_QP_AV) { + ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); if (ret) return ret; } + ret = ib_security_modify_qp(qp, attr, attr_mask, udata); + if (!ret && (attr_mask & IB_QP_PORT)) + qp->port = attr->port_num; - return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); + return ret; +} +EXPORT_SYMBOL(ib_modify_qp_with_udata); + +int ib_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); @@ -1295,6 +1339,7 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; @@ -1335,6 +1380,7 @@ int ib_destroy_qp(struct ib_qp *qp) struct ib_cq *scq, *rcq; struct ib_srq *srq; struct ib_rwq_ind_table *ind_tbl; + struct ib_qp_security *sec; int ret; WARN_ON_ONCE(qp->mrs_used > 0); @@ -1350,6 +1396,9 @@ int ib_destroy_qp(struct ib_qp *qp) rcq = qp->recv_cq; srq = qp->srq; ind_tbl = qp->rwq_ind_tbl; + sec = qp->qp_sec; + if (sec) + ib_destroy_qp_security_begin(sec); if (!qp->uobject) rdma_rw_cleanup_mrs(qp); @@ -1366,6 +1415,11 @@ int ib_destroy_qp(struct ib_qp *qp) atomic_dec(&srq->usecnt); if (ind_tbl) atomic_dec(&ind_tbl->usecnt); + if (sec) + ib_destroy_qp_security_end(sec); + } else { + if (sec) + ib_destroy_qp_security_abort(sec); } return ret; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ebf7be8d4139..85527532c49d 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -51,11 +51,24 @@ #define BNXT_RE_PAGE_SIZE_8M BIT(23) #define BNXT_RE_PAGE_SIZE_1G BIT(30) +#define BNXT_RE_MAX_MR_SIZE BIT(30) + #define BNXT_RE_MAX_QPC_COUNT (64 * 1024) #define BNXT_RE_MAX_MRW_COUNT (64 * 1024) #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) +#define BNXT_RE_UD_QP_HW_STALL 0x400000 + +#define BNXT_RE_RQ_WQE_THRESHOLD 32 + +/* + * Setting the default ack delay value to 16, which means + * the default timeout is approx. 
260ms(4 usec * 2 ^(timeout)) + */ + +#define BNXT_RE_DEFAULT_ACK_DELAY 16 + struct bnxt_re_work { struct work_struct work; unsigned long event; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7ba9e699d7ab..f0e01b3ac711 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -61,6 +61,48 @@ #include "ib_verbs.h" #include <rdma/bnxt_re-abi.h> +static int __from_ib_access_flags(int iflags) +{ + int qflags = 0; + + if (iflags & IB_ACCESS_LOCAL_WRITE) + qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; + if (iflags & IB_ACCESS_REMOTE_READ) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; + if (iflags & IB_ACCESS_REMOTE_WRITE) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; + if (iflags & IB_ACCESS_REMOTE_ATOMIC) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; + if (iflags & IB_ACCESS_MW_BIND) + qflags |= BNXT_QPLIB_ACCESS_MW_BIND; + if (iflags & IB_ZERO_BASED) + qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; + if (iflags & IB_ACCESS_ON_DEMAND) + qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; + return qflags; +}; + +static enum ib_access_flags __to_ib_access_flags(int qflags) +{ + enum ib_access_flags iflags = 0; + + if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) + iflags |= IB_ACCESS_LOCAL_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) + iflags |= IB_ACCESS_REMOTE_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) + iflags |= IB_ACCESS_REMOTE_READ; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) + iflags |= IB_ACCESS_REMOTE_ATOMIC; + if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) + iflags |= IB_ACCESS_MW_BIND; + if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) + iflags |= IB_ZERO_BASED; + if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) + iflags |= IB_ACCESS_ON_DEMAND; + return iflags; +}; + static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, struct bnxt_qplib_sge *sg_list, int num) { @@ -103,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver); bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ib_attr->sys_image_guid); - ib_attr->max_mr_size = ~0ull; - ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K | - BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M | - BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G; + ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE; + ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K; ib_attr->vendor_id = rdev->en_dev->pdev->vendor; ib_attr->vendor_part_id = rdev->en_dev->pdev->device; @@ -132,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->max_mr = dev_attr->max_mr; ib_attr->max_pd = dev_attr->max_pd; ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; - ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom; - ib_attr->atomic_cap = IB_ATOMIC_HCA; - ib_attr->masked_atomic_cap = IB_ATOMIC_HCA; + ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; + if (dev_attr->is_atomic) { + ib_attr->atomic_cap = IB_ATOMIC_HCA; + ib_attr->masked_atomic_cap = IB_ATOMIC_HCA; + } ib_attr->max_ee_rd_atom = 0; ib_attr->max_res_rd_atom = 0; @@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->max_total_mcast_qp_attach = 0; ib_attr->max_ah = dev_attr->max_ah; - ib_attr->max_fmr = dev_attr->max_fmr; - ib_attr->max_map_per_fmr = 1; /* ? 
*/ + ib_attr->max_fmr = 0; + ib_attr->max_map_per_fmr = 0; ib_attr->max_srq = dev_attr->max_srq; ib_attr->max_srq_wr = dev_attr->max_srq_wqes; @@ -159,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS; ib_attr->max_pkeys = 1; - ib_attr->local_ca_ack_delay = 0; + ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY; return 0; } @@ -348,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, return -EINVAL; ctx->refcnt--; if (!ctx->refcnt) { - rc = bnxt_qplib_del_sgid - (sgid_tbl, - &sgid_tbl->tbl[ctx->idx], true); - if (rc) + rc = bnxt_qplib_del_sgid(sgid_tbl, + &sgid_tbl->tbl[ctx->idx], + true); + if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); - ctx_tbl = sgid_tbl->ctx; - ctx_tbl[ctx->idx] = NULL; - kfree(ctx); + } else { + ctx_tbl = sgid_tbl->ctx; + ctx_tbl[ctx->idx] = NULL; + kfree(ctx); + } } } else { return -EINVAL; @@ -410,37 +454,173 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, return IB_LINK_LAYER_ETHERNET; } -/* Protection Domains */ -int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) +#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) + +static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) { + struct bnxt_re_fence_data *fence = &pd->fence; + struct ib_mr *ib_mr = &fence->mr->ib_mr; + struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; + + memset(wqe, 0, sizeof(*wqe)); + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; + wqe->wr_id = BNXT_QPLIB_FENCE_WRID; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + wqe->bind.zero_based = false; + wqe->bind.parent_l_key = ib_mr->lkey; + wqe->bind.va = (u64)(unsigned long)fence->va; + wqe->bind.length = fence->size; + wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); + wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; + + /* Save the initial rkey in fence structure for now; + * wqe->bind.r_key will be set at (re)bind time. 
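+ * + * bnxt_re_bind_fence_mw() below then cycles the key on every bind: + * + * wqe.bind.r_key = fence->bind_rkey; + * fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); + * + * ib_inc_rkey() advances only the low 8 "key" bits, so the index + * portion of the rkey is preserved across rebinds.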
+ */ + fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); +} + +static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) +{ + struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, + qplib_qp); + struct ib_pd *ib_pd = qp->ib_qp.pd; struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); - struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; + struct bnxt_qplib_swqe wqe; int rc; - if (ib_pd->uobject && pd->dpi.dbr) { - struct ib_ucontext *ib_uctx = ib_pd->uobject->context; - struct bnxt_re_ucontext *ucntx; + memcpy(&wqe, fence_wqe, sizeof(wqe)); + wqe.bind.r_key = fence->bind_rkey; + fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); - /* Free DPI only if this is the first PD allocated by the - * application and mark the context dpi as NULL - */ - ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); + dev_dbg(rdev_to_dev(qp->rdev), + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", + wqe.bind.r_key, qp->qplib_qp.id, pd); + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + if (rc) { + dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); + return rc; + } + bnxt_qplib_post_send_db(&qp->qplib_qp); - rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, - &rdev->qplib_res.dpi_tbl, - &pd->dpi); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI"); - /* Don't fail, continue*/ - ucntx->dpi = NULL; + return rc; +} + +static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) +{ + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = fence->mr; + + if (fence->mw) { + bnxt_re_dealloc_mw(fence->mw); + fence->mw = NULL; + } + if (mr) { + if (mr->ib_mr.rkey) + bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, + true); + if (mr->ib_mr.lkey) + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + kfree(mr); + fence->mr = NULL; + } + if (fence->dma_addr) { + dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + fence->dma_addr = 0; } +} + +static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) +{ + int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = NULL; + dma_addr_t dma_addr = 0; + struct ib_mw *mw; + u64 pbl_tbl; + int rc; - rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, - &rdev->qplib_res.pd_tbl, - &pd->qplib_pd); + dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + rc = dma_mapping_error(dev, dma_addr); if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD"); - return rc; + dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); + rc = -EIO; + fence->dma_addr = 0; + goto fail; + } + fence->dma_addr = dma_addr; + + /* Allocate a MR */ + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + rc = -ENOMEM; + goto fail; + } + fence->mr = mr; + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); + goto fail; + } + + /* Register MR */ + mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->qplib_mr.va = (u64)(unsigned long)fence->va; + 
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; + pbl_tbl = dma_addr; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, + BNXT_RE_FENCE_PBL_SIZE, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); + goto fail; + } + mr->ib_mr.rkey = mr->qplib_mr.rkey; + + /* Create a fence MW only for kernel consumers */ + mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); + if (IS_ERR(mw)) { + dev_err(rdev_to_dev(rdev), + "Failed to create fence-MW for PD: %p\n", pd); + rc = PTR_ERR(mw); + goto fail; + } + fence->mw = mw; + + bnxt_re_create_fence_wqe(pd); + return 0; + +fail: + bnxt_re_destroy_fence_mr(pd); + return rc; +} + +/* Protection Domains */ +int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) +{ + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + int rc; + + bnxt_re_destroy_fence_mr(pd); + + if (pd->qplib_pd.id) { + rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, + &rdev->qplib_res.pd_tbl, + &pd->qplib_pd); + if (rc) + dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD"); } kfree(pd); @@ -472,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, if (udata) { struct bnxt_re_pd_resp resp; - if (!ucntx->dpi) { + if (!ucntx->dpi.dbr) { /* Allocate DPI in alloc_pd to avoid failing of * ibv_devinfo and family of application when DPIs * are depleted. */ if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl, - &pd->dpi, ucntx)) { + &ucntx->dpi, ucntx)) { rc = -ENOMEM; goto dbfail; } - ucntx->dpi = &pd->dpi; } resp.pdid = pd->qplib_pd.id; /* Still allow mapping this DBR to the new user PD. */ - resp.dpi = ucntx->dpi->dpi; - resp.dbr = (u64)ucntx->dpi->umdbr; + resp.dpi = ucntx->dpi.dpi; + resp.dbr = (u64)ucntx->dpi.umdbr; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { @@ -498,6 +677,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, } } + if (!udata) + if (bnxt_re_create_fence_mr(pd)) + dev_warn(rdev_to_dev(rdev), + "Failed to create Fence-MR\n"); return &pd->ib_pd; dbfail: (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, @@ -761,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, qplib_qp->rq.nmap = umem->nmap; } - qplib_qp->dpi = cntx->dpi; + qplib_qp->dpi = &cntx->dpi; return 0; rqfail: ib_umem_release(qp->sumem); @@ -849,12 +1032,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp /* Shadow QP SQ depth should be same as QP1 RQ depth */ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.sq.max_sge = 2; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.sq.q_full_delta = 1; qp->qplib_qp.scq = qp1_qp->scq; qp->qplib_qp.rcq = qp1_qp->rcq; qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.rq.q_full_delta = 1; qp->qplib_qp.mtu = qp1_qp->mtu; @@ -917,10 +1104,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
true : false); - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); - qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; @@ -959,6 +1142,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_init_attr->cap.max_recv_wr; + qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -967,6 +1153,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); if (qp_init_attr->qp_type == IB_QPT_GSI) { + /* Allocate 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_init_attr->cap.max_send_wr; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -1006,6 +1198,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, } } else { + /* Allocate 128 + 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; + + /* + * Reserving one slot for Phantom WQE. Application can + * post one extra entry in this case. 
But allowing this to avoid + * unexpected Queue full condition + */ + + qp->qplib_qp.sq.q_full_delta -= 1; + qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; if (udata) { @@ -1025,6 +1233,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->ib_qp.qp_num = qp->qplib_qp.id; spin_lock_init(&qp->sq_lock); + spin_lock_init(&qp->rq_lock); if (udata) { struct bnxt_re_qp_resp resp; @@ -1129,48 +1338,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) } } -static int __from_ib_access_flags(int iflags) -{ - int qflags = 0; - - if (iflags & IB_ACCESS_LOCAL_WRITE) - qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; - if (iflags & IB_ACCESS_REMOTE_READ) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; - if (iflags & IB_ACCESS_REMOTE_WRITE) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; - if (iflags & IB_ACCESS_REMOTE_ATOMIC) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; - if (iflags & IB_ACCESS_MW_BIND) - qflags |= BNXT_QPLIB_ACCESS_MW_BIND; - if (iflags & IB_ZERO_BASED) - qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; - if (iflags & IB_ACCESS_ON_DEMAND) - qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; - return qflags; -}; - -static enum ib_access_flags __to_ib_access_flags(int qflags) -{ - enum ib_access_flags iflags = 0; - - if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) - iflags |= IB_ACCESS_LOCAL_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) - iflags |= IB_ACCESS_REMOTE_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) - iflags |= IB_ACCESS_REMOTE_READ; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) - iflags |= IB_ACCESS_REMOTE_ATOMIC; - if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) - iflags |= IB_ACCESS_MW_BIND; - if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) - iflags |= IB_ZERO_BASED; - if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) - iflags |= IB_ACCESS_ON_DEMAND; - return iflags; -}; - static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp1_qp, int qp_attr_mask) @@ -1347,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; - qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic; + /* Cap the max_rd_atomic to device max */ + qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, + dev_attr->max_qp_rd_atom); } if (qp_attr_mask & IB_QP_SQ_PSN) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; qp->qplib_qp.sq.psn = qp_attr->sq_psn; } if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (qp_attr->max_dest_rd_atomic > + dev_attr->max_qp_init_rd_atom) { + dev_err(rdev_to_dev(rdev), + "max_dest_rd_atomic requested%d is > dev_max%d", + qp_attr->max_dest_rd_atomic, + dev_attr->max_qp_init_rd_atom); + return -EINVAL; + } + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; @@ -1378,11 +1556,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); qp->qplib_qp.sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_attr->cap.max_send_wr; + /* + * Reserving one slot for Phantom WQE. Some application can + * post one extra entry in this case. 
Allowing this to avoid + * unexpected Queue full condition + */ + qp->qplib_qp.sq.q_full_delta -= 1; qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; if (qp->qplib_qp.rq.max_wqe) { entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_attr->cap.max_recv_wr; qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; } else { /* SRQ was used prior, just ignore the RQ caps */ @@ -1883,6 +2071,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, return payload_sz; } +static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) +{ + if ((qp->ib_qp.qp_type == IB_QPT_UD || + qp->ib_qp.qp_type == IB_QPT_GSI || + qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && + qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { + int qp_attr_mask; + struct ib_qp_attr qp_attr; + + qp_attr_mask = IB_QP_STATE; + qp_attr.qp_state = IB_QPS_RTS; + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); + qp->qplib_qp.wqe_cnt = 0; + } +} + static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, struct ib_send_wr *wr) @@ -1928,6 +2132,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; } @@ -2024,6 +2229,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; @@ -2071,7 +2277,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; + unsigned long flags; + u32 count = 0; + spin_lock_irqsave(&qp->rq_lock, flags); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); @@ -2100,9 +2309,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, *bad_wr = wr; break; } + + /* Ring DB if the RQEs posted reaches a threshold value */ + if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { + bnxt_qplib_post_recv_db(&qp->qplib_qp); + count = 0; + } + wr = wr->next; } - bnxt_qplib_post_recv_db(&qp->qplib_qp); + + if (count) + bnxt_qplib_post_recv_db(&qp->qplib_qp); + + spin_unlock_irqrestore(&qp->rq_lock, flags); + return rc; } @@ -2177,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, } cq->qplib_cq.sghead = cq->umem->sg_head.sgl; cq->qplib_cq.nmap = cq->umem->nmap; - cq->qplib_cq.dpi = uctx->dpi; + cq->qplib_cq.dpi = &uctx->dpi; } else { cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe), @@ -2643,25 +2864,64 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc, wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; } +static int send_phantom_wqe(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&qp->sq_lock, flags); + + rc = bnxt_re_bind_fence_mw(lib_qp); + if (!rc) { + lib_qp->sq.phantom_wqe_cnt++; + dev_dbg(&lib_qp->sq.hwq.pdev->dev, + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", + lib_qp->id, lib_qp->sq.hwq.prod, + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), + lib_qp->sq.phantom_wqe_cnt); + } + + spin_unlock_irqrestore(&qp->sq_lock, flags); + return rc; +} + int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); struct 
bnxt_re_qp *qp; struct bnxt_qplib_cqe *cqe; int i, ncqe, budget; + struct bnxt_qplib_q *sq; + struct bnxt_qplib_qp *lib_qp; u32 tbl_idx; struct bnxt_re_sqp_entries *sqp_entry = NULL; unsigned long flags; spin_lock_irqsave(&cq->cq_lock, flags); budget = min_t(u32, num_entries, cq->max_cql); + num_entries = budget; if (!cq->cql) { dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use"); goto exit; } cqe = &cq->cql[0]; while (budget) { - ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); + lib_qp = NULL; + ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); + if (lib_qp) { + sq = &lib_qp->sq; + if (sq->send_phantom) { + qp = container_of(lib_qp, + struct bnxt_re_qp, qplib_qp); + if (send_phantom_wqe(qp) == -ENOMEM) + dev_err(rdev_to_dev(cq->rdev), + "Phantom failed! Scheduled to send again\n"); + else + sq->send_phantom = false; + } + } + if (!ncqe) break; @@ -2767,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, else if (ib_cqn_flags & IB_CQ_SOLICITED) type = DBR_DBR_TYPE_CQ_ARMSE; + /* Poll to see if there are missed events */ + if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && + !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) + return 1; + bnxt_qplib_req_notify_cq(&cq->qplib_cq, type); return 0; @@ -2822,6 +3087,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) struct bnxt_re_dev *rdev = mr->rdev; int rc; + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } + if (mr->npages && mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); @@ -2829,8 +3100,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) mr->npages = 0; mr->pages = NULL; } - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (!IS_ERR_OR_NULL(mr->ib_umem)) ib_umem_release(mr->ib_umem); @@ -2914,97 +3183,52 @@ fail: return ERR_PTR(rc); } -/* Fast Memory Regions */ -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr) +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; - struct bnxt_re_fmr *fmr; + struct bnxt_re_mw *mw; int rc; - if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || - fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { - dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); - return ERR_PTR(-ENOMEM); - } - fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); - if (!fmr) + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) return ERR_PTR(-ENOMEM); + mw->rdev = rdev; + mw->qplib_mw.pd = &pd->qplib_pd; - fmr->rdev = rdev; - fmr->qplib_fmr.pd = &pd->qplib_pd; - fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; - - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) + mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); goto fail; + } + mw->ib_mw.rkey = mw->qplib_mw.rkey; - fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); - fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; - fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; + atomic_inc(&rdev->mw_count); + return &mw->ib_mw; - atomic_inc(&rdev->mr_count); - return &fmr->ib_fmr; fail: - kfree(fmr); + kfree(mw); return ERR_PTR(rc); } -int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, - u64 iova) +int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) { - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; + struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); + struct bnxt_re_dev *rdev = mw->rdev; int rc; - fmr->qplib_fmr.va = iova; - fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; - - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, - list_len, true); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", - fmr->ib_fmr.lkey); - return rc; -} - -int bnxt_re_unmap_fmr(struct list_head *fmr_list) -{ - struct bnxt_re_dev *rdev; - struct bnxt_re_fmr *fmr; - struct ib_fmr *ib_fmr; - int rc = 0; - - /* Validate each FMR inside the fmr_list */ - list_for_each_entry(ib_fmr, fmr_list, list) { - fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); - rdev = fmr->rdev; - - if (rdev) { - rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, - &fmr->qplib_fmr, true); - if (rc) - break; - } + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); + return rc; } - return rc; -} - -int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) -{ - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; - int rc; - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to free FMR"); - - kfree(fmr); - atomic_dec(&rdev->mr_count); + kfree(mw); + atomic_dec(&rdev->mw_count); return rc; } @@ -3022,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, struct scatterlist *sg; int entry; + if (length > BNXT_RE_MAX_MR_SIZE) { + dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported: %ld\n", + length, BNXT_RE_MAX_MR_SIZE); + return ERR_PTR(-ENOMEM); + } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); @@ -3165,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) struct bnxt_re_ucontext *uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); + + struct bnxt_re_dev *rdev = uctx->rdev; + int rc = 0; + if (uctx->shpg) free_page((unsigned long)uctx->shpg); + + if (uctx->dpi.dbr) { + /* Free the DPI here; it was allocated lazily for this + * context's first PD and lives until context teardown + */ + rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, + &rdev->qplib_res.dpi_tbl, + &uctx->dpi); + if (rc) + dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!"); + /* Don't fail, continue */ + uctx->dpi.dbr = NULL; + } + kfree(uctx); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 5c3d71765454..a0bb7e33d7ca 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -44,11 +44,22 @@ struct
bnxt_re_gid_ctx { u32 refcnt; }; +#define BNXT_RE_FENCE_BYTES 64 +struct bnxt_re_fence_data { + u32 size; + u8 va[BNXT_RE_FENCE_BYTES]; + dma_addr_t dma_addr; + struct bnxt_re_mr *mr; + struct ib_mw *mw; + struct bnxt_qplib_swqe bind_wqe; + u32 bind_rkey; +}; + struct bnxt_re_pd { struct bnxt_re_dev *rdev; struct ib_pd ib_pd; struct bnxt_qplib_pd qplib_pd; - struct bnxt_qplib_dpi dpi; + struct bnxt_re_fence_data fence; }; struct bnxt_re_ah { @@ -62,6 +73,7 @@ struct bnxt_re_qp { struct bnxt_re_dev *rdev; struct ib_qp ib_qp; spinlock_t sq_lock; /* protect sq */ + spinlock_t rq_lock; /* protect rq */ struct bnxt_qplib_qp qplib_qp; struct ib_umem *sumem; struct ib_umem *rumem; @@ -114,7 +126,7 @@ struct bnxt_re_mw { struct bnxt_re_ucontext { struct bnxt_re_dev *rdev; struct ib_ucontext ib_uctx; - struct bnxt_qplib_dpi *dpi; + struct bnxt_qplib_dpi dpi; void *shpg; spinlock_t sh_lock; /* protect shpg */ }; @@ -181,12 +193,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, u32 max_num_sg); int bnxt_re_dereg_mr(struct ib_mr *mr); -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr); -int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, - u64 iova); -int bnxt_re_unmap_fmr(struct list_head *fmr_list); -int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata); +int bnxt_re_dealloc_mw(struct ib_mw *mw); struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 5d355401179b..ceae2d92fb08 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); req.update_period_ms = cpu_to_le32(1000); req.stats_dma_addr = cpu_to_le64(dma_map); + req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE; bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg); @@ -507,10 +508,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) ibdev->dereg_mr = bnxt_re_dereg_mr; ibdev->alloc_mr = bnxt_re_alloc_mr; ibdev->map_mr_sg = bnxt_re_map_mr_sg; - ibdev->alloc_fmr = bnxt_re_alloc_fmr; - ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; - ibdev->unmap_fmr = bnxt_re_unmap_fmr; - ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; ibdev->reg_user_mr = bnxt_re_reg_user_mr; ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 43d08b5e9085..9af1514e5944 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_qp1 req; - struct creq_create_qp1_resp *resp; + struct creq_create_qp1_resp resp; struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_q *sq = &qp->sq; struct bnxt_qplib_q *rq = &qp->rq; @@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) 
req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp1_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); - rc = -EINVAL; - goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - qp->id = le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; struct cmdq_create_qp req; - struct creq_create_qp_resp *resp; + struct creq_create_qp_resp resp; struct bnxt_qplib_pbl *pbl; struct sq_psn_search **psn_search_ptr; unsigned long int psn_search, poff = 0; @@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) } req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed"); - rc = -EINVAL; - goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - qp->id = le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; - struct creq_modify_qp_resp *resp; + struct creq_modify_qp_resp resp; u16 cmd_flags = 0, pkey; u32 temp32[4]; u32 bmask; + int rc; RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); @@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); - resp = (struct creq_modify_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) 
{ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; qp->cur_qp_state = qp->state; return 0; } @@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_query_qp req; - struct creq_query_qp_resp *resp; + struct creq_query_qp_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; struct creq_query_qp_resp_sb *sb; u16 cmd_flags = 0; u32 temp32[4]; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) + return -ENOMEM; + sb = sbuf->sb; + req.qp_cid = cpu_to_le32(qp->id); req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - (void **)&sb, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; /* Extract the context from the side buffer */ qp->state = sb->en_sqd_async_notify_state & CREQ_QUERY_QP_RESP_SB_STATE_MASK; @@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); memcpy(qp->smac, sb->src_mac, 6); qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); - return 0; +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) @@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_qp req; - struct creq_destroy_qp_resp *resp; + struct creq_destroy_qp_resp resp; unsigned long flags; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); req.qp_cid = cpu_to_le32(qp->id); - resp = (struct creq_destroy_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; /* Must walk the associated CQs to nullified the QP ptr */ spin_lock_irqsave(&qp->scq->hwq.lock, flags); 
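The same mechanical conversion repeats for every verb in this file: the response is now a per-command struct on the caller's stack, and the status, cookie and timeout decoding moves into the rcfw layer, so callers branch on a plain return code. In sketch form (a representative shape drawn from the destroy hunk above; request/response types vary per command):

        struct cmdq_destroy_qp req;
        struct creq_destroy_qp_resp resp;
        u16 cmd_flags = 0;
        int rc;

        RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
        req.qp_cid = cpu_to_le32(qp->id);
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        if (rc)
                return rc;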
@@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == - HWQ_CMP(sq->hwq.cons, &sq->hwq)) { + + if (bnxt_qplib_queue_full(sq)) { + dev_err(&sq->hwq.pdev->dev, + "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", + sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, + sq->q_full_delta); rc = -ENOMEM; goto done; } @@ -1203,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, } /* Each SGE entry = 1 WQE size16 */ wqe_size16 = wqe->num_sge; + /* HW requires the wqe size to have room for at least one SGE + * even if none was supplied by the ULP + */ + if (!wqe->num_sge) + wqe_size16++; } /* Specifics */ @@ -1373,6 +1303,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, } sq->hwq.prod++; + + qp->wqe_cnt++; + done: return rc; } @@ -1411,8 +1344,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == - HWQ_CMP(rq->hwq.cons, &rq->hwq)) { + if (bnxt_qplib_queue_full(rq)) { dev_err(&rq->hwq.pdev->dev, "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); rc = -EINVAL; @@ -1437,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, rqe->flags = wqe->flags; rqe->wqe_size = wqe->num_sge + ((offsetof(typeof(*rqe), data) + 15) >> 4); + /* HW requires the wqe size to have room for at least one SGE + * even if none was supplied by the ULP + */ + if (!wqe->num_sge) + rqe->wqe_size++; /* Supply the rqe->wr_id index to the wr_id_tbl for now */ rqe->wr_id[0] = cpu_to_le32(sw_prod); @@ -1483,7 +1420,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_cq req; - struct creq_create_cq_resp *resp; + struct creq_create_cq_resp resp; struct bnxt_qplib_pbl *pbl; u16 cmd_flags = 0; int rc; @@ -1525,30 +1462,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << CMDQ_CREATE_CQ_CNQ_ID_SFT); - resp = (struct creq_create_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - cq->id = le32_to_cpu(resp->xid); + + cq->id = le32_to_cpu(resp.xid); cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; init_waitqueue_head(&cq->waitq); @@ -1566,33 +1485,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_cq req; - struct creq_destroy_cq_resp *resp; + struct creq_destroy_cq_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); req.cq_cid = cpu_to_le32(cq->id); - resp = (struct creq_destroy_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP:
DESTROY_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; bnxt_qplib_free_hwq(res->pdev, &cq->hwq); return 0; } @@ -1664,14 +1567,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, return rc; } +/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) + * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 + */ +static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, + u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) +{ + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_swq *swq; + u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; + struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; + struct cq_req *peek_req_hwcqe; + struct bnxt_qplib_qp *peek_qp; + struct bnxt_qplib_q *peek_sq; + int i, rc = 0; + + /* Normal mode */ + /* Check for the psn_search marking before completing */ + swq = &sq->swq[sw_sq_cons]; + if (swq->psn_search && + le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { + /* Unmark */ + swq->psn_search->flags_next_psn = cpu_to_le32 + (le32_to_cpu(swq->psn_search->flags_next_psn) + & ~0x80000000); + dev_dbg(&cq->hwq.pdev->dev, + "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + sq->condition = true; + sq->send_phantom = true; + + /* TODO: Only ARM if the previous SQE is ARMALL */ + bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); + + rc = -EAGAIN; + goto out; + } + if (sq->condition) { + /* Peek at the completions */ + peek_raw_cq_cons = cq->hwq.cons; + peek_sw_cq_cons = cq_cons; + i = cq->hwq.max_elements; + while (i--) { + peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); + peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; + peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] + [CQE_IDX(peek_sw_cq_cons)]; + /* If the next hwcqe is VALID */ + if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, + cq->hwq.max_elements)) { + /* If the next hwcqe is a REQ */ + if ((peek_hwcqe->cqe_type_toggle & + CQ_BASE_CQE_TYPE_MASK) == + CQ_BASE_CQE_TYPE_REQ) { + peek_req_hwcqe = (struct cq_req *) + peek_hwcqe; + peek_qp = (struct bnxt_qplib_qp *) + ((unsigned long) + le64_to_cpu + (peek_req_hwcqe->qp_handle)); + peek_sq = &peek_qp->sq; + peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( + peek_req_hwcqe->sq_cons_idx) - 1 + , &sq->hwq); + /* If the hwcqe's sq's wr_id matches */ + if (peek_sq == sq && + sq->swq[peek_sq_cons_idx].wr_id == + BNXT_QPLIB_FENCE_WRID) { + /* + * Unbreak only if the phantom + * comes back + */ + dev_dbg(&cq->hwq.pdev->dev, + "FP:Got Phantom CQE"); + sq->condition = false; + sq->single = true; + rc = 0; + goto out; + } + } + /* Valid but not the phantom, so keep looping */ + } else { + /* Not valid yet, just exit and wait */ + rc = -EINVAL; + goto out; + } + peek_sw_cq_cons++; + peek_raw_cq_cons++; + } + dev_err(&cq->hwq.pdev->dev, + "Should not have come here! 
cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + rc = -EINVAL; + } +out: + return rc; +} + static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, struct cq_req *hwcqe, - struct bnxt_qplib_cqe **pcqe, int *budget) + struct bnxt_qplib_cqe **pcqe, int *budget, + u32 cq_cons, struct bnxt_qplib_qp **lib_qp) { struct bnxt_qplib_qp *qp; struct bnxt_qplib_q *sq; struct bnxt_qplib_cqe *cqe; - u32 sw_cons, cqe_cons; + u32 sw_sq_cons, cqe_sq_cons; + struct bnxt_qplib_swq *swq; int rc = 0; qp = (struct bnxt_qplib_qp *)((unsigned long) @@ -1683,13 +1685,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, } sq = &qp->sq; - cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); - if (cqe_cons > sq->hwq.max_elements) { + cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); + if (cqe_sq_cons > sq->hwq.max_elements) { dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process req reported "); dev_err(&cq->hwq.pdev->dev, "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", - cqe_cons, sq->hwq.max_elements); + cqe_sq_cons, sq->hwq.max_elements); return -EINVAL; } /* If we were in the middle of flushing the SQ, continue */ @@ -1698,53 +1700,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Require to walk the sq's swq to fabricate CQEs for all previously * signaled SWQEs due to CQE aggregation from the current sq cons - * to the cqe_cons + * to the cqe_sq_cons */ cqe = *pcqe; while (*budget) { - sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); - if (sw_cons == cqe_cons) + sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); + if (sw_sq_cons == cqe_sq_cons) + /* Done */ break; + + swq = &sq->swq[sw_sq_cons]; memset(cqe, 0, sizeof(*cqe)); cqe->opcode = CQ_BASE_CQE_TYPE_REQ; cqe->qp_handle = (u64)(unsigned long)qp; cqe->src_qp = qp->id; - cqe->wr_id = sq->swq[sw_cons].wr_id; - cqe->type = sq->swq[sw_cons].type; + cqe->wr_id = swq->wr_id; + if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) + goto skip; + cqe->type = swq->type; /* For the last CQE, check for status. 
For errors, regardless * of the request being signaled or not, it must complete with * the hwcqe error status */ - if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && + if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && hwcqe->status != CQ_REQ_STATUS_OK) { cqe->status = hwcqe->status; dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Req "); dev_err(&cq->hwq.pdev->dev, "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", - sw_cons, cqe->wr_id, cqe->status); + sw_sq_cons, cqe->wr_id, cqe->status); cqe++; (*budget)--; sq->flush_in_progress = true; /* Must block new posting of SQ and RQ */ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; + sq->condition = false; + sq->single = false; } else { - if (sq->swq[sw_cons].flags & - SQ_SEND_FLAGS_SIGNAL_COMP) { + if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; } } +skip: sq->hwq.cons++; + if (sq->single) + break; } +out: *pcqe = cqe; - if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { + if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { /* Out of budget */ rc = -EAGAIN; goto done; } + /* + * Back to normal completion mode only after it has completed all of + * the WC for this CQE + */ + sq->single = false; if (!sq->flush_in_progress) goto done; flush: @@ -1872,6 +1895,25 @@ flush_rq: return rc; } +bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) +{ + struct cq_base *hw_cqe, **hw_cqe_ptr; + unsigned long flags; + u32 sw_cons, raw_cons; + bool rc = true; + + spin_lock_irqsave(&cq->hwq.lock, flags); + raw_cons = cq->hwq.cons; + sw_cons = HWQ_CMP(raw_cons, &cq->hwq); + hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; + hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)]; + + /* Check for Valid bit. 
If the CQE is valid, return false */ + rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); + spin_unlock_irqrestore(&cq->hwq.lock, flags); + return rc; +} + static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, struct cq_res_raweth_qp1 *hwcqe, struct bnxt_qplib_cqe **pcqe, @@ -2074,7 +2116,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, } int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, - int num_cqes) + int num_cqes, struct bnxt_qplib_qp **lib_qp) { struct cq_base *hw_cqe, **hw_cqe_ptr; unsigned long flags; @@ -2099,7 +2141,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, case CQ_BASE_CQE_TYPE_REQ: rc = bnxt_qplib_cq_process_req(cq, (struct cq_req *)hw_cqe, - &cqe, &budget); + &cqe, &budget, + sw_cons, lib_qp); break; case CQ_BASE_CQE_TYPE_RES_RC: rc = bnxt_qplib_cq_process_res_rc(cq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index f0150f8da1e3..19176e06c98a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -88,6 +88,7 @@ struct bnxt_qplib_swq { struct bnxt_qplib_swqe { /* General */ +#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ u64 wr_id; u8 reqs_type; u8 type; @@ -216,9 +217,16 @@ struct bnxt_qplib_q { struct scatterlist *sglist; u32 nmap; u32 max_wqe; + u16 q_full_delta; u16 max_sge; u32 psn; bool flush_in_progress; + bool condition; + bool single; + bool send_phantom; + u32 phantom_wqe_cnt; + u32 phantom_cqe_cnt; + u32 next_cq_cons; }; struct bnxt_qplib_qp { @@ -242,6 +250,7 @@ struct bnxt_qplib_qp { u8 timeout; u8 retry_cnt; u8 rnr_retry; + u64 wqe_cnt; u32 min_rnr_timer; u32 max_rd_atomic; u32 max_dest_rd_atomic; @@ -301,6 +310,13 @@ struct bnxt_qplib_qp { (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ !((raw_cons) & (cp_bit))) +static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) +{ + return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), + &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, + &qplib_q->hwq); +} + struct bnxt_qplib_cqe { u8 status; u8 type; @@ -432,7 +448,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, - int num); + int num, struct bnxt_qplib_qp **qp); +bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq); void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 23fb7260662b..16e42754dbec 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -39,72 +39,55 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/prefetch.h> +#include <linux/delay.h> + #include "roce_hsi.h" #include "qplib_res.h" #include "qplib_rcfw.h" static void bnxt_qplib_service_creq(unsigned long data); /* Hardware communication channel */ -int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { u16 cbit; int rc; - cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; - if (!test_bit(cbit, 
rcfw->cmdq_bitmap)) - dev_warn(&rcfw->pdev->dev, - "QPLIB: CMD bit %d for cookie 0x%x is not set?", - cbit, cookie); - rc = wait_event_timeout(rcfw->waitq, !test_bit(cbit, rcfw->cmdq_bitmap), msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); - if (!rc) { - dev_warn(&rcfw->pdev->dev, - "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n", - RCFW_CMD_WAIT_TIME_MS, cookie); - } - - return rc; + return rc ? 0 : -ETIMEDOUT; }; -int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { - u32 count = -1; + u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; u16 cbit; - cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; if (!test_bit(cbit, rcfw->cmdq_bitmap)) goto done; do { + mdelay(1); /* 1 msec */ bnxt_qplib_service_creq((unsigned long)rcfw); } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); done: - return count; + return count ? 0 : -ETIMEDOUT; }; -void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, - struct cmdq_base *req, void **crsbe, - u8 is_block) +static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, + struct creq_base *resp, void *sb, u8 is_block) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_hwq *crsb = &rcfw->crsb; - struct bnxt_qplib_crsqe *crsqe = NULL; - struct bnxt_qplib_crsbe **crsb_ptr; + struct bnxt_qplib_crsq *crsqe; u32 sw_prod, cmdq_prod; - u8 retry_cnt = 0xFF; - dma_addr_t dma_addr; unsigned long flags; u32 size, opcode; u16 cookie, cbit; int pg, idx; u8 *preq; -retry: opcode = req->opcode; if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && @@ -112,63 +95,50 @@ retry: dev_err(&rcfw->pdev->dev, "QPLIB: RCFW not initialized, reject opcode 0x%x", opcode); - return NULL; + return -EINVAL; } if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); - return NULL; + return -EINVAL; } /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ spin_lock_irqsave(&cmdq->lock, flags); - if (req->cmd_size > cmdq->max_elements - - ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) & - (cmdq->max_elements - 1))) { + if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); spin_unlock_irqrestore(&cmdq->lock, flags); - - if (!retry_cnt--) - return NULL; - goto retry; + return -EAGAIN; } - retry_cnt = 0xFF; - cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; + cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; if (is_block) cookie |= RCFW_CMD_IS_BLOCKING; + + set_bit(cbit, rcfw->cmdq_bitmap); req->cookie = cpu_to_le16(cookie); - if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW MAX outstanding cmd reached!"); - atomic_dec(&rcfw->seq_num); + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp) { spin_unlock_irqrestore(&cmdq->lock, flags); - - if (!retry_cnt--) - return NULL; - goto retry; + return -EBUSY; } - /* Reserve a resp buffer slot if requested */ - if (req->resp_size && crsbe) { - spin_lock(&crsb->lock); - sw_prod = HWQ_CMP(crsb->prod, crsb); - crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; - *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] - [get_crsb_idx(sw_prod)]; - bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, 
sw_prod, &dma_addr); - req->resp_addr = cpu_to_le64(dma_addr); - crsb->prod++; - spin_unlock(&crsb->lock); - - req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + - BNXT_QPLIB_CMDQE_UNITS - 1) / - BNXT_QPLIB_CMDQE_UNITS; + memset(resp, 0, sizeof(*resp)); + crsqe->resp = (struct creq_qp_event *)resp; + crsqe->resp->cookie = req->cookie; + crsqe->req_size = req->cmd_size; + if (req->resp_size && sb) { + struct bnxt_qplib_rcfw_sbuf *sbuf = sb; + + req->resp_addr = cpu_to_le64(sbuf->dma_addr); + req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / + BNXT_QPLIB_CMDQE_UNITS; } + cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; preq = (u8 *)req; size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; @@ -190,23 +160,24 @@ retry: preq += min_t(u32, size, sizeof(*cmdqe)); size -= min_t(u32, size, sizeof(*cmdqe)); cmdq->prod++; + rcfw->seq_num++; } while (size > 0); + rcfw->seq_num++; + cmdq_prod = cmdq->prod; if (rcfw->flags & FIRMWARE_FIRST_FLAG) { - /* The very first doorbell write is required to set this flag - * which prompts the FW to reset its internal pointers + /* The very first doorbell write + * is required to set this flag + * which prompts the FW to reset + * its internal pointers */ cmdq_prod |= FIRMWARE_FIRST_FLAG; rcfw->flags &= ~FIRMWARE_FIRST_FLAG; } - sw_prod = HWQ_CMP(crsq->prod, crsq); - crsqe = &crsq->crsq[sw_prod]; - memset(crsqe, 0, sizeof(*crsqe)); - crsq->prod++; - crsqe->req_size = req->cmd_size; /* ring CMDQ DB */ + wmb(); writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + rcfw->cmdq_bar_reg_prod_off); writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + @@ -214,9 +185,56 @@ retry: done: spin_unlock_irqrestore(&cmdq->lock, flags); /* Return the CREQ response pointer */ - return crsqe ? &crsqe->qp_event : NULL; + return 0; } +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, + struct creq_base *resp, + void *sb, u8 is_block) +{ + struct creq_qp_event *evnt = (struct creq_qp_event *)resp; + u16 cookie; + u8 opcode, retry_cnt = 0xFF; + int rc = 0; + + do { + opcode = req->opcode; + rc = __send_message(rcfw, req, resp, sb, is_block); + cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; + if (!rc) + break; + + if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { + /* send failed */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", + cookie, opcode); + return rc; + } + is_block ? 
mdelay(1) : usleep_range(500, 1000); + + } while (retry_cnt--); + + if (is_block) + rc = __block_for_resp(rcfw, cookie); + else + rc = __wait_for_resp(rcfw, cookie); + if (rc) { + /* timed out */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timed out (%d msec)", + cookie, opcode, RCFW_CMD_WAIT_TIME_MS); + return rc; + } + + if (evnt->status) { + /* failed with status */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", + cookie, opcode, evnt->status); + rc = -EFAULT; + } + + return rc; +} /* Completions */ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, struct creq_func_event *func_event) @@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, struct creq_qp_event *qp_event) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_crsqe *crsqe; - u16 cbit, cookie, blocked = 0; + struct bnxt_qplib_crsq *crsqe; unsigned long flags; - u32 sw_cons; + u16 cbit, blocked = 0; + u16 cookie; + __le16 mcookie; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: @@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, default: /* Command Response */ spin_lock_irqsave(&cmdq->lock, flags); - sw_cons = HWQ_CMP(crsq->cons, crsq); - crsqe = &crsq->crsq[sw_cons]; - crsq->cons++; - memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); - - cookie = le16_to_cpu(crsqe->qp_event.cookie); + cookie = le16_to_cpu(qp_event->cookie); + mcookie = qp_event->cookie; blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp && + crsqe->resp->cookie == mcookie) { + memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); + crsqe->resp = NULL; + } else { + dev_err(&rcfw->pdev->dev, + "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", + crsqe->resp ? "mismatch" : "collision", + crsqe->resp ? 
crsqe->resp->cookie : 0, mcookie); + } if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) dev_warn(&rcfw->pdev->dev, "QPLIB: CMD bit %d was not requested", cbit); - cmdq->cons += crsqe->req_size; - spin_unlock_irqrestore(&cmdq->lock, flags); + crsqe->req_size = 0; + if (!blocked) wake_up(&rcfw->waitq); - break; + spin_unlock_irqrestore(&cmdq->lock, flags); } return 0; } @@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data) struct creq_base *creqe, **creq_ptr; u32 sw_cons, raw_cons; unsigned long flags; - u32 type; + u32 type, budget = CREQ_ENTRY_POLL_BUDGET; - /* Service the CREQ until empty */ + /* Service the CREQ until the budget is exhausted */ spin_lock_irqsave(&creq->lock, flags); raw_cons = creq->cons; - while (1) { + while (budget > 0) { sw_cons = HWQ_CMP(raw_cons, creq); creq_ptr = (struct creq_base **)creq->pbl_ptr; creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; @@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data) type = creqe->type & CREQ_BASE_TYPE_MASK; switch (type) { case CREQ_BASE_TYPE_QP_EVENT: - if (!bnxt_qplib_process_qp_event - (rcfw, (struct creq_qp_event *)creqe)) - rcfw->creq_qp_event_processed++; - else { - dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); - dev_warn(&rcfw->pdev->dev, - "QPLIB: type = 0x%x not handled", - type); - } + bnxt_qplib_process_qp_event + (rcfw, (struct creq_qp_event *)creqe); + rcfw->creq_qp_event_processed++; break; case CREQ_BASE_TYPE_FUNC_EVENT: if (!bnxt_qplib_process_func_event @@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data) break; } raw_cons++; + budget--; } + if (creq->cons != raw_cons) { creq->cons = raw_cons; CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, @@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) /* RCFW */ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) { - struct creq_deinitialize_fw_resp *resp; struct cmdq_deinitialize_fw req; + struct creq_deinitialize_fw_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); - resp = (struct creq_deinitialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) - return -EINVAL; - - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) - return -ETIMEDOUT; - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) - return -EFAULT; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; @@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl) int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn) { - struct creq_initialize_fw_resp *resp; struct cmdq_initialize_fw req; + struct creq_initialize_fw_resp resp; u16 cmd_flags = 0, level; + int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); @@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, skip_ctx_setup: req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); - resp = (struct creq_initialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != 
le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW failed"); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; } void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); - kfree(rcfw->crsq.crsq); + kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); - rcfw->pdev = NULL; } @@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, goto fail; } - rcfw->crsq.max_elements = rcfw->cmdq.max_elements; - rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, - sizeof(*rcfw->crsq.crsq), GFP_KERNEL); - if (!rcfw->crsq.crsq) + rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, + sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); + if (!rcfw->crsqe_tbl) goto fail; - rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; - if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, - &rcfw->crsb.max_elements, - BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, - HWQ_TYPE_CTX)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: HW channel CRSB allocation failed"); - goto fail; - } return 0; fail: @@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, int rc; /* General */ - atomic_set(&rcfw->seq_num, 0); + rcfw->seq_num = 0; rcfw->flags = FIRMWARE_FIRST_FLAG; bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * sizeof(unsigned long)); @@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; - /* CRSQ */ - rcfw->crsq.prod = 0; - rcfw->crsq.cons = 0; - /* CREQ */ rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); @@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); return 0; } + +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size) +{ + struct bnxt_qplib_rcfw_sbuf *sbuf; + + sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); + if (!sbuf) + return NULL; + + sbuf->size = size; + sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, + &sbuf->dma_addr, GFP_ATOMIC); + if (!sbuf->sb) + goto bail; + + return sbuf; +bail: + kfree(sbuf); + return NULL; +} + +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf) +{ + if (sbuf->sb) + dma_free_coherent(&rcfw->pdev->dev, sbuf->size, + sbuf->sb, sbuf->dma_addr); + kfree(sbuf); +} diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index d3567d75bf58..09ce121770cd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -73,6 +73,7 @@ #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT #define RCFW_MAX_COOKIE_VALUE 0x7FFF #define RCFW_CMD_IS_BLOCKING 0x8000 +#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 /* Cmdq contains a fix number of a 16-Byte slots */ struct bnxt_qplib_cmdqe { @@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe { u8 data[1024]; }; -/* CRSQ SB */ -#define BNXT_QPLIB_CRSBE_MAX_CNT 4 -#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) -#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) - -#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) -#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) - -static inline u32 
get_crsb_pg(u32 val) -{ - return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; -} - -static inline u32 get_crsb_idx(u32 val) -{ - return val & MAX_CRSB_IDX_PER_PG; -} - -static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, - u32 prod, dma_addr_t *dma_addr) -{ - *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; - *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * - BNXT_QPLIB_CRSBE_UNITS; -} - /* CREQ */ /* Allocate 1 per QP for async error notification for now */ #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) @@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val) #define CREQ_DB(db, raw_cons, cp_bit) \ writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) +#define CREQ_ENTRY_POLL_BUDGET 0x100 + /* HWQ */ -struct bnxt_qplib_crsqe { - struct creq_qp_event qp_event; + +struct bnxt_qplib_crsq { + struct creq_qp_event *resp; u32 req_size; }; -struct bnxt_qplib_crsq { - struct bnxt_qplib_crsqe *crsq; - u32 prod; - u32 cons; - u32 max_elements; +struct bnxt_qplib_rcfw_sbuf { + void *sb; + dma_addr_t dma_addr; + u32 size; }; /* RCFW Communication Channels */ @@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw { wait_queue_head_t waitq; int (*aeq_handler)(struct bnxt_qplib_rcfw *, struct creq_func_event *); - atomic_t seq_num; + u32 seq_num; /* Bar region info */ void __iomem *cmdq_bar_reg_iomem; @@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw { /* Actual Cmd and Resp Queues */ struct bnxt_qplib_hwq cmdq; - struct bnxt_qplib_crsq crsq; - struct bnxt_qplib_hwq crsb; + struct bnxt_qplib_crsq *crsqe_tbl; }; void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); @@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, (struct bnxt_qplib_rcfw *, struct creq_func_event *)); -int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, - struct cmdq_base *req, void **crsbe, - u8 is_block); +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size); +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf); +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, struct creq_base *resp, + void *sbuf, u8 is_block); int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 6277d802ca4b..2e4855509719 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) +#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ + ((HWQ_CMP(hwq->prod, hwq)\ + - HWQ_CMP(hwq->cons, hwq))\ + & (hwq->max_elements - 1))) enum bnxt_qplib_hwq_type { HWQ_TYPE_CTX, HWQ_TYPE_QUEUE, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 7b31eccedf11..ef91ab786dd4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -51,43 +51,51 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; /* Device */ + +static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw) +{ + int rc; + u16 pcie_ctl2; + + rc = 
pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, + &pcie_ctl2); + if (rc) + return false; + return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); +} + int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr) { struct cmdq_query_func req; - struct creq_query_func_resp *resp; + struct creq_query_func_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; struct creq_query_func_resp_sb *sb; u16 cmd_flags = 0; u32 temp; u8 *tqm_alloc; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); - req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_func_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, - 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) { dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; + "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); + return -ENOMEM; } + + sb = sbuf->sb; + req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; + /* Extract the context from the side buffer */ attr->max_qp = le32_to_cpu(sb->max_qp); + /* max_qp value reported by FW for PF doesn't include the QP1 for PF */ + attr->max_qp += 1; attr->max_qp_rd_atom = sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom; @@ -95,6 +103,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); + /* + * 128 WQEs need to be reserved for the HW (8916). Prevent
Prevent + * reporting the max number + */ + attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; attr->max_qp_sges = sb->max_sge; attr->max_cq = le32_to_cpu(sb->max_cq); attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); @@ -130,7 +143,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); } - return 0; + + attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } /* SGID */ @@ -178,8 +195,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, /* Remove GID from the SGID table */ if (update) { struct cmdq_delete_gid req; - struct creq_delete_gid_resp *resp; + struct creq_delete_gid_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); if (sgid_tbl->hw_id[index] == 0xFFFF) { @@ -188,31 +206,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return -EINVAL; } req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); - resp = (struct creq_delete_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, - 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; } memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero)); @@ -234,7 +231,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_res, sgid_tbl); struct bnxt_qplib_rcfw *rcfw = res->rcfw; - int i, free_idx, rc = 0; + int i, free_idx; if (!sgid_tbl) { dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); @@ -266,10 +263,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } if (update) { struct cmdq_add_gid req; - struct creq_add_gid_resp *resp; + struct creq_add_gid_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; RCFW_CMD_PREP(req, ADD_GID, cmd_flags); @@ -290,31 +288,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, req.src_mac[1] = cpu_to_be16(temp16[1]); req.src_mac[2] = cpu_to_be16(temp16[2]); - resp = (struct creq_add_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: ADD_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPIB: SP: ADD_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + sgid_tbl->hw_id[free_idx] = 
le32_to_cpu(resp.xid); } /* Add GID to the sgid_tbl */ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); @@ -325,7 +303,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, *index = free_idx; /* unlock */ - return rc; + return 0; } /* pkeys */ @@ -422,10 +400,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_ah req; - struct creq_create_ah_resp *resp; + struct creq_create_ah_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); @@ -450,28 +429,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) req.dest_mac[1] = cpu_to_le16(temp16[1]); req.dest_mac[2] = cpu_to_le16(temp16[2]); - resp = (struct creq_create_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - ah->id = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; + + ah->id = le32_to_cpu(resp.xid); return 0; } @@ -479,35 +442,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_ah req; - struct creq_destroy_ah_resp *resp; + struct creq_destroy_ah_resp resp; u16 cmd_flags = 0; + int rc; /* Clean up the AH table in the device */ RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); req.ah_cid = cpu_to_le32(ah->id); - resp = (struct creq_destroy_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; return 0; } @@ -516,8 +463,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deallocate_key req; - struct creq_deallocate_key_resp *resp; + struct creq_deallocate_key_resp resp; u16 cmd_flags = 0; + int rc; if (mrw->lkey == 0xFFFFFFFF) { dev_info(&res->pdev->dev, @@ -536,27 +484,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) else req.key = cpu_to_le32(mrw->lkey); - resp = (struct creq_deallocate_key_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send 
failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; + /* Free the qplib's MRW memory */ if (mrw->hwq.max_elements) bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); @@ -568,9 +500,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_allocate_mrw req; - struct creq_allocate_mrw_resp *resp; + struct creq_allocate_mrw_resp resp; u16 cmd_flags = 0; unsigned long tmp; + int rc; RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); @@ -584,33 +517,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) tmp = (unsigned long)mrw; req.mrw_handle = cpu_to_le64(tmp); - resp = (struct creq_allocate_mrw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) - mrw->rkey = le32_to_cpu(resp->xid); + mrw->rkey = le32_to_cpu(resp.xid); else - mrw->lkey = le32_to_cpu(resp->xid); + mrw->lkey = le32_to_cpu(resp.xid); return 0; } @@ -619,40 +536,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deregister_mr req; - struct creq_deregister_mr_resp *resp; + struct creq_deregister_mr_resp resp; u16 cmd_flags = 0; int rc; RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); req.lkey = cpu_to_le32(mrw->lkey); - resp = (struct creq_deregister_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); - return -EINVAL; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } 
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) + return rc; /* Free the qplib's MR memory */ if (mrw->hwq.max_elements) { @@ -669,7 +563,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_register_mr req; - struct creq_register_mr_resp *resp; + struct creq_register_mr_resp resp; u16 cmd_flags = 0, level; int pg_ptrs, pages, i, rc; dma_addr_t **pbl_ptr; @@ -730,36 +624,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, req.key = cpu_to_le32(mr->lkey); req.mr_size = cpu_to_le64(mr->total_size); - resp = (struct creq_register_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&res->pdev->dev, "SP: REG_MR send failed"); - rc = -EINVAL; - goto fail; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "SP: REG_MR timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) goto fail; - } + return 0; fail: @@ -804,35 +673,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_map_tc_to_cos req; - struct creq_map_tc_to_cos_resp *resp; + struct creq_map_tc_to_cos_resp resp; u16 cmd_flags = 0; - int tleft; + int rc = 0; RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); req.cos0 = cpu_to_le16(cids[0]); req.cos1 = cpu_to_le16(cids[1]); - resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); - return -EINVAL; - } - - tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); - if (!tleft) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); - return -ETIMEDOUT; - } - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 1442a617e968..2ce7e2a32cf0 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -40,6 +40,10 @@ #ifndef __BNXT_QPLIB_SP_H__ #define __BNXT_QPLIB_SP_H__ +#define BNXT_QPLIB_RESERVED_QP_WRS 128 + +#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 + struct bnxt_qplib_dev_attr { char fw_ver[32]; u16 max_sgid; @@ -68,6 +72,7 @@ struct bnxt_qplib_dev_attr { u32 max_inline_data; u32 l2_db_size; u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ]; + bool is_atomic; }; struct bnxt_qplib_pd { diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 558d6a03375d..3eff6541bd6f 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c 
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -142,8 +142,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) pr_debug("%s alloc_skb failed\n", __func__); return -ENOMEM; } - wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); - memset(wqe, 0, sizeof(*wqe)); + wqe = skb_put_zero(skb, sizeof(*wqe)); build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7, T3_SOPEOP); @@ -561,8 +560,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p) ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) | V_EC_TYPE(0) | V_EC_GEN(1) | V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32; - wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); - memset(wqe, 0, sizeof(*wqe)); + wqe = skb_put_zero(skb, sizeof(*wqe)); build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0, T3_CTL_QP_TID, 7, T3_SOPEOP); wqe->flags = cpu_to_be32(MODQP_WRITE_EC); @@ -837,7 +835,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) if (!skb) return -ENOMEM; pr_debug("%s rdev_p %p\n", __func__, rdev_p); - wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT)); wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) | V_FW_RIWR_LEN(sizeof(*wqe) >> 3)); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index b61630eba912..86975370a4c0 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -175,7 +175,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) skb = get_skb(skb, sizeof *req, GFP_KERNEL); if (!skb) return; - req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); skb->priority = CPL_PRIORITY_SETUP; @@ -190,7 +190,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep) if (!skb) return -ENOMEM; - req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); @@ -211,7 +211,7 @@ int iwch_resume_tid(struct iwch_ep *ep) if (!skb) return -ENOMEM; - req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); @@ -398,7 +398,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) } skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, arp_failure_discard); - req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); @@ -417,8 +417,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) } skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, abort_arp_failure); - req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = skb_put_zero(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); req->wr.wr_lo 
= htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); @@ -456,7 +455,7 @@ static int send_connect(struct iwch_ep *ep) skb->priority = CPL_PRIORITY_SETUP; set_arp_failure_handler(skb, act_open_req_arp_failure); - req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid)); req->local_port = ep->com.local_addr.sin_port; @@ -514,7 +513,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) set_arp_failure_handler(skb, arp_failure_discard); skb_reset_transport_header(skb); len = skb->len; - req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); + req = skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); req->wr_lo = htonl(V_WR_TID(ep->hwtid)); req->len = htonl(len); @@ -547,7 +546,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) return -ENOMEM; } skb_reserve(skb, sizeof(*req)); - mpa = (struct mpa_message *) skb_put(skb, mpalen); + mpa = skb_put(skb, mpalen); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = MPA_REJECT; @@ -565,7 +564,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, arp_failure_discard); skb_reset_transport_header(skb); - req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); + req = skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); req->wr_lo = htonl(V_WR_TID(ep->hwtid)); req->len = htonl(mpalen); @@ -597,7 +596,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) } skb->priority = CPL_PRIORITY_DATA; skb_reserve(skb, sizeof(*req)); - mpa = (struct mpa_message *) skb_put(skb, mpalen); + mpa = skb_put(skb, mpalen); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = (ep->mpa_attr.crc_enabled ? 
MPA_CRC : 0) | @@ -616,7 +615,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) set_arp_failure_handler(skb, arp_failure_discard); skb_reset_transport_header(skb); len = skb->len; - req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); + req = skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); req->wr_lo = htonl(V_WR_TID(ep->hwtid)); req->len = htonl(len); @@ -801,7 +800,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits) return 0; } - req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); @@ -1206,7 +1205,7 @@ static int listen_start(struct iwch_listen_ep *ep) return -ENOMEM; } - req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid)); req->local_port = ep->com.local_addr.sin_port; @@ -1247,7 +1246,7 @@ static int listen_stop(struct iwch_listen_ep *ep) pr_err("%s - failed to alloc skb\n", __func__); return -ENOMEM; } - req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); req->cpu_idx = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); @@ -1615,7 +1614,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) goto out; } rpl_skb->priority = CPL_PRIORITY_DATA; - rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); + rpl = skb_put(rpl_skb, sizeof(*rpl)); rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 29d30744d6c9..0cd0c1fa27d4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, struct iwch_mr *mhp; u32 mmid; u32 stag = 0; - int ret = 0; + int ret = -ENOMEM; if (mr_type != IB_MR_TYPE_MEM_REG || max_num_sg > T3_MAX_FASTREG_DEPTH) @@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, goto err; mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); - if (!mhp->pages) { - ret = -ENOMEM; + if (!mhp->pages) goto pl_err; - } mhp->rhp = rhp; ret = iwch_alloc_pbl(mhp, max_num_sg); @@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, mhp->attr.state = 1; mmid = (stag) >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; - if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) + ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid); + if (ret) goto err3; pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index ba6d5d281b03..7f633da0185d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -670,8 +670,7 @@ int iwch_post_zb_read(struct iwch_ep *ep) pr_err("%s cannot send zb_read!!\n", __func__); return -ENOMEM; } - wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr)); - memset(wqe, 0, sizeof(struct 
t3_rdma_read_wr)); + wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr)); wqe->read.rdmaop = T3_READ_REQ; wqe->read.reserved[0] = 0; wqe->read.reserved[1] = 0; @@ -702,8 +701,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) pr_err("%s cannot send TERMINATE!\n", __func__); return -ENOMEM; } - wqe = (union t3_wr *)skb_put(skb, 40); - memset(wqe, 0, 40); + wqe = skb_put_zero(skb, 40); wqe->send.rdmaop = T3_TERMINATE; /* immediate data length */ diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b6fe45924c6e..e49b34c3b136 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -398,7 +398,8 @@ void _c4iw_free_ep(struct kref *kref) (const u32 *)&sin6->sin6_addr.s6_addr, 1); } - cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); + cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, + ep->com.local_addr.ss_family); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); if (ep->mpa_skb) @@ -488,6 +489,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); release_ep_resources(ep); + kfree_skb(skb); return 0; } @@ -498,6 +500,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); c4iw_put_ep(&ep->parent_ep->com); release_ep_resources(ep); + kfree_skb(skb); return 0; } @@ -569,11 +572,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb) pr_debug("%s rdev %p\n", __func__, rdev); req->cmd = CPL_ABORT_NO_RST; + skb_get(skb); ret = c4iw_ofld_send(rdev, skb); if (ret) { __state_set(&ep->com, DEAD); queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); - } + } else + kfree_skb(skb); } static int send_flowc(struct c4iw_ep *ep) @@ -592,7 +597,7 @@ static int send_flowc(struct c4iw_ep *ep) else nparams = 9; - flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN); + flowc = __skb_put(skb, FLOWC_LEN); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); @@ -782,18 +787,16 @@ static int send_connect(struct c4iw_ep *ep) if (ep->com.remote_addr.ss_family == AF_INET) { switch (CHELSIO_CHIP_VERSION(adapter_type)) { case CHELSIO_T4: - req = (struct cpl_act_open_req *)skb_put(skb, wrlen); + req = skb_put(skb, wrlen); INIT_TP_WR(req, 0); break; case CHELSIO_T5: - t5req = (struct cpl_t5_act_open_req *)skb_put(skb, - wrlen); + t5req = skb_put(skb, wrlen); INIT_TP_WR(t5req, 0); req = (struct cpl_act_open_req *)t5req; break; case CHELSIO_T6: - t6req = (struct cpl_t6_act_open_req *)skb_put(skb, - wrlen); + t6req = skb_put(skb, wrlen); INIT_TP_WR(t6req, 0); req = (struct cpl_act_open_req *)t6req; t5req = (struct cpl_t5_act_open_req *)t6req; @@ -834,18 +837,16 @@ static int send_connect(struct c4iw_ep *ep) } else { switch (CHELSIO_CHIP_VERSION(adapter_type)) { case CHELSIO_T4: - req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); + req6 = skb_put(skb, wrlen); INIT_TP_WR(req6, 0); break; case CHELSIO_T5: - t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb, - wrlen); + t5req6 = skb_put(skb, wrlen); INIT_TP_WR(t5req6, 0); req6 = (struct cpl_act_open_req6 *)t5req6; break; case CHELSIO_T6: - t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb, - wrlen); + t6req6 = skb_put(skb, wrlen); INIT_TP_WR(t6req6, 0); req6 = (struct cpl_act_open_req6 *)t6req6; t5req6 = (struct cpl_t5_act_open_req6 *)t6req6; @@ -922,8 +923,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, } set_wr_txq(skb, 
CPL_PRIORITY_DATA, ep->txq_idx); - req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); - memset(req, 0, wrlen); + req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | @@ -1029,8 +1029,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); - memset(req, 0, wrlen); + req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | @@ -1110,8 +1109,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); - memset(req, 0, wrlen); + req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | @@ -1195,7 +1193,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) /* setup the hwtid for this connection */ ep->hwtid = tid; - cxgb4_insert_tid(t, ep, tid); + cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family); insert_ep_tid(ep); ep->snd_seq = be32_to_cpu(req->snd_isn); @@ -1902,8 +1900,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) int win; skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); - req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = __skb_put_zero(skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.filter = cpu_to_be32(cxgb4_select_ntuple( @@ -2300,7 +2297,8 @@ fail: (const u32 *)&sin6->sin6_addr.s6_addr, 1); } if (status && act_open_has_tid(status)) - cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); + cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl), + ep->com.local_addr.ss_family); remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); cxgb4_free_atid(t, atid); @@ -2517,7 +2515,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) goto reject; } - hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + + hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct tcphdr) + ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 
12 : 0); if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) child_ep->mtu = peer_mss + hdrs; @@ -2576,7 +2575,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); init_timer(&child_ep->timer); - cxgb4_insert_tid(t, child_ep, hwtid); + cxgb4_insert_tid(t, child_ep, hwtid, + child_ep->com.local_addr.ss_family); insert_ep_tid(child_ep); if (accept_cr(child_ep, skb, req)) { c4iw_put_ep(&parent_ep->com); @@ -2844,7 +2844,8 @@ out: 1); } remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); - cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); + cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, + ep->com.local_addr.ss_family); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_reconnect(ep); @@ -3747,9 +3748,9 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) */ memset(&tmp_opt, 0, sizeof(tmp_opt)); tcp_clear_options(&tmp_opt); - tcp_parse_options(skb, &tmp_opt, 0, NULL); + tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL); - req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); + req = __skb_push(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); req->l2info = cpu_to_be16(SYN_INTF_V(intf) | SYN_MAC_IDX_V(RX_MACIDX_G( @@ -3801,8 +3802,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); if (!req_skb) return; - req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = __skb_put_zero(req_skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 14de5bde1b63..be07da1997e6 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -44,8 +44,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, wr_len = sizeof *res_wr + sizeof *res; set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); - memset(res_wr, 0, wr_len); + res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | @@ -114,8 +113,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); - memset(res_wr, 0, wr_len); + res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | @@ -965,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, goto err3; if (ucontext) { + ret = -ENOMEM; mm = kmalloc(sizeof *mm, GFP_KERNEL); if (!mm) goto err4; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 329fb65e8fb0..ae0b79aeea2e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, kfree(entry); } - list_for_each_safe(pos, nxt, &uctx->qpids) { + list_for_each_safe(pos, nxt, &uctx->cqids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); kfree(entry); @@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->free_workq = 
create_singlethread_workqueue("iw_cxgb4_free"); if (!rdev->free_workq) { err = -ENOMEM; - goto err_free_status_page; + goto err_free_status_page_and_wr_log; } rdev->status_page->db_off = 0; return 0; -err_free_status_page: +err_free_status_page_and_wr_log: + if (c4iw_wr_log && rdev->wr_log) + kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); destroy_ocqp_pool: c4iw_ocqp_pool_destroy(rdev); @@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) { destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); + c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); + c4iw_ocqp_pool_destroy(rdev); c4iw_destroy_resource(&rdev->resource); } @@ -971,7 +975,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp->rdev.lldi.sge_egrstatuspagesize); devp->rdev.hw_queue.t4_eq_status_entries = - devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; + devp->rdev.lldi.sge_egrstatuspagesize / 64; devp->rdev.hw_queue.t4_max_eq_size = 65520; devp->rdev.hw_queue.t4_max_iq_size = 65520; devp->rdev.hw_queue.t4_max_rq_size = 8192 - diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 3ee7f43e419a..c2fba76becd4 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -81,8 +81,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - req = (struct ulp_mem_io *)__skb_put(skb, wr_len); - memset(req, 0, wr_len); + req = __skb_put_zero(skb, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | (wait ? FW_WR_COMPL_F : 0)); @@ -142,8 +141,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - req = (struct ulp_mem_io *)__skb_put(skb, wr_len); - memset(req, 0, wr_len); + req = __skb_put_zero(skb, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); if (i == (num_wqe-1)) { @@ -663,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, rhp = php->rhp; if (mr_type != IB_MR_TYPE_MEM_REG || - max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl && + max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl)) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 8e4154b4253e..cb7fc0d35d1d 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -293,8 +293,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); - memset(res_wr, 0, wr_len); + res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(2) | @@ -570,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { if (wr->num_sge > 1) return -EINVAL; - if (wr->num_sge) { + if (wr->num_sge && wr->sg_list[0].length) { wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr >> 32)); @@ -1228,7 +1227,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); - wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); 
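Many of the cxgb3/cxgb4 hunks above replace an skb_put()-plus-memset() pair with skb_put_zero() (or the __skb_put_zero() variant). A sketch of the helper's semantics, written against the <linux/skbuff.h> definitions of this era and offered as an illustration rather than the authoritative source, shows why the conversions are behavior-preserving:

```c
/* skb_put_zero(): extend the skb tail by len bytes and zero them,
 * returning void * so callers no longer need an explicit cast.
 */
static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);	/* grow tail, get old tail ptr */

	memset(tmp, 0, len);		/* zero the newly claimed area */
	return tmp;
}
```

Sites that only needed the cast dropped (no zeroing), such as the post_terminate() work request being built here, keep plain skb_put()/__skb_put(), whose void * return type makes the old (struct foo *) casts unnecessary.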
wqe->flowid_len16 = cpu_to_be32( @@ -1350,7 +1349,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | @@ -1419,7 +1418,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) } set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); - wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 5d6b1eeaa9a0..94b54850ec75 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd) } } -static void write_global_credit(struct hfi1_devdata *dd, - u8 vau, u16 total, u16 shared) +/* + * Set up allocation unit value. + */ +void set_up_vau(struct hfi1_devdata *dd, u8 vau) { - write_csr(dd, SEND_CM_GLOBAL_CREDIT, - ((u64)total << - SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | - ((u64)shared << - SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | - ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); + + /* do not modify other values in the register */ + reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; + reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); } /* * Set up initial VL15 credits of the remote. Assumes the rest of - * the CM credit registers are zero from a previous global or credit reset . + * the CM credit registers are zero from a previous global or credit reset. + * Shared limit for VL15 will always be 0. */ -void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) +void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) { - /* leave shared count at zero for both global and VL15 */ - write_global_credit(dd, vau, vl15buf, 0); + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); + + /* set initial values for total and shared credit limit */ + reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | + SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); + + /* + * Set total limit to be equal to VL15 credits. + * Leave shared limit at 0. + */ + reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); @@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd) for (i = 0; i < TXE_NUM_DATA_VL; i++) write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); write_csr(dd, SEND_CM_CREDIT_VL15, 0); - write_global_credit(dd, 0, 0, 0); + write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); /* reset the CM block */ pio_send_control(dd, PSC_CM_RESET); + /* reset cached value */ + dd->vl15buf_cached = 0; } /* convert a vCU to a CU */ @@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_up_work); + struct hfi1_devdata *dd = ppd->dd; + set_link_state(ppd, HLS_UP_INIT); /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ - read_ltp_rtt(ppd->dd); + read_ltp_rtt(dd); /* * OPA specifies that certain counters are cleared on a transition * to link up, so do that.
*/ - clear_linkup_counters(ppd->dd); + clear_linkup_counters(dd); /* * And (re)set link up default values. */ set_linkup_defaults(ppd); + /* + * Set VL15 credits. Use cached value from verify cap interrupt. + * In case of quick linkup or simulator, vl15 value will be set by + * handle_linkup_change. VerifyCap interrupt handler will not be + * called in those scenarios. + */ + if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) + set_up_vl15(dd, dd->vl15buf_cached); + /* enforce link speed enabled */ if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { /* oops - current speed is not enabled, bounce */ - dd_dev_err(ppd->dd, + dd_dev_err(dd, "Link speed active 0x%x is outside enabled 0x%x, downing link\n", ppd->link_speed_active, ppd->link_speed_enabled); set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, @@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work) */ if (vau == 0) vau = 1; - set_up_vl15(dd, vau, vl15buf); + set_up_vau(dd, vau); + + /* + * Set VL15 credits to 0 in global credit register. Cache remote VL15 + * credits value and wait for link-up interrupt to set it. + */ + set_up_vl15(dd, 0); + dd->vl15buf_cached = vl15buf; /* set up the LCB CRC mode */ crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; @@ -12814,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) /* clear from the handled mask of the general interrupt */ m = isrc / 64; n = isrc % 64; - dd->gi_mask[m] &= ~((u64)1 << n); + if (likely(m < CCE_NUM_INT_CSRS)) { + dd->gi_mask[m] &= ~((u64)1 << n); + } else { + dd_dev_err(dd, "remap interrupt err\n"); + return; + } /* direct the chip source to the given MSI-X interrupt */ m = isrc / 8; diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index 5bfa839d1c48..793514f1d15f 100644 --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h @@ -839,7 +839,9 @@ #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) +#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 +#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0 diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index da322e6668cc..414a04a481c2 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1045,6 +1045,14 @@ struct hfi1_devdata { /* initial vl15 credits to use */ u16 vl15_init; + /* + * Cached value for vl15buf, read during verify cap interrupt. VL15 + * credits are to be kept at 0 and set when handling the link-up + * interrupt. This removes the possibility of receiving VL15 MAD + * packets before this HFI is ready.
+ */ + u16 vl15buf_cached; + /* Misc small ints */ u8 n_krcv_queues; u8 qos_shift; @@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode); int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); -void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); +void set_up_vau(struct hfi1_devdata *dd, u8 vau); +void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf); void reset_link_credits(struct hfi1_devdata *dd); void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c index ba265d0ae93b..04a5082d5ac5 100644 --- a/drivers/infiniband/hw/hfi1/intr.c +++ b/drivers/infiniband/hw/hfi1/intr.c @@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) * the remote values. Both sides must be using the values. */ if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { - set_up_vl15(dd, dd->vau, dd->vl15_init); + set_up_vau(dd, dd->vau); + set_up_vl15(dd, dd->vl15_init); assign_remote_cm_au_table(dd, dd->vcu); } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 93faf86d54b6..6a9f6f9819e1 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) /* * Save BARs and command to rewrite after device reset. */ - dd->pcibar0 = addr; - dd->pcibar1 = addr >> 32; + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0); + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1); pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 650305cc0373..1a7af9f60c13 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) qp->pid); } -void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, - gfp_t gfp) +void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct hfi1_qp_priv *priv; - priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node); + priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node); if (!priv) return ERR_PTR(-ENOMEM); priv->owner = qp; - priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp, + priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL, rdi->dparms.node); if (!priv->s_ahg) { kfree(priv); diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h index 1eb9cd7b8c19..6fe542b6a927 100644 --- a/drivers/infiniband/hw/hfi1/qp.h +++ b/drivers/infiniband/hw/hfi1/qp.h @@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp); /* * Functions provided by hfi1 driver for rdmavt to use */ -void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, - gfp_t gfp); +void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp); void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); unsigned free_all_qps(struct rvt_dev_info *rdi); void notify_qp_reset(struct rvt_qp *qp); diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 069bdaf061ab..1080778a1f7c 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -2159,8 +2159,11 @@ send_last: ret 
= hfi1_rvt_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; - if (!ret) + if (!ret) { + /* peer will send again */ + rvt_put_ss(&qp->r_sge); goto rnr_nak; + } wc.ex.imm_data = ohdr->u.rc.imm_data; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 50d140d25e38..2f3bbcac1e34 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = { }; static struct attribute *port_cc_default_attributes[] = { - &cc_prescan_attr.attr + &cc_prescan_attr.attr, + NULL }; static struct kobj_type port_cc_ktype = { diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 90e7b77d68e8..2d19f9bb434d 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1779,7 +1779,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->alloc_hw_stats = alloc_hw_stats; ibdev->get_hw_stats = get_hw_stats; ibdev->alloc_rdma_netdev = hfi1_vnic_alloc_rn; - ibdev->free_rdma_netdev = hfi1_vnic_free_rn; /* keep process mad in the driver */ ibdev->process_mad = hfi1_process_mad; diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h index e2c455299b53..4a621cde4abb 100644 --- a/drivers/infiniband/hw/hfi1/vnic.h +++ b/drivers/infiniband/hw/hfi1/vnic.h @@ -176,7 +176,6 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *)); -void hfi1_vnic_free_rn(struct net_device *netdev); int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, struct hfi1_vnic_vport_info *vinfo, struct sk_buff *skb, u64 pbc, u8 plen); diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index b601c2929f8f..339f0cdd56d6 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -833,6 +833,15 @@ static const struct net_device_ops hfi1_netdev_ops = { .ndo_get_stats64 = hfi1_vnic_get_stats64, }; +static void hfi1_vnic_free_rn(struct net_device *netdev) +{ + struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); + + hfi1_vnic_deinit(vinfo); + mutex_destroy(&vinfo->lock); + free_netdev(netdev); +} + struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, u8 port_num, enum rdma_netdev_t type, @@ -864,6 +873,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, vinfo->num_tx_q = dd->chip_sdma_engines; vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT; vinfo->netdev = netdev; + rn->free_rdma_netdev = hfi1_vnic_free_rn; rn->set_id = hfi1_vnic_set_vesw_id; netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG; @@ -892,12 +902,3 @@ init_fail: free_netdev(netdev); return ERR_PTR(rc); } - -void hfi1_vnic_free_rn(struct net_device *netdev) -{ - struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); - - hfi1_vnic_deinit(vinfo); - mutex_destroy(&vinfo->lock); - free_netdev(netdev); -} diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index f78a733a63ec..d545302b8ef8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, } else { u8 *dmac = rdma_ah_retrieve_dmac(ah_attr); - if (!dmac) + if (!dmac) { + kfree(ah); return ERR_PTR(-EINVAL); + } memcpy(ah->av.mac, dmac, ETH_ALEN); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c 
b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 37d5d29597a4..2540b65e242c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_RDMA_READ: ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; - set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, - atomic_wr(wr)->rkey); + set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; - set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, - atomic_wr(wr)->rkey); + set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); break; case IB_WR_SEND: case IB_WR_SEND_WITH_INV: @@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) union ib_gid dgid; u64 subnet_prefix; int attr_mask = 0; - int i; + int i, j; int ret; + u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; u8 phy_port; + u8 port = 0; u8 sl; priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; @@ -709,27 +711,35 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) attr.rnr_retry = 7; attr.timeout = 0x12; attr.path_mtu = IB_MTU_256; + attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); rdma_ah_set_static_rate(&attr.ah_attr, 3); subnet_prefix = cpu_to_be64(0xfe80000000000000LL); for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { + phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) : + (i % HNS_ROCE_MAX_PORTS); + sl = i / HNS_ROCE_MAX_PORTS; + + for (j = 0; j < caps->num_ports; j++) { + if (hr_dev->iboe.phy_port[j] == phy_port) { + queue_en[i] = 1; + port = j; + break; + } + } + + if (!queue_en[i]) + continue; + free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); - if (IS_ERR(free_mr->mr_free_qp[i])) { + if (!free_mr->mr_free_qp[i]) { dev_err(dev, "Create loop qp failed!\n"); goto create_lp_qp_failed; } hr_qp = free_mr->mr_free_qp[i]; - sl = i / caps->num_ports; - - if (caps->num_ports == HNS_ROCE_MAX_PORTS) - phy_port = (i >= HNS_ROCE_MAX_PORTS) ? 
(i - 2) : - (i % caps->num_ports); - else - phy_port = i % caps->num_ports; - - hr_qp->port = phy_port + 1; + hr_qp->port = port; hr_qp->phy_port = phy_port; hr_qp->ibqp.qp_type = IB_QPT_RC; hr_qp->ibqp.device = &hr_dev->ib_dev; @@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) hr_qp->ibqp.recv_cq = cq; hr_qp->ibqp.send_cq = cq; - rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1); - rdma_ah_set_sl(&attr.ah_attr, phy_port + 1); - attr.port_num = phy_port + 1; + rdma_ah_set_port_num(&attr.ah_attr, port + 1); + rdma_ah_set_sl(&attr.ah_attr, sl); + attr.port_num = port + 1; attr.dest_qp_num = hr_qp->qpn; memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), - hr_dev->dev_addr[phy_port], + hr_dev->dev_addr[port], MAC_ADDR_OCTET_NUM); memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); - memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3); - memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3); + memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); + memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3); dgid.raw[11] = 0xff; dgid.raw[12] = 0xfe; dgid.raw[8] ^= 2; rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw); - attr_mask |= IB_QP_PORT; ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, IB_QPS_RESET, IB_QPS_INIT); @@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { hr_qp = free_mr->mr_free_qp[i]; + if (!hr_qp) + continue; + ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp); if (ret) dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", @@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; int i; int ret; - int ne; + int ne = 0; mr_work = container_of(work, struct hns_roce_mr_free_work, work); hr_mr = (struct hns_roce_mr *)mr_work->mr; @@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { hr_qp = free_mr->mr_free_qp[i]; + if (!hr_qp) + continue; + ne++; + ret = hns_roce_v1_send_lp_wqe(hr_qp); if (ret) { dev_err(dev, @@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) } } - ne = HNS_ROCE_V1_RESV_QP; do { ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); if (ret < 0) { @@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) goto free_work; } ne -= ret; - msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); + usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000, + (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000); } while (ne && time_before_eq(jiffies, end)); if (ne != 0) @@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, } wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; ++wq->tail; - } else { + } else { /* RQ corresponds to CQE */ wc->byte_len = le32_to_cpu(cqe->byte_cnt); opcode = roce_get_field(cqe->cqe_byte_4, @@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev, old_cnt = roce_get_field(old_send, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S); - if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) + if (cur_cnt - old_cnt > + SDB_ST_CMP_VAL) { success_flags = 1; - else { - send_ptr = roce_get_field(old_send, + } else { + send_ptr = + roce_get_field(old_send, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + roce_get_field(sdb_retry_cnt, @@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) struct hns_roce_dev *hr_dev; struct hns_roce_qp
*hr_qp; struct device *dev; + unsigned long qpn; int ret; qp_work_entry = container_of(work, struct hns_roce_qp_work, work); @@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) dev = &hr_dev->pdev->dev; priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; hr_qp = qp_work_entry->qp; + qpn = hr_qp->qpn; - dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn); + dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn); qp_work_entry->sche_cnt++; @@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) &qp_work_entry->db_wait_stage); if (ret) { dev_err(dev, "Check QP(0x%lx) db process status failed!\n", - hr_qp->qpn); + qpn); return; } @@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); if (ret) { - dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn); + dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn); return; } @@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) if (hr_qp->ibqp.qp_type == IB_QPT_RC) { /* RC QP, release QPN */ - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + hns_roce_release_range_qp(hr_dev, qpn, 1); kfree(hr_qp); } else kfree(hr_to_hr_sqp(hr_qp)); kfree(qp_work_entry); - dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn); + dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn); } int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index c3b41f95e70a..d9777b662eba 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port, return -ENODEV; } - spin_lock_bh(&hr_dev->iboe.lock); - switch (event) { case NETDEV_UP: case NETDEV_CHANGE: @@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port, break; } - spin_unlock_bh(&hr_dev->iboe.lock); return 0; } diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index da2eb5a281fa..9b1566468744 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev, int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq); +void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev); void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev); void i40iw_add_pdusecount(struct i40iw_pd *iwpd); void i40iw_rem_devusecount(struct i40iw_device *iwdev); diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index f3bc01bce483..5a2fa743676c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node, } ctrl_ird |= IETF_PEER_TO_PEER; - ctrl_ird |= IETF_FLPDU_ZERO_LEN; switch (mpa_key) { case MPA_KEY_REQUEST: @@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node, } else { type = I40IW_CM_EVENT_CONNECTED; cm_node->state = I40IW_CM_STATE_OFFLOADED; - i40iw_send_ack(cm_node); } + i40iw_send_ack(cm_node); break; default: pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); @@ 
-3488,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp) if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) || (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) || (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) || - (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) { + (last_ae == I40IW_AE_LLP_CONNECTION_RESET) || + iwdev->reset)) { issue_close = 1; iwqp->cm_id = NULL; if (!iwqp->flush_issued) { @@ -4266,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev) cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry); attr.qp_state = IB_QPS_ERR; i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL); + if (iwdev->reset) + i40iw_cm_disconn(cm_node->iwqp); i40iw_rem_ref_cm_node(cm_node); } } diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index f82483b3d1e7..a49ff2eb6fb3 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( u64 base = 0; u32 i, j; u32 k = 0; - u32 low; /* copy base values in obj_info */ - for (i = I40IW_HMC_IW_QP, j = 0; - i <= I40IW_HMC_IW_PBLE; i++, j += 8) { + for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) { + if ((i == I40IW_HMC_IW_SRQ) || + (i == I40IW_HMC_IW_FSIMC) || + (i == I40IW_HMC_IW_FSIAV)) { + info[i].base = 0; + info[i].cnt = 0; + continue; + } get_64bit_val(buf, j, &temp); info[i].base = RS_64_1(temp, 32) * 512; if (info[i].base > base) { base = info[i].base; k = i; } - low = (u32)(temp); - if (low) - info[i].cnt = low; + if (i == I40IW_HMC_IW_APBVT_ENTRY) { + info[i].cnt = 1; + continue; + } + if (i == I40IW_HMC_IW_QP) + info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); + else if (i == I40IW_HMC_IW_CQ) + info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); + else + info[i].cnt = (u32)(temp); } size = info[k].cnt * info[k].size + info[k].base; if (size & 0x1FFFFF) @@ -155,6 +167,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( } /** + * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size + * @buf: ptr to fpm query buffer + * @buf_idx: index into buf + * @info: ptr to i40iw_hmc_obj_info struct + * @rsrc_idx: resource index into info + * + * Decode a 64 bit value from fpm query buffer into max count and size + */ +static u64 i40iw_sc_decode_fpm_query(u64 *buf, + u32 buf_idx, + struct i40iw_hmc_obj_info *obj_info, + u32 rsrc_idx) +{ + u64 temp; + u32 size; + + get_64bit_val(buf, buf_idx, &temp); + obj_info[rsrc_idx].max_cnt = (u32)temp; + size = (u32)RS_64_1(temp, 32); + obj_info[rsrc_idx].size = LS_64_1(1, size); + + return temp; +} + +/** * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer * @buf: ptr to fpm query buffer * @info: ptr to i40iw_hmc_obj_info struct @@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( struct i40iw_hmc_info *hmc_info, struct i40iw_hmc_fpm_misc *hmc_fpm_misc) { - u64 temp; struct i40iw_hmc_obj_info *obj_info; - u32 i, j, size; + u64 temp; + u32 size; u16 max_pe_sds; obj_info = hmc_info->hmc_obj; @@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( hmc_fpm_misc->max_sds = max_pe_sds; hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; - for (i = I40IW_HMC_IW_QP, j = 8; - i <= I40IW_HMC_IW_ARP; i++, j += 8) { - get_64bit_val(buf, j, &temp); - if (i == I40IW_HMC_IW_QP) - obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); - else if (i == 
I40IW_HMC_IW_CQ) - obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); - else - obj_info[i].max_cnt = (u32)temp; + get_64bit_val(buf, 8, &temp); + obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); + size = (u32)RS_64_1(temp, 32); + obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size); - size = (u32)RS_64_1(temp, 32); - obj_info[i].size = ((u64)1 << size); - } - for (i = I40IW_HMC_IW_MR, j = 48; - i <= I40IW_HMC_IW_PBLE; i++, j += 8) { - get_64bit_val(buf, j, &temp); - obj_info[i].max_cnt = (u32)temp; - size = (u32)RS_64_1(temp, 32); - obj_info[i].size = LS_64_1(1, size); - } + get_64bit_val(buf, 16, &temp); + obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); + size = (u32)RS_64_1(temp, 32); + obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size); + + i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE); + i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP); + + obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; + obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; + + i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR); + i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF); - get_64bit_val(buf, 120, &temp); - hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); - get_64bit_val(buf, 120, &temp); - hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); - get_64bit_val(buf, 120, &temp); - hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); get_64bit_val(buf, 64, &temp); + obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_XFFL].size = 4; hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); if (!hmc_fpm_misc->xf_block_size) return I40IW_ERR_INVALID_SIZE; + + i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1); + get_64bit_val(buf, 80, &temp); + obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_Q1FL].size = 4; hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); if (!hmc_fpm_misc->q1_block_size) return I40IW_ERR_INVALID_SIZE; + + i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER); + + get_64bit_val(buf, 112, &temp); + obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_PBLE].size = 8; + + get_64bit_val(buf, 120, &temp); + hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); + hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); + hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); + return 0; } @@ -285,28 +333,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa struct i40iw_sc_dev *dev = vsi->dev; struct i40iw_sc_qp *qp = NULL; bool qs_handle_change = false; - bool mss_change = false; unsigned long flags; u16 qs_handle; int i; - if (vsi->mss != l2params->mss) { - mss_change = true; - vsi->mss = l2params->mss; - } + vsi->mss = l2params->mss; i40iw_fill_qos_list(l2params->qs_handle_list); for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { qs_handle = l2params->qs_handle_list[i]; if (vsi->qos[i].qs_handle != qs_handle) qs_handle_change = true; - else if (!mss_change) - continue; /* no MSS nor qs handle change */ spin_lock_irqsave(&vsi->qos[i].lock, flags); qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); while (qp) { - if (mss_change) - i40iw_qp_mss_modify(dev, qp); if (qs_handle_change) { qp->qs_handle = qs_handle; /* issue cqp suspend command */ @@ -1978,6 +2018,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq, 
ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); } + cqp->process_cqp_sds = i40iw_update_sds_noccq; + return ret_code; } @@ -2395,7 +2437,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( set_64bit_val(wqe, 8, - LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); @@ -2410,7 +2451,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | - LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | @@ -3400,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_ hmc_info->sd_table.sd_entry = virt_mem.va; } - /* fill size of objects which are fixed */ - hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4; - hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4; - hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8; - hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; - hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; - return ret_code; } @@ -4848,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi) { u8 fcn_id = vsi->fcn_id; - if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID)) + if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT) vsi->dev->fcn_id_array[fcn_id] = false; i40iw_hw_stats_stop_timer(vsi); } diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h index a39ac12b6a7e..2ebaadbed379 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h @@ -1507,8 +1507,8 @@ enum { I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), I40IW_SHADOWAREA_MASK = (128 - 1), - I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0, - I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0 + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1), + I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1) }; enum i40iw_alignment { diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 2728af3103ce..ae8463ff59a7 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp) if (free_hwcqp) dev->cqp_ops->cqp_destroy(dev->cqp); + i40iw_cleanup_pending_cqp_op(iwdev); + i40iw_free_dma_mem(dev->hw, &cqp->sq); kfree(cqp->scratch_array); iwdev->cqp.scratch_array = NULL; @@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev, /** * i40iw_destroy_aeq - destroy aeq * @iwdev: iwarp device - * @reset: true if called before reset * * Issue a destroy aeq request and * free the resources associated with the aeq * The function is called during driver unload */ -static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset) +static void i40iw_destroy_aeq(struct i40iw_device *iwdev) { enum i40iw_status_code status = I40IW_ERR_NOT_READY; struct i40iw_sc_dev *dev = &iwdev->sc_dev; @@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset) if (!iwdev->msix_shared) i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev); - if (reset) + if (iwdev->reset) goto exit; if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1)) @@ -304,19 +305,17 @@ exit: * i40iw_destroy_ceq - destroy ceq * @iwdev: 
iwarp device * @iwceq: ceq to be destroyed - * @reset: true if called before reset * * Issue a destroy ceq request and * free the resources associated with the ceq */ static void i40iw_destroy_ceq(struct i40iw_device *iwdev, - struct i40iw_ceq *iwceq, - bool reset) + struct i40iw_ceq *iwceq) { enum i40iw_status_code status; struct i40iw_sc_dev *dev = &iwdev->sc_dev; - if (reset) + if (iwdev->reset) goto exit; status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1); @@ -335,12 +334,11 @@ exit: /** * i40iw_dele_ceqs - destroy all ceq's * @iwdev: iwarp device - * @reset: true if called before reset * * Go through all of the device ceq's and for each ceq * disable the ceq interrupt and destroy the ceq */ -static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset) +static void i40iw_dele_ceqs(struct i40iw_device *iwdev) { u32 i = 0; struct i40iw_sc_dev *dev = &iwdev->sc_dev; @@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset) if (iwdev->msix_shared) { i40iw_disable_irq(dev, msix_vec, (void *)iwdev); - i40iw_destroy_ceq(iwdev, iwceq, reset); + i40iw_destroy_ceq(iwdev, iwceq); iwceq++; i++; } for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) { i40iw_disable_irq(dev, msix_vec, (void *)iwceq); - i40iw_destroy_ceq(iwdev, iwceq, reset); + i40iw_destroy_ceq(iwdev, iwceq); } } /** * i40iw_destroy_ccq - destroy control cq * @iwdev: iwarp device - * @reset: true if called before reset * * Issue destroy ccq request and * free the resources associated with the ccq */ -static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset) +static void i40iw_destroy_ccq(struct i40iw_device *iwdev) { struct i40iw_sc_dev *dev = &iwdev->sc_dev; struct i40iw_ccq *ccq = &iwdev->ccq; enum i40iw_status_code status = 0; - if (!reset) + if (!iwdev->reset) status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true); if (status) i40iw_pr_err("ccq destroy failed %d\n", status); @@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev, iwceq->msix_idx = msix_vec->idx; status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec); if (status) { - i40iw_destroy_ceq(iwdev, iwceq, false); + i40iw_destroy_ceq(iwdev, iwceq); break; } i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx); @@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev) status = i40iw_configure_aeq_vector(iwdev); if (status) { - i40iw_destroy_aeq(iwdev, false); + i40iw_destroy_aeq(iwdev); return status; } @@ -1319,13 +1316,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); if (status) - goto exit; + goto error; info.fpm_query_buf_pa = mem.pa; info.fpm_query_buf = mem.va; status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); if (status) - goto exit; + goto error; info.fpm_commit_buf_pa = mem.pa; info.fpm_commit_buf = mem.va; info.hmc_fn_id = ldev->fid; @@ -1347,11 +1344,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, info.exception_lan_queue = 1; info.vchnl_send = i40iw_virtchnl_send; status = i40iw_device_init(&iwdev->sc_dev, &info); -exit: - if (status) { - kfree(iwdev->hmc_info_mem); - iwdev->hmc_info_mem = NULL; - } + + if (status) + goto error; memset(&vsi_info, 0, sizeof(vsi_info)); vsi_info.dev = &iwdev->sc_dev; vsi_info.back_vsi = (void *)iwdev; @@ -1362,11 +1357,19 @@ exit: 
memset(&stats_info, 0, sizeof(stats_info)); stats_info.fcn_id = ldev->fid; stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); + if (!stats_info.pestat) { + status = I40IW_ERR_NO_MEMORY; + goto error; + } stats_info.stats_initialize = true; if (stats_info.pestat) i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); } return status; +error: + kfree(iwdev->hmc_info_mem); + iwdev->hmc_info_mem = NULL; + return status; } /** @@ -1436,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, /** * i40iw_deinit_device - clean up the device resources * @iwdev: iwarp device - * @reset: true if called before reset * * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses, * destroy the device queues and free the pble and the hmc objects */ -static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset) +static void i40iw_deinit_device(struct i40iw_device *iwdev) { struct i40e_info *ldev = iwdev->ldev; @@ -1458,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset) i40iw_destroy_rdma_device(iwdev->iwibdev); /* fallthrough */ case IP_ADDR_REGISTERED: - if (!reset) + if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); /* fallthrough */ case INET_NOTIFIER: @@ -1468,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset) unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); } /* fallthrough */ + case PBLE_CHUNK_MEM: + i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); + /* fallthrough */ case CEQ_CREATED: - i40iw_dele_ceqs(iwdev, reset); + i40iw_dele_ceqs(iwdev); /* fallthrough */ case AEQ_CREATED: - i40iw_destroy_aeq(iwdev, reset); + i40iw_destroy_aeq(iwdev); /* fallthrough */ case IEQ_CREATED: - i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset); + i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); /* fallthrough */ case ILQ_CREATED: - i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset); + i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); /* fallthrough */ case CCQ_CREATED: - i40iw_destroy_ccq(iwdev, reset); - /* fallthrough */ - case PBLE_CHUNK_MEM: - i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); + i40iw_destroy_ccq(iwdev); /* fallthrough */ case HMC_OBJS_CREATED: - i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset); + i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); /* fallthrough */ case CQP_CREATED: i40iw_destroy_cqp(iwdev, true); @@ -1664,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc); if (status) break; + iwdev->init_state = PBLE_CHUNK_MEM; iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); i40iw_register_notifiers(); iwdev->init_state = INET_NOTIFIER; @@ -1687,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) } while (0); i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state); - i40iw_deinit_device(iwdev, false); + i40iw_deinit_device(iwdev); return -ERESTART; } @@ -1768,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool iwdev = &hdl->device; iwdev->closing = true; + if (reset) + iwdev->reset = true; + i40iw_cm_disconnect_all(iwdev); destroy_workqueue(iwdev->virtchnl_wq); - i40iw_deinit_device(iwdev, reset); + i40iw_deinit_device(iwdev); } /** @@ -1933,7 +1939,7 @@ static int 
i40iw_virtchnl_receive(struct i40e_info *ldev, bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev) { struct i40iw_device *iwdev; - wait_queue_t wait; + wait_queue_entry_t wait; iwdev = dev->back_dev; diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h index aa66c1c63dfa..f27be3e7830b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h +++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h @@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); void *i40iw_remove_head(struct list_head *list); void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); -void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index db41ab40da9c..7f5583d83622 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, set_64bit_val(wqe, 0, info->paddr); set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); set_64bit_val(wqe, 16, header[0]); + + /* Ensure all data is written before writing valid bit */ + wmb(); set_64bit_val(wqe, 24, header[1]); i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); @@ -682,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc) cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, - I40IW_CQ0_ALIGNMENT_MASK); + I40IW_CQ0_ALIGNMENT); if (ret) return ret; @@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, if (!list_empty(rxlist)) { tmpbuf = (struct i40iw_puda_buf *)rxlist->next; - plist = &tmpbuf->list; while ((struct list_head *)tmpbuf != rxlist) { if ((int)(buf->seqnum - tmpbuf->seqnum) < 0) break; + plist = &tmpbuf->list; tmpbuf = (struct i40iw_puda_buf *)plist->next; } /* Insert buf before tmpbuf */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h index 91c421762f06..f7013f11d808 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_status.h +++ b/drivers/infiniband/hw/i40iw/i40iw_status.h @@ -62,7 +62,7 @@ enum i40iw_status_code { I40IW_ERR_INVALID_ALIGNMENT = -23, I40IW_ERR_FLUSHED_QUEUE = -24, I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, - I40IW_ERR_INVALID_IMM_DATA_SIZE = -26, + I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26, I40IW_ERR_TIMEOUT = -27, I40IW_ERR_OPCODE_MISMATCH = -28, I40IW_ERR_CQP_COMPL_ERROR = -29, diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index 7b76259752b0..959ec81fba99 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -541,7 +541,6 @@ struct i40iw_create_qp_info { struct i40iw_modify_qp_info { u64 rx_win0; u64 rx_win1; - u16 new_mss; u8 next_iwarp_state; u8 termlen; bool ord_valid; @@ -554,7 +553,6 @@ struct i40iw_modify_qp_info { bool dont_send_term; bool dont_send_fin; bool cached_var_valid; - bool mss_change; bool force_loopback; }; diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c index 
b0d3a0e8a9b5..1060725d18bc 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_uk.c +++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c @@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp, op_info = &info->op.inline_rdma_write; if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) - return I40IW_ERR_INVALID_IMM_DATA_SIZE; + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); if (ret_code) @@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp, op_info = &info->op.inline_send; if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) - return I40IW_ERR_INVALID_IMM_DATA_SIZE; + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); if (ret_code) @@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq, get_64bit_val(cqe, 0, &qword0); get_64bit_val(cqe, 16, &qword2); - info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM); + info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM); info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); @@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, u8 *wqe_size) { if (data_size > I40IW_MAX_INLINE_DATA_SIZE) - return I40IW_ERR_INVALID_IMM_DATA_SIZE; + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; if (data_size <= 16) *wqe_size = I40IW_QP_WQE_MIN_SIZE; diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 409a3781e735..e311ec559f4e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait */ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request) { + struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp); unsigned long flags; if (cqp_request->dynamic) { @@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); spin_unlock_irqrestore(&cqp->req_lock, flags); } + wake_up(&iwdev->close_wq); } /** @@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp, } /** + * i40iw_free_pending_cqp_request -free pending cqp request objs + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp, + struct i40iw_cqp_request *cqp_request) +{ + struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp); + + if (cqp_request->waiting) { + cqp_request->compl_info.error = true; + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + } + i40iw_put_cqp_request(cqp, cqp_request); + wait_event_timeout(iwdev->close_wq, + !atomic_read(&cqp_request->refcount), + 1000); +} + +/** + * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions + * @iwdev: iwarp device + */ +void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_cqp *cqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request = NULL; + struct cqp_commands_info *pcmdinfo = NULL; + u32 i, pending_work, wqe_idx; + + pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring); + wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring); + for (i = 0; i < pending_work; i++) { + cqp_request = (struct i40iw_cqp_request *)(unsigned 
long)cqp->scratch_array[wqe_idx]; + if (cqp_request) + i40iw_free_pending_cqp_request(cqp, cqp_request); + wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring); + } + + while (!list_empty(&dev->cqp_cmd_head)) { + pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head); + cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info); + if (cqp_request) + i40iw_free_pending_cqp_request(cqp, cqp_request); + } +} + +/** * i40iw_free_qp - callback after destroy cqp completes * @cqp_request: cqp request for destroy qp * @num: not used @@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp) cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; cqp_info->in.u.qp_destroy.remove_hash_idx = true; status = i40iw_handle_cqp_op(iwdev, cqp_request); - if (status) - i40iw_pr_err("CQP-OP Destroy QP fail"); + if (!status) + return; + + i40iw_rem_pdusecount(iwqp->iwpd, iwdev); + i40iw_free_qp_resources(iwdev, iwqp, qp_num); + i40iw_rem_devusecount(iwdev); } /** @@ -757,23 +813,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b } /** - * i40iw_qp_mss_modify - modify mss for qp - * @dev: hardware control device structure - * @qp: hardware control qp - */ -void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) -{ - struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; - struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; - struct i40iw_modify_qp_info info; - - memset(&info, 0, sizeof(info)); - info.mss_change = true; - info.new_mss = qp->vsi->mss; - i40iw_hw_modify_qp(iwdev, iwqp, &info, false); -} - -/** * i40iw_term_modify_qp - modify qp for term message * @qp: hardware control qp * @next_state: qp's next state diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 4dbe61ec7a77..02d871db7ca5 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, u32 qp_num) { + struct i40iw_pbl *iwpbl = &iwqp->iwpbl; + i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp); if (qp_num) i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num); + if (iwpbl->pbl_allocated) + i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc); i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem); i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem); kfree(iwqp->kqp.wrid_mem); @@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, struct i40iw_qp_init_info *init_info) { - struct i40iw_pbl *iwpbl = iwqp->iwpbl; + struct i40iw_pbl *iwpbl = &iwqp->iwpbl; struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr; iwqp->page = qpmr->sq_page; @@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, ucontext = to_ucontext(ibpd->uobject->context); if (req.user_wqe_buffers) { + struct i40iw_pbl *iwpbl; + spin_lock_irqsave( &ucontext->qp_reg_mem_list_lock, flags); - iwqp->iwpbl = i40iw_get_pbl( + iwpbl = i40iw_get_pbl( (unsigned long)req.user_wqe_buffers, &ucontext->qp_reg_mem_list); spin_unlock_irqrestore( &ucontext->qp_reg_mem_list_lock, flags); - if (!iwqp->iwpbl) { + if (!iwpbl) { err_code = -ENODATA; i40iw_pr_err("no pbl info\n"); goto error; } + memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl)); } } err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info); @@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, memset(&req, 0, sizeof(req)); iwcq->user_mode = 
true; ucontext = to_ucontext(context); - if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) + if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) { + err_code = -EFAULT; goto cq_free_resources; + } spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer, @@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr) ucontext = to_ucontext(ibpd->uobject->context); i40iw_del_memlist(iwmr, ucontext); } - if (iwpbl->pbl_allocated) + if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP) i40iw_free_pble(iwdev->pble_rsrc, palloc); kfree(iwmr); return 0; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h index 07c3fec77de6..9067443cd311 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h @@ -170,7 +170,7 @@ struct i40iw_qp { struct i40iw_qp_kmode kqp; struct i40iw_dma_mem host_ctx; struct timer_list terminate_timer; - struct i40iw_pbl *iwpbl; + struct i40iw_pbl iwpbl; struct i40iw_dma_mem q2_ctx_mem; struct i40iw_dma_mem ietf_mem; struct completion sq_drained; diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c index f4d13683a403..48fd327f876b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c @@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, if (!dev->vchnl_up) return I40IW_ERR_NOT_READY; if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { - if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); - else - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); + vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); return I40IW_SUCCESS; } for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index 1e6c526450d9..fedaf8260105 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id mad->mad_hdr.attr_id == CM_REP_ATTR_ID || mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { sl_cm_id = get_local_comm_id(mad); + id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id); + if (id) + goto cont; id = id_map_alloc(ibdev, slave_id, sl_cm_id); if (IS_ERR(id)) { mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n", @@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id return -EINVAL; } +cont: set_local_comm_id(mad, id->pv_cm_id); if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 4f5a143fc0a7..ff931c580557 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf * int err; err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, - PAGE_SIZE * 2, &buf->buf, GFP_KERNEL); + PAGE_SIZE * 2, &buf->buf); if (err) goto out; @@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf * if (err) goto err_buf; - err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL); + err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf); if (err) goto err_mtt; @@ -219,7 +219,7 
@@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, uar = &to_mucontext(context)->uar; } else { - err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL); + err = mlx4_db_alloc(dev->dev, &cq->db, 1); if (err) goto err_cq; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index b4694717f6f3..21d31cb1325f 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc if (port < 0) return; ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); + ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); mlx4_ib_query_ah(&ah.ibah, &ah_attr); if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 521d0def2d9e..d1b43cbbfea7 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -61,8 +61,7 @@ #include <rdma/mlx4-abi.h> #define DRV_NAME MLX4_IB_DRV_NAME -#define DRV_VERSION "2.2-1" -#define DRV_RELDATE "Feb 2014" +#define DRV_VERSION "4.0-0" #define MLX4_IB_FLOW_MAX_PRIO 0xFFF #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF @@ -79,7 +78,7 @@ MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_ass static const char mlx4_ib_version[] = DRV_NAME ": Mellanox ConnectX InfiniBand driver v" - DRV_VERSION " (" DRV_RELDATE ")\n"; + DRV_VERSION "\n"; static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); @@ -1156,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) * call to mlx4_ib_vma_close. */ put_task_struct(owning_process); - msleep(1); + usleep_range(1000, 2000); owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID); if (!owning_process || diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index 3405e947dc1e..b73f89700ef9 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy if (!count) break; - msleep(1); + usleep_range(1000, 2000); } while (time_after(end, jiffies)); flush_workqueue(ctx->mcg_wq); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index c2b9cbf4da05..9db82e67e959 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags { MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO, MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP, - MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO, /* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */ MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI, diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 996e9058e515..75c0e6c5dd56 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, - struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp, - gfp_t gfp) + struct ib_udata *udata, int sqpn, + struct mlx4_ib_qp **caller_qp) { int qpn; int err; @@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (qp_type == MLX4_IB_QPT_SMI || 
qp_type == MLX4_IB_QPT_GSI || (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { - sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp); + sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); if (!sqp) return -ENOMEM; qp = &sqp->qp; qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; } else { - qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); + qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL); if (!qp) return -ENOMEM; qp->pri.vid = 0xFFFF; @@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err; if (qp_has_rq(init_attr)) { - err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); + err = mlx4_db_alloc(dev->dev, &qp->db, 0); if (err) goto err; @@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, } if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size, - &qp->buf, gfp)) { + &qp->buf)) { memcpy(&init_attr->cap, &backup_cap, sizeof(backup_cap)); err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, @@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err_db; if (mlx4_buf_alloc(dev->dev, qp->buf_size, - PAGE_SIZE * 2, &qp->buf, gfp)) { + PAGE_SIZE * 2, &qp->buf)) { err = -ENOMEM; goto err_db; } @@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (err) goto err_buf; - err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); + err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); if (err) goto err_mtt; qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64), - gfp | __GFP_NOWARN); + GFP_KERNEL | __GFP_NOWARN); if (!qp->sq.wrid) qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64), - gfp, PAGE_KERNEL); + GFP_KERNEL, PAGE_KERNEL); qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64), - gfp | __GFP_NOWARN); + GFP_KERNEL | __GFP_NOWARN); if (!qp->rq.wrid) qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64), - gfp, PAGE_KERNEL); + GFP_KERNEL, PAGE_KERNEL); if (!qp->sq.wrid || !qp->rq.wrid) { err = -ENOMEM; goto err_wrid; @@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; - err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); + err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); if (err) goto err_qpn; @@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, int err; int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; u16 xrcdn = 0; - gfp_t gfp; - gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ? - GFP_NOIO : GFP_KERNEL; /* * We only support LSO, vendor flag1, and multicast loopback blocking, * and only for kernel UD QPs. 
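The mlx4 hunks above and below remove MLX4_IB_QP_CREATE_USE_GFP_NOIO and stop threading a gfp_t argument through create_qp_common(); every allocation becomes plain GFP_KERNEL. A minimal sketch of the scoped-NOIO idiom that keeps this safe for callers that still must avoid I/O recursion, assuming memalloc_noio_save()/memalloc_noio_restore() from <linux/sched/mm.h>; create_qp_noio() is a hypothetical wrapper used only for illustration:

	#include <linux/sched/mm.h>
	#include <rdma/ib_verbs.h>

	/*
	 * Mark the whole task as NOIO so each allocation inside
	 * ib_create_qp() -- now plain GFP_KERNEL in the driver --
	 * implicitly behaves as GFP_NOIO.
	 */
	static struct ib_qp *create_qp_noio(struct ib_pd *pd,
					    struct ib_qp_init_attr *attr)
	{
		unsigned int noio_flag;
		struct ib_qp *qp;

		noio_flag = memalloc_noio_save();
		qp = ib_create_qp(pd, attr);
		memalloc_noio_restore(noio_flag);

		return qp;
	}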
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP | MLX4_IB_QP_NETIF | - MLX4_IB_QP_CREATE_ROCE_V2_GSI | - MLX4_IB_QP_CREATE_USE_GFP_NOIO)) + MLX4_IB_QP_CREATE_ROCE_V2_GSI)) return ERR_PTR(-EINVAL); if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { @@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, return ERR_PTR(-EINVAL); if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | - MLX4_IB_QP_CREATE_USE_GFP_NOIO | MLX4_IB_QP_CREATE_ROCE_V2_GSI | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) && init_attr->qp_type != IB_QPT_UD) || @@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: - qp = kzalloc(sizeof *qp, gfp); + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->pri.vid = 0xFFFF; @@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, case IB_QPT_UD: { err = create_qp_common(to_mdev(pd->device), pd, init_attr, - udata, 0, &qp, gfp); + udata, 0, &qp); if (err) { kfree(qp); return ERR_PTR(err); @@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, } err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, - sqpn, - &qp, gfp); + sqpn, &qp); if (err) return ERR_PTR(err); diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index e32dd58937a8..0facaf5f6d23 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, if (err) goto err_mtt; } else { - err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL); + err = mlx4_db_alloc(dev->dev, &srq->db, 0); if (err) goto err_srq; *srq->db.db = 0; - if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf, - GFP_KERNEL)) { + if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, + &srq->buf)) { err = -ENOMEM; goto err_db; } @@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, if (err) goto err_buf; - err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL); + err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf); if (err) goto err_mtt; diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 94c049b62c2f..a384d72ea3cd 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -788,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont; - *cqb = mlx5_vzalloc(*inlen); + *cqb = kvzalloc(*inlen, GFP_KERNEL); if (!*cqb) { err = -ENOMEM; goto err_db; @@ -884,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages; - *cqb = mlx5_vzalloc(*inlen); + *cqb = kvzalloc(*inlen, GFP_KERNEL); if (!*cqb) { err = -ENOMEM; goto err_buf; @@ -1314,7 +1314,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) inlen = MLX5_ST_SZ_BYTES(modify_cq_in) + MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto ex_resize; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index f1b56de64871..95db929bdc34 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -218,7 +218,7 @@ 
static int process_pma_cmd(struct ib_device *ibdev, u8 port_num, (struct ib_pma_portcounters_ext *)(out_mad->data + 40); int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out); - out_cnt = mlx5_vzalloc(sz); + out_cnt = kvzalloc(sz, GFP_KERNEL); if (!out_cnt) return IB_MAD_RESULT_FAILURE; @@ -231,7 +231,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num, (struct ib_pma_portcounters *)(out_mad->data + 40); int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - out_cnt = mlx5_vzalloc(sz); + out_cnt = kvzalloc(sz, GFP_KERNEL); if (!out_cnt) return IB_MAD_RESULT_FAILURE; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d45772da0963..f7fcde1ff0aa 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -60,8 +60,7 @@ #include "cmd.h" #define DRIVER_NAME "mlx5_ib" -#define DRIVER_VERSION "2.2-1" -#define DRIVER_RELDATE "Feb 2014" +#define DRIVER_VERSION "5.0-0" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); @@ -70,7 +69,7 @@ MODULE_VERSION(DRIVER_VERSION); static char mlx5_version[] = DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" - DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; + DRIVER_VERSION "\n"; enum { MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3, @@ -224,8 +223,8 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, return 0; } -static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, - struct ib_port_attr *props) +static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, + struct ib_port_attr *props) { struct mlx5_ib_dev *dev = to_mdev(device); struct mlx5_core_dev *mdev = dev->mdev; @@ -233,12 +232,14 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, enum ib_mtu ndev_ib_mtu; u16 qkey_viol_cntr; u32 eth_prot_oper; + int err; /* Possible bad flows are checked before filling out props so in case * of an error it will still be zeroed out. 
*/ - if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num)) - return; + err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num); + if (err) + return err; translate_eth_proto_oper(eth_prot_oper, &props->active_speed, &props->active_width); @@ -259,7 +260,7 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, ndev = mlx5_ib_get_netdev(device, port_num); if (!ndev) - return; + return 0; if (mlx5_lag_is_active(dev->mdev)) { rcu_read_lock(); @@ -282,75 +283,49 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, dev_put(ndev); props->active_mtu = min(props->max_mtu, ndev_ib_mtu); + return 0; } -static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid, - const struct ib_gid_attr *attr, - void *mlx5_addr) +static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr) { -#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v) - char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, - source_l3_address); - void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, - source_mac_47_32); + enum ib_gid_type gid_type = IB_GID_TYPE_IB; + u8 roce_version = 0; + u8 roce_l3_type = 0; + bool vlan = false; + u8 mac[ETH_ALEN]; + u16 vlan_id = 0; - if (!gid) - return; - - ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr); + if (gid) { + gid_type = attr->gid_type; + ether_addr_copy(mac, attr->ndev->dev_addr); - if (is_vlan_dev(attr->ndev)) { - MLX5_SET_RA(mlx5_addr, vlan_valid, 1); - MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev)); + if (is_vlan_dev(attr->ndev)) { + vlan = true; + vlan_id = vlan_dev_vlan_id(attr->ndev); + } } - switch (attr->gid_type) { + switch (gid_type) { case IB_GID_TYPE_IB: - MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1); + roce_version = MLX5_ROCE_VERSION_1; break; case IB_GID_TYPE_ROCE_UDP_ENCAP: - MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2); + roce_version = MLX5_ROCE_VERSION_2; + if (ipv6_addr_v4mapped((void *)gid)) + roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4; + else + roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6; break; default: - WARN_ON(true); - } - - if (attr->gid_type != IB_GID_TYPE_IB) { - if (ipv6_addr_v4mapped((void *)gid)) - MLX5_SET_RA(mlx5_addr, roce_l3_type, - MLX5_ROCE_L3_TYPE_IPV4); - else - MLX5_SET_RA(mlx5_addr, roce_l3_type, - MLX5_ROCE_L3_TYPE_IPV6); + mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type); } - if ((attr->gid_type == IB_GID_TYPE_IB) || - !ipv6_addr_v4mapped((void *)gid)) - memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid)); - else - memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4); -} - -static int set_roce_addr(struct ib_device *device, u8 port_num, - unsigned int index, - const union ib_gid *gid, - const struct ib_gid_attr *attr) -{ - struct mlx5_ib_dev *dev = to_mdev(device); - u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0}; - void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address); - enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num); - - if (ll != IB_LINK_LAYER_ETHERNET) - return -EINVAL; - - ib_gid_to_mlx5_roce_addr(gid, attr, in_addr); - - MLX5_SET(set_roce_address_in, in, roce_address_index, index); - MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS); - return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_core_roce_gid_set(dev->mdev, index, roce_version, + roce_l3_type, gid->raw, mac, vlan, + vlan_id); } static
int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, @@ -358,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, const struct ib_gid_attr *attr, __always_unused void **context) { - return set_roce_addr(device, port_num, index, gid, attr); + return set_roce_addr(to_mdev(device), port_num, index, gid, attr); } static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num, unsigned int index, __always_unused void **context) { - return set_roce_addr(device, port_num, index, NULL, NULL); + return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL); } __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, @@ -440,7 +415,7 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev, u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); u8 atomic_req_8B_endianness_mode = - MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode); + MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode); /* Check if HW supports 8 bytes standard atomic operations and capable * of host endianness respond @@ -979,20 +954,31 @@ out: int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { + unsigned int count; + int ret; + switch (mlx5_get_vport_access_method(ibdev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: - return mlx5_query_mad_ifc_port(ibdev, port, props); + ret = mlx5_query_mad_ifc_port(ibdev, port, props); + break; case MLX5_VPORT_ACCESS_METHOD_HCA: - return mlx5_query_hca_port(ibdev, port, props); + ret = mlx5_query_hca_port(ibdev, port, props); + break; case MLX5_VPORT_ACCESS_METHOD_NIC: - mlx5_query_port_roce(ibdev, port, props); - return 0; + ret = mlx5_query_port_roce(ibdev, port, props); + break; default: - return -EINVAL; + ret = -EINVAL; } + + if (!ret && props) { + count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev); + props->gid_tbl_len -= count; + } + return ret; } static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, @@ -1099,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND); + /* CM layer calls ib_modify_port() regardless of the link layer. For + * Ethernet ports, qkey violation and Port capabilities are meaningless. 
+ */ + if (!is_ib) + return 0; + if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; value = ~props->clr_port_cap_mask | props->set_port_cap_mask; @@ -2263,7 +2255,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, if (!is_valid_attr(dev->mdev, flow_attr)) return ERR_PTR(-EINVAL); - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); handler = kzalloc(sizeof(*handler), GFP_KERNEL); if (!handler || !spec) { err = -ENOMEM; @@ -2979,6 +2971,18 @@ error_0: return ret; } +static u8 mlx5_get_umr_fence(u8 umr_fence_cap) +{ + switch (umr_fence_cap) { + case MLX5_CAP_UMR_FENCE_NONE: + return MLX5_FENCE_MODE_NONE; + case MLX5_CAP_UMR_FENCE_SMALL: + return MLX5_FENCE_MODE_INITIATOR_SMALL; + default: + return MLX5_FENCE_MODE_STRONG_ORDERING; + } +} + static int create_dev_resources(struct mlx5_ib_resources *devr) { struct ib_srq_init_attr attr; @@ -3456,7 +3460,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev, __be32 val; int ret, i; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -3485,7 +3489,7 @@ static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev, int ret, i; int offset = port->cnts.num_q_counters; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -3530,6 +3534,11 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, return num_counters; } +static void mlx5_ib_free_rdma_netdev(struct net_device *netdev) +{ + return mlx5_rdma_netdev_free(netdev); +} + static struct net_device* mlx5_ib_alloc_rdma_netdev(struct ib_device *hca, u8 port_num, @@ -3538,16 +3547,19 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca, unsigned char name_assign_type, void (*setup)(struct net_device *)) { + struct net_device *netdev; + struct rdma_netdev *rn; + if (type != RDMA_NETDEV_IPOIB) return ERR_PTR(-EOPNOTSUPP); - return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca, - name, setup); -} - -static void mlx5_ib_free_rdma_netdev(struct net_device *netdev) -{ - return mlx5_rdma_netdev_free(netdev); + netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca, + name, setup); + if (likely(!IS_ERR_OR_NULL(netdev))) { + rn = netdev_priv(netdev); + rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev; + } + return netdev; } static void *mlx5_ib_add(struct mlx5_core_dev *mdev) @@ -3680,8 +3692,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; - dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; + if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) + dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; + if (mlx5_core_is_pf(mdev)) { dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; @@ -3693,6 +3706,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) mlx5_ib_internal_fill_odp_caps(dev); + dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); + if (MLX5_CAP_GEN(mdev, imaicl)) { dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 38c877bc45e5..bdcf25410c99 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ 
-349,7 +349,7 @@ struct mlx5_ib_qp { struct mlx5_ib_wq rq; u8 sq_signal_bits; - u8 fm_cache; + u8 next_fence; struct mlx5_ib_wq sq; /* serialize qp state modifications @@ -654,6 +654,7 @@ struct mlx5_ib_dev { struct mlx5_ib_port *port; struct mlx5_sq_bfreg bfreg; struct mlx5_sq_bfreg fp_bfreg; + u8 umr_fence; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 366433f71b58..2c40a2e989d2 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) } } +static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->cache.root); + dev->cache.root = NULL; +} + static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) { struct mlx5_mr_cache *cache = &dev->cache; @@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) sprintf(ent->name, "%d", ent->order); ent->dir = debugfs_create_dir(ent->name, cache->root); if (!ent->dir) - return -ENOMEM; + goto err; ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, &size_fops); if (!ent->fsize) - return -ENOMEM; + goto err; ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, &limit_fops); if (!ent->flimit) - return -ENOMEM; + goto err; ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, &ent->cur); if (!ent->fcur) - return -ENOMEM; + goto err; ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, &ent->miss); if (!ent->fmiss) - return -ENOMEM; + goto err; } return 0; -} - -static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) -{ - if (!mlx5_debugfs_root) - return; +err: + mlx5_mr_cache_debugfs_cleanup(dev); - debugfs_remove_recursive(dev->cache.root); + return -ENOMEM; } static void delay_time_func(unsigned long ctx) @@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) if (err) mlx5_ib_warn(dev, "cache debugfs failure\n"); + /* + * We don't want to fail driver if debugfs failed to initialize, + * so we are not forwarding error to the user. 
+ */ + return 0; } @@ -825,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, access_flags, 0); err = PTR_ERR_OR_ZERO(*umem); if (err < 0) { - mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); + mlx5_ib_err(dev, "umem get failed (%d)\n", err); return err; } @@ -1110,7 +1120,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*pas) * ((npages + 1) / 2) * 2; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_1; @@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, mr->ndescs = sg_nents; for_each_sg(sgl, sg, sg_nents, i) { - if (unlikely(i > mr->max_descs)) + if (unlikely(i >= mr->max_descs)) break; klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index ae0746754008..3d701c7a4c91 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler( if (qp->ibqp.qp_type != IB_QPT_RC) { av = *wqe; - if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) + if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) *wqe += sizeof(struct mlx5_av); else *wqe += sizeof(struct mlx5_base_av); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 93959e1e43a3..f58f8f5f3ebe 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -823,7 +823,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; - *in = mlx5_vzalloc(*inlen); + *in = kvzalloc(*inlen, GFP_KERNEL); if (!*in) { err = -ENOMEM; goto err_umem; @@ -931,7 +931,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; - *in = mlx5_vzalloc(*inlen); + *in = kvzalloc(*inlen, GFP_KERNEL); if (!*in) { err = -ENOMEM; goto err_buf; @@ -1060,7 +1060,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, return err; inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_umem; @@ -1140,7 +1140,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, u32 rq_pas_size = get_rq_pas_size(qpc); inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1193,7 +1193,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, goto err_destroy_tis; sq->base.container_mibqp = qp; + sq->base.mqp.event = mlx5_ib_qp_event; } if (qp->rq.wqe_cnt) { @@ -1372,7 +1373,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, } inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1633,7 +1634,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (err) return err; } else { - in = 
mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2164,7 +2165,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_tis_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2189,7 +2190,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_tis_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2434,7 +2435,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2479,7 +2480,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_sq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -3738,24 +3739,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) } } -static u8 get_fence(u8 fence, struct ib_send_wr *wr) -{ - if (unlikely(wr->opcode == IB_WR_LOCAL_INV && - wr->send_flags & IB_SEND_FENCE)) - return MLX5_FENCE_MODE_STRONG_ORDERING; - - if (unlikely(fence)) { - if (wr->send_flags & IB_SEND_FENCE) - return MLX5_FENCE_MODE_SMALL_AND_FENCE; - else - return fence; - } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { - return MLX5_FENCE_MODE_FENCE; - } - - return 0; -} - static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, struct ib_send_wr *wr, unsigned *idx, @@ -3784,8 +3767,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, static void finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, u8 size, unsigned idx, u64 wr_id, - int nreq, u8 fence, u8 next_fence, - u32 mlx5_opcode) + int nreq, u8 fence, u32 mlx5_opcode) { u8 opmod = 0; @@ -3793,7 +3775,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp, mlx5_opcode | ((u32)opmod << 24)); ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); ctrl->fm_ce_se |= fence; - qp->fm_cache = next_fence; if (unlikely(qp->wq_sig)) ctrl->signature = wq_sig(ctrl); @@ -3853,7 +3834,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - fence = qp->fm_cache; num_sge = wr->num_sge; if (unlikely(num_sge > qp->sq.max_gs)) { mlx5_ib_warn(dev, "\n"); @@ -3870,6 +3850,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } + if (wr->opcode == IB_WR_LOCAL_INV || + wr->opcode == IB_WR_REG_MR) { + fence = dev->umr_fence; + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; + } else if (wr->send_flags & IB_SEND_FENCE) { + if (qp->next_fence) + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; + else + fence = MLX5_FENCE_MODE_FENCE; + } else { + fence = qp->next_fence; + } + switch (ibqp->qp_type) { case IB_QPT_XRC_INI: xrc = seg; @@ -3896,7 +3889,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; case IB_WR_LOCAL_INV: - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); set_linv_wr(qp, &seg, &size); @@ -3904,7 +3896,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_REG_MR: - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; qp->sq.wr_data[idx] = IB_WR_REG_MR; ctrl->imm = cpu_to_be32(reg_wr(wr)->key); err = set_reg_wr(qp, reg_wr(wr), &seg, &size); @@ -3927,9 +3918,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, 
struct ib_send_wr *wr, goto out; } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, - nreq, get_fence(fence, wr), - next_fence, MLX5_OPCODE_UMR); + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, + fence, MLX5_OPCODE_UMR); /* * SET_PSV WQEs are not signaled and solicited * on error @@ -3954,9 +3944,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, - nreq, get_fence(fence, wr), - next_fence, MLX5_OPCODE_SET_PSV); + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, + fence, MLX5_OPCODE_SET_PSV); err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); if (err) { @@ -3966,7 +3955,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, mr->sig->psv_wire.psv_idx, &seg, &size); @@ -3976,9 +3964,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, - nreq, get_fence(fence, wr), - next_fence, MLX5_OPCODE_SET_PSV); + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, + fence, MLX5_OPCODE_SET_PSV); + qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; num_sge = 0; goto skip_psv; @@ -4089,8 +4077,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, - get_fence(fence, wr), next_fence, + qp->next_fence = next_fence; + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, mlx5_ib_opcode[wr->opcode]); skip_psv: if (0) @@ -4294,7 +4282,7 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(query_sq_out); - out = mlx5_vzalloc(inlen); + out = kvzalloc(inlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -4321,7 +4309,7 @@ static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(query_rq_out); - out = mlx5_vzalloc(inlen); + out = kvzalloc(inlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -4625,7 +4613,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, dev = to_mdev(pd->device); inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -4855,7 +4843,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, return ERR_PTR(-ENOMEM); inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err; @@ -4934,7 +4922,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, return -EOPNOTSUPP; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 7cb145f9a6db..43707b101f47 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -127,7 +127,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, goto err_umem; } - in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont); + in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL); if (!in->pas) { err = -ENOMEM; goto err_umem; @@ -189,7 +189,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, } mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift); - in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages); + in->pas = 
kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL); if (!in->pas) { err = -ENOMEM; goto err_buf; diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 5b9601014f0c..a30aa6527f7e 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -815,7 +815,7 @@ static struct pci_driver nes_pci_driver = { .remove = nes_remove, }; -static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) +static ssize_t adapter_show(struct device_driver *ddp, char *buf) { unsigned int devfn = 0xffffffff; unsigned char bus_number = 0xff; @@ -834,7 +834,7 @@ static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn); } -static ssize_t nes_store_adapter(struct device_driver *ddp, +static ssize_t adapter_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -843,7 +843,7 @@ static ssize_t nes_store_adapter(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf) +static ssize_t eeprom_cmd_show(struct device_driver *ddp, char *buf) { u32 eeprom_cmd = 0xdead; u32 i = 0; @@ -859,7 +859,7 @@ static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd); } -static ssize_t nes_store_ee_cmd(struct device_driver *ddp, +static ssize_t eeprom_cmd_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -880,7 +880,7 @@ static ssize_t nes_store_ee_cmd(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf) +static ssize_t eeprom_data_show(struct device_driver *ddp, char *buf) { u32 eeprom_data = 0xdead; u32 i = 0; @@ -897,7 +897,7 @@ static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data); } -static ssize_t nes_store_ee_data(struct device_driver *ddp, +static ssize_t eeprom_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -918,7 +918,7 @@ static ssize_t nes_store_ee_data(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf) +static ssize_t flash_cmd_show(struct device_driver *ddp, char *buf) { u32 flash_cmd = 0xdead; u32 i = 0; @@ -935,7 +935,7 @@ static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd); } -static ssize_t nes_store_flash_cmd(struct device_driver *ddp, +static ssize_t flash_cmd_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -956,7 +956,7 @@ static ssize_t nes_store_flash_cmd(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf) +static ssize_t flash_data_show(struct device_driver *ddp, char *buf) { u32 flash_data = 0xdead; u32 i = 0; @@ -973,7 +973,7 @@ static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data); } -static ssize_t nes_store_flash_data(struct device_driver *ddp, +static ssize_t flash_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -994,12 +994,12 @@ static ssize_t nes_store_flash_data(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_nonidx_addr(struct 
device_driver *ddp, char *buf) +static ssize_t nonidx_addr_show(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr); } -static ssize_t nes_store_nonidx_addr(struct device_driver *ddp, +static ssize_t nonidx_addr_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1010,7 +1010,7 @@ static ssize_t nes_store_nonidx_addr(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf) +static ssize_t nonidx_data_show(struct device_driver *ddp, char *buf) { u32 nonidx_data = 0xdead; u32 i = 0; @@ -1027,7 +1027,7 @@ static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data); } -static ssize_t nes_store_nonidx_data(struct device_driver *ddp, +static ssize_t nonidx_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1048,12 +1048,12 @@ static ssize_t nes_store_nonidx_data(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf) +static ssize_t idx_addr_show(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr); } -static ssize_t nes_store_idx_addr(struct device_driver *ddp, +static ssize_t idx_addr_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1064,7 +1064,7 @@ static ssize_t nes_store_idx_addr(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf) +static ssize_t idx_data_show(struct device_driver *ddp, char *buf) { u32 idx_data = 0xdead; u32 i = 0; @@ -1081,7 +1081,7 @@ static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data); } -static ssize_t nes_store_idx_data(struct device_driver *ddp, +static ssize_t idx_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1102,11 +1102,7 @@ static ssize_t nes_store_idx_data(struct device_driver *ddp, return strnlen(buf, count); } - -/** - * nes_show_wqm_quanta - */ -static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) +static ssize_t wqm_quanta_show(struct device_driver *ddp, char *buf) { u32 wqm_quanta_value = 0xdead; u32 i = 0; @@ -1123,12 +1119,8 @@ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value); } - -/** - * nes_store_wqm_quanta - */ -static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, - const char *buf, size_t count) +static ssize_t wqm_quanta_store(struct device_driver *ddp, const char *buf, + size_t count) { unsigned long wqm_quanta_value; u32 wqm_config1; @@ -1153,26 +1145,16 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, return strnlen(buf, count); } -static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR, - nes_show_adapter, nes_store_adapter); -static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR, - nes_show_ee_cmd, nes_store_ee_cmd); -static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR, - nes_show_ee_data, nes_store_ee_data); -static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR, - nes_show_flash_cmd, nes_store_flash_cmd); -static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR, - nes_show_flash_data, nes_store_flash_data); -static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR, - nes_show_nonidx_addr, nes_store_nonidx_addr); 
-static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR, - nes_show_nonidx_data, nes_store_nonidx_data); -static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR, - nes_show_idx_addr, nes_store_idx_addr); -static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR, - nes_show_idx_data, nes_store_idx_data); -static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR, - nes_show_wqm_quanta, nes_store_wqm_quanta); +static DRIVER_ATTR_RW(adapter); +static DRIVER_ATTR_RW(eeprom_cmd); +static DRIVER_ATTR_RW(eeprom_data); +static DRIVER_ATTR_RW(flash_cmd); +static DRIVER_ATTR_RW(flash_data); +static DRIVER_ATTR_RW(nonidx_addr); +static DRIVER_ATTR_RW(nonidx_data); +static DRIVER_ATTR_RW(idx_addr); +static DRIVER_ATTR_RW(idx_data); +static DRIVER_ATTR_RW(wqm_quanta); static int nes_create_driver_sysfs(struct pci_driver *drv) { diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index fb983df7c157..de4025deaa4a 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node, ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; } ctrl_ird |= IETF_PEER_TO_PEER; - ctrl_ird |= IETF_FLPDU_ZERO_LEN; switch (mpa_key) { case MPA_KEY_REQUEST: @@ -743,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, if (type == NES_TIMER_TYPE_SEND) { new_send->seq_num = ntohl(tcp_hdr(skb)->seq); - atomic_inc(&new_send->skb->users); + refcount_inc(&new_send->skb->users); spin_lock_irqsave(&cm_node->retrans_list_lock, flags); cm_node->send_entry = new_send; add_ref_cm_node(cm_node); @@ -925,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass) flags); break; } - atomic_inc(&send_entry->skb->users); + refcount_inc(&send_entry->skb->users); cm_packets_retrans++; nes_debug(NES_DBG_CM, "Retransmitting send_entry %p " "for node %p, jiffies = %lu, time to send = " @@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) type = NES_CM_EVENT_CONNECTED; cm_node->state = NES_CM_STATE_TSA; } - + send_ack(cm_node, NULL); break; default: WARN_ON(1); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8f9d8b4ad583..b0adf65e4bdb 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) int_cnt++; - msleep(1); + usleep_range(1000, 2000); } if (int_cnt > 1) { spin_lock_irqsave(&nesadapter->phy_lock, flags); @@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { break; } } - msleep(1); + usleep_range(1000, 2000); } } } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 2f30bda8457a..27d5e8d9f08d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -744,7 +744,8 @@ err: if (is_uctx_pd) { ocrdma_release_ucontext_pd(uctx); } else { - status = _ocrdma_dealloc_pd(dev, pd); + if (_ocrdma_dealloc_pd(dev, pd)) + pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__); } exit: return ERR_PTR(status); @@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, goto err; if (udata == NULL) { + status = -ENOMEM; srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, GFP_KERNEL); if (srq->rqe_wr_id_tbl == NULL) diff --git 
a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 6a72095d6c7a..b5851fd67d4f 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -37,7 +37,7 @@ #include <linux/iommu.h> #include <linux/pci.h> #include <net/addrconf.h> -#include <linux/qed/qede_roce.h> + #include <linux/qed/qed_chain.h> #include <linux/qed/qed_if.h> #include "qedr.h" @@ -276,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) QED_CHAIN_CNT_TYPE_U16, n_entries, sizeof(struct regpair *), - &cnq->pbl); + &cnq->pbl, NULL); if (rc) goto err4; @@ -886,9 +886,9 @@ static void qedr_mac_address_change(struct qedr_dev *dev) memcpy(&sgid->raw[8], guid, sizeof(guid)); /* Update LL2 */ - rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev, - dev->gsi_ll2_mac_address, - dev->ndev->dev_addr); + rc = dev->ops->ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, + dev->ndev->dev_addr); ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); @@ -902,7 +902,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) * initialization done before RoCE driver notifies * event to stack. */ -static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) +static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event) { switch (event) { case QEDE_UP: @@ -931,12 +931,12 @@ static struct qedr_driver qedr_drv = { static int __init qedr_init_module(void) { - return qede_roce_register_driver(&qedr_drv); + return qede_rdma_register_driver(&qedr_drv); } static void __exit qedr_exit_module(void) { - qede_roce_unregister_driver(&qedr_drv); + qede_rdma_unregister_driver(&qedr_drv); } module_init(qedr_init_module); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa08c76a4245..ab7784bfdac6 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -36,8 +36,8 @@ #include <rdma/ib_addr.h> #include <linux/qed/qed_if.h> #include <linux/qed/qed_chain.h> -#include <linux/qed/qed_roce_if.h> -#include <linux/qed/qede_roce.h> +#include <linux/qed/qed_rdma_if.h> +#include <linux/qed/qede_rdma.h> #include <linux/qed/roce_common.h> #include "qedr_hsi_rdma.h" @@ -58,7 +58,10 @@ #define QEDR_MSG_QP " QP" #define QEDR_MSG_GSI " GSI" -#define QEDR_CQ_MAGIC_NUMBER (0x11223344) +#define QEDR_CQ_MAGIC_NUMBER (0x11223344) + +#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) +#define FW_PAGE_SHIFT (12) struct qedr_dev; @@ -150,6 +153,8 @@ struct qedr_dev { u32 dp_module; u8 dp_level; u8 num_hwfns; + u8 gsi_ll2_handle; + uint wq_multiplier; u8 gsi_ll2_mac_address[ETH_ALEN]; int gsi_qp_created; diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 3d7705cec770..4689e802b332 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -44,7 +44,7 @@ #include <rdma/ib_cache.h> #include <linux/qed/qed_if.h> -#include <linux/qed/qed_roce_if.h> +#include <linux/qed/qed_rdma_if.h> #include "qedr.h" #include "verbs.h" #include <rdma/qedr-abi.h> @@ -64,9 +64,14 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, dev->gsi_qp = qp; } -void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) +void qedr_ll2_complete_tx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet) { - struct qedr_dev *dev = (struct qedr_dev *)_qdev; + struct qedr_dev *dev = (struct qedr_dev *)cxt; + struct qed_roce_ll2_packet *pkt = cookie; struct qedr_cq *cq = 
dev->gsi_sqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; @@ -88,20 +93,26 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } -void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, - struct qed_roce_ll2_rx_params *params) +void qedr_ll2_complete_rx_packet(void *cxt, + struct qed_ll2_comp_rx_data *data) { - struct qedr_dev *dev = (struct qedr_dev *)_dev; + struct qedr_dev *dev = (struct qedr_dev *)cxt; struct qedr_cq *cq = dev->gsi_rqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; spin_lock_irqsave(&qp->q_lock, flags); - qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc; - qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id; - qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len; - ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac); + qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? + -EINVAL : 0; + qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; + /* note: length stands for data length i.e. GRH is excluded */ + qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = + data->length.data_length; + *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) = + ntohl(data->opaque_data_0); + *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) = + ntohs((u16)data->opaque_data_1); qedr_inc_sw_gsi_cons(&qp->rq); @@ -111,6 +122,14 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } +void qedr_ll2_release_rx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, bool b_last_packet) +{ + /* Do nothing... */ +} + static void qedr_destroy_gsi_cq(struct qedr_dev *dev, struct ib_qp_init_attr *attrs) { @@ -159,27 +178,159 @@ static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev, return 0; } +static int qedr_ll2_post_tx(struct qedr_dev *dev, + struct qed_roce_ll2_packet *pkt) +{ + enum qed_ll2_roce_flavor_type roce_flavor; + struct qed_ll2_tx_pkt_info ll2_tx_pkt; + int rc; + int i; + + memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt)); + + roce_flavor = (pkt->roce_mode == ROCE_V1) ? 
+ QED_LL2_ROCE : QED_LL2_RROCE; + + if (pkt->roce_mode == ROCE_V2_IPV4) + ll2_tx_pkt.enable_ip_cksum = 1; + + ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg; + ll2_tx_pkt.vlan = 0; + ll2_tx_pkt.tx_dest = pkt->tx_dest; + ll2_tx_pkt.qed_roce_flavor = roce_flavor; + ll2_tx_pkt.first_frag = pkt->header.baddr; + ll2_tx_pkt.first_frag_len = pkt->header.len; + ll2_tx_pkt.cookie = pkt; + + /* tx header */ + rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx, + dev->gsi_ll2_handle, + &ll2_tx_pkt, 1); + if (rc) { + /* TX failed while posting header - release resources */ + dma_free_coherent(&dev->pdev->dev, pkt->header.len, + pkt->header.vaddr, pkt->header.baddr); + kfree(pkt); + + DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc); + return rc; + } + + /* tx payload */ + for (i = 0; i < pkt->n_seg; i++) { + rc = dev->ops->ll2_set_fragment_of_tx_packet( + dev->rdma_ctx, + dev->gsi_ll2_handle, + pkt->payload[i].baddr, + pkt->payload[i].len); + + if (rc) { + /* if failed not much to do here, partial packet has + * been posted we can't free memory, will need to wait + * for completion + */ + DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc); + return rc; + } + } + + return 0; +} + +int qedr_ll2_stop(struct qedr_dev *dev) +{ + int rc; + + if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE) + return 0; + + /* remove LL2 MAC address filter */ + rc = dev->ops->ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, NULL); + + rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx, + dev->gsi_ll2_handle); + if (rc) + DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc); + + dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); + + dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE; + + return rc; +} + +int qedr_ll2_start(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs, struct qedr_qp *qp) +{ + struct qed_ll2_acquire_data data; + struct qed_ll2_cbs cbs; + int rc; + + /* configure and start LL2 */ + cbs.rx_comp_cb = qedr_ll2_complete_rx_packet; + cbs.tx_comp_cb = qedr_ll2_complete_tx_packet; + cbs.rx_release_cb = qedr_ll2_release_rx_packet; + cbs.tx_release_cb = qedr_ll2_complete_tx_packet; + cbs.cookie = dev; + + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_ROCE; + data.input.mtu = dev->ndev->mtu; + data.input.rx_num_desc = attrs->cap.max_recv_wr; + data.input.rx_drop_ttl0_flg = true; + data.input.rx_vlan_removal_en = false; + data.input.tx_num_desc = attrs->cap.max_send_wr; + data.input.tx_tc = 0; + data.input.tx_dest = QED_LL2_TX_DEST_NW; + data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET; + data.input.ai_err_no_buf = QED_LL2_DROP_PACKET; + data.input.gsi_enable = 1; + data.p_connection_handle = &dev->gsi_ll2_handle; + data.cbs = &cbs; + + rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data); + if (rc) { + DP_ERR(dev, + "ll2 start: failed to acquire LL2 connection (rc=%d)\n", + rc); + return rc; + } + + rc = dev->ops->ll2_establish_connection(dev->rdma_ctx, + dev->gsi_ll2_handle); + if (rc) { + DP_ERR(dev, + "ll2 start: failed to establish LL2 connection (rc=%d)\n", + rc); + goto err1; + } + + rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr); + if (rc) + goto err2; + + return 0; + +err2: + dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle); +err1: + dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); + + return rc; +} + struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct qedr_qp *qp) { - struct qed_roce_ll2_params ll2_params; int rc; 
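/*
 * A minimal sketch of the error-unwind idiom that qedr_ll2_start() above
 * follows: acquire, establish, configure, releasing in reverse order on
 * failure.  The ops names here are hypothetical placeholders, not the
 * real qed API:
 *
 *	rc = ops->acquire(ctx, &data);
 *	if (rc)
 *		return rc;
 *	rc = ops->establish(ctx, handle);
 *	if (rc)
 *		goto err_release;
 *	rc = ops->set_filter(ctx, NULL, mac);
 *	if (rc)
 *		goto err_terminate;
 *	return 0;
 * err_terminate:
 *	ops->terminate(ctx, handle);
 * err_release:
 *	ops->release(ctx, handle);
 *	return rc;
 */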
rc = qedr_check_gsi_qp_attrs(dev, attrs); if (rc) return ERR_PTR(rc); - /* configure and start LL2 */ - memset(&ll2_params, 0, sizeof(ll2_params)); - ll2_params.max_tx_buffers = attrs->cap.max_send_wr; - ll2_params.max_rx_buffers = attrs->cap.max_recv_wr; - ll2_params.cbs.tx_cb = qedr_ll2_tx_cb; - ll2_params.cbs.rx_cb = qedr_ll2_rx_cb; - ll2_params.cb_cookie = (void *)dev; - ll2_params.mtu = dev->ndev->mtu; - ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr); - rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params); + rc = qedr_ll2_start(dev, attrs, qp); if (rc) { DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc); return ERR_PTR(rc); @@ -214,7 +365,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, err: kfree(qp->rqe_wr_id); - rc = dev->ops->roce_ll2_stop(dev->cdev); + rc = qedr_ll2_stop(dev); if (rc) DP_ERR(dev, "create gsi qp: failed destroy on create\n"); @@ -223,15 +374,7 @@ err: int qedr_destroy_gsi_qp(struct qedr_dev *dev) { - int rc; - - rc = dev->ops->roce_ll2_stop(dev->cdev); - if (rc) - DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc); - else - DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n"); - - return rc; + return qedr_ll2_stop(dev); } #define QEDR_MAX_UD_HEADER_SIZE (100) @@ -270,11 +413,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev, return rc; } - vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); - if (vlan_id < VLAN_CFI_MASK) - has_vlan = true; - if (sgid_attr.ndev) + if (sgid_attr.ndev) { + vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); + if (vlan_id < VLAN_CFI_MASK) + has_vlan = true; + dev_put(sgid_attr.ndev); + } if (!memcmp(&sgid, &zgid, sizeof(sgid))) { DP_ERR(dev, "gsi post send: GID not found GID index %d\n", @@ -419,7 +564,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, { struct qed_roce_ll2_packet *pkt = NULL; struct qedr_qp *qp = get_qedr_qp(ibqp); - struct qed_roce_ll2_tx_params params; struct qedr_dev *dev = qp->dev; unsigned long flags; int rc; @@ -447,8 +591,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto err; } - memset(&params, 0, sizeof(params)); - spin_lock_irqsave(&qp->q_lock, flags); rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); @@ -457,7 +599,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto err; } - rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params); + rc = qedr_ll2_post_tx(dev, pkt); + if (!rc) { qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; qedr_inc_sw_prod(&qp->sq); @@ -465,17 +608,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n", wr->opcode, in_irq(), irqs_disabled(), wr->wr_id); } else { - if (rc == QED_ROCE_TX_HEAD_FAILURE) { - /* TX failed while posting header - release resources */ - dma_free_coherent(&dev->pdev->dev, pkt->header.len, - pkt->header.vaddr, pkt->header.baddr); - kfree(pkt); - } else if (rc == QED_ROCE_TX_FRAG_FAILURE) { - /* NTD since TX failed while posting a fragment.
We will
-		 * release the resources on TX callback
-		 */
-	}
-
 	DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
 	rc = -EAGAIN;
 	*bad_wr = wr;
@@ -502,10 +634,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 {
 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
 	struct qedr_qp *qp = get_qedr_qp(ibqp);
-	struct qed_roce_ll2_buffer buf;
 	unsigned long flags;
-	int status = 0;
-	int rc;
+	int rc = 0;
 
 	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
 	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
@@ -516,8 +646,6 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		return -EINVAL;
 	}
 
-	memset(&buf, 0, sizeof(buf));
-
 	spin_lock_irqsave(&qp->q_lock, flags);
 
 	while (wr) {
@@ -528,10 +656,12 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			goto err;
 		}
 
-		buf.baddr = wr->sg_list[0].addr;
-		buf.len = wr->sg_list[0].length;
-
-		rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+		rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
+						  dev->gsi_ll2_handle,
+						  wr->sg_list[0].addr,
+						  wr->sg_list[0].length,
+						  0 /* cookie */,
+						  1 /* notify_fw */);
 		if (rc) {
 			DP_ERR(dev,
 			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
@@ -551,7 +681,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
-	return status;
+	return rc;
 err:
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 	*bad_wr = wr;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 17685cfea6a2..2ae71b8f1ba8 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -53,6 +53,14 @@
 
 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+					size_t len)
+{
+	size_t min_len = min_t(size_t, len, udata->outlen);
+
+	return ib_copy_to_udata(udata, src, min_len);
+}
+
 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {
 	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
@@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
 	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
 	uresp.max_cqes = QEDR_MAX_CQES;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		goto err;
 
@@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 
 		uresp.pd_id = pd_id;
 
-		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 		if (rc) {
 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
 			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
@@ -653,14 +661,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
 
 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 			       struct qedr_pbl *pbl,
-			       struct qedr_pbl_info *pbl_info)
+			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
 {
 	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	u32 fw_pg_cnt, fw_pg_per_umem_pg;
 	struct qedr_pbl *pbl_tbl;
 	struct scatterlist *sg;
 	struct regpair *pbe;
+	u64 pg_addr;
 	int entry;
-	u32 addr;
 
 	if (!pbl_info->num_pbes)
 		return;
@@ -683,29 +692,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
 	shift = umem->page_shift;
 
+	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
+
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
+		pg_addr = sg_dma_address(sg);
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-			/* store the page address in pbe */
-			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-					      (pg_cnt << shift));
-			addr = upper_32_bits(sg_dma_address(sg) +
-					     (pg_cnt << shift));
-			pbe->hi = cpu_to_le32(addr);
-			pbe_cnt++;
-			total_num_pbes++;
-			pbe++;
-
-			if (total_num_pbes == pbl_info->num_pbes)
-				return;
-
-			/* If the given pbl is full storing the pbes,
-			 * move to next pbl.
-			 */
-			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
-				pbl_tbl++;
-				pbe = (struct regpair *)pbl_tbl->va;
-				pbe_cnt = 0;
+			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+				pbe->lo = cpu_to_le32(pg_addr);
+				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+
+				pg_addr += BIT(pg_shift);
+				pbe_cnt++;
+				total_num_pbes++;
+				pbe++;
+
+				if (total_num_pbes == pbl_info->num_pbes)
+					return;
+
+				/* If the given pbl is full storing the pbes,
+				 * move to next pbl.
+				 */
+				if (pbe_cnt ==
+				    (pbl_info->pbl_size / sizeof(u64))) {
+					pbl_tbl++;
+					pbe = (struct regpair *)pbl_tbl->va;
+					pbe_cnt = 0;
+				}
+
+				fw_pg_cnt++;
 			}
 		}
 	}
@@ -722,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
 	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
 	uresp.icid = cq->icid;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
 
@@ -754,7 +769,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 				       u64 buf_addr, size_t buf_len,
 				       int access, int dmasync)
 {
-	int page_cnt;
+	u32 fw_pages;
 	int rc;
 
 	q->buf_addr = buf_addr;
@@ -766,8 +781,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		return PTR_ERR(q->umem);
 	}
 
-	page_cnt = ib_umem_page_count(q->umem);
-	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+	fw_pages = ib_umem_page_count(q->umem) <<
+	    (q->umem->page_shift - FW_PAGE_SHIFT);
+
+	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
 	if (rc)
 		goto err0;
 
@@ -777,7 +794,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		goto err0;
 	}
 
-	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+			   FW_PAGE_SHIFT);
 
 	return 0;
 
@@ -925,7 +943,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 					   QED_CHAIN_CNT_TYPE_U32,
 					   chain_entries,
 					   sizeof(union rdma_cqe),
-					   &cq->pbl);
+					   &cq->pbl, NULL);
 	if (rc)
 		goto err1;
 
@@ -1228,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
 	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
 	uresp.qp_id = qp->qp_id;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		DP_ERR(dev,
 		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
@@ -1413,7 +1431,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 					   QED_CHAIN_CNT_TYPE_U32,
 					   n_sq_elems,
 					   QEDR_SQE_ELEMENT_SIZE,
-					   &qp->sq.pbl);
+					   &qp->sq.pbl, NULL);
 
 	if (rc)
 		return rc;
@@ -1427,7 +1445,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 					   QED_CHAIN_CNT_TYPE_U32,
 					   n_rq_elems,
 					   QEDR_RQE_ELEMENT_SIZE,
-					   &qp->rq.pbl);
+					   &qp->rq.pbl, NULL);
 
 	if (rc)
 		return rc;
@@ -2226,7 +2244,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 		goto err1;
 
 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
-			   &mr->info.pbl_info);
+			   &mr->info.pbl_info, mr->umem->page_shift);
 
 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
 	if (rc) {
@@ -3209,6 +3227,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
 		case IB_WC_REG_MR:
 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
 			break;
+		case IB_WC_RDMA_READ:
+		case IB_WC_SEND:
+			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
 
 };
 
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
-			 gfp_t gfp)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
 {
-	unsigned long page = get_zeroed_page(gfp);
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
 
 	/*
 	 * Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
  */
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp)
+		  enum ib_qp_type type, u8 port)
 {
 	u32 i, offset, max_scan, qpn;
 	struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
-			get_map_page(qpt, map, gfp);
+			get_map_page(qpt, map);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
 	return ib_mtu_enum_to_int(pmtu);
 }
 
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct qib_qp_priv *priv;
 
-	priv = kzalloc(sizeof(*priv), gfp);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 	priv->owner = qp;
 
-	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
 	if (!priv->s_hdr) {
 		kfree(priv);
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index fc8b88514da5..4ddbcac5eabe 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1956,8 +1956,10 @@ send_last:
 		ret = qib_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		hdrsize += 4;
 		wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
  * Functions provided by qib driver for rdmavt to use
  */
 unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_notify_qp_reset(struct rvt_qp *qp);
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp);
+		  enum ib_qp_type type, u8 port);
 void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
 #ifdef CONFIG_DEBUG_FS
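The qedr_ib_copy_to_udata() helper introduced in the verbs.c hunk above is a common ABI-compatibility pattern: clamp the response copy to udata->outlen so that older userspace, which allocated a shorter response struct, does not start failing with -EFAULT when the kernel's response grows. A minimal self-contained sketch of the same idea (demo_copy_to_udata is an illustrative name, not part of the patch):

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Copy at most udata->outlen bytes of the kernel's response; a consumer
 * built against an older, shorter response struct simply never sees the
 * newer trailing fields.
 */
static int demo_copy_to_udata(struct ib_udata *udata, void *resp, size_t len)
{
	return ib_copy_to_udata(udata, resp, min_t(size_t, len, udata->outlen));
}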
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 69bda611d313..90aa326fd7c0 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
 	struct pvrdma_dev *dev = to_vdev(ibcq->device);
 	struct pvrdma_cq *cq = to_vcq(ibcq);
 	u32 val = cq->cq_handle;
+	unsigned long flags;
+	int has_data = 0;
 
 	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
 
+	spin_lock_irqsave(&cq->cq_lock, flags);
+
 	pvrdma_write_uar_cq(dev, val);
 
-	return 0;
+	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+		unsigned int head;
+
+		has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+						    cq->ibcq.cqe, &head);
+		if (unlikely(has_data == PVRDMA_INVALID_IDX))
+			dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+	}
+
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	return has_data;
 }
 
 /**
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 727e81cc2c8f..8876ee7bc326 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,10 +118,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
 EXPORT_SYMBOL(ib_rvt_state_ops);
 
 static void get_map_page(struct rvt_qpn_table *qpt,
-			 struct rvt_qpn_map *map,
-			 gfp_t gfp)
+			 struct rvt_qpn_map *map)
 {
-	unsigned long page = get_zeroed_page(gfp);
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
 
 	/*
 	 * Free the page if someone raced with us installing it.
@@ -173,7 +172,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
 		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
 	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
 		if (!map->page) {
-			get_map_page(qpt, map, GFP_KERNEL);
+			get_map_page(qpt, map);
 			if (!map->page) {
 				ret = -ENOMEM;
 				break;
@@ -342,14 +341,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
  * Return: The queue pair number
  */
 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
+		     enum ib_qp_type type, u8 port_num)
 {
 	u32 i, offset, max_scan, qpn;
 	struct rvt_qpn_map *map;
 	u32 ret;
 
 	if (rdi->driver_f.alloc_qpn)
-		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
+		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
 
 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
 		unsigned n;
@@ -374,7 +373,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
-			get_map_page(qpt, map, gfp);
+			get_map_page(qpt, map);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -672,7 +671,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	struct ib_qp *ret = ERR_PTR(-ENOMEM);
 	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
 	void *priv = NULL;
-	gfp_t gfp;
 	size_t sqsize;
 
 	if (!rdi)
@@ -680,18 +678,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 
 	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
 	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
-	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+	    init_attr->create_flags)
 		return ERR_PTR(-EINVAL);
 
-	/* GFP_NOIO is applicable to RC QP's only */
-
-	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
-	    init_attr->qp_type != IB_QPT_RC)
-		return ERR_PTR(-EINVAL);
-
-	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
-						GFP_NOIO : GFP_KERNEL;
-
 	/* Check receive queue parameters if no SRQ is specified. */
 	if (!init_attr->srq) {
 		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
@@ -719,14 +708,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		sz = sizeof(struct rvt_sge) *
 			init_attr->cap.max_send_sge +
 			sizeof(struct rvt_swqe);
-		if (gfp == GFP_NOIO)
-			swq = __vmalloc(
-				sqsize * sz,
-				gfp | __GFP_ZERO, PAGE_KERNEL);
-		else
-			swq = vzalloc_node(
-				sqsize * sz,
-				rdi->dparms.node);
+		swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
 		if (!swq)
 			return ERR_PTR(-ENOMEM);
 
@@ -741,7 +723,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		} else if (init_attr->cap.max_recv_sge > 1)
 			sg_list_sz = sizeof(*qp->r_sg_list) *
 				(init_attr->cap.max_recv_sge - 1);
-		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
+		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
+				  rdi->dparms.node);
 		if (!qp)
 			goto bail_swq;
 
@@ -751,7 +734,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 				kzalloc_node(
 					sizeof(*qp->s_ack_queue) *
 					 rvt_max_atomic(rdi),
-					gfp,
+					GFP_KERNEL,
 					rdi->dparms.node);
 			if (!qp->s_ack_queue)
 				goto bail_qp;
@@ -766,7 +749,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		 * Driver needs to set up it's private QP structure and do any
 		 * initialization that is needed.
 		 */
-		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
 		if (IS_ERR(priv)) {
 			ret = priv;
 			goto bail_qp;
@@ -786,11 +769,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 				qp->r_rq.wq = vmalloc_user(
 						sizeof(struct rvt_rwq) +
 						qp->r_rq.size * sz);
-			else if (gfp == GFP_NOIO)
-				qp->r_rq.wq = __vmalloc(
-						sizeof(struct rvt_rwq) +
-						qp->r_rq.size * sz,
-						gfp | __GFP_ZERO, PAGE_KERNEL);
 			else
 				qp->r_rq.wq = vzalloc_node(
 						sizeof(struct rvt_rwq) +
@@ -824,7 +802,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
 				init_attr->qp_type,
-				init_attr->port_num, gfp);
+				init_attr->port_num);
 		if (err < 0) {
 			ret = ERR_PTR(err);
 			goto bail_rq_wq;
@@ -1280,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	if (attr_mask & IB_QP_TIMEOUT) {
 		qp->timeout = attr->timeout;
-		qp->timeout_jiffies =
-			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-				1000UL);
+		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
 	}
 
 	if (attr_mask & IB_QP_QKEY)
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ecdba2fce083..1ac5b8551a4d 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -68,6 +68,7 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
 			    u32 crc, void *next, size_t len)
 {
+	u32 retval;
 	int err;
 
 	SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
 		return crc32_le(crc, next, len);
 	}
 
-	return *(u32 *)shash_desc_ctx(shash);
+	retval = *(u32 *)shash_desc_ctx(shash);
+	barrier_data(shash_desc_ctx(shash));
+	return retval;
 }
 
 int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
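The rvt_modify_qp() hunk above replaces open-coded math with rvt_timeout_to_jiffies(). The IB local ACK timeout is stored as an exponent, so the wait is 4.096 usec * 2^timeout. A sketch of the conversion the removed lines performed (the helper name and the defensive 5-bit mask are additions for illustration):

#include <linux/jiffies.h>
#include <linux/types.h>

/* IB local ACK timeout: 4.096 usec * 2^timeout, where timeout is a
 * 5-bit exponent taken from the QP attributes.
 */
static inline unsigned long demo_timeout_to_jiffies(u8 timeout)
{
	return usecs_to_jiffies((4096UL * (1UL << (timeout & 0x1f))) / 1000UL);
}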
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c3a140ed4df2..08f3f90d2912 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -441,6 +441,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
 	if (unlikely(qp->need_req_skb &&
 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
 		rxe_run_task(&qp->req.task, 1);
+
+	rxe_drop_ref(qp);
 }
 
 int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
@@ -473,6 +475,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
 		return -EAGAIN;
 	}
 
+	rxe_add_ref(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
 	kfree_skb(skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 23039768f541..a958ee918a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -995,7 +995,9 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	free_rd_atomic_resource(qp, res);
 	rxe_advance_resp_resource(qp);
 
-	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
+	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
+	       sizeof(skb->cb) - sizeof(ack_pkt));
 
 	res->type = RXE_ATOMIC_MASK;
 	res->atomic.skb = skb;
@@ -1217,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
 		kfree_skb(skb);
 	}
 
+	if (notify)
+		return;
+
 	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
 		advance_consumer(qp->rq.queue);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 83d709e74dfb..af90a7d42b96 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 
 		sge = ibwr->sg_list;
 		for (i = 0; i < num_sge; i++, sge++) {
-			if (qp->is_user && copy_from_user(p, (__user void *)
-					    (uintptr_t)sge->addr, sge->length))
-				return -EFAULT;
-
-			else if (!qp->is_user)
-				memcpy(p, (void *)(uintptr_t)sge->addr,
-				       sge->length);
+			memcpy(p, (void *)(uintptr_t)sge->addr,
+			       sge->length);
 
 			p += sge->length;
 		}
@@ -919,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
+	if (qp->resp.state == QP_STATE_ERROR)
+		rxe_run_task(&qp->resp.task, 1);
+
 err1:
 	return err;
 }
@@ -1245,6 +1243,8 @@ int rxe_register_device(struct rxe_dev *rxe)
 	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
 			    rxe->ndev->dev_addr);
 	dev->dev.dma_ops = &dma_virt_ops;
+	dma_coerce_mask_and_coherent(&dev->dev,
+				     dma_get_required_mask(dev->dev.parent));
 
 	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
 	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ff50a7bd66d8..7ac25059c40f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
 	unsigned long flags;
 
 	struct rw_semaphore vlan_rwsem;
+	struct mutex mcast_mutex;
 
 	struct rb_root  path_tree;
 	struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7cbcfdac6529..d69410c2ed97 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -39,6 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/moduleparam.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
 
 #include "ipoib.h"
 
@@ -510,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 	case IB_CM_REQ_RECEIVED:
 		return ipoib_cm_req_handler(cm_id, event);
 	case IB_CM_DREQ_RECEIVED:
-		p = cm_id->context;
 		ib_send_cm_drep(cm_id, NULL, 0);
 		/* Fall through */
 	case IB_CM_REJ_RECEIVED:
@@ -954,7 +954,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 			break;
 		}
 		spin_unlock_irq(&priv->lock);
-		msleep(1);
+		usleep_range(1000, 2000);
 		ipoib_drain_cq(dev);
 		spin_lock_irq(&priv->lock);
 	}
@@ -1047,9 +1047,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 		.sq_sig_type		= IB_SIGNAL_ALL_WR,
 		.qp_type		= IB_QPT_RC,
 		.qp_context		= tx,
-		.create_flags		= IB_QP_CREATE_USE_GFP_NOIO
+		.create_flags		= 0
 	};
-
 	struct ib_qp *tx_qp;
 
 	if (dev->features & NETIF_F_SG)
@@ -1057,10 +1056,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 			min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
 
 	tx_qp = ib_create_qp(priv->pd, &attr);
-	if (PTR_ERR(tx_qp) == -EINVAL) {
-		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
-		tx_qp = ib_create_qp(priv->pd, &attr);
-	}
 	tx->max_send_sge = attr.cap.max_send_sge;
 	return tx_qp;
 }
@@ -1131,10 +1126,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 			    struct sa_path_rec *pathrec)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+	unsigned int noio_flag;
 	int ret;
 
-	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
-			       GFP_NOIO, PAGE_KERNEL);
+	noio_flag = memalloc_noio_save();
+	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
 	if (!p->tx_ring) {
 		ret = -ENOMEM;
 		goto err_tx;
 	}
@@ -1142,9 +1138,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+	memalloc_noio_restore(noio_flag);
 	if (IS_ERR(p->qp)) {
 		ret = PTR_ERR(p->qp);
-		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
+		ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
 		goto err_qp;
 	}
 
@@ -1206,7 +1203,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 			goto timeout;
 		}
 
-		msleep(1);
+		usleep_range(1000, 2000);
 	}
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 874b24366e4d..184a22f48027 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
 	IPOIB_NETDEV_STAT(tx_bytes),
 	IPOIB_NETDEV_STAT(tx_errors),
 	IPOIB_NETDEV_STAT(rx_dropped),
-	IPOIB_NETDEV_STAT(tx_dropped)
+	IPOIB_NETDEV_STAT(tx_dropped),
+	IPOIB_NETDEV_STAT(multicast),
 };
 
 #define IPOIB_GLOBAL_STATS_LEN	ARRAY_SIZE(ipoib_gstrings_stats)
@@ -178,7 +179,7 @@ static inline int ib_speed_enum_to_int(int speed)
 static int ipoib_get_link_ksettings(struct net_device *netdev,
 				    struct ethtool_link_ksettings *cmd)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(netdev);
+	struct ipoib_dev_priv *priv = ipoib_priv(netdev);
 	struct ib_port_attr attr;
 	int ret, speed, width;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0060b2f9f659..2e075377242e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	++dev->stats.rx_packets;
 	dev->stats.rx_bytes += skb->len;
+	if (skb->pkt_type == PACKET_MULTICAST)
+		dev->stats.multicast++;
 
 	skb->dev = dev;
 	if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
 	return pending;
 }
 
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+					struct ib_qp *qp,
+					enum ib_qp_state new_state)
+{
+	struct ib_qp_attr qp_attr;
+	struct ib_qp_init_attr query_init_attr;
+	int ret;
+
+	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+	if (ret) {
+		ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+		return;
+	}
+	/* print according to the new-state and the previous state.*/
+	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+		ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+	else
+		ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+			   new_state, qp_attr.qp_state);
+}
+
 int ipoib_ib_dev_stop_default(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 	 */
 	qp_attr.qp_state = IB_QPS_ERR;
 	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
-		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
 
 	/* Wait for all sends and receives to complete */
 	begin = jiffies;
@@ -770,7 +793,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 
 		ipoib_drain_cq(dev);
 
-		msleep(1);
+		usleep_range(1000, 2000);
 	}
 
 	ipoib_dbg(priv, "All sends and receives done.\n");
@@ -863,7 +886,6 @@ dev_stop:
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-	napi_enable(&priv->napi);
 	ipoib_ib_dev_stop(dev);
 	return -1;
 }
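The ipoib_cm_tx_init() hunk above shows the standard replacement for IB_QP_CREATE_USE_GFP_NOIO: instead of threading a gfp_t through every allocation (including ones buried inside ib_create_qp() that never see the flag), the task is marked NOIO around the whole critical region. A minimal sketch of the pattern with hypothetical names:

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

/* Allocate a TX ring where starting block I/O could deadlock (e.g.
 * IPoIB connected mode backing a storage device). Every allocation
 * between save/restore implicitly behaves as GFP_NOIO, including those
 * made by callees that take no gfp_t argument.
 */
static void *demo_alloc_tx_ring(size_t entries, size_t entry_size)
{
	unsigned int noio_flag;
	void *ring;

	noio_flag = memalloc_noio_save();
	ring = vzalloc(entries * entry_size);
	memalloc_noio_restore(noio_flag);

	return ring;
}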
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2869d1adb1de..6c77df34869d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -233,6 +233,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
 static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
+	int ret = 0;
 
 	/* dev->mtu > 2K ==> connected mode */
 	if (ipoib_cm_admin_enabled(dev)) {
@@ -256,9 +257,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 		ipoib_dbg(priv, "MTU must be smaller than the underlying "
 				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
 
-	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
 
-	return 0;
+	if (priv->rn_ops->ndo_change_mtu) {
+		bool carrier_status = netif_carrier_ok(dev);
+
+		netif_carrier_off(dev);
+
+		/* notify lower level on the real mtu */
+		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+		if (carrier_status)
+			netif_carrier_on(dev);
+	} else {
+		dev->mtu = new_mtu;
+	}
+
+	return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+			    struct rtnl_link_stats64 *stats)
+{
+	struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+	if (priv->rn_ops->ndo_get_stats64)
+		priv->rn_ops->ndo_get_stats64(dev, stats);
+	else
+		netdev_stats_to_stats64(stats, &dev->stats);
 }
 
 /* Called with an RCU read lock taken */
@@ -681,7 +707,7 @@ static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
 {
 	struct ipoib_pseudo_header *phdr;
 
-	phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
+	phdr = skb_push(skb, sizeof(*phdr));
 	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
 }
 
@@ -1129,7 +1155,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
 {
 	struct ipoib_header *header;
 
-	header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+	header = skb_push(skb, sizeof *header);
 
 	header->proto = htons(type);
 	header->reserved = 0;
@@ -1534,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
 	int i, wait_flushed = 0;
 
 	init_completion(&priv->ntbl.flushed);
+	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -1578,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 
 	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
 	init_completion(&priv->ntbl.deleted);
-	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
 	/* Stop GC if called at init fail need to cancel work */
 	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1590,12 +1616,14 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	wait_for_completion(&priv->ntbl.deleted);
 }
 
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
 	ipoib_transport_dev_cleanup(dev);
 
+	netif_napi_del(&priv->napi);
+
 	ipoib_cm_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
@@ -1649,6 +1677,7 @@ out_rx_ring_cleanup:
 	kfree(priv->rx_ring);
 
 out:
+	netif_napi_del(&priv->napi);
 	return -ENOMEM;
 }
 
@@ -1805,6 +1834,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
 	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
 	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
 	.ndo_set_mac_address	 = ipoib_set_mac,
+	.ndo_get_stats64	 = ipoib_get_stats,
 };
 
 static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1817,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
 	.ndo_tx_timeout		 = ipoib_timeout,
 	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
 	.ndo_get_iflink		 = ipoib_get_iflink,
+	.ndo_get_stats64	 = ipoib_get_stats,
 };
 
 void ipoib_setup_common(struct net_device *dev)
@@ -1847,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
 	priv->dev = dev;
 	spin_lock_init(&priv->lock);
 	init_rwsem(&priv->vlan_rwsem);
+	mutex_init(&priv->mcast_mutex);
 
 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
@@ -1890,6 +1922,7 @@ static struct net_device
 	rn->send = ipoib_send;
 	rn->attach_mcast = ipoib_mcast_attach;
 	rn->detach_mcast = ipoib_mcast_detach;
+	rn->free_rdma_netdev = free_netdev;
 	rn->hca = hca;
 
 	dev->netdev_ops = &ipoib_netdev_default_pf;
@@ -2142,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
 	priv->dev->dev_id = port - 1;
 
 	result = ib_query_port(hca, port, &attr);
-	if (!result)
-		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
-	else {
+	if (result) {
 		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
 		       hca->name, port);
 		goto device_init_failed;
 	}
 
+	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
 	/* MTU will be reset when mcast join happens */
 	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
 	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
@@ -2180,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
 		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
 		       hca->name, port, result);
 		goto device_init_failed;
-	} else
-		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+	}
+
+	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+	       sizeof(union ib_gid));
 	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
 	result = ipoib_dev_init(priv->dev, hca, port);
-	if (result < 0) {
+	if (result) {
 		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
 		       hca->name, port, result);
 		goto device_init_failed;
@@ -2208,6 +2243,7 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto register_failed;
 	}
 
+	result = -ENOMEM;
 	if (ipoib_cm_add_mode_attr(priv->dev))
 		goto sysfs_failed;
 	if (ipoib_add_pkey_attr(priv->dev))
@@ -2237,6 +2273,7 @@ event_failed:
 
 device_init_failed:
 	free_netdev(priv->dev);
+	kfree(priv);
 
 alloc_mem_failed:
 	return ERR_PTR(result);
@@ -2277,13 +2314,15 @@ static void ipoib_add_one(struct ib_device *device)
 
 static void ipoib_remove_one(struct ib_device *device, void *client_data)
 {
-	struct ipoib_dev_priv *priv, *tmp;
+	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
	struct list_head *dev_list = client_data;
 
 	if (!dev_list)
 		return;
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
+		struct rdma_netdev *rn = netdev_priv(priv->dev);
+
 		ib_unregister_event_handler(&priv->event_handler);
 		flush_workqueue(ipoib_workqueue);
 
@@ -2300,7 +2339,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 		flush_workqueue(priv->wq);
 
 		unregister_netdev(priv->dev);
-		free_netdev(priv->dev);
+		rn->free_rdma_netdev(priv->dev);
+
+		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+			kfree(cpriv);
 
 		kfree(priv);
 	}
@@ -2326,6 +2369,7 @@ static int __init ipoib_init_module(void)
 	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
 #endif
 
 	/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 057f58e6afca..93e149efc1f5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
 int ipoib_mcast_stop_thread(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
-	unsigned long flags;
 
 	ipoib_dbg_mcast(priv, "stopping multicast thread\n");
 
-	spin_lock_irqsave(&priv->lock, flags);
-	cancel_delayed_work(&priv->mcast_task);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	flush_workqueue(priv->wq);
+	cancel_delayed_work_sync(&priv->mcast_task);
 
 	return 0;
 }
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
 {
 	struct ipoib_mcast *mcast, *tmcast;
 
+	/*
+	 * make sure the in-flight joins have finished before we attempt
+	 * to leave
+	 */
+	list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+			wait_for_completion(&mcast->done);
+
 	list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
 		ipoib_mcast_leave(mcast->dev, mcast);
 		ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	struct ipoib_mcast *mcast, *tmcast;
 	unsigned long flags;
 
+	mutex_lock(&priv->mcast_mutex);
 	ipoib_dbg_mcast(priv, "flushing multicast list\n");
 
 	spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
 	ipoib_mcast_remove_list(&remove_list);
+	mutex_unlock(&priv->mcast_mutex);
 }
 
 static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	netif_addr_unlock(dev);
 	local_irq_restore(flags);
 
-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
 	ipoib_mcast_remove_list(&remove_list);
 
 	/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 28884781311b..3e44087935ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -64,8 +64,9 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-static int ipoib_changelink(struct net_device *dev,
-			    struct nlattr *tb[], struct nlattr *data[])
+static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
+			    struct nlattr *data[],
+			    struct netlink_ext_ack *extack)
 {
 	u16 mode, umcast;
 	int ret = 0;
@@ -93,7 +94,8 @@ out_err:
 }
 
 static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
-				struct nlattr *tb[], struct nlattr *data[])
+				struct nlattr *tb[], struct nlattr *data[],
+				struct netlink_ext_ack *extack)
 {
 	struct net_device *pdev;
 	struct ipoib_dev_priv *ppriv;
@@ -133,7 +135,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
 					  child_pkey, IPOIB_RTNL_CHILD);
 
 	if (!err && data)
-		err = ipoib_changelink(dev, tb, data);
+		err = ipoib_changelink(dev, tb, data, extack);
 	return err;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 36dc4fcaa3cd..081b33deff1b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
 	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
 	if (!priv)
 		return -ENOMEM;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
 	down_write(&ppriv->vlan_rwsem);
 
 	/*
@@ -167,8 +167,10 @@ out:
 
 	rtnl_unlock();
 
-	if (result)
+	if (result) {
 		free_netdev(priv->dev);
+		kfree(priv);
+	}
 
 	return result;
 }
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
 	if (dev) {
 		free_netdev(dev);
+		kfree(priv);
 		return 0;
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a887efb4bdf..37b33d708c2d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht;
 static struct iscsi_transport iscsi_iser_transport;
 static struct scsi_transport_template *iscsi_iser_scsi_transport;
 static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
 struct iser_global ig;
 
 int iser_debug_level = 0;
@@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 	 */
 	if (iser_conn) {
 		mutex_lock(&iser_conn->state_mutex);
+		mutex_lock(&unbind_iser_conn_mutex);
 		iser_conn_terminate(iser_conn);
 		iscsi_conn_stop(cls_conn, flag);
 
 		/* unbind */
 		iser_conn->iscsi_conn = NULL;
 		conn->dd_data = NULL;
+		mutex_unlock(&unbind_iser_conn_mutex);
 
 		complete(&iser_conn->stop_completion);
 		mutex_unlock(&iser_conn->state_mutex);
@@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
 	struct iser_conn *iser_conn;
 	struct ib_device *ib_dev;
 
+	mutex_lock(&unbind_iser_conn_mutex);
+
 	session = starget_to_session(scsi_target(sdev))->dd_data;
 	iser_conn = session->leadconn->dd_data;
+	if (!iser_conn) {
+		mutex_unlock(&unbind_iser_conn_mutex);
+		return -ENOTCONN;
+	}
 	ib_dev = iser_conn->ib_conn.device->ib_device;
 
 	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
 		blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
 
+	mutex_unlock(&unbind_iser_conn_mutex);
+
 	return 0;
 }
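The new ipoib_get_stats() above follows the usual ndo_get_stats64 delegation shape: use the lower device's counters when an offloaded rdma_netdev provides them, otherwise convert the generic software counters. A minimal sketch of that fallback (demo_priv and its rn_ops field are hypothetical stand-ins):

#include <linux/netdevice.h>

struct demo_priv {
	const struct net_device_ops *rn_ops;	/* hypothetical lower-device ops */
};

static void demo_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (priv->rn_ops->ndo_get_stats64)
		priv->rn_ops->ndo_get_stats64(dev, stats);	/* offloaded counters */
	else
		netdev_stats_to_stats64(stats, &dev->stats);	/* software fallback */
}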
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 12ed62ce9ff7..2a07692007bd 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 
 	if (unsol_sz < edtl) {
 		hdr->flags     |= ISER_WSV;
-		hdr->write_stag = cpu_to_be32(mem_reg->rkey);
-		hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+		if (buf_out->data_len > imm_sz) {
+			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+		}
 
 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
 			 "VA:%#llX + unsol:%d\n",
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c538a38c91ce..26a004e97ae0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -708,8 +708,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
 	unsigned short sg_tablesize, sup_sg_tablesize;
 
 	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
-	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
-				 device->ib_device->attrs.max_fast_reg_page_list_len);
+	if (device->ib_device->attrs.device_cap_flags &
+			IB_DEVICE_MEM_MGT_EXTENSIONS)
+		sup_sg_tablesize =
+			min_t(
+			 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+			 device->ib_device->attrs.max_fast_reg_page_list_len);
+	else
+		sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
 
 	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index fcbed35e95a8..0e662656ef42 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1452,7 +1452,7 @@ static void
 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct isert_conn *isert_conn = wc->qp->qp_context;
-	struct ib_device *ib_dev = isert_conn->cm_id->device;
+	struct ib_device *ib_dev = isert_conn->device->ib_device;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		isert_print_wc(wc, "login recv");
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 2e8fee982436..afa938bd26d6 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -460,7 +460,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
 	sc = opa_vnic_get_sc(info, skb);
 	l4_hdr = info->vesw.vesw_id;
 
-	mdata = (struct opa_vnic_skb_mdata *)skb_push(skb, sizeof(*mdata));
+	mdata = skb_push(skb, sizeof(*mdata));
 	mdata->vl = opa_vnic_get_vl(adapter, skb);
 	mdata->entropy = entropy;
 	mdata->flags = 0;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
index d66540e24885..62390e9e0023 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -146,15 +146,15 @@ static void vnic_get_ethtool_stats(struct net_device *netdev,
 	int i;
 
 	memset(&vstats, 0, sizeof(vstats));
-	mutex_lock(&adapter->stats_lock);
+	spin_lock(&adapter->stats_lock);
 	adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
+	spin_unlock(&adapter->stats_lock);
 	for (i = 0; i < VNIC_STATS_LEN; i++) {
 		char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset;
 
 		data[i] = (vnic_gstrings_stats[i].sizeof_stat ==
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
-	mutex_unlock(&adapter->stats_lock);
 }
 
 /* vnic_get_strings - get strings */
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 6bba886bec1f..ca29e6d5aedc 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -214,7 +214,7 @@ struct opa_vnic_adapter {
 	struct mutex mactbl_lock;
 
 	/* Lock used to protect access to vnic counters */
-	struct mutex stats_lock;
+	spinlock_t stats_lock;
 
 	u8 flow_tbl[OPA_VNIC_FLOW_TBL_SIZE];
 
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 905f39dda5aa..1a3c25364b64 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -69,9 +69,9 @@ static void opa_vnic_get_stats64(struct net_device *netdev,
 	struct opa_vnic_stats vstats;
 
 	memset(&vstats, 0, sizeof(vstats));
-	mutex_lock(&adapter->stats_lock);
+	spin_lock(&adapter->stats_lock);
 	adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
-	mutex_unlock(&adapter->stats_lock);
+	spin_unlock(&adapter->stats_lock);
 	memcpy(stats, &vstats.netstats, sizeof(*stats));
 }
 
@@ -103,7 +103,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
 	int rc;
 
 	/* pass entropy and vl as metadata in skb */
-	mdata = (struct opa_vnic_skb_mdata *)skb_push(skb, sizeof(*mdata));
+	mdata = skb_push(skb, sizeof(*mdata));
 	mdata->entropy = opa_vnic_calc_entropy(adapter, skb);
 	mdata->vl = opa_vnic_get_vl(adapter, skb);
 	rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
@@ -323,13 +323,13 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
 	else if (IS_ERR(netdev))
 		return ERR_CAST(netdev);
 
+	rn = netdev_priv(netdev);
 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
 		rc = -ENOMEM;
 		goto adapter_err;
 	}
 
-	rn = netdev_priv(netdev);
 	rn->clnt_priv = adapter;
 	rn->hca = ibdev;
 	rn->port_num = port_num;
@@ -344,7 +344,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
 	netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM;
 	mutex_init(&adapter->lock);
 	mutex_init(&adapter->mactbl_lock);
-	mutex_init(&adapter->stats_lock);
+	spin_lock_init(&adapter->stats_lock);
 
 	SET_NETDEV_DEV(netdev, ibdev->dev.parent);
 
@@ -364,10 +364,9 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
 netdev_err:
 	mutex_destroy(&adapter->lock);
 	mutex_destroy(&adapter->mactbl_lock);
-	mutex_destroy(&adapter->stats_lock);
 	kfree(adapter);
 adapter_err:
-	ibdev->free_rdma_netdev(netdev);
+	rn->free_rdma_netdev(netdev);
 
 	return ERR_PTR(rc);
 }
@@ -376,14 +375,13 @@ adapter_err:
 void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	struct ib_device *ibdev = adapter->ibdev;
+	struct rdma_netdev *rn = netdev_priv(netdev);
 
 	v_info("removing\n");
 	unregister_netdev(netdev);
 	opa_vnic_release_mac_tbl(adapter);
 	mutex_destroy(&adapter->lock);
 	mutex_destroy(&adapter->mactbl_lock);
-	mutex_destroy(&adapter->stats_lock);
 	kfree(adapter);
-	ibdev->free_rdma_netdev(netdev);
+	rn->free_rdma_netdev(netdev);
 }
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 875694f9a7f9..cf768dd78d1b 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -794,7 +794,7 @@ void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
 
 	send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0,
 				      IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA,
-				      GFP_KERNEL, OPA_MGMT_BASE_VERSION);
+				      GFP_ATOMIC, OPA_MGMT_BASE_VERSION);
 	if (IS_ERR(send_buf)) {
 		c_err("%s:Couldn't allocate send buf\n", __func__);
 		goto err_sndbuf;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
index a51bf977f4d6..c2733964379c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -89,9 +89,9 @@ void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter,
 	u64 *src;
 
 	memset(&vstats, 0, sizeof(vstats));
-	mutex_lock(&adapter->stats_lock);
+	spin_lock(&adapter->stats_lock);
 	adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
-	mutex_unlock(&adapter->stats_lock);
+	spin_unlock(&adapter->stats_lock);
 
 	cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
 	cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
@@ -128,9 +128,9 @@ void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter,
 	struct opa_vnic_stats vstats;
 
 	memset(&vstats, 0, sizeof(vstats));
-	mutex_lock(&adapter->stats_lock);
+	spin_lock(&adapter->stats_lock);
 	adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
-	mutex_unlock(&adapter->stats_lock);
+	spin_unlock(&adapter->stats_lock);
 
 	cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
 	cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index def723a5df29..2354c742caa1 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
 	ch->path.sgid = target->sgid;
 	ch->path.dgid = target->orig_dgid;
 	ch->path.pkey = target->pkey;
-	sa_path_set_service_id(&ch->path, target->service_id);
+	ch->path.service_id = target->service_id;
 
 	return 0;
 }
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	return 0;
 
 err_qp:
-	srp_destroy_qp(ch, qp);
+	ib_destroy_qp(qp);
 
 err_send_cq:
 	ib_free_cq(send_cq);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 1ced0731c140..402275be0931 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1157,8 +1157,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 	}
 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
 
-	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
-		 ioctx->cmd.tag);
+	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
+		 ioctx->state, ioctx->cmd.tag);
 
 	switch (state) {
 	case SRPT_STATE_NEW:
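The opa_vnic hunks above turn stats_lock from a mutex into a spinlock because the stats path can be entered from contexts that must not sleep. A minimal sketch of the corrected locking, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* stats_lock must be a non-sleeping lock: dev_get_stats()-style callers
 * may hold other locks or run in atomic context.
 */
struct demo_adapter {
	spinlock_t stats_lock;
	struct rtnl_link_stats64 counters;
};

static void demo_snapshot_stats(struct demo_adapter *adapter,
				struct rtnl_link_stats64 *out)
{
	spin_lock(&adapter->stats_lock);
	*out = adapter->counters;
	spin_unlock(&adapter->stats_lock);
}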