Diffstat (limited to 'include')
-rw-r--r--  include/linux/mlx4/device.h        |   2
-rw-r--r--  include/linux/mlx4/qp.h            |  24
-rw-r--r--  include/linux/sunrpc/svc_rdma.h    |   6
-rw-r--r--  include/rdma/ib_addr.h             |  18
-rw-r--r--  include/rdma/ib_cache.h            |  40
-rw-r--r--  include/rdma/ib_pack.h             |   2
-rw-r--r--  include/rdma/ib_sa.h               |  12
-rw-r--r--  include/rdma/ib_verbs.h            | 222
-rw-r--r--  include/rdma/rdma_cm.h             |   8
-rw-r--r--  include/uapi/rdma/ib_user_verbs.h  |  26
10 files changed, 242 insertions(+), 118 deletions(-)
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 5a8677bafe04..7501626ab529 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -214,6 +214,8 @@ enum {
MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
+ MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
};
enum {
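Drivers query these flags2 bits before relying on the new loopback source-check machinery. A minimal, hedged sketch of such a capability check (the dev->caps.flags2 field is assumed from the rest of this header, not shown in this hunk):

	/* Hedged sketch: gate loopback source-check support on the new
	 * capability bit advertised by the firmware. */
	static bool mlx4_lb_src_chk_supported(struct mlx4_dev *dev)
	{
		return !!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK);
	}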
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index de45a51b3f04..fe052e234906 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -135,7 +135,10 @@ struct mlx4_rss_context {
struct mlx4_qp_path {
u8 fl;
- u8 vlan_control;
+ union {
+ u8 vlan_control;
+ u8 control;
+ };
u8 disable_pkey_check;
u8 pkey_index;
u8 counter_index;
@@ -156,9 +159,16 @@ struct mlx4_qp_path {
};
enum { /* fl */
- MLX4_FL_CV = 1 << 6,
- MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2
+ MLX4_FL_CV = 1 << 6,
+ MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
+ MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
+ MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
};
+
+enum { /* control */
+ MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7,
+};
+
enum { /* vlan_control */
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */
@@ -254,6 +264,8 @@ enum {
MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32,
MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32,
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
};
enum { /* param3 */
@@ -436,11 +448,13 @@ enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_VSD = 1 << 1,
MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
- MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
+ MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1
};
enum mlx4_update_qp_params_flags {
- MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
+ MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0,
+ MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1,
};
struct mlx4_update_qp_params {
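The new update-QP attribute and params flag are what a consumer would pass to mlx4_update_qp() to turn on multicast loopback source checking. A hedged sketch, assuming the mlx4_update_qp() prototype and the flags member of struct mlx4_update_qp_params declared alongside this enum:

	/* Hedged sketch: request multicast loopback source checking on a QP
	 * through the update-QP firmware command. */
	static int enable_mc_lb_src_check(struct mlx4_dev *dev, u32 qpn)
	{
		struct mlx4_update_qp_params params = {
			.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB,
		};

		return mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB,
				      &params);
	}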
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 7ccc961f33e9..1e4438ea2380 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -105,11 +105,9 @@ struct svc_rdma_chunk_sge {
};
struct svc_rdma_fastreg_mr {
struct ib_mr *mr;
- void *kva;
- struct ib_fast_reg_page_list *page_list;
- int page_list_len;
+ struct scatterlist *sg;
+ int sg_nents;
unsigned long access_flags;
- unsigned long map_len;
enum dma_data_direction direction;
struct list_head frmr_list;
};
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index fde33ac6b58a..11528591d0d7 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -47,6 +47,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <net/ipv6.h>
+#include <net/net_namespace.h>
struct rdma_addr_client {
atomic_t refcount;
@@ -64,6 +65,16 @@ void rdma_addr_register_client(struct rdma_addr_client *client);
*/
void rdma_addr_unregister_client(struct rdma_addr_client *client);
+/**
+ * struct rdma_dev_addr - Contains resolved RDMA hardware addresses
+ * @src_dev_addr: Source MAC address.
+ * @dst_dev_addr: Destination MAC address.
+ * @broadcast: Broadcast address of the device.
+ * @dev_type: The interface hardware type of the device.
+ * @bound_dev_if: An optional device interface index.
+ * @transport: The transport type used.
+ * @net: Network namespace containing the bound_dev_if net_dev.
+ */
struct rdma_dev_addr {
unsigned char src_dev_addr[MAX_ADDR_LEN];
unsigned char dst_dev_addr[MAX_ADDR_LEN];
@@ -71,11 +82,14 @@ struct rdma_dev_addr {
unsigned short dev_type;
int bound_dev_if;
enum rdma_transport_type transport;
+ struct net *net;
};
/**
* rdma_translate_ip - Translate a local IP address to an RDMA hardware
* address.
+ *
+ * The dev_addr->net field must be initialized.
*/
int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
u16 *vlan_id);
@@ -90,7 +104,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
* @dst_addr: The destination address to resolve.
* @addr: A reference to a data location that will receive the resolved
* addresses. The data location must remain valid until the callback has
- * been invoked.
+ * been invoked. The net field of the addr struct must be valid.
* @timeout_ms: Amount of time to wait for the address resolution to complete.
* @callback: Call invoked once address resolution has completed, timed out,
* or been canceled. A status of 0 indicates success.
@@ -112,7 +126,7 @@ int rdma_addr_size(struct sockaddr *addr);
int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
- u8 *smac, u16 *vlan_id);
+ u8 *smac, u16 *vlan_id, int if_index);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
{
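With the namespace now carried in struct rdma_dev_addr, callers must fill in the net field before any resolution call. A minimal sketch, assuming a caller that is not namespace-aware and therefore uses init_net:

	/* Hedged sketch: resolve a local IPv4 address with the new 'net'
	 * field initialized, as rdma_translate_ip() now requires. */
	static int translate_local_ip(struct sockaddr_in *sin, u16 *vlan_id)
	{
		struct rdma_dev_addr dev_addr = { };

		dev_addr.net = &init_net;
		return rdma_translate_ip((struct sockaddr *)sin, &dev_addr, vlan_id);
	}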
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index bd92130f4ac5..269a27cf0a46 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -43,6 +43,8 @@
* @port_num: The port number of the device to query.
* @index: The index into the cached GID table to query.
* @gid: The GID value found at the specified index.
+ * @attr: The GID attribute found at the specified index (only in RoCE).
+ * NULL means ignore (output parameter).
*
* ib_get_cached_gid() fetches the specified GID table entry stored in
* the local software cache.
@@ -50,13 +52,15 @@
int ib_get_cached_gid(struct ib_device *device,
u8 port_num,
int index,
- union ib_gid *gid);
+ union ib_gid *gid,
+ struct ib_gid_attr *attr);
/**
* ib_find_cached_gid - Returns the port number and GID table index where
* a specified GID value occurs.
* @device: The device to query.
* @gid: The GID value to search for.
+ * @ndev: In RoCE, the net device of the device. NULL means ignore.
* @port_num: The port number of the device where the GID value was found.
* @index: The index into the cached GID table where the GID was found. This
* parameter may be NULL.
@@ -64,12 +68,40 @@ int ib_get_cached_gid(struct ib_device *device,
* ib_find_cached_gid() searches for the specified GID value in
* the local software cache.
*/
-int ib_find_cached_gid(struct ib_device *device,
+int ib_find_cached_gid(struct ib_device *device,
const union ib_gid *gid,
- u8 *port_num,
- u16 *index);
+ struct net_device *ndev,
+ u8 *port_num,
+ u16 *index);
/**
+ * ib_find_cached_gid_by_port - Returns the GID table index where a specified
+ * GID value occurs
+ * @device: The device to query.
+ * @gid: The GID value to search for.
+ * @port_num: The port number of the device where the GID value should be
+ * searched.
+ * @ndev: In RoCE, the net device of the device. NULL means ignore.
+ * @index: The index into the cached GID table where the GID was found. This
+ * parameter may be NULL.
+ *
+ * ib_find_cached_gid_by_port() searches for the specified GID value in
+ * the local software cache.
+ */
+int ib_find_cached_gid_by_port(struct ib_device *device,
+ const union ib_gid *gid,
+ u8 port_num,
+ struct net_device *ndev,
+ u16 *index);
+
+int ib_find_gid_by_filter(struct ib_device *device,
+ const union ib_gid *gid,
+ u8 port_num,
+ bool (*filter)(const union ib_gid *gid,
+ const struct ib_gid_attr *,
+ void *),
+ void *context, u16 *index);
+/**
* ib_get_cached_pkey - Returns a cached PKey table entry
* @device: The device to query.
* @port_num: The port number of the device to query.
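The filter-based lookup lets callers match on GID attributes rather than only on the GID value; the typical case is matching the net_device a RoCE GID was created for. A hedged sketch (the ndev member of struct ib_gid_attr is assumed from ib_verbs.h):

	/* Hedged sketch: look up the cached index of 'gid' on 'port' that was
	 * registered for a particular net_device. */
	static bool gid_ndev_filter(const union ib_gid *gid,
				    const struct ib_gid_attr *attr, void *context)
	{
		return attr->ndev == context;
	}

	static int find_gid_index_for_ndev(struct ib_device *device, u8 port,
					   const union ib_gid *gid,
					   struct net_device *ndev, u16 *index)
	{
		return ib_find_gid_by_filter(device, gid, port, gid_ndev_filter,
					     ndev, index);
	}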
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 709a5331e6b9..e99d8f9a4551 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -76,7 +76,7 @@ enum {
IB_OPCODE_UC = 0x20,
IB_OPCODE_RD = 0x40,
IB_OPCODE_UD = 0x60,
- /* per IBTA 3.1 Table 38, A10.3.2 */
+ /* per IBTA 1.3 vol 1 Table 38, A10.3.2 */
IB_OPCODE_CNP = 0x80,
/* operations -- just used to define real constants */
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 7e071a6abb34..301969552d0a 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -39,6 +39,7 @@
#include <linux/compiler.h>
#include <linux/atomic.h>
+#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
@@ -154,11 +155,18 @@ struct ib_sa_path_rec {
u8 packet_life_time_selector;
u8 packet_life_time;
u8 preference;
- u8 smac[ETH_ALEN];
u8 dmac[ETH_ALEN];
- u16 vlan_id;
+ /* ignored in IB */
+ int ifindex;
+ /* ignored in IB */
+ struct net *net;
};
+static inline struct net_device *ib_get_ndev_from_path(struct ib_sa_path_rec *rec)
+{
+ return rec->net ? dev_get_by_index(rec->net, rec->ifindex) : NULL;
+}
+
#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0)
#define IB_SA_MCMEMBER_REC_PORT_GID IB_SA_COMP_MASK( 1)
#define IB_SA_MCMEMBER_REC_QKEY IB_SA_COMP_MASK( 2)
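Because ib_get_ndev_from_path() goes through dev_get_by_index(), the net_device it returns carries a reference the caller has to drop. A short usage sketch:

	/* Usage sketch: compare the path's net_device against a bound
	 * interface and release the reference taken by dev_get_by_index(). */
	static bool path_uses_ndev(struct ib_sa_path_rec *rec,
				   const struct net_device *bound)
	{
		struct net_device *ndev = ib_get_ndev_from_path(rec);
		bool match = (ndev == bound);

		if (ndev)
			dev_put(ndev);
		return match;
	}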
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 7845fae6f2df..9a68a19532ba 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -137,6 +137,8 @@ enum ib_device_cap_flags {
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
+ IB_DEVICE_RC_IP_CSUM = (1<<25),
+ IB_DEVICE_RAW_IP_CSUM = (1<<26),
IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
IB_DEVICE_SIGNATURE_HANDOVER = (1<<30),
IB_DEVICE_ON_DEMAND_PAGING = (1<<31),
@@ -474,7 +476,7 @@ enum ib_event_type {
IB_EVENT_GID_CHANGE,
};
-__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
+const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
struct ib_event {
struct ib_device *device;
@@ -697,7 +699,6 @@ struct ib_ah_attr {
u8 ah_flags;
u8 port_num;
u8 dmac[ETH_ALEN];
- u16 vlan_id;
};
enum ib_wc_status {
@@ -725,7 +726,7 @@ enum ib_wc_status {
IB_WC_GENERAL_ERR
};
-__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
+const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
enum ib_wc_opcode {
IB_WC_SEND,
@@ -736,7 +737,7 @@ enum ib_wc_opcode {
IB_WC_BIND_MW,
IB_WC_LSO,
IB_WC_LOCAL_INV,
- IB_WC_FAST_REG_MR,
+ IB_WC_REG_MR,
IB_WC_MASKED_COMP_SWAP,
IB_WC_MASKED_FETCH_ADD,
/*
@@ -873,7 +874,6 @@ enum ib_qp_create_flags {
IB_QP_CREATE_RESERVED_END = 1 << 31,
};
-
/*
* Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
* callback to destroy the passed in QP.
@@ -957,10 +957,10 @@ enum ib_qp_attr_mask {
IB_QP_PATH_MIG_STATE = (1<<18),
IB_QP_CAP = (1<<19),
IB_QP_DEST_QPN = (1<<20),
- IB_QP_SMAC = (1<<21),
- IB_QP_ALT_SMAC = (1<<22),
- IB_QP_VID = (1<<23),
- IB_QP_ALT_VID = (1<<24),
+ IB_QP_RESERVED1 = (1<<21),
+ IB_QP_RESERVED2 = (1<<22),
+ IB_QP_RESERVED3 = (1<<23),
+ IB_QP_RESERVED4 = (1<<24),
};
enum ib_qp_state {
@@ -1010,10 +1010,6 @@ struct ib_qp_attr {
u8 rnr_retry;
u8 alt_port_num;
u8 alt_timeout;
- u8 smac[ETH_ALEN];
- u8 alt_smac[ETH_ALEN];
- u16 vlan_id;
- u16 alt_vlan_id;
};
enum ib_wr_opcode {
@@ -1028,7 +1024,7 @@ enum ib_wr_opcode {
IB_WR_SEND_WITH_INV,
IB_WR_RDMA_READ_WITH_INV,
IB_WR_LOCAL_INV,
- IB_WR_FAST_REG_MR,
+ IB_WR_REG_MR,
IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
IB_WR_BIND_MW,
@@ -1066,12 +1062,6 @@ struct ib_sge {
u32 lkey;
};
-struct ib_fast_reg_page_list {
- struct ib_device *device;
- u64 *page_list;
- unsigned int max_page_list_len;
-};
-
/**
* struct ib_mw_bind_info - Parameters for a memory window bind operation.
* @mr: A memory region to bind the memory window to.
@@ -1100,54 +1090,89 @@ struct ib_send_wr {
__be32 imm_data;
u32 invalidate_rkey;
} ex;
- union {
- struct {
- u64 remote_addr;
- u32 rkey;
- } rdma;
- struct {
- u64 remote_addr;
- u64 compare_add;
- u64 swap;
- u64 compare_add_mask;
- u64 swap_mask;
- u32 rkey;
- } atomic;
- struct {
- struct ib_ah *ah;
- void *header;
- int hlen;
- int mss;
- u32 remote_qpn;
- u32 remote_qkey;
- u16 pkey_index; /* valid for GSI only */
- u8 port_num; /* valid for DR SMPs on switch only */
- } ud;
- struct {
- u64 iova_start;
- struct ib_fast_reg_page_list *page_list;
- unsigned int page_shift;
- unsigned int page_list_len;
- u32 length;
- int access_flags;
- u32 rkey;
- } fast_reg;
- struct {
- struct ib_mw *mw;
- /* The new rkey for the memory window. */
- u32 rkey;
- struct ib_mw_bind_info bind_info;
- } bind_mw;
- struct {
- struct ib_sig_attrs *sig_attrs;
- struct ib_mr *sig_mr;
- int access_flags;
- struct ib_sge *prot;
- } sig_handover;
- } wr;
- u32 xrc_remote_srq_num; /* XRC TGT QPs only */
};
+struct ib_rdma_wr {
+ struct ib_send_wr wr;
+ u64 remote_addr;
+ u32 rkey;
+};
+
+static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
+{
+ return container_of(wr, struct ib_rdma_wr, wr);
+}
+
+struct ib_atomic_wr {
+ struct ib_send_wr wr;
+ u64 remote_addr;
+ u64 compare_add;
+ u64 swap;
+ u64 compare_add_mask;
+ u64 swap_mask;
+ u32 rkey;
+};
+
+static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
+{
+ return container_of(wr, struct ib_atomic_wr, wr);
+}
+
+struct ib_ud_wr {
+ struct ib_send_wr wr;
+ struct ib_ah *ah;
+ void *header;
+ int hlen;
+ int mss;
+ u32 remote_qpn;
+ u32 remote_qkey;
+ u16 pkey_index; /* valid for GSI only */
+ u8 port_num; /* valid for DR SMPs on switch only */
+};
+
+static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
+{
+ return container_of(wr, struct ib_ud_wr, wr);
+}
+
+struct ib_reg_wr {
+ struct ib_send_wr wr;
+ struct ib_mr *mr;
+ u32 key;
+ int access;
+};
+
+static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
+{
+ return container_of(wr, struct ib_reg_wr, wr);
+}
+
+struct ib_bind_mw_wr {
+ struct ib_send_wr wr;
+ struct ib_mw *mw;
+ /* The new rkey for the memory window. */
+ u32 rkey;
+ struct ib_mw_bind_info bind_info;
+};
+
+static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr)
+{
+ return container_of(wr, struct ib_bind_mw_wr, wr);
+}
+
+struct ib_sig_handover_wr {
+ struct ib_send_wr wr;
+ struct ib_sig_attrs *sig_attrs;
+ struct ib_mr *sig_mr;
+ int access_flags;
+ struct ib_sge *prot;
+};
+
+static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
+{
+ return container_of(wr, struct ib_sig_handover_wr, wr);
+}
+
struct ib_recv_wr {
struct ib_recv_wr *next;
u64 wr_id;
@@ -1334,6 +1359,9 @@ struct ib_mr {
struct ib_uobject *uobject;
u32 lkey;
u32 rkey;
+ u64 iova;
+ u32 length;
+ unsigned int page_size;
atomic_t usecnt; /* count number of MWs */
};
@@ -1718,9 +1746,9 @@ struct ib_device {
struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
- struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
- int page_list_len);
- void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
+ int (*map_mr_sg)(struct ib_mr *mr,
+ struct scatterlist *sg,
+ int sg_nents);
int (*rereg_phys_mr)(struct ib_mr *mr,
int mr_rereg_mask,
struct ib_pd *pd,
@@ -2176,7 +2204,8 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
}
int ib_query_gid(struct ib_device *device,
- u8 port_num, int index, union ib_gid *gid);
+ u8 port_num, int index, union ib_gid *gid,
+ struct ib_gid_attr *attr);
int ib_query_pkey(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
@@ -2190,7 +2219,7 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- u8 *port_num, u16 *index);
+ struct net_device *ndev, u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
@@ -2829,33 +2858,6 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
u32 max_num_sg);
/**
- * ib_alloc_fast_reg_page_list - Allocates a page list array
- * @device - ib device pointer.
- * @page_list_len - size of the page list array to be allocated.
- *
- * This allocates and returns a struct ib_fast_reg_page_list * and a
- * page_list array that is at least page_list_len in size. The actual
- * size is returned in max_page_list_len. The caller is responsible
- * for initializing the contents of the page_list array before posting
- * a send work request with the IB_WC_FAST_REG_MR opcode.
- *
- * The page_list array entries must be translated using one of the
- * ib_dma_*() functions just like the addresses passed to
- * ib_map_phys_fmr(). Once the ib_post_send() is issued, the struct
- * ib_fast_reg_page_list must not be modified by the caller until the
- * IB_WC_FAST_REG_MR work request completes.
- */
-struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
- struct ib_device *device, int page_list_len);
-
-/**
- * ib_free_fast_reg_page_list - Deallocates a previously allocated
- * page list array.
- * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
- */
-void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
-
-/**
* ib_update_fast_reg_key - updates the key portion of the fast_reg MR
* R_Key and L_Key.
* @mr - struct ib_mr pointer to be updated.
@@ -3023,4 +3025,28 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
+int ib_map_mr_sg(struct ib_mr *mr,
+ struct scatterlist *sg,
+ int sg_nents,
+ unsigned int page_size);
+
+static inline int
+ib_map_mr_sg_zbva(struct ib_mr *mr,
+ struct scatterlist *sg,
+ int sg_nents,
+ unsigned int page_size)
+{
+ int n;
+
+ n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
+ mr->iova = 0;
+
+ return n;
+}
+
+int ib_sg_to_pages(struct ib_mr *mr,
+ struct scatterlist *sgl,
+ int sg_nents,
+ int (*set_page)(struct ib_mr *, u64));
+
#endif /* IB_VERBS_H */
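Taken together, the work-request split and the ib_map_mr_sg()/IB_WR_REG_MR interface replace the old fast_reg page-list flow: a ULP maps a DMA-mapped scatterlist onto an MR, posts a typed struct ib_reg_wr, and issues further typed work requests that embed struct ib_send_wr as their first member. A hedged sketch of the new flow (error handling trimmed; qp, mr, sg/sg_nents and the remote raddr/rkey are assumed to be set up by the caller):

	/* Hedged sketch: register a scatterlist via ib_map_mr_sg()/IB_WR_REG_MR,
	 * then chain an RDMA WRITE built from the typed struct ib_rdma_wr. */
	static int reg_and_write(struct ib_qp *qp, struct ib_mr *mr,
				 struct scatterlist *sg, int sg_nents,
				 u64 raddr, u32 rkey)
	{
		struct ib_reg_wr reg = { };
		struct ib_rdma_wr write = { };
		struct ib_sge sge;
		struct ib_send_wr *bad_wr;
		int n;

		n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
		if (n < sg_nents)
			return n < 0 ? n : -EINVAL;

		reg.wr.opcode = IB_WR_REG_MR;
		reg.mr = mr;
		reg.key = mr->rkey;
		reg.access = IB_ACCESS_LOCAL_WRITE;

		/* iova and length are filled in by ib_map_mr_sg() */
		sge.addr = mr->iova;
		sge.length = mr->length;
		sge.lkey = mr->lkey;

		write.wr.opcode = IB_WR_RDMA_WRITE;
		write.wr.sg_list = &sge;
		write.wr.num_sge = 1;
		write.wr.send_flags = IB_SEND_SIGNALED;
		write.remote_addr = raddr;
		write.rkey = rkey;

		reg.wr.next = &write.wr;
		return ib_post_send(qp, &reg.wr, &bad_wr);
	}

On the provider side, drivers recover the typed request with the matching container_of helper, e.g. reg_wr(wr) or rdma_wr(wr), inside their post_send implementation.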
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index c92522c192d2..afe44fde72a5 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -62,7 +62,7 @@ enum rdma_cm_event_type {
RDMA_CM_EVENT_TIMEWAIT_EXIT
};
-__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event);
+const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
enum rdma_port_space {
RDMA_PS_SDP = 0x0001,
@@ -160,13 +160,17 @@ struct rdma_cm_id {
/**
* rdma_create_id - Create an RDMA identifier.
*
+ * @net: The network namespace in which to create the new id.
* @event_handler: User callback invoked to report events associated with the
* returned rdma_id.
* @context: User specified context associated with the id.
* @ps: RDMA port space.
* @qp_type: type of queue pair associated with the id.
+ *
+ * The id holds a reference on the network namespace until it is destroyed.
*/
-struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
+struct rdma_cm_id *rdma_create_id(struct net *net,
+ rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps,
enum ib_qp_type qp_type);
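Every kernel caller of rdma_create_id() gains the namespace argument; ULPs that are not namespace-aware simply pass init_net. A minimal sketch (my_event_handler is a placeholder, not part of this API):

	/* Hedged sketch: create a CM id in the initial network namespace
	 * using the new rdma_create_id() signature. */
	static struct rdma_cm_id *create_tcp_ps_id(rdma_cm_event_handler my_event_handler,
						   void *context)
	{
		return rdma_create_id(&init_net, my_event_handler, context,
				      RDMA_PS_TCP, IB_QPT_RC);
	}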
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 978841eeaff1..8126c143a519 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -92,6 +92,7 @@ enum {
enum {
IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
+ IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
@@ -516,6 +517,25 @@ struct ib_uverbs_create_qp {
__u64 driver_data[0];
};
+struct ib_uverbs_ex_create_qp {
+ __u64 user_handle;
+ __u32 pd_handle;
+ __u32 send_cq_handle;
+ __u32 recv_cq_handle;
+ __u32 srq_handle;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u8 sq_sig_all;
+ __u8 qp_type;
+ __u8 is_srq;
+ __u8 reserved;
+ __u32 comp_mask;
+ __u32 create_flags;
+};
+
struct ib_uverbs_open_qp {
__u64 response;
__u64 user_handle;
@@ -538,6 +558,12 @@ struct ib_uverbs_create_qp_resp {
__u32 reserved;
};
+struct ib_uverbs_ex_create_qp_resp {
+ struct ib_uverbs_create_qp_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
/*
* This struct needs to remain a multiple of 8 bytes to keep the
* alignment of the modify QP parameters.