author		Wissam Shoukair		2016-03-21 14:09:13 +0100
committer	Michael Brown		2016-03-22 18:55:55 +0100
commit		0a20373a2fb78622043277b4774676c55bbea22b (patch)
tree		25c414a1bb1668c73f20c447cb7e868c2a8bf17f
parent		[pxe] Implicitly open network device in PXENV_UDP_OPEN (diff)
[golan] Add Connect-IB, ConnectX-4 and ConnectX-4 Lx (Infiniband) support
Signed-off-by: Wissam Shoukair <wissams@mellanox.com>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
-rw-r--r--	src/Makefile	8
-rwxr-xr-x	src/drivers/infiniband/CIB_PRM.h	1168
-rw-r--r--	src/drivers/infiniband/flexboot_nodnic.c	1479
-rw-r--r--	src/drivers/infiniband/flexboot_nodnic.h	163
-rwxr-xr-x	src/drivers/infiniband/golan.c	2663
-rwxr-xr-x	src/drivers/infiniband/golan.h	319
-rw-r--r--	src/drivers/infiniband/mlx_nodnic/include/mlx_cmd.h	43
-rw-r--r--	src/drivers/infiniband/mlx_nodnic/include/mlx_device.h	80
-rw-r--r--	src/drivers/infiniband/mlx_nodnic/include/mlx_nodnic_data_structures.h	201
-rw-r--r--	src/drivers/infiniband/mlx_nodnic/include/mlx_port.h	229
-rw-r--r--	src/drivers/infiniband/mlx_nodnic/src/mlx_cmd.c	77
-rw-r--r--	src/drivers/infiniband/mlx_nodnic/src/mlx_device.c	339
-rw-r--r--	src/drivers/infiniband/mlx_nodnic/src/mlx_port.c	1038
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/private/mlx_memory_priv.h	113
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/private/mlx_pci_priv.h	72
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/private/mlx_utils_priv.h	68
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_bail.h	47
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_icmd.h	63
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_logging.h	46
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_memory.h	115
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_pci.h	78
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_pci_gw.h	81
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_types.h	27
-rw-r--r--	src/drivers/infiniband/mlx_utils/include/public/mlx_utils.h	106
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.c	54
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.h	46
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.c	180
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h	145
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c	60
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h	52
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c	295
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h	140
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c	482
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h	94
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_prm.h	259
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.c	145
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.h	73
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.c	90
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h	82
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.c	74
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h	60
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.c	84
-rw-r--r--	src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.h	61
-rw-r--r--	src/drivers/infiniband/mlx_utils/src/private/uefi/mlx_logging_impl.c	9
-rw-r--r--	src/drivers/infiniband/mlx_utils/src/public/mlx_icmd.c	371
-rw-r--r--	src/drivers/infiniband/mlx_utils/src/public/mlx_memory.c	238
-rw-r--r--	src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c	117
-rw-r--r--	src/drivers/infiniband/mlx_utils/src/public/mlx_pci_gw.c	392
-rw-r--r--	src/drivers/infiniband/mlx_utils/src/public/mlx_utils.c	121
-rw-r--r--	src/drivers/infiniband/mlx_utils_flexboot/include/mlx_logging_priv.h	61
-rw-r--r--	src/drivers/infiniband/mlx_utils_flexboot/include/mlx_types_priv.h	60
-rw-r--r--	src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c	172
-rw-r--r--	src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c	182
-rw-r--r--	src/drivers/infiniband/mlx_utils_flexboot/src/mlx_utils_priv.c	83
-rw-r--r--	src/drivers/infiniband/nodnic_prm.h	47
-rw-r--r--	src/drivers/infiniband/nodnic_shomron_prm.h	143
-rw-r--r--	src/include/ipxe/errfile.h	2
57 files changed, 13097 insertions, 0 deletions
diff --git a/src/Makefile b/src/Makefile
index 0524bc76..58eedb11 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -84,6 +84,14 @@ SRCDIRS += drivers/block
SRCDIRS += drivers/nvs
SRCDIRS += drivers/bitbash
SRCDIRS += drivers/infiniband
+SRCDIRS += drivers/infiniband/mlx_utils_flexboot/src
+SRCDIRS += drivers/infiniband/mlx_utils/src/public
+SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access
+SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig
+SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac
+SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds
+SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed
+SRCDIRS += drivers/infiniband/mlx_nodnic/src
SRCDIRS += drivers/usb
SRCDIRS += interface/pxe interface/efi interface/smbios
SRCDIRS += interface/bofm
diff --git a/src/drivers/infiniband/CIB_PRM.h b/src/drivers/infiniband/CIB_PRM.h
new file mode 100755
index 00000000..6d07c015
--- /dev/null
+++ b/src/drivers/infiniband/CIB_PRM.h
@@ -0,0 +1,1168 @@
+/*
+ * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#ifndef __CIB_PRM__
+#define __CIB_PRM__
+
+typedef unsigned long long __be64;
+typedef uint32_t __be32;
+typedef uint16_t __be16;
+
+#define GOLAN_CMD_DATA_BLOCK_SIZE (1 << 9)
+#define GOLAN_CMD_PAS_CNT (GOLAN_CMD_DATA_BLOCK_SIZE / sizeof(__be64))
+#define MAILBOX_STRIDE (1 << 10)
+#define MAILBOX_MASK (MAILBOX_STRIDE - 1)
+
+#define GOLAN_PCI_CMD_XPORT 7
+#define CMD_OWNER_HW 0x1
+
+#define IB_NUM_PKEYS 0x20
+
+struct health_buffer {
+ __be32 assert_var[5];
+ __be32 rsvd0[3];
+ __be32 assert_exit_ptr;
+ __be32 assert_callra;
+ __be32 rsvd1[2];
+ __be32 fw_ver;
+ __be32 hw_id;
+ __be32 rsvd2;
+ u8 irisc_index;
+ u8 synd;
+ __be16 ext_sync;
+} __attribute ( ( packed ) );
+
+struct golan_hca_init_seg {
+ __be32 fw_rev;
+ __be32 cmdif_rev_fw_sub;
+ __be32 rsvd0[2];
+ __be32 cmdq_addr_h;
+ __be32 cmdq_addr_l_sz;
+ __be32 cmd_dbell;
+ __be32 rsvd1[121];
+ struct health_buffer health;
+ __be32 rsvd2[884];
+ __be32 health_counter;
+ __be32 rsvd3[1023];
+ __be64 ieee1588_clk;
+ __be32 ieee1588_clk_type;
+ __be32 clr_intx;
+} __attribute ( ( packed ) );
+
+enum golan_manage_pages_mode {
+ GOLAN_PAGES_CANT_GIVE = 0,
+ GOLAN_PAGES_GIVE = 1,
+ GOLAN_PAGES_TAKE = 2
+};
+
+enum golan_qry_pages_mode {
+ GOLAN_BOOT_PAGES = 0x1,
+ GOLAN_INIT_PAGES = 0x2,
+ GOLAN_REG_PAGES = 0x3,
+};
+
+enum {
+ GOLAN_REG_PCAP = 0x5001,
+ GOLAN_REG_PMTU = 0x5003,
+ GOLAN_REG_PTYS = 0x5004,
+ GOLAN_REG_PAOS = 0x5006,
+ GOLAN_REG_PMAOS = 0x5012,
+ GOLAN_REG_PUDE = 0x5009,
+ GOLAN_REG_PMPE = 0x5010,
+ GOLAN_REG_PELC = 0x500e,
+ GOLAN_REG_PMLP = 0, /* TBD */
+ GOLAN_REG_NODE_DESC = 0x6001,
+ GOLAN_REG_HOST_ENDIANESS = 0x7004,
+};
+
+enum {
+ GOLAN_CMD_OP_QUERY_HCA_CAP = 0x100,
+ GOLAN_CMD_OP_QUERY_ADAPTER = 0x101,
+ GOLAN_CMD_OP_INIT_HCA = 0x102,
+ GOLAN_CMD_OP_TEARDOWN_HCA = 0x103,
+ GOLAN_CMD_OP_ENABLE_HCA = 0x104,
+ GOLAN_CMD_OP_DISABLE_HCA = 0x105,
+
+ GOLAN_CMD_OP_QUERY_PAGES = 0x107,
+ GOLAN_CMD_OP_MANAGE_PAGES = 0x108,
+ GOLAN_CMD_OP_SET_HCA_CAP = 0x109,
+
+ GOLAN_CMD_OP_CREATE_MKEY = 0x200,
+ GOLAN_CMD_OP_QUERY_MKEY = 0x201,
+ GOLAN_CMD_OP_DESTROY_MKEY = 0x202,
+ GOLAN_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
+
+ GOLAN_CMD_OP_CREATE_EQ = 0x301,
+ GOLAN_CMD_OP_DESTROY_EQ = 0x302,
+ GOLAN_CMD_OP_QUERY_EQ = 0x303,
+
+ GOLAN_CMD_OP_CREATE_CQ = 0x400,
+ GOLAN_CMD_OP_DESTROY_CQ = 0x401,
+ GOLAN_CMD_OP_QUERY_CQ = 0x402,
+ GOLAN_CMD_OP_MODIFY_CQ = 0x403,
+
+ GOLAN_CMD_OP_CREATE_QP = 0x500,
+ GOLAN_CMD_OP_DESTROY_QP = 0x501,
+ GOLAN_CMD_OP_RST2INIT_QP = 0x502,
+ GOLAN_CMD_OP_INIT2RTR_QP = 0x503,
+ GOLAN_CMD_OP_RTR2RTS_QP = 0x504,
+ GOLAN_CMD_OP_RTS2RTS_QP = 0x505,
+ GOLAN_CMD_OP_SQERR2RTS_QP = 0x506,
+ GOLAN_CMD_OP_2ERR_QP = 0x507,
+ GOLAN_CMD_OP_RTS2SQD_QP = 0x508,
+ GOLAN_CMD_OP_SQD2RTS_QP = 0x509,
+ GOLAN_CMD_OP_2RST_QP = 0x50a,
+ GOLAN_CMD_OP_QUERY_QP = 0x50b,
+ GOLAN_CMD_OP_CONF_SQP = 0x50c,
+ GOLAN_CMD_OP_MAD_IFC = 0x50d,
+ GOLAN_CMD_OP_INIT2INIT_QP = 0x50e,
+ GOLAN_CMD_OP_SUSPEND_QP = 0x50f,
+ GOLAN_CMD_OP_UNSUSPEND_QP = 0x510,
+ GOLAN_CMD_OP_SQD2SQD_QP = 0x511,
+ GOLAN_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
+ GOLAN_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
+ GOLAN_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
+
+ GOLAN_CMD_OP_CREATE_PSV = 0x600,
+ GOLAN_CMD_OP_DESTROY_PSV = 0x601,
+ GOLAN_CMD_OP_QUERY_PSV = 0x602,
+ GOLAN_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
+ GOLAN_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
+
+ GOLAN_CMD_OP_CREATE_SRQ = 0x700,
+ GOLAN_CMD_OP_DESTROY_SRQ = 0x701,
+ GOLAN_CMD_OP_QUERY_SRQ = 0x702,
+ GOLAN_CMD_OP_ARM_RQ = 0x703,
+ GOLAN_CMD_OP_RESIZE_SRQ = 0x704,
+
+ GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
+ GOLAN_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
+ GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+
+ GOLAN_CMD_OP_ALLOC_PD = 0x800,
+ GOLAN_CMD_OP_DEALLOC_PD = 0x801,
+ GOLAN_CMD_OP_ALLOC_UAR = 0x802,
+ GOLAN_CMD_OP_DEALLOC_UAR = 0x803,
+
+ GOLAN_CMD_OP_ATTACH_TO_MCG = 0x806,
+ GOLAN_CMD_OP_DETACH_FROM_MCG = 0x807,
+
+
+ GOLAN_CMD_OP_ALLOC_XRCD = 0x80e,
+ GOLAN_CMD_OP_DEALLOC_XRCD = 0x80f,
+
+ GOLAN_CMD_OP_ACCESS_REG = 0x805,
+};
+
+struct golan_inbox_hdr {
+ __be16 opcode;
+ u8 rsvd[4];
+ __be16 opmod;
+} __attribute ( ( packed ) );
+
+struct golan_cmd_layout {
+ u8 type;
+ u8 rsvd0[3];
+ __be32 inlen;
+ union {
+ __be64 in_ptr;
+ __be32 in_ptr32[2];
+ };
+ __be32 in[4];
+ __be32 out[4];
+ union {
+ __be64 out_ptr;
+ __be32 out_ptr32[2];
+ };
+ __be32 outlen;
+ u8 token;
+ u8 sig;
+ u8 rsvd1;
+ volatile u8 status_own;
+} __attribute ( ( packed ) );
+
+struct golan_outbox_hdr {
+ u8 status;
+ u8 rsvd[3];
+ __be32 syndrome;
+} __attribute ( ( packed ) );
+
+enum {
+ GOLAN_DEV_CAP_FLAG_RC = 1LL << 0,
+ GOLAN_DEV_CAP_FLAG_UC = 1LL << 1,
+ GOLAN_DEV_CAP_FLAG_UD = 1LL << 2,
+ GOLAN_DEV_CAP_FLAG_XRC = 1LL << 3,
+ GOLAN_DEV_CAP_FLAG_SRQ = 1LL << 6,
+ GOLAN_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
+ GOLAN_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
+ GOLAN_DEV_CAP_FLAG_APM = 1LL << 17,
+ GOLAN_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
+ GOLAN_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
+ GOLAN_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
+ GOLAN_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
+ GOLAN_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
+ GOLAN_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
+ GOLAN_DEV_CAP_FLAG_DCT = 1LL << 41,
+ GOLAN_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46,
+};
+
+
+struct golan_hca_cap {
+ u8 rsvd1[16];
+ u8 log_max_srq_sz;
+ u8 log_max_qp_sz;
+ u8 rsvd2;
+ u8 log_max_qp;
+ u8 log_max_strq_sz;
+ u8 log_max_srqs;
+ u8 rsvd4[2];
+ u8 rsvd5;
+ u8 log_max_cq_sz;
+ u8 rsvd6;
+ u8 log_max_cq;
+ u8 log_max_eq_sz;
+ u8 log_max_mkey;
+ u8 rsvd7;
+ u8 log_max_eq;
+ u8 max_indirection;
+ u8 log_max_mrw_sz;
+ u8 log_max_bsf_list_sz;
+ u8 log_max_klm_list_sz;
+ u8 rsvd_8_0;
+ u8 log_max_ra_req_dc;
+ u8 rsvd_8_1;
+ u8 log_max_ra_res_dc;
+ u8 rsvd9;
+ u8 log_max_ra_req_qp;
+ u8 rsvd10;
+ u8 log_max_ra_res_qp;
+ u8 rsvd11[4];
+ __be16 max_qp_count;
+ __be16 pkey_table_size;
+ u8 rsvd13;
+ u8 local_ca_ack_delay;
+ u8 rsvd14;
+ u8 num_ports;
+ u8 log_max_msg;
+ u8 rsvd15[3];
+ __be16 stat_rate_support;
+ u8 rsvd16[2];
+ __be64 flags;
+ u8 rsvd17;
+ u8 uar_sz;
+ u8 rsvd18;
+ u8 log_pg_sz;
+ __be16 bf_log_bf_reg_size;
+ u8 rsvd19[4];
+ __be16 max_wqe_sz_sq;
+ u8 rsvd20[2];
+ __be16 max_wqe_sz_rq;
+ u8 rsvd21[2];
+ __be16 max_wqe_sz_sq_dc;
+ u8 rsvd22[4];
+ __be16 max_qp_mcg;
+ u8 rsvd23;
+ u8 log_max_mcg;
+ u8 rsvd24;
+ u8 log_max_pd;
+ u8 rsvd25;
+ u8 log_max_xrcd;
+ u8 rsvd26[40];
+ __be32 uar_page_sz;
+ u8 rsvd27[28];
+ u8 log_msx_atomic_size_qp;
+ u8 rsvd28[2];
+ u8 log_msx_atomic_size_dc;
+ u8 rsvd29[76];
+} __attribute ( ( packed ) );
+
+struct golan_query_pages_inbox {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_query_pages_outbox {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[2];
+ __be16 func_id;
+ __be32 num_pages;
+} __attribute ( ( packed ) );
+
+struct golan_cmd_query_hca_cap_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_cmd_query_hca_cap_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd0[8];
+ struct golan_hca_cap hca_cap;
+} __attribute ( ( packed ) );
+
+struct golan_cmd_set_hca_cap_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd[8];
+ struct golan_hca_cap hca_cap;
+} __attribute ( ( packed ) );
+
+struct golan_cmd_set_hca_cap_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd0[8];
+} __attribute ( ( packed ) );
+
+struct golan_cmd_init_hca_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd0[2];
+ __be16 profile;
+ u8 rsvd1[4];
+} __attribute ( ( packed ) );
+
+struct golan_cmd_init_hca_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+enum golan_teardown {
+ GOLAN_TEARDOWN_GRACEFUL = 0x0,
+ GOLAN_TEARDOWN_PANIC = 0x1
+};
+
+struct golan_cmd_teardown_hca_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd0[2];
+ __be16 profile;
+ u8 rsvd1[4];
+} __attribute ( ( packed ) );
+
+struct golan_cmd_teardown_hca_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_enable_hca_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_enable_hca_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_disable_hca_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_disable_hca_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_manage_pages_inbox_data {
+ u8 rsvd2[16];
+ __be64 pas[0];
+} __attribute ( ( packed ) );
+
+struct golan_manage_pages_inbox {
+ struct golan_inbox_hdr hdr;
+ __be16 rsvd0;
+ __be16 func_id;
+ __be32 num_entries;
+ struct golan_manage_pages_inbox_data data;
+} __attribute ( ( packed ) );
+
+struct golan_manage_pages_outbox_data {
+ __be64 pas[0];
+} __attribute ( ( packed ) );
+
+struct golan_manage_pages_outbox {
+ struct golan_outbox_hdr hdr;
+ __be32 num_entries;
+ __be32 rsrvd;
+ struct golan_manage_pages_outbox_data data;
+} __attribute ( ( packed ) );
+
+struct golan_reg_host_endianess {
+ u8 he;
+ u8 rsvd[15];
+} __attribute ( ( packed ) );
+
+struct golan_cmd_prot_block {
+ union {
+ __be64 data[GOLAN_CMD_PAS_CNT];
+ u8 bdata[GOLAN_CMD_DATA_BLOCK_SIZE];
+ };
+ u8 rsvd0[48];
+ __be64 next;
+ __be32 block_num;
+ u8 rsvd1;
+ u8 token;
+ u8 ctrl_sig;
+ u8 sig;
+} __attribute ( ( packed ) );
+
+/* MAD IFC structures */
+#define GOLAN_MAD_SIZE 256
+#define GOLAN_MAD_IFC_NO_VALIDATION 0x3
+#define GOLAN_MAD_IFC_RLID_BIT 16
+
+struct golan_mad_ifc_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be16 remote_lid;
+ u8 rsvd0;
+ u8 port;
+ u8 rsvd1[4];
+ u8 mad[GOLAN_MAD_SIZE];
+} __attribute ( ( packed ) );
+
+struct golan_mad_ifc_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+ u8 mad[GOLAN_MAD_SIZE];
+} __attribute ( ( packed ) );
+
+/* UAR Structures */
+struct golan_alloc_uar_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_alloc_uar_mbox_out {
+ struct golan_outbox_hdr hdr;
+ __be32 uarn;
+ u8 rsvd[4];
+} __attribute ( ( packed ) );
+
+struct golan_free_uar_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 uarn;
+ u8 rsvd[4];
+} __attribute ( ( packed ) );
+
+struct golan_free_uar_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+/* Event Queue Structures */
+enum {
+ GOLAN_EQ_STATE_ARMED = 0x9,
+ GOLAN_EQ_STATE_FIRED = 0xa,
+ GOLAN_EQ_STATE_ALWAYS_ARMED = 0xb,
+};
+
+
+struct golan_eq_context {
+ u8 status;
+ u8 ec_oi;
+ u8 st;
+ u8 rsvd2[7];
+ __be16 page_pffset;
+ __be32 log_sz_usr_page;
+ u8 rsvd3[7];
+ u8 intr;
+ u8 log_page_size;
+ u8 rsvd4[15];
+ __be32 consumer_counter;
+ __be32 produser_counter;
+ u8 rsvd5[16];
+} __attribute ( ( packed ) );
+
+struct golan_create_eq_mbox_in_data {
+ struct golan_eq_context ctx;
+ u8 rsvd2[8];
+ __be64 events_mask;
+ u8 rsvd3[176];
+ __be64 pas[0];
+} __attribute ( ( packed ) );
+
+struct golan_create_eq_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 input_eqn;
+ u8 rsvd1[4];
+ struct golan_create_eq_mbox_in_data data;
+} __attribute ( ( packed ) );
+
+struct golan_create_eq_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 eq_number;
+ u8 rsvd1[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_eq_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 eqn;
+ u8 rsvd1[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_eq_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+/***********************************************/
+/************** Query Vport ****************/
+struct golan_query_hca_vport_context_inbox {
+ struct golan_inbox_hdr hdr;
+ __be16 other_vport : 1;
+ __be16 rsvd1 : 7;
+ __be16 port_num : 4;
+ __be16 rsvd2 : 4;
+ __be16 vport_number;
+ u8 rsvd[4];
+} __attribute ( ( packed ) );
+
+struct golan_query_hca_vport_context_data {
+ __be32 field_select;
+ __be32 rsvd1[7];
+ //****
+ __be16 sm_virt_aware : 1;
+ __be16 has_smi : 1;
+ __be16 has_raw : 1;
+ __be16 grh_required : 1;
+ __be16 rsvd2 : 12;
+ u8 port_physical_state : 4;
+ u8 vport_state_policy : 4;
+ u8 port_state : 4;
+ u8 vport_state : 4;
+ //****
+ u8 rsvd3[4];
+ //****
+ __be32 system_image_guid[2];
+ //****
+ __be32 port_guid[2];
+ //****
+ __be32 node_guid[2];
+ //****
+ __be32 cap_mask1;
+ __be32 cap_mask1_field_select;
+ __be32 cap_mask2;
+ __be32 cap_mask2_field_select;
+ u8 rsvd4[16];
+ __be16 lid;
+ u8 rsvd5 : 4;
+ u8 init_type_reply : 4;
+ u8 lmc : 3;
+ u8 subnet_timeout : 5;
+ __be16 sm_lid;
+ u8 sm_sl : 4;
+ u8 rsvd6 : 4;
+ u8 rsvd7;
+ __be16 qkey_violation_counter;
+ __be16 pkey_violation_counter;
+ u8 rsvd8[100];
+} __attribute ( ( packed ) );
+
+struct golan_query_hca_vport_context_outbox {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+ struct golan_query_hca_vport_context_data context_data;
+} __attribute ( ( packed ) );
+
+struct golan_query_hca_vport_gid_inbox {
+ struct golan_inbox_hdr hdr;
+ u8 other_vport : 1;
+ u8 rsvd1 : 7;
+ u8 port_num : 4;
+ u8 rsvd2 : 4;
+ __be16 vport_number;
+ __be16 rsvd3;
+ __be16 gid_index;
+} __attribute ( ( packed ) );
+
+struct golan_query_hca_vport_gid_outbox {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd0[4];
+ __be16 gids_num;
+ u8 rsvd1[2];
+ __be32 gid0[4];
+} __attribute ( ( packed ) );
+
+struct golan_query_hca_vport_pkey_inbox {
+ struct golan_inbox_hdr hdr;
+ u8 other_vport : 1;
+ u8 rsvd1 : 7;
+ u8 port_num : 4;
+ u8 rsvd2 : 4;
+ __be16 vport_number;
+ __be16 rsvd3;
+ __be16 pkey_index;
+} __attribute ( ( packed ) );
+
+struct golan_query_hca_vport_pkey_data {
+ __be16 rsvd1;
+ __be16 pkey0;
+} __attribute ( ( packed ) );
+
+struct golan_query_hca_vport_pkey_outbox {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+ struct golan_query_hca_vport_pkey_data *pkey_data;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_comp {
+ __be32 reserved[6];
+ __be32 cqn;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_qp_srq {
+ __be32 reserved[6];
+ __be32 qp_srq_n;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_cq_err {
+ __be32 cqn;
+ u8 reserved1[7];
+ u8 syndrome;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_dropped_packet {
+};
+
+struct golan_eqe_port_state {
+ u8 reserved0[8];
+ u8 port;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_gpio {
+ __be32 reserved0[2];
+ __be64 gpio_event;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_congestion {
+ u8 type;
+ u8 rsvd0;
+ u8 congestion_level;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_stall_vl {
+ u8 rsvd0[3];
+ u8 port_vl;
+} __attribute ( ( packed ) );
+
+struct golan_eqe_cmd {
+ __be32 vector;
+ __be32 rsvd[6];
+} __attribute ( ( packed ) );
+
+struct golan_eqe_page_req {
+ u8 rsvd0[2];
+ __be16 func_id;
+ u8 rsvd1[2];
+ __be16 num_pages;
+ __be32 rsvd2[5];
+} __attribute ( ( packed ) );
+
+union ev_data {
+ __be32 raw[7];
+ struct golan_eqe_cmd cmd;
+ struct golan_eqe_comp comp;
+ struct golan_eqe_qp_srq qp_srq;
+ struct golan_eqe_cq_err cq_err;
+ struct golan_eqe_dropped_packet dp;
+ struct golan_eqe_port_state port;
+ struct golan_eqe_gpio gpio;
+ struct golan_eqe_congestion cong;
+ struct golan_eqe_stall_vl stall_vl;
+ struct golan_eqe_page_req req_pages;
+} __attribute__ ((packed));
+
+struct golan_eqe {
+ u8 rsvd0;
+ u8 type;
+ u8 rsvd1;
+ u8 sub_type;
+ __be32 rsvd2[7];
+ union ev_data data;
+ __be16 rsvd3;
+ u8 signature;
+ u8 owner;
+} __attribute__ ((packed));
+
+/* Protection Domain Structures */
+struct golan_alloc_pd_mbox_in {
+ struct golan_inbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+struct golan_alloc_pd_mbox_out {
+ struct golan_outbox_hdr hdr;
+ __be32 pdn;
+ u8 rsvd[4];
+} __attribute ( ( packed ) );
+
+struct golan_dealloc_pd_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 pdn;
+ u8 rsvd[4];
+} __attribute ( ( packed ) );
+
+struct golan_dealloc_pd_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+/* Memory key structures */
+#define GOLAN_IB_ACCESS_LOCAL_READ (1 << 2)
+#define GOLAN_IB_ACCESS_LOCAL_WRITE (1 << 3)
+#define GOLAN_MKEY_LEN64 (1 << 31)
+#define GOLAN_CREATE_MKEY_SEG_QPN_BIT 8
+
+struct golan_mkey_seg {
+ /*
+ * This is a two bit field occupying bits 31-30.
+ * bit 31 is always 0,
+	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
+ */
+ u8 status;
+ u8 pcie_control;
+ u8 flags;
+ u8 version;
+ __be32 qpn_mkey7_0;
+ u8 rsvd1[4];
+ __be32 flags_pd;
+ __be64 start_addr;
+ __be64 len;
+ __be32 bsfs_octo_size;
+ u8 rsvd2[16];
+ __be32 xlt_oct_size;
+ u8 rsvd3[3];
+ u8 log2_page_size;
+ u8 rsvd4[4];
+} __attribute ( ( packed ) );
+
+struct golan_create_mkey_mbox_in_data {
+ struct golan_mkey_seg seg;
+ u8 rsvd1[16];
+ __be32 xlat_oct_act_size;
+ __be32 bsf_coto_act_size;
+ u8 rsvd2[168];
+ __be64 pas[0];
+} __attribute ( ( packed ) );
+
+struct golan_create_mkey_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 input_mkey_index;
+ u8 rsvd0[4];
+ struct golan_create_mkey_mbox_in_data data;
+} __attribute ( ( packed ) );
+
+struct golan_create_mkey_mbox_out {
+ struct golan_outbox_hdr hdr;
+ __be32 mkey;
+ u8 rsvd[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_mkey_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 mkey;
+ u8 rsvd[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_mkey_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd[8];
+} __attribute ( ( packed ) );
+
+/* Completion Queue Structures */
+enum {
+ GOLAN_CQ_STATE_ARMED = 9,
+ GOLAN_CQ_STATE_ALWAYS_ARMED = 0xb,
+ GOLAN_CQ_STATE_FIRED = 0xa
+};
+
+enum {
+ GOLAN_CQE_REQ = 0,
+ GOLAN_CQE_RESP_WR_IMM = 1,
+ GOLAN_CQE_RESP_SEND = 2,
+ GOLAN_CQE_RESP_SEND_IMM = 3,
+ GOLAN_CQE_RESP_SEND_INV = 4,
+ GOLAN_CQE_RESIZE_CQ = 0xff, /* TBD */
+ GOLAN_CQE_REQ_ERR = 13,
+ GOLAN_CQE_RESP_ERR = 14
+};
+
+struct golan_cq_context {
+ u8 status;
+ u8 cqe_sz_flags;
+ u8 st;
+ u8 rsvd3;
+ u8 rsvd4[6];
+ __be16 page_offset;
+ __be32 log_sz_usr_page;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ __be16 rsvd20;
+ __be16 c_eqn;
+ u8 log_pg_sz;
+ u8 rsvd25[7];
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_counter;
+ __be32 producer_counter;
+ u8 rsvd48[8];
+ __be64 db_record_addr;
+} __attribute ( ( packed ) );
+
+
+struct golan_create_cq_mbox_in_data {
+ struct golan_cq_context ctx;
+ u8 rsvd6[192];
+ __be64 pas[0];
+} __attribute ( ( packed ) );
+
+struct golan_create_cq_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 input_cqn;
+ u8 rsvdx[4];
+ struct golan_create_cq_mbox_in_data data;
+} __attribute ( ( packed ) );
+
+struct golan_create_cq_mbox_out {
+ struct golan_outbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_cq_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_cq_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd0[8];
+} __attribute ( ( packed ) );
+
+struct golan_err_cqe {
+ u8 rsvd0[32];
+ __be32 srqn;
+ u8 rsvd1[16];
+ u8 hw_syndrom;
+ u8 rsvd2;
+ u8 vendor_err_synd;
+ u8 syndrome;
+ __be32 s_wqe_opcode_qpn;
+ __be16 wqe_counter;
+ u8 signature;
+ u8 op_own;
+} __attribute ( ( packed ) );
+
+struct golan_cqe64 {
+ u8 rsvd0[17];
+ u8 ml_path;
+ u8 rsvd20[4];
+ __be16 slid;
+ __be32 flags_rqpn;
+ u8 rsvd28[4];
+ __be32 srqn;
+ __be32 imm_inval_pkey;
+ u8 rsvd40[4];
+ __be32 byte_cnt;
+ __be64 timestamp;
+ __be32 sop_drop_qpn;
+ __be16 wqe_counter;
+ u8 signature;
+ u8 op_own;
+} __attribute ( ( packed ) );
+
+/* Queue Pair Structures */
+#define GOLAN_QP_CTX_ST_BIT 16
+#define GOLAN_QP_CTX_PM_STATE_BIT 11
+#define GOLAN_QP_CTX_FRE_BIT 11
+#define GOLAN_QP_CTX_RLKY_BIT 4
+#define GOLAN_QP_CTX_RQ_SIZE_BIT 3
+#define GOLAN_QP_CTX_SQ_SIZE_BIT 11
+#define GOLAN_QP_CTX_MTU_BIT 5
+#define GOLAN_QP_CTX_ACK_REQ_FREQ_BIT 28
+
+enum {
+ GOLAN_QP_CTX_DONT_USE_RSRVD_LKEY = 0,
+ GOLAN_QP_CTX_USE_RSRVD_LKEY = 1
+};
+
+enum {
+ GOLAN_IB_ACK_REQ_FREQ = 8,
+};
+
+enum golan_qp_optpar {
+ GOLAN_QP_PARAM_ALT_ADDR_PATH = 1 << 0,
+ GOLAN_QP_PARAM_RRE = 1 << 1,
+ GOLAN_QP_PARAM_RAE = 1 << 2,
+ GOLAN_QP_PARAM_RWE = 1 << 3,
+ GOLAN_QP_PARAM_PKEY_INDEX = 1 << 4,
+ GOLAN_QP_PARAM_Q_KEY = 1 << 5,
+ GOLAN_QP_PARAM_RNR_TIMEOUT = 1 << 6,
+ GOLAN_QP_PARAM_PRIMARY_ADDR_PATH = 1 << 7,
+ GOLAN_QP_PARAM_SRA_MAX = 1 << 8,
+ GOLAN_QP_PARAM_RRA_MAX = 1 << 9,
+ GOLAN_QP_PARAM_PM_STATE = 1 << 10,
+ GOLAN_QP_PARAM_RETRY_COUNT = 1 << 12,
+ GOLAN_QP_PARAM_RNR_RETRY = 1 << 13,
+ GOLAN_QP_PARAM_ACK_TIMEOUT = 1 << 14,
+ GOLAN_QP_PARAM_PRI_PORT = 1 << 16,
+ GOLAN_QP_PARAM_SRQN = 1 << 18,
+ GOLAN_QP_PARAM_CQN_RCV = 1 << 19,
+ GOLAN_QP_PARAM_DC_HS = 1 << 20,
+ GOLAN_QP_PARAM_DC_KEY = 1 << 21
+};
+
+#define GOLAN_QP_PARAMS_INIT2RTR_MASK (GOLAN_QP_PARAM_PKEY_INDEX |\
+ GOLAN_QP_PARAM_Q_KEY |\
+ GOLAN_QP_PARAM_RWE |\
+ GOLAN_QP_PARAM_RRE)
+
+#define GOLAN_QP_PARAMS_RTR2RTS_MASK (GOLAN_QP_PARAM_PM_STATE |\
+ GOLAN_QP_PARAM_RNR_TIMEOUT |\
+ GOLAN_QP_PARAM_Q_KEY |\
+ GOLAN_QP_PARAM_RWE |\
+ GOLAN_QP_PARAM_RRE)
+
+
+enum {
+ GOLAN_QP_ST_RC = 0x0,
+ GOLAN_QP_ST_UC = 0x1,
+ GOLAN_QP_ST_UD = 0x2,
+ GOLAN_QP_ST_XRC = 0x3,
+ GOLAN_QP_ST_MLX = 0x4,
+ GOLAN_QP_ST_DC = 0x5,
+ GOLAN_QP_ST_QP0 = 0x7,
+ GOLAN_QP_ST_QP1 = 0x8,
+ GOLAN_QP_ST_RAW_ETHERTYPE = 0x9,
+ GOLAN_QP_ST_RAW_IPV6 = 0xa,
+ GOLAN_QP_ST_SNIFFER = 0xb,
+ GOLAN_QP_ST_SYNC_UMR = 0xe,
+ GOLAN_QP_ST_PTP_1588 = 0xd,
+ GOLAN_QP_ST_REG_UMR = 0xc,
+ GOLAN_QP_ST_MAX
+};
+
+enum {
+ GOLAN_QP_PM_MIGRATED = 0x3,
+ GOLAN_QP_PM_ARMED = 0x0,
+ GOLAN_QP_PM_REARM = 0x1
+};
+
+enum {
+ GOLAN_QP_LAT_SENSITIVE = 1 << 28,
+ GOLAN_QP_ENABLE_SIG = 1 << 31
+};
+
+
+struct golan_qp_db {
+ u8 rsvd0[2];
+ __be16 recv_db;
+ u8 rsvd1[2];
+ __be16 send_db;
+} __attribute ( ( packed ) );
+
+enum {
+	GOLAN_WQE_CTRL_CQ_UPDATE = 2 << 2, /* generate a CQE on completion */
+ GOLAN_WQE_CTRL_SOLICITED = 1 << 1
+};
+
+struct golan_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ u8 signature;
+ u8 rsvd[2];
+ u8 fm_ce_se;
+ __be32 imm;
+} __attribute ( ( packed ) );
+
+struct golan_av {
+ union {
+ struct {
+ __be32 qkey;
+ __be32 reserved;
+ } qkey;
+ __be64 dc_key;
+ } key;
+ __be32 dqp_dct;
+ u8 stat_rate_sl;
+ u8 fl_mlid;
+ __be16 rlid;
+ u8 reserved0[10];
+ u8 tclass;
+ u8 hop_limit;
+ __be32 grh_gid_fl;
+ u8 rgid[16];
+} __attribute ( ( packed ) );
+
+struct golan_wqe_data_seg {
+ __be32 byte_count;
+ __be32 lkey;
+ __be64 addr;
+} __attribute ( ( packed ) );
+
+struct golan_wqe_signature_seg {
+ u8 rsvd0[4];
+ u8 signature;
+ u8 rsvd1[11];
+} __attribute ( ( packed ) );
+
+struct golan_wqe_inline_seg {
+ __be32 byte_count;
+} __attribute ( ( packed ) );
+
+struct golan_qp_path {
+ u8 fl;
+ u8 rsvd3;
+ u8 free_ar;
+ u8 pkey_index;
+ u8 rsvd0;
+ u8 grh_mlid;
+ __be16 rlid;
+ u8 ackto_lt;
+ u8 mgid_index;
+ u8 static_rate;
+ u8 hop_limit;
+ __be32 tclass_flowlabel;
+ u8 rgid[16];
+ u8 rsvd1[4];
+ u8 sl;
+ u8 port;
+ u8 rsvd2[6];
+} __attribute ( ( packed ) );
+
+struct golan_qp_context {
+ __be32 flags;
+ __be32 flags_pd;
+ u8 mtu_msgmax;
+ u8 rq_size_stride;
+ __be16 sq_crq_size;
+ __be32 qp_counter_set_usr_page;
+ __be32 wire_qpn;
+ __be32 log_pg_sz_remote_qpn;
+ struct golan_qp_path pri_path;
+ struct golan_qp_path alt_path;
+ __be32 params1;
+ u8 reserved2[4];
+ __be32 next_send_psn;
+ __be32 cqn_send;
+ u8 reserved3[8];
+ __be32 last_acked_psn;
+ __be32 ssn;
+ __be32 params2;
+ __be32 rnr_nextrecvpsn;
+ __be32 xrcd;
+ __be32 cqn_recv;
+ __be64 db_rec_addr;
+ __be32 qkey;
+ __be32 rq_type_srqn;
+ __be32 rmsn;
+ __be16 hw_sq_wqe_counter;
+ __be16 sw_sq_wqe_counter;
+ __be16 hw_rcyclic_byte_counter;
+ __be16 hw_rq_counter;
+ __be16 sw_rcyclic_byte_counter;
+ __be16 sw_rq_counter;
+ u8 rsvd0[5];
+ u8 cgs;
+ u8 cs_req;
+ u8 cs_res;
+ __be64 dc_access_key;
+ u8 rsvd1[24];
+} __attribute ( ( packed ) );
+
+struct golan_create_qp_mbox_in_data {
+ __be32 opt_param_mask;
+ u8 rsvd1[4];
+ struct golan_qp_context ctx;
+ u8 rsvd3[16];
+ __be64 pas[0];
+} __attribute ( ( packed ) );
+
+struct golan_create_qp_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 input_qpn;
+ u8 rsvd0[4];
+ struct golan_create_qp_mbox_in_data data;
+} __attribute ( ( packed ) );
+
+struct golan_create_qp_mbox_out {
+ struct golan_outbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd0[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_qp_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd0[4];
+} __attribute ( ( packed ) );
+
+struct golan_destroy_qp_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd0[8];
+} __attribute ( ( packed ) );
+
+struct golan_modify_qp_mbox_in_data {
+ __be32 optparam;
+ u8 rsvd0[4];
+ struct golan_qp_context ctx;
+} __attribute ( ( packed ) );
+
+struct golan_modify_qp_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd1[4];
+ struct golan_modify_qp_mbox_in_data data;
+} __attribute ( ( packed ) );
+
+struct golan_modify_qp_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvd0[8];
+} __attribute ( ( packed ) );
+
+struct golan_attach_mcg_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 qpn;
+ __be32 rsvd;
+ u8 gid[16];
+} __attribute ( ( packed ) );
+
+struct golan_attach_mcg_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvf[8];
+} __attribute ( ( packed ) );
+
+struct golan_detach_mcg_mbox_in {
+ struct golan_inbox_hdr hdr;
+ __be32 qpn;
+ __be32 rsvd;
+ u8 gid[16];
+} __attribute ( ( packed ) );
+
+struct golan_detach_mcg_mbox_out {
+ struct golan_outbox_hdr hdr;
+ u8 rsvf[8];
+} __attribute ( ( packed ) );
+
+
+#define MAILBOX_SIZE sizeof(struct golan_cmd_prot_block)
+
+#endif /* __CIB_PRM__ */
diff --git a/src/drivers/infiniband/flexboot_nodnic.c b/src/drivers/infiniband/flexboot_nodnic.c
new file mode 100644
index 00000000..dea19ca6
--- /dev/null
+++ b/src/drivers/infiniband/flexboot_nodnic.c
@@ -0,0 +1,1479 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <byteswap.h>
+#include <ipxe/pci.h>
+#include <ipxe/malloc.h>
+#include <ipxe/umalloc.h>
+#include <ipxe/if_ether.h>
+#include <ipxe/ethernet.h>
+#include <ipxe/vlan.h>
+#include <ipxe/io.h>
+#include "flexboot_nodnic.h"
+#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h"
+#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
+#include "mlx_utils/include/public/mlx_pci_gw.h"
+#include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h"
+#include "mlx_utils/include/public/mlx_types.h"
+#include "mlx_utils/include/public/mlx_utils.h"
+#include "mlx_utils/include/public/mlx_bail.h"
+#include "mlx_nodnic/include/mlx_cmd.h"
+#include "mlx_utils/include/public/mlx_memory.h"
+#include "mlx_utils/include/public/mlx_pci.h"
+#include "mlx_nodnic/include/mlx_device.h"
+#include "mlx_nodnic/include/mlx_port.h"
+
+/***************************************************************************
+ *
+ * Completion queue operations
+ *
+ ***************************************************************************
+ */
+static int flexboot_nodnic_arm_cq ( struct flexboot_nodnic_port *port ) {
+#ifndef DEVICE_CX3
+ mlx_uint32 val = ( port->eth_cq->next_idx & 0xffff );
+ if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val ) ) {
+ MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" );
+ return MLX_FAILED;
+ }
+#else
+ mlx_utils *utils = port->port_priv.device->utils;
+ nodnic_port_data_flow_gw *ptr = port->port_priv.data_flow_gw;
+ mlx_uint32 data = 0;
+ mlx_uint32 val = 0;
+
+ if ( port->port_priv.device->device_cap.crspace_doorbells == 0 ) {
+ val = ( port->eth_cq->next_idx & 0xffff );
+ if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val ) ) {
+ MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" );
+ return MLX_FAILED;
+ }
+ } else {
+ /* Arming the CQ with CQ CI should be with this format -
+ * 16 bit - CQ CI - same endianness as the FW (don't swap bytes)
+ * 15 bit - reserved
+ * 1 bit - arm CQ - must correct the endianness with the reserved above */
+ data = ( ( ( port->eth_cq->next_idx & 0xffff ) << 16 ) | 0x0080 );
+ /* Write the new index and update FW that new data was submitted */
+ mlx_pci_mem_write ( utils, MlxPciWidthUint32, 0,
+ ( mlx_uint64 ) & ( ptr->armcq_cq_ci_dword ), 1, &data );
+ }
+#endif
+ return 0;
+}
+
+/**
+ * Create completion queue
+ *
+ * @v ibdev Infiniband device
+ * @v cq Completion queue
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_create_cq ( struct ib_device *ibdev ,
+ struct ib_completion_queue *cq ) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq;
+ mlx_status status = MLX_SUCCESS;
+
+ flexboot_nodnic_cq = (struct flexboot_nodnic_completion_queue *)
+ zalloc(sizeof(*flexboot_nodnic_cq));
+ if ( flexboot_nodnic_cq == NULL ) {
+ status = MLX_OUT_OF_RESOURCES;
+ goto qp_alloc_err;
+ }
+
+ status = nodnic_port_create_cq(&port->port_priv,
+ cq->num_cqes *
+ flexboot_nodnic->callbacks->get_cqe_size(),
+ &flexboot_nodnic_cq->nodnic_completion_queue
+ );
+ MLX_FATAL_CHECK_STATUS(status, create_err,
+ "nodnic_port_create_cq failed");
+ flexboot_nodnic->callbacks->cqe_set_owner(
+ flexboot_nodnic_cq->nodnic_completion_queue->cq_virt,
+ cq->num_cqes);
+
+
+ ib_cq_set_drvdata ( cq, flexboot_nodnic_cq );
+ return status;
+create_err:
+ free(flexboot_nodnic_cq);
+qp_alloc_err:
+ return status;
+}
+
+/**
+ * Destroy completion queue
+ *
+ * @v ibdev Infiniband device
+ * @v cq Completion queue
+ */
+static void flexboot_nodnic_destroy_cq ( struct ib_device *ibdev ,
+ struct ib_completion_queue *cq ) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq );
+
+ nodnic_port_destroy_cq(&port->port_priv,
+ flexboot_nodnic_cq->nodnic_completion_queue);
+
+ free(flexboot_nodnic_cq);
+}
+
+static
+struct ib_work_queue * flexboot_nodnic_find_wq ( struct ib_device *ibdev ,
+ struct ib_completion_queue *cq,
+ unsigned long qpn, int is_send ) {
+ struct ib_work_queue *wq;
+ struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp;
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct nodnic_ring *ring;
+ mlx_uint32 out_qpn;
+ list_for_each_entry ( wq, &cq->work_queues, list ) {
+ flexboot_nodnic_qp = ib_qp_get_drvdata ( wq->qp );
+ if( wq->is_send == is_send && wq->is_send == TRUE ) {
+ ring = &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring;
+ } else if( wq->is_send == is_send && wq->is_send == FALSE ) {
+ ring = &flexboot_nodnic_qp->nodnic_queue_pair->receive.nodnic_ring;
+ } else {
+ continue;
+ }
+ nodnic_port_get_qpn(&port->port_priv, ring, &out_qpn);
+ if ( out_qpn == qpn )
+ return wq;
+ }
+ return NULL;
+}
+
+/**
+ * Handle completion
+ *
+ * @v ibdev Infiniband device
+ * @v cq Completion queue
+ * @v cqe Hardware completion queue entry
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_complete ( struct ib_device *ibdev,
+ struct ib_completion_queue *cq,
+ struct cqe_data *cqe_data ) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct ib_work_queue *wq;
+ struct ib_queue_pair *qp;
+ struct io_buffer *iobuf;
+ struct ib_address_vector recv_dest;
+ struct ib_address_vector recv_source;
+ unsigned long qpn;
+ unsigned long wqe_idx;
+ unsigned long wqe_idx_mask;
+ size_t len;
+ int rc = 0;
+
+ /* Parse completion */
+ qpn = cqe_data->qpn;
+
+ if ( cqe_data->is_error == TRUE ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx syndrome %x vendor %x\n",
+ flexboot_nodnic, cq->cqn, cqe_data->syndrome,
+ cqe_data->vendor_err_syndrome );
+ rc = -EIO;
+ /* Don't return immediately; propagate error to completer */
+ }
+
+ /* Identify work queue */
+ wq = flexboot_nodnic_find_wq( ibdev, cq, qpn, cqe_data->is_send );
+ if ( wq == NULL ) {
+ DBGC ( flexboot_nodnic,
+ "flexboot_nodnic %p CQN %#lx unknown %s QPN %#lx\n",
+ flexboot_nodnic, cq->cqn,
+ ( cqe_data->is_send ? "send" : "recv" ), qpn );
+ return -EIO;
+ }
+ qp = wq->qp;
+
+ /* Identify work queue entry */
+ wqe_idx = cqe_data->wqe_counter;
+ wqe_idx_mask = ( wq->num_wqes - 1 );
+ DBGCP ( flexboot_nodnic,
+ "NODNIC %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
+ flexboot_nodnic, cq->cqn, qp->qpn,
+ ( cqe_data->is_send ? "send" : "recv" ),
+ wqe_idx );
+
+ /* Identify I/O buffer */
+ iobuf = wq->iobufs[wqe_idx & wqe_idx_mask];
+ if ( iobuf == NULL ) {
+ DBGC ( flexboot_nodnic,
+ "NODNIC %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
+ flexboot_nodnic, cq->cqn, qp->qpn,
+ ( cqe_data->is_send ? "send" : "recv" ), wqe_idx );
+ return -EIO;
+ }
+ wq->iobufs[wqe_idx & wqe_idx_mask] = NULL;
+
+ if ( cqe_data->is_send == TRUE ) {
+ /* Hand off to completion handler */
+ ib_complete_send ( ibdev, qp, iobuf, rc );
+ } else if ( rc != 0 ) {
+ /* Propagate error to receive completion handler */
+ ib_complete_recv ( ibdev, qp, NULL, NULL, iobuf, rc );
+ } else {
+ /* Set received length */
+ len = cqe_data->byte_cnt;
+ assert ( len <= iob_tailroom ( iobuf ) );
+ iob_put ( iobuf, len );
+ memset ( &recv_dest, 0, sizeof ( recv_dest ) );
+ recv_dest.qpn = qpn;
+ memset ( &recv_source, 0, sizeof ( recv_source ) );
+ switch ( qp->type ) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ case IB_QPT_RC:
+ break;
+ case IB_QPT_ETH:
+ break;
+ default:
+ assert ( 0 );
+ return -EINVAL;
+ }
+ /* Hand off to completion handler */
+ ib_complete_recv ( ibdev, qp, &recv_dest,
+ &recv_source, iobuf, rc );
+ }
+
+ return rc;
+}
+/**
+ * Poll completion queue
+ *
+ * @v ibdev Infiniband device
+ * @v cq Completion queues
+ */
+static void flexboot_nodnic_poll_cq ( struct ib_device *ibdev,
+ struct ib_completion_queue *cq) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq );
+ void *cqe;
+ mlx_size cqe_size;
+ struct cqe_data cqe_data;
+ unsigned int cqe_idx_mask;
+ int rc;
+
+ cqe_size = flexboot_nodnic->callbacks->get_cqe_size();
+ while ( TRUE ) {
+ /* Look for completion entry */
+ cqe_idx_mask = ( cq->num_cqes - 1 );
+ cqe = ((uint8_t *)flexboot_nodnic_cq->nodnic_completion_queue->cq_virt) +
+ cqe_size * (cq->next_idx & cqe_idx_mask);
+
+ /* TODO: check fill_completion */
+ flexboot_nodnic->callbacks->fill_completion(cqe, &cqe_data);
+ if ( cqe_data.owner ^
+ ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
+ /* Entry still owned by hardware; end of poll */
+ break;
+ }
+ /* Handle completion */
+ rc = flexboot_nodnic_complete ( ibdev, cq, &cqe_data );
+ if ( rc != 0 ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx failed to complete: %s\n",
+ flexboot_nodnic, cq->cqn, strerror ( rc ) );
+ DBGC_HDA ( flexboot_nodnic, virt_to_phys ( cqe ),
+ cqe, sizeof ( *cqe ) );
+ }
+
+ /* Update completion queue's index */
+ cq->next_idx++;
+ }
+}
+/***************************************************************************
+ *
+ * Queue pair operations
+ *
+ ***************************************************************************
+ */
+
+
+/**
+ * Create queue pair
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_create_qp ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp ) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp;
+ mlx_status status = MLX_SUCCESS;
+
+ flexboot_nodnic_qp = (struct flexboot_nodnic_queue_pair *)zalloc(sizeof(*flexboot_nodnic_qp));
+ if ( flexboot_nodnic_qp == NULL ) {
+ status = MLX_OUT_OF_RESOURCES;
+ goto qp_alloc_err;
+ }
+
+ status = nodnic_port_create_qp(&port->port_priv, qp->type,
+ qp->send.num_wqes * sizeof(struct nodnic_send_wqbb),
+ qp->send.num_wqes,
+ qp->recv.num_wqes * sizeof(struct nodnic_recv_wqe),
+ qp->recv.num_wqes,
+ &flexboot_nodnic_qp->nodnic_queue_pair);
+ MLX_FATAL_CHECK_STATUS(status, create_err,
+ "nodnic_port_create_qp failed");
+ ib_qp_set_drvdata ( qp, flexboot_nodnic_qp );
+ return status;
+create_err:
+ free(flexboot_nodnic_qp);
+qp_alloc_err:
+ return status;
+}
+
+/**
+ * Modify queue pair
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_modify_qp ( struct ib_device *ibdev __unused,
+ struct ib_queue_pair *qp __unused) {
+ /*not needed*/
+ return 0;
+}
+
+/**
+ * Destroy queue pair
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ */
+static void flexboot_nodnic_destroy_qp ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp ) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
+
+ nodnic_port_destroy_qp(&port->port_priv, qp->type,
+ flexboot_nodnic_qp->nodnic_queue_pair);
+
+ free(flexboot_nodnic_qp);
+}
+
+/***************************************************************************
+ *
+ * Work request operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Post send work queue entry
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v av Address vector
+ * @v iobuf I/O buffer
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_post_send ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct ib_address_vector *av,
+ struct io_buffer *iobuf) {
+
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct ib_work_queue *wq = &qp->send;
+ struct nodnic_send_wqbb *wqbb;
+ nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
+ struct nodnic_send_ring *send_ring = &nodnic_qp->send;
+ mlx_status status = MLX_SUCCESS;
+ unsigned int wqe_idx_mask;
+ unsigned long wqe_idx;
+
+ if ( ( port->port_priv.dma_state == FALSE ) ||
+ ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic DMA disabled\n");
+ status = -ENETDOWN;
+ goto post_send_done;
+ }
+
+ /* Allocate work queue entry */
+ wqe_idx = wq->next_idx;
+ wqe_idx_mask = ( wq->num_wqes - 1 );
+ if ( wq->iobufs[wqe_idx & wqe_idx_mask] ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx send queue full\n",
+ flexboot_nodnic, qp->qpn );
+ status = -ENOBUFS;
+ goto post_send_done;
+ }
+ wqbb = &send_ring->wqe_virt[wqe_idx & wqe_idx_mask];
+ wq->iobufs[wqe_idx & wqe_idx_mask] = iobuf;
+
+ assert ( flexboot_nodnic->callbacks->
+ fill_send_wqe[qp->type] != NULL );
+ status = flexboot_nodnic->callbacks->
+ fill_send_wqe[qp->type] ( ibdev, qp, av, iobuf,
+ wqbb, wqe_idx );
+ if ( status != 0 ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx fill send wqe failed\n",
+ flexboot_nodnic, qp->qpn );
+ goto post_send_done;
+ }
+
+ wq->next_idx++;
+
+ status = port->port_priv.send_doorbell ( &port->port_priv,
+ &send_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx );
+ if ( status != 0 ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring send doorbell failed\n", flexboot_nodnic );
+ }
+
+post_send_done:
+ return status;
+}
+
+/**
+ * Post receive work queue entry
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v iobuf I/O buffer
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_post_recv ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct io_buffer *iobuf ) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct ib_work_queue *wq = &qp->recv;
+ nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
+ struct nodnic_recv_ring *recv_ring = &nodnic_qp->receive;
+ struct nodnic_recv_wqe *wqe;
+ unsigned int wqe_idx_mask;
+ mlx_status status = MLX_SUCCESS;
+
+ /* Allocate work queue entry */
+ wqe_idx_mask = ( wq->num_wqes - 1 );
+ if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
+ DBGC ( flexboot_nodnic,
+ "flexboot_nodnic %p QPN %#lx receive queue full\n",
+ flexboot_nodnic, qp->qpn );
+ status = -ENOBUFS;
+ goto post_recv_done;
+ }
+ wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
+ wqe = &((struct nodnic_recv_wqe*)recv_ring->wqe_virt)[wq->next_idx & wqe_idx_mask];
+
+ MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
+ MLX_FILL_1 ( &wqe->data[0], 1, l_key, flexboot_nodnic->device_priv.lkey );
+ MLX_FILL_H ( &wqe->data[0], 2,
+ local_address_h, virt_to_bus ( iobuf->data ) );
+ MLX_FILL_1 ( &wqe->data[0], 3,
+ local_address_l, virt_to_bus ( iobuf->data ) );
+
+ wq->next_idx++;
+
+ status = port->port_priv.recv_doorbell ( &port->port_priv,
+ &recv_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx );
+ if ( status != 0 ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring receive doorbell failed\n", flexboot_nodnic );
+ }
+post_recv_done:
+ return status;
+}
+
+/***************************************************************************
+ *
+ * Event queues
+ *
+ ***************************************************************************
+ */
+
+static void flexboot_nodnic_poll_eq ( struct ib_device *ibdev ) {
+ struct flexboot_nodnic *flexboot_nodnic;
+ struct flexboot_nodnic_port *port;
+ struct net_device *netdev;
+ nodnic_port_state state = 0;
+ mlx_status status;
+
+ if ( ! ibdev ) {
+ DBG ( "%s: ibdev = NULL!!!\n", __FUNCTION__ );
+ return;
+ }
+
+ flexboot_nodnic = ib_get_drvdata ( ibdev );
+ port = &flexboot_nodnic->port[ibdev->port - 1];
+ netdev = port->netdev;
+
+ if ( ! netdev_is_open ( netdev ) ) {
+ DBG2( "%s: port %d is closed\n", __FUNCTION__, port->ibdev->port );
+ return;
+ }
+
+	/* We do not poll the EQ; just poll the link status while it is not active */
+ if ( ! netdev_link_ok ( netdev ) ) {
+ status = nodnic_port_get_state ( &port->port_priv, &state );
+ MLX_FATAL_CHECK_STATUS(status, state_err, "nodnic_port_get_state failed");
+
+ if ( state == nodnic_port_state_active ) {
+ DBG( "%s: port %d physical link is up\n", __FUNCTION__,
+ port->ibdev->port );
+ port->type->state_change ( flexboot_nodnic, port, 1 );
+ }
+ }
+state_err:
+ return;
+}
+
+/***************************************************************************
+ *
+ * Multicast group operations
+ *
+ ***************************************************************************
+ */
+static int flexboot_nodnic_mcast_attach ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ union ib_gid *gid) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ mlx_mac_address mac;
+ mlx_status status = MLX_SUCCESS;
+
+ switch (qp->type) {
+ case IB_QPT_ETH:
+ memcpy(&mac, &gid, sizeof(mac));
+ status = nodnic_port_add_mac_filter(&port->port_priv, mac);
+ MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err,
+ "nodnic_port_add_mac_filter failed");
+ break;
+ default:
+ break;
+ }
+mac_err:
+ return status;
+}
+static void flexboot_nodnic_mcast_detach ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ union ib_gid *gid ) {
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ mlx_mac_address mac;
+ mlx_status status = MLX_SUCCESS;
+
+ switch (qp->type) {
+ case IB_QPT_ETH:
+ memcpy(&mac, &gid, sizeof(mac));
+ status = nodnic_port_remove_mac_filter(&port->port_priv, mac);
+ MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err,
+ "nodnic_port_remove_mac_filter failed");
+ break;
+ default:
+ break;
+ }
+mac_err:
+ return;
+}
+/***************************************************************************
+ *
+ * Infiniband link-layer operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Initialise Infiniband link
+ *
+ * @v ibdev Infiniband device
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_ib_open ( struct ib_device *ibdev __unused) {
+ int rc = 0;
+
+ /*TODO: add implementation*/
+ return rc;
+}
+
+/**
+ * Close Infiniband link
+ *
+ * @v ibdev Infiniband device
+ */
+static void flexboot_nodnic_ib_close ( struct ib_device *ibdev __unused) {
+ /*TODO: add implementation*/
+}
+
+/**
+ * Inform embedded subnet management agent of a received MAD
+ *
+ * @v ibdev Infiniband device
+ * @v mad MAD
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_inform_sma ( struct ib_device *ibdev __unused,
+ union ib_mad *mad __unused) {
+ /*TODO: add implementation*/
+ return 0;
+}
+
+/** flexboot_nodnic Infiniband operations */
+static struct ib_device_operations flexboot_nodnic_ib_operations = {
+ .create_cq = flexboot_nodnic_create_cq,
+ .destroy_cq = flexboot_nodnic_destroy_cq,
+ .create_qp = flexboot_nodnic_create_qp,
+ .modify_qp = flexboot_nodnic_modify_qp,
+ .destroy_qp = flexboot_nodnic_destroy_qp,
+ .post_send = flexboot_nodnic_post_send,
+ .post_recv = flexboot_nodnic_post_recv,
+ .poll_cq = flexboot_nodnic_poll_cq,
+ .poll_eq = flexboot_nodnic_poll_eq,
+ .open = flexboot_nodnic_ib_open,
+ .close = flexboot_nodnic_ib_close,
+ .mcast_attach = flexboot_nodnic_mcast_attach,
+ .mcast_detach = flexboot_nodnic_mcast_detach,
+ .set_port_info = flexboot_nodnic_inform_sma,
+ .set_pkey_table = flexboot_nodnic_inform_sma,
+};
+/***************************************************************************
+ *
+ *
+ *
+ ***************************************************************************
+ */
+
+#define FLEX_NODNIC_TX_POLL_TOUT 500000
+#define FLEX_NODNIC_TX_POLL_USLEEP 10
+
+static void flexboot_nodnic_complete_all_tx ( struct flexboot_nodnic_port *port ) {
+ struct ib_device *ibdev = port->ibdev;
+ struct ib_completion_queue *cq;
+ struct ib_work_queue *wq;
+ int keep_polling = 0;
+ int timeout = FLEX_NODNIC_TX_POLL_TOUT;
+
+ list_for_each_entry ( cq, &ibdev->cqs, list ) {
+ do {
+ ib_poll_cq ( ibdev, cq );
+ keep_polling = 0;
+ list_for_each_entry ( wq, &cq->work_queues, list ) {
+ if ( wq->is_send )
+ keep_polling += ( wq->fill > 0 );
+ }
+ udelay ( FLEX_NODNIC_TX_POLL_USLEEP );
+ } while ( keep_polling && ( timeout-- > 0 ) );
+ }
+}
+
+static void flexboot_nodnic_port_disable_dma ( struct flexboot_nodnic_port *port ) {
+ nodnic_port_priv *port_priv = & ( port->port_priv );
+ mlx_status status;
+
+ if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) )
+ return;
+
+ port_priv->port_state |= NODNIC_PORT_DISABLING_DMA;
+ flexboot_nodnic_complete_all_tx ( port );
+ if ( ( status = nodnic_port_disable_dma ( port_priv ) ) ) {
+ MLX_DEBUG_WARN ( port, "Failed to disable DMA %d\n", status );
+ }
+
+ port_priv->port_state &= ~NODNIC_PORT_DISABLING_DMA;
+}
+
+/***************************************************************************
+ *
+ * Ethernet operation
+ *
+ ***************************************************************************
+ */
+
+/** Number of flexboot_nodnic Ethernet send work queue entries */
+#define FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES 64
+
+/** Number of flexboot_nodnic Ethernet receive work queue entries */
+#define FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES 64
+/** flexboot nodnic Ethernet queue pair operations */
+static struct ib_queue_pair_operations flexboot_nodnic_eth_qp_op = {
+ .alloc_iob = alloc_iob,
+};
+
+/**
+ * Transmit packet via flexboot_nodnic Ethernet device
+ *
+ * @v netdev Network device
+ * @v iobuf I/O buffer
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_eth_transmit ( struct net_device *netdev,
+ struct io_buffer *iobuf) {
+ struct flexboot_nodnic_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ int rc;
+
+	/* Transmit packet */
+	rc = ib_post_send ( ibdev, port->eth_qp, NULL, iobuf );
+	if ( rc != 0 ) {
+ DBGC ( flexboot_nodnic, "NODNIC %p port %d could not transmit: %s\n",
+ flexboot_nodnic, ibdev->port, strerror ( rc ) );
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * Handle flexboot_nodnic Ethernet device send completion
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v iobuf I/O buffer
+ * @v rc Completion status code
+ */
+static void flexboot_nodnic_eth_complete_send ( struct ib_device *ibdev __unused,
+ struct ib_queue_pair *qp,
+ struct io_buffer *iobuf,
+ int rc) {
+ struct net_device *netdev = ib_qp_get_ownerdata ( qp );
+
+ netdev_tx_complete_err ( netdev, iobuf, rc );
+}
+
+/**
+ * Handle flexboot_nodnic Ethernet device receive completion
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v av Address vector, or NULL
+ * @v iobuf I/O buffer
+ * @v rc Completion status code
+ */
+static void flexboot_nodnic_eth_complete_recv ( struct ib_device *ibdev __unused,
+ struct ib_queue_pair *qp,
+ struct ib_address_vector *dest __unused,
+ struct ib_address_vector *source,
+ struct io_buffer *iobuf,
+ int rc) {
+ struct net_device *netdev = ib_qp_get_ownerdata ( qp );
+
+ if ( rc != 0 ) {
+ DBG ( "Received packet with error\n" );
+ netdev_rx_err ( netdev, iobuf, rc );
+ return;
+ }
+
+ if ( source == NULL ) {
+ DBG ( "Received packet without address vector\n" );
+ netdev_rx_err ( netdev, iobuf, -ENOTTY );
+ return;
+ }
+ netdev_rx ( netdev, iobuf );
+}
+
+/** flexboot_nodnic Ethernet device completion operations */
+static struct ib_completion_queue_operations flexboot_nodnic_eth_cq_op = {
+ .complete_send = flexboot_nodnic_eth_complete_send,
+ .complete_recv = flexboot_nodnic_eth_complete_recv,
+};
+
+/**
+ * Poll flexboot_nodnic Ethernet device
+ *
+ * @v netdev Network device
+ */
+static void flexboot_nodnic_eth_poll ( struct net_device *netdev) {
+ struct flexboot_nodnic_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+
+ ib_poll_eq ( ibdev );
+}
+
+/**
+ * Open flexboot_nodnic Ethernet device
+ *
+ * @v netdev Network device
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_eth_open ( struct net_device *netdev ) {
+ struct flexboot_nodnic_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ mlx_status status = MLX_SUCCESS;
+ struct ib_completion_queue *dummy_cq = NULL;
+ struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = NULL;
+ mlx_uint64 cq_size = 0;
+ mlx_uint32 qpn = 0;
+ nodnic_port_state state = nodnic_port_state_down;
+
+ if ( port->port_priv.port_state & NODNIC_PORT_OPENED ) {
+ DBGC ( flexboot_nodnic, "%s: port %d is already opened\n",
+ __FUNCTION__, port->ibdev->port );
+ return 0;
+ }
+
+ port->port_priv.port_state |= NODNIC_PORT_OPENED;
+
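+	/* Create the QP against a temporary dummy CQ; its work queues are re-attached to the real completion queue once that has been created below, and the dummy CQ is freed before returning */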
+ dummy_cq = zalloc ( sizeof ( struct ib_completion_queue ) );
+ if ( dummy_cq == NULL ) {
+ DBGC ( flexboot_nodnic, "%s: Failed to allocate dummy CQ\n", __FUNCTION__ );
+ status = MLX_OUT_OF_RESOURCES;
+ goto err_create_dummy_cq;
+ }
+ INIT_LIST_HEAD ( &dummy_cq->work_queues );
+
+ port->eth_qp = ib_create_qp ( ibdev, IB_QPT_ETH,
+ FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES, dummy_cq,
+ FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES, dummy_cq,
+ &flexboot_nodnic_eth_qp_op, netdev->name );
+ if ( !port->eth_qp ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not create queue pair\n",
+ flexboot_nodnic, ibdev->port );
+ status = MLX_OUT_OF_RESOURCES;
+ goto err_create_qp;
+ }
+
+ ib_qp_set_ownerdata ( port->eth_qp, netdev );
+
+ status = nodnic_port_get_cq_size(&port->port_priv, &cq_size);
+ MLX_FATAL_CHECK_STATUS(status, get_cq_size_err,
+ "nodnic_port_get_cq_size failed");
+
+ port->eth_cq = ib_create_cq ( ibdev, cq_size,
+ &flexboot_nodnic_eth_cq_op );
+ if ( !port->eth_cq ) {
+ DBGC ( flexboot_nodnic,
+ "flexboot_nodnic %p port %d could not create completion queue\n",
+ flexboot_nodnic, ibdev->port );
+ status = MLX_OUT_OF_RESOURCES;
+ goto err_create_cq;
+ }
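+	/* Re-attach the QP's send and receive work queues to the newly created completion queue */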
+ port->eth_qp->send.cq = port->eth_cq;
+ list_del(&port->eth_qp->send.list);
+ list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues );
+ port->eth_qp->recv.cq = port->eth_cq;
+ list_del(&port->eth_qp->recv.list);
+ list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues );
+
+ status = nodnic_port_allocate_eq(&port->port_priv,
+ flexboot_nodnic->device_priv.device_cap.log_working_buffer_size);
+ MLX_FATAL_CHECK_STATUS(status, eq_alloc_err,
+ "nodnic_port_allocate_eq failed");
+
+ status = nodnic_port_init(&port->port_priv);
+ MLX_FATAL_CHECK_STATUS(status, init_err,
+ "nodnic_port_init failed");
+
+ /* update qp - qpn */
+ flexboot_nodnic_qp = ib_qp_get_drvdata ( port->eth_qp );
+ status = nodnic_port_get_qpn(&port->port_priv,
+ &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring,
+ &qpn);
+ MLX_FATAL_CHECK_STATUS(status, qpn_err,
+ "nodnic_port_get_qpn failed");
+ port->eth_qp->qpn = qpn;
+
+ /* Fill receive rings */
+ ib_refill_recv ( ibdev, port->eth_qp );
+
+ status = nodnic_port_enable_dma(&port->port_priv);
+ MLX_FATAL_CHECK_STATUS(status, dma_err,
+ "nodnic_port_enable_dma failed");
+
+ if (flexboot_nodnic->device_priv.device_cap.support_promisc_filter) {
+ status = nodnic_port_set_promisc(&port->port_priv, TRUE);
+ MLX_FATAL_CHECK_STATUS(status, promisc_err,
+ "nodnic_port_set_promisc failed");
+ }
+
+ status = nodnic_port_get_state(&port->port_priv, &state);
+ MLX_FATAL_CHECK_STATUS(status, state_err,
+ "nodnic_port_get_state failed");
+
+ port->type->state_change (
+ flexboot_nodnic, port, state == nodnic_port_state_active );
+
+ DBGC ( flexboot_nodnic, "%s: port %d opened (link is %s)\n",
+ __FUNCTION__, port->ibdev->port,
+ ( ( state == nodnic_port_state_active ) ? "Up" : "Down" ) );
+
+ free(dummy_cq);
+ return 0;
+state_err:
+promisc_err:
+dma_err:
+qpn_err:
+ nodnic_port_close(&port->port_priv);
+init_err:
+ nodnic_port_free_eq(&port->port_priv);
+eq_alloc_err:
+err_create_cq:
+get_cq_size_err:
+ ib_destroy_qp(ibdev, port->eth_qp );
+err_create_qp:
+ free(dummy_cq);
+err_create_dummy_cq:
+ port->port_priv.port_state &= ~NODNIC_PORT_OPENED;
+ return status;
+}
+
+/**
+ * Close flexboot_nodnic Ethernet device
+ *
+ * @v netdev Network device
+ */
+static void flexboot_nodnic_eth_close ( struct net_device *netdev) {
+ struct flexboot_nodnic_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ mlx_status status = MLX_SUCCESS;
+
+ if ( ! ( port->port_priv.port_state & NODNIC_PORT_OPENED ) ) {
+ DBGC ( flexboot_nodnic, "%s: port %d is already closed\n",
+ __FUNCTION__, port->ibdev->port );
+ return;
+ }
+
+ if (flexboot_nodnic->device_priv.device_cap.support_promisc_filter) {
+ if ( ( status = nodnic_port_set_promisc( &port->port_priv, FALSE ) ) ) {
+ DBGC ( flexboot_nodnic,
+ "nodnic_port_set_promisc failed (status = %d)\n", status );
+ }
+ }
+
+ flexboot_nodnic_port_disable_dma ( port );
+
+ port->port_priv.port_state &= ~NODNIC_PORT_OPENED;
+
+ port->type->state_change ( flexboot_nodnic, port, FALSE );
+
+ /* Close port */
+ status = nodnic_port_close(&port->port_priv);
+ if ( status != MLX_SUCCESS ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not close port: %s\n",
+ flexboot_nodnic, ibdev->port, strerror ( status ) );
+ /* Nothing we can do about this */
+ }
+
+ ib_destroy_qp ( ibdev, port->eth_qp );
+ port->eth_qp = NULL;
+ ib_destroy_cq ( ibdev, port->eth_cq );
+ port->eth_cq = NULL;
+
+ nodnic_port_free_eq(&port->port_priv);
+
+ DBGC ( flexboot_nodnic, "%s: port %d closed\n", __FUNCTION__, port->ibdev->port );
+}
+
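+/* Arm the completion queue when enabling interrupts (only while the port is open and DMA is not being disabled); clear the device interrupt when disabling */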
+void flexboot_nodnic_eth_irq ( struct net_device *netdev, int enable ) {
+ struct flexboot_nodnic_port *port = netdev->priv;
+
+ if ( enable ) {
+ if ( ( port->port_priv.port_state & NODNIC_PORT_OPENED ) &&
+ ! ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) {
+ flexboot_nodnic_arm_cq ( port );
+ } else {
+ /* do nothing */
+ }
+ } else {
+ nodnic_device_clear_int( port->port_priv.device );
+ }
+}
+
+/** flexboot_nodnic Ethernet network device operations */
+static struct net_device_operations flexboot_nodnic_eth_operations = {
+ .open = flexboot_nodnic_eth_open,
+ .close = flexboot_nodnic_eth_close,
+ .transmit = flexboot_nodnic_eth_transmit,
+ .poll = flexboot_nodnic_eth_poll,
+};
+
+/**
+ * Register flexboot_nodnic Ethernet device
+ */
+static int flexboot_nodnic_register_netdev ( struct flexboot_nodnic *flexboot_nodnic,
+ struct flexboot_nodnic_port *port) {
+ mlx_status status = MLX_SUCCESS;
+ struct net_device *netdev;
+ struct ib_device *ibdev = port->ibdev;
+ union {
+ uint8_t bytes[8];
+ uint32_t dwords[2];
+ } mac;
+
+ /* Allocate network devices */
+ netdev = alloc_etherdev ( 0 );
+ if ( netdev == NULL ) {
+ DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not allocate net device\n",
+ flexboot_nodnic, ibdev->port );
+ status = MLX_OUT_OF_RESOURCES;
+ goto alloc_err;
+ }
+ port->netdev = netdev;
+ netdev_init ( netdev, &flexboot_nodnic_eth_operations );
+ netdev->dev = ibdev->dev;
+ netdev->priv = port;
+
+ status = nodnic_port_query(&port->port_priv,
+ nodnic_port_option_mac_high,
+ &mac.dwords[0]);
+ MLX_FATAL_CHECK_STATUS(status, mac_err,
+ "failed to query mac high");
+ status = nodnic_port_query(&port->port_priv,
+ nodnic_port_option_mac_low,
+ &mac.dwords[1]);
+ MLX_FATAL_CHECK_STATUS(status, mac_err,
+ "failed to query mac low");
+ mac.dwords[0] = htonl(mac.dwords[0]);
+ mac.dwords[1] = htonl(mac.dwords[1]);
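+	/* After byte-swapping, the 48-bit MAC occupies bytes 2..7 of the 8-byte buffer */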
+ memcpy ( netdev->hw_addr,
+ &mac.bytes[2], ETH_ALEN);
+ /* Register network device */
+ status = register_netdev ( netdev );
+ if ( status != MLX_SUCCESS ) {
+ DBGC ( flexboot_nodnic,
+ "flexboot_nodnic %p port %d could not register network device: %s\n",
+ flexboot_nodnic, ibdev->port, strerror ( status ) );
+ goto reg_err;
+ }
+ return status;
+reg_err:
+mac_err:
+ netdev_put ( netdev );
+alloc_err:
+ return status;
+}
+
+/**
+ * Handle flexboot_nodnic Ethernet device port state change
+ */
+static void flexboot_nodnic_state_change_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused,
+ struct flexboot_nodnic_port *port,
+ int link_up ) {
+ struct net_device *netdev = port->netdev;
+
+ if ( link_up )
+ netdev_link_up ( netdev );
+ else
+ netdev_link_down ( netdev );
+
+}
+
+/**
+ * Unregister flexboot_nodnic Ethernet device
+ */
+static void flexboot_nodnic_unregister_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused,
+ struct flexboot_nodnic_port *port ) {
+ struct net_device *netdev = port->netdev;
+ unregister_netdev ( netdev );
+ netdev_nullify ( netdev );
+ netdev_put ( netdev );
+}
+
+/** flexboot_nodnic Ethernet port type */
+static struct flexboot_nodnic_port_type flexboot_nodnic_port_type_eth = {
+ .register_dev = flexboot_nodnic_register_netdev,
+ .state_change = flexboot_nodnic_state_change_netdev,
+ .unregister_dev = flexboot_nodnic_unregister_netdev,
+};
+
+/***************************************************************************
+ *
+ * PCI interface helper functions
+ *
+ ***************************************************************************
+ */
+static
+mlx_status
+flexboot_nodnic_allocate_infiniband_devices( struct flexboot_nodnic *flexboot_nodnic_priv ) {
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
+ struct pci_device *pci = flexboot_nodnic_priv->pci;
+ struct ib_device *ibdev = NULL;
+ unsigned int i = 0;
+
+ /* Allocate Infiniband devices */
+ for (; i < device_priv->device_cap.num_ports; i++) {
+ if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
+ continue;
+ ibdev = alloc_ibdev(0);
+ if (ibdev == NULL) {
+ status = MLX_OUT_OF_RESOURCES;
+ goto err_alloc_ibdev;
+ }
+ flexboot_nodnic_priv->port[i].ibdev = ibdev;
+ ibdev->op = &flexboot_nodnic_ib_operations;
+ ibdev->dev = &pci->dev;
+ ibdev->port = ( FLEXBOOT_NODNIC_PORT_BASE + i);
+ ib_set_drvdata(ibdev, flexboot_nodnic_priv);
+ }
+ return status;
+err_alloc_ibdev:
+ for ( i-- ; ( signed int ) i >= 0 ; i-- )
+ ibdev_put ( flexboot_nodnic_priv->port[i].ibdev );
+ return status;
+}
+
+static
+mlx_status
+flexboot_nodnic_thin_init_ports( struct flexboot_nodnic *flexboot_nodnic_priv ) {
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
+ nodnic_port_priv *port_priv = NULL;
+ unsigned int i = 0;
+
+ for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
+ if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
+ continue;
+ port_priv = &flexboot_nodnic_priv->port[i].port_priv;
+ status = nodnic_port_thin_init( device_priv, port_priv, i );
+ MLX_FATAL_CHECK_STATUS(status, thin_init_err,
+ "flexboot_nodnic_thin_init_ports failed");
+ }
+thin_init_err:
+ return status;
+}
+
+
+static
+mlx_status
+flexboot_nodnic_set_ports_type ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
+ nodnic_port_priv *port_priv = NULL;
+ nodnic_port_type type = NODNIC_PORT_TYPE_UNKNOWN;
+ unsigned int i = 0;
+
+ for ( i = 0 ; i < device_priv->device_cap.num_ports ; i++ ) {
+ if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
+ continue;
+ port_priv = &flexboot_nodnic_priv->port[i].port_priv;
+ status = nodnic_port_get_type(port_priv, &type);
+ MLX_FATAL_CHECK_STATUS(status, type_err,
+ "nodnic_port_get_type failed");
+ switch ( type ) {
+ case NODNIC_PORT_TYPE_ETH:
+ DBGC ( flexboot_nodnic_priv, "Port %d type is Ethernet\n", i );
+ flexboot_nodnic_priv->port[i].type = &flexboot_nodnic_port_type_eth;
+ break;
+ case NODNIC_PORT_TYPE_IB:
+ DBGC ( flexboot_nodnic_priv, "Port %d type is Infiniband\n", i );
+ status = MLX_UNSUPPORTED;
+ goto type_err;
+ default:
+ DBGC ( flexboot_nodnic_priv, "Port %d type is unknown\n", i );
+ status = MLX_UNSUPPORTED;
+ goto type_err;
+ }
+ }
+type_err:
+ return status;
+}
+
+static
+mlx_status
+flexboot_nodnic_ports_register_dev( struct flexboot_nodnic *flexboot_nodnic_priv ) {
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
+ struct flexboot_nodnic_port *port = NULL;
+ unsigned int i = 0;
+
+ for (; i < device_priv->device_cap.num_ports; i++) {
+ if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
+ continue;
+ port = &flexboot_nodnic_priv->port[i];
+ status = port->type->register_dev ( flexboot_nodnic_priv, port );
+ MLX_FATAL_CHECK_STATUS(status, reg_err,
+ "port register_dev failed");
+ }
+reg_err:
+ return status;
+}
+
+static
+mlx_status
+flexboot_nodnic_ports_unregister_dev ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
+ struct flexboot_nodnic_port *port;
+ nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
+ int i = (device_priv->device_cap.num_ports - 1);
+
+ for (; i >= 0; i--) {
+ if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
+ continue;
+ port = &flexboot_nodnic_priv->port[i];
+ port->type->unregister_dev(flexboot_nodnic_priv, port);
+ ibdev_put(flexboot_nodnic_priv->port[i].ibdev);
+ }
+ return MLX_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * flexboot nodnic interface
+ *
+ ***************************************************************************
+ */
+__unused static void flexboot_nodnic_enable_dma ( struct flexboot_nodnic *nodnic ) {
+ nodnic_port_priv *port_priv;
+ mlx_status status;
+ int i;
+
+ for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) {
+ if ( ! ( nodnic->port_mask & ( i + 1 ) ) )
+ continue;
+ port_priv = & ( nodnic->port[i].port_priv );
+ if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) )
+ continue;
+
+ if ( ( status = nodnic_port_enable_dma ( port_priv ) ) ) {
+ MLX_DEBUG_WARN ( nodnic, "Failed to enable DMA %d\n", status );
+ }
+ }
+}
+
+__unused static void flexboot_nodnic_disable_dma ( struct flexboot_nodnic *nodnic ) {
+ int i;
+
+ for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) {
+ if ( ! ( nodnic->port_mask & ( i + 1 ) ) )
+ continue;
+ flexboot_nodnic_port_disable_dma ( & ( nodnic->port[i] ) );
+ }
+}
+
+int flexboot_nodnic_is_supported ( struct pci_device *pci ) {
+ mlx_utils utils;
+ mlx_pci_gw_buffer buffer;
+ mlx_status status;
+ int is_supported = 0;
+
+ DBG ( "%s: start\n", __FUNCTION__ );
+
+ memset ( &utils, 0, sizeof ( utils ) );
+
+ status = mlx_utils_init ( &utils, pci );
+ MLX_CHECK_STATUS ( pci, status, utils_init_err, "mlx_utils_init failed" );
+
+ status = mlx_pci_gw_init ( &utils );
+ MLX_CHECK_STATUS ( pci, status, pci_gw_init_err, "mlx_pci_gw_init failed" );
+
+ status = mlx_pci_gw_read ( &utils, PCI_GW_SPACE_NODNIC,
+ NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET, &buffer );
+
+ if ( status == MLX_SUCCESS ) {
+ buffer >>= NODNIC_NIC_INTERFACE_SUPPORTED_BIT;
+ is_supported = ( buffer & 0x1 );
+ }
+
+ mlx_pci_gw_teardown( &utils );
+
+pci_gw_init_err:
+utils_init_err:
+	DBG ( "%s: NODNIC is %ssupported (status = %d)\n",
+			__FUNCTION__, ( is_supported ? "" : "not " ), status );
+ return is_supported;
+}
+
+void flexboot_nodnic_copy_mac ( uint8_t mac_addr[], uint32_t low_byte,
+ uint16_t high_byte ) {
+ union mac_addr {
+ struct {
+ uint32_t low_byte;
+ uint16_t high_byte;
+ };
+ uint8_t mac_addr[ETH_ALEN];
+ } mac_addr_aux;
+
+ mac_addr_aux.high_byte = high_byte;
+ mac_addr_aux.low_byte = low_byte;
+
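+	/* The union stores the MAC as little-endian integers; reverse the byte order when copying into mac_addr[] */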
+ mac_addr[0] = mac_addr_aux.mac_addr[5];
+ mac_addr[1] = mac_addr_aux.mac_addr[4];
+ mac_addr[2] = mac_addr_aux.mac_addr[3];
+ mac_addr[3] = mac_addr_aux.mac_addr[2];
+ mac_addr[4] = mac_addr_aux.mac_addr[1];
+ mac_addr[5] = mac_addr_aux.mac_addr[0];
+}
+
+static mlx_status flexboot_nodnic_get_factory_mac (
+ struct flexboot_nodnic *flexboot_nodnic_priv, uint8_t port __unused ) {
+ struct mlx_vmac_query_virt_mac virt_mac;
+ mlx_status status;
+
+ memset ( & virt_mac, 0, sizeof ( virt_mac ) );
+ status = mlx_vmac_query_virt_mac ( flexboot_nodnic_priv->device_priv.utils,
+ &virt_mac );
+	if ( status ) {
+		DBGC ( flexboot_nodnic_priv, "NODNIC %p Failed to query the virtual MAC\n",
+			flexboot_nodnic_priv );
+ }
+
+ return status;
+}
+
+/**
+ * Set port masking
+ *
+ * @v flexboot_nodnic nodnic device
+ * @ret rc Return status code
+ */
+static int flexboot_nodnic_set_port_masking ( struct flexboot_nodnic *flexboot_nodnic ) {
+ unsigned int i;
+ nodnic_device_priv *device_priv = &flexboot_nodnic->device_priv;
+
+ flexboot_nodnic->port_mask = 0;
+ for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
+ flexboot_nodnic->port_mask |= (i + 1);
+ }
+
+ if ( ! flexboot_nodnic->port_mask ) {
+ /* No port was enabled */
+ DBGC ( flexboot_nodnic, "NODNIC %p No port was enabled for "
+ "booting\n", flexboot_nodnic );
+ return -ENETUNREACH;
+ }
+
+ return 0;
+}
+
+int flexboot_nodnic_probe ( struct pci_device *pci,
+ struct flexboot_nodnic_callbacks *callbacks,
+ void *drv_priv __unused ) {
+ mlx_status status = MLX_SUCCESS;
+ struct flexboot_nodnic *flexboot_nodnic_priv = NULL;
+ nodnic_device_priv *device_priv = NULL;
+ int i = 0;
+
+ if ( ( pci == NULL ) || ( callbacks == NULL ) ) {
+ DBGC ( flexboot_nodnic_priv, "%s: Bad Parameter\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ flexboot_nodnic_priv = zalloc( sizeof ( *flexboot_nodnic_priv ) );
+ if ( flexboot_nodnic_priv == NULL ) {
+ DBGC ( flexboot_nodnic_priv, "%s: Failed to allocate priv data\n", __FUNCTION__ );
+ status = MLX_OUT_OF_RESOURCES;
+ goto device_err_alloc;
+ }
+
+ /* Register settings
+ * Note that pci->priv will be the device private data */
+ flexboot_nodnic_priv->pci = pci;
+ flexboot_nodnic_priv->callbacks = callbacks;
+ pci_set_drvdata ( pci, flexboot_nodnic_priv );
+
+ device_priv = &flexboot_nodnic_priv->device_priv;
+ device_priv->utils = (mlx_utils *)zalloc( sizeof ( mlx_utils ) );
+ if ( device_priv->utils == NULL ) {
+ DBGC ( flexboot_nodnic_priv, "%s: Failed to allocate utils\n", __FUNCTION__ );
+ status = MLX_OUT_OF_RESOURCES;
+ goto utils_err_alloc;
+ }
+
+ status = mlx_utils_init( device_priv->utils, pci );
+ MLX_FATAL_CHECK_STATUS(status, utils_init_err,
+ "mlx_utils_init failed");
+
+ /* nodnic init*/
+ status = mlx_pci_gw_init( device_priv->utils );
+ MLX_FATAL_CHECK_STATUS(status, cmd_init_err,
+ "mlx_pci_gw_init failed");
+
+ /* init device */
+ status = nodnic_device_init( device_priv );
+ MLX_FATAL_CHECK_STATUS(status, device_init_err,
+ "nodnic_device_init failed");
+
+ status = nodnic_device_get_cap( device_priv );
+ MLX_FATAL_CHECK_STATUS(status, get_cap_err,
+ "nodnic_device_get_cap failed");
+
+ status = flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv );
+ MLX_FATAL_CHECK_STATUS(status, err_set_masking,
+ "flexboot_nodnic_set_port_masking failed");
+
+ status = flexboot_nodnic_allocate_infiniband_devices( flexboot_nodnic_priv );
+ MLX_FATAL_CHECK_STATUS(status, err_alloc_ibdev,
+ "flexboot_nodnic_allocate_infiniband_devices failed");
+
+ /* port init */
+ status = flexboot_nodnic_thin_init_ports( flexboot_nodnic_priv );
+ MLX_FATAL_CHECK_STATUS(status, err_thin_init_ports,
+ "flexboot_nodnic_thin_init_ports failed");
+
+ /* device reg */
+ status = flexboot_nodnic_set_ports_type( flexboot_nodnic_priv );
+ MLX_CHECK_STATUS( flexboot_nodnic_priv, status, err_set_ports_types,
+ "flexboot_nodnic_set_ports_type failed");
+
+ status = flexboot_nodnic_ports_register_dev( flexboot_nodnic_priv );
+ MLX_FATAL_CHECK_STATUS(status, reg_err,
+ "flexboot_nodnic_ports_register_dev failed");
+
+ for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
+ if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
+ continue;
+ flexboot_nodnic_get_factory_mac ( flexboot_nodnic_priv, i );
+ }
+
+ /* Update ETH operations with IRQ function if supported */
+ DBGC ( flexboot_nodnic_priv, "%s: %s IRQ function\n",
+ __FUNCTION__, ( callbacks->irq ? "Valid" : "No" ) );
+ flexboot_nodnic_eth_operations.irq = callbacks->irq;
+ return 0;
+
+ flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
+reg_err:
+err_set_ports_types:
+err_thin_init_ports:
+err_alloc_ibdev:
+err_set_masking:
+get_cap_err:
+ nodnic_device_teardown ( device_priv );
+device_init_err:
+ mlx_pci_gw_teardown ( device_priv->utils );
+cmd_init_err:
+utils_init_err:
+ free ( device_priv->utils );
+utils_err_alloc:
+ free ( flexboot_nodnic_priv );
+device_err_alloc:
+ return status;
+}
+
+void flexboot_nodnic_remove ( struct pci_device *pci )
+{
+ struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci );
+ nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv );
+
+ flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
+ nodnic_device_teardown( device_priv );
+ mlx_pci_gw_teardown( device_priv->utils );
+ free( device_priv->utils );
+ free( flexboot_nodnic_priv );
+}
diff --git a/src/drivers/infiniband/flexboot_nodnic.h b/src/drivers/infiniband/flexboot_nodnic.h
new file mode 100644
index 00000000..80272296
--- /dev/null
+++ b/src/drivers/infiniband/flexboot_nodnic.h
@@ -0,0 +1,163 @@
+#ifndef SRC_DRIVERS_INFINIBAND_FLEXBOOT_NODNIC_FLEXBOOT_NODNIC_H_
+#define SRC_DRIVERS_INFINIBAND_FLEXBOOT_NODNIC_FLEXBOOT_NODNIC_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_nodnic/include/mlx_nodnic_data_structures.h"
+#include "nodnic_prm.h"
+#include <ipxe/io.h>
+#include <ipxe/infiniband.h>
+#include <ipxe/netdevice.h>
+
+/*
+ * If defined, use interrupts in NODNIC driver
+ */
+#define NODNIC_IRQ_ENABLED
+
+#define FLEXBOOT_NODNIC_MAX_PORTS 2
+#define FLEXBOOT_NODNIC_PORT_BASE 1
+
+#define FLEXBOOT_NODNIC_OPCODE_SEND 0xa
+
+/* Port protocol */
+enum flexboot_nodnic_protocol {
+ FLEXBOOT_NODNIC_PROT_IB_IPV6 = 0,
+ FLEXBOOT_NODNIC_PROT_ETH,
+ FLEXBOOT_NODNIC_PROT_IB_IPV4,
+ FLEXBOOT_NODNIC_PROT_FCOE
+};
+
+/** A flexboot nodnic port */
+struct flexboot_nodnic_port {
+ /** Infiniband device */
+ struct ib_device *ibdev;
+ /** Network device */
+ struct net_device *netdev;
+	/** nodnic port */
+ nodnic_port_priv port_priv;
+ /** Port type */
+ struct flexboot_nodnic_port_type *type;
+ /** Ethernet completion queue */
+ struct ib_completion_queue *eth_cq;
+ /** Ethernet queue pair */
+ struct ib_queue_pair *eth_qp;
+};
+
+
+/** A flexboot nodnic queue pair */
+struct flexboot_nodnic_queue_pair {
+ nodnic_qp *nodnic_queue_pair;
+};
+
+/** A flexboot nodnic cq */
+struct flexboot_nodnic_completion_queue {
+ nodnic_cq *nodnic_completion_queue;
+};
+
+/** A flexboot_nodnic device */
+struct flexboot_nodnic {
+ /** PCI device */
+ struct pci_device *pci;
+ /** nic specific data*/
+ struct flexboot_nodnic_callbacks *callbacks;
+ /**nodnic device*/
+ nodnic_device_priv device_priv;
+ /**flexboot_nodnic ports*/
+ struct flexboot_nodnic_port port[FLEXBOOT_NODNIC_MAX_PORTS];
+ /** Device open request counter */
+ unsigned int open_count;
+ /** Port masking */
+ u16 port_mask;
+ /** device private data */
+ void *priv_data;
+};
+
+/** A flexboot_nodnic port type */
+struct flexboot_nodnic_port_type {
+ /** Register port
+ *
+ * @v flexboot_nodnic flexboot_nodnic device
+ * @v port flexboot_nodnic port
+ * @ret mlx_status Return status code
+ */
+ mlx_status ( * register_dev ) (
+ struct flexboot_nodnic *flexboot_nodnic,
+ struct flexboot_nodnic_port *port
+ );
+ /** Port state changed
+ *
+ * @v flexboot_nodnic flexboot_nodnic device
+ * @v port flexboot_nodnic port
+ * @v link_up Link is up
+ */
+ void ( * state_change ) (
+ struct flexboot_nodnic *flexboot_nodnic,
+ struct flexboot_nodnic_port *port,
+ int link_up
+ );
+ /** Unregister port
+ *
+ * @v flexboot_nodnic flexboot_nodnic device
+ * @v port flexboot_nodnic port
+ */
+ void ( * unregister_dev ) (
+ struct flexboot_nodnic *flexboot_nodnic,
+ struct flexboot_nodnic_port *port
+ );
+};
+
+struct cqe_data{
+ mlx_boolean owner;
+ mlx_uint32 qpn;
+ mlx_uint32 is_send;
+ mlx_uint32 is_error;
+ mlx_uint32 syndrome;
+ mlx_uint32 vendor_err_syndrome;
+ mlx_uint32 wqe_counter;
+ mlx_uint32 byte_cnt;
+};
+
+struct flexboot_nodnic_callbacks {
+ mlx_status ( * fill_completion ) ( void *cqe, struct cqe_data *cqe_data );
+ mlx_status ( * cqe_set_owner ) ( void *cq, unsigned int num_cqes );
+ mlx_size ( * get_cqe_size ) ();
+ mlx_status ( * fill_send_wqe[5] ) (
+ struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct ib_address_vector *av,
+ struct io_buffer *iobuf,
+ struct nodnic_send_wqbb *wqbb,
+ unsigned long wqe_idx
+ );
+ void ( * irq ) ( struct net_device *netdev, int enable );
+};
+
+int flexboot_nodnic_probe ( struct pci_device *pci,
+ struct flexboot_nodnic_callbacks *callbacks,
+ void *drv_priv );
+void flexboot_nodnic_remove ( struct pci_device *pci );
+void flexboot_nodnic_eth_irq ( struct net_device *netdev, int enable );
+int flexboot_nodnic_is_supported ( struct pci_device *pci );
+void flexboot_nodnic_copy_mac ( uint8_t mac_addr[], uint32_t low_byte,
+ uint16_t high_byte );
+
+#endif /* SRC_DRIVERS_INFINIBAND_FLEXBOOT_NODNIC_FLEXBOOT_NODNIC_H_ */
diff --git a/src/drivers/infiniband/golan.c b/src/drivers/infiniband/golan.c
new file mode 100755
index 00000000..9225c187
--- /dev/null
+++ b/src/drivers/infiniband/golan.c
@@ -0,0 +1,2663 @@
+/*
+ * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include <errno.h>
+#include <strings.h>
+#include <byteswap.h>
+#include <ipxe/malloc.h>
+#include <ipxe/umalloc.h>
+#include <ipxe/infiniband.h>
+#include <ipxe/ib_smc.h>
+#include <ipxe/iobuf.h>
+#include <ipxe/netdevice.h>
+#include <ipxe/ethernet.h>
+#include <ipxe/if_ether.h>
+#include <ipxe/in.h>
+#include <ipxe/ipoib.h>
+#include "flexboot_nodnic.h"
+#include "nodnic_shomron_prm.h"
+#include "golan.h"
+#include "mlx_utils/include/public/mlx_bail.h"
+#include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"
+#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h"
+#include "mlx_utils/include/public/mlx_pci_gw.h"
+#include "mlx_nodnic/include/mlx_port.h"
+
+/******************************************************************************/
+/************* Very simple memory management for umalloced pages **************/
+/******* Temporary solution until full memory management is implemented *******/
+/******************************************************************************/
+#define GOLAN_PAGES 20
+struct golan_page {
+ struct list_head list;
+ userptr_t addr;
+};
+
+static void golan_free_pages ( struct list_head *head ) {
+ struct golan_page *page, *tmp;
+ list_for_each_entry_safe ( page, tmp, head, list ) {
+ list_del ( &page->list );
+ ufree ( page->addr );
+ free ( page );
+ }
+}
+
+static int golan_init_pages ( struct list_head *head ) {
+ struct golan_page *new_entry;
+ int rc, i;
+
+ if ( !head ) {
+ rc = -EINVAL;
+ goto err_golan_init_pages_bad_param;
+ }
+
+ INIT_LIST_HEAD ( head );
+
+ for ( i = 0; i < GOLAN_PAGES; i++ ) {
+ new_entry = zalloc ( sizeof ( *new_entry ) );
+ if ( new_entry == NULL ) {
+ rc = -ENOMEM;
+ goto err_golan_init_pages_alloc_page;
+ }
+ new_entry->addr = umalloc ( GOLAN_PAGE_SIZE );
+ if ( new_entry->addr == UNULL ) {
+ free ( new_entry );
+ rc = -ENOMEM;
+ goto err_golan_init_pages_alloc_page;
+ }
+ list_add ( &new_entry->list, head );
+ }
+
+ return 0;
+
+err_golan_init_pages_alloc_page:
+ golan_free_pages ( head );
+err_golan_init_pages_bad_param:
+ return rc;
+}
+
+static userptr_t golan_get_page ( struct list_head *head ) {
+ struct golan_page *page;
+ userptr_t addr;
+
+ if ( list_empty ( head ) )
+ return UNULL;
+
+ page = list_first_entry ( head, struct golan_page, list );
+ list_del ( &page->list );
+ addr = page->addr;
+ free ( page );
+ return addr;
+}
+
+/******************************************************************************/
+
+const char *golan_qp_state_as_string[] = {
+ "RESET",
+ "INIT",
+ "RTR",
+ "RTS",
+ "SQD",
+ "SQE",
+ "ERR"
+};
+
+inline int golan_check_rc_and_cmd_status ( struct golan_cmd_layout *cmd, int rc ) {
+ struct golan_outbox_hdr *out_hdr = ( struct golan_outbox_hdr * ) ( cmd->out );
+ if ( rc == -EBUSY ) {
+ DBG ( "HCA is busy (rc = -EBUSY)\n" );
+ return rc;
+ } else if ( out_hdr->status ) {
+		DBG("%s status = 0x%x - syndrome = 0x%x\n", __FUNCTION__,
+ out_hdr->status, be32_to_cpu(out_hdr->syndrome));
+ return out_hdr->status;
+ }
+ return 0;
+}
+
+#define GOLAN_CHECK_RC_AND_CMD_STATUS(_label) \
+	do { \
+		if ( ( rc = golan_check_rc_and_cmd_status ( cmd, rc ) ) ) \
+			goto _label; \
+	} while (0)
+
+#define GOLAN_PRINT_RC_AND_CMD_STATUS golan_check_rc_and_cmd_status ( cmd, rc )
+
+
+struct mbox {
+ union {
+ struct golan_cmd_prot_block mblock;
+ u8 data[MAILBOX_STRIDE];
+ __be64 qdata[MAILBOX_STRIDE >> 3];
+ };
+};
+
+static inline uint32_t ilog2(uint32_t mem)
+{
+ return ( fls ( mem ) - 1 );
+}
+
+#define CTRL_SIG_SZ (sizeof(mailbox->mblock) - sizeof(mailbox->mblock.bdata) - 2)
+
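+/* XOR-8 checksum helper: signatures are stored as the complement of the XOR over the protected bytes, so a valid command or mailbox block XORs to 0xff (see verify_block_sig()) */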
+static inline u8 xor8_buf(void *buf, int len)
+{
+ u8 sum = 0;
+ int i;
+ u8 *ptr = buf;
+
+ for (i = 0; i < len; ++i)
+ sum ^= ptr[i];
+
+ return sum;
+}
+
+static inline int verify_block_sig(struct golan_cmd_prot_block *block)
+{
+ if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+ return -EINVAL;
+
+ if (xor8_buf(block, sizeof(*block)) != 0xff)
+ return -EINVAL;
+ return 0;
+}
+
+static inline const char *cmd_status_str(u8 status)
+{
+ switch (status) {
+ case 0x0: return "OK";
+ case 0x1: return "internal error";
+ case 0x2: return "bad operation";
+ case 0x3: return "bad parameter";
+ case 0x4: return "bad system state";
+ case 0x5: return "bad resource";
+ case 0x6: return "resource busy";
+ case 0x8: return "limits exceeded";
+ case 0x9: return "bad resource state";
+ case 0xa: return "bad index";
+ case 0xf: return "no resources";
+ case 0x50: return "bad input length";
+ case 0x51: return "bad output length";
+ case 0x10: return "bad QP state";
+ case 0x30: return "bad packet (discarded)";
+ case 0x40: return "bad size too many outstanding CQEs";
+ case 0xff: return "Command Timed Out";
+ default: return "unknown status";
+ }
+}
+
+static inline uint16_t fw_rev_maj(struct golan *golan)
+{
+ return be32_to_cpu(readl(&golan->iseg->fw_rev)) & 0xffff;
+}
+
+static inline u16 fw_rev_min(struct golan *golan)
+{
+ return be32_to_cpu(readl(&golan->iseg->fw_rev)) >> 16;
+}
+
+static inline u16 fw_rev_sub(struct golan *golan)
+{
+ return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) & 0xffff;
+}
+
+static inline u16 cmdif_rev(struct golan *golan)
+{
+ return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) >> 16;
+}
+
+
+static inline struct golan_cmd_layout *get_cmd( struct golan *golan, int idx )
+{
+ return golan->cmd.addr + (idx << golan->cmd.log_stride);
+}
+
+static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
+ uint32_t inbox_idx, uint32_t outbox_idx)
+{
+ struct golan_cmd_layout *cmd = get_cmd(golan, cmd_idx);
+ struct mbox *mailbox = NULL;
+
+ if (inbox_idx != NO_MBOX) {
+ mailbox = GET_INBOX(golan, inbox_idx);
+ mailbox->mblock.token = cmd->token;
+ mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
+ CTRL_SIG_SZ);
+ }
+ if (outbox_idx != NO_MBOX) {
+ mailbox = GET_OUTBOX(golan, outbox_idx);
+ mailbox->mblock.token = cmd->token;
+ mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
+ CTRL_SIG_SZ);
+ }
+ cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
+}
+
+/**
+ * Get Golan FW
+ */
+static int fw_ver_and_cmdif ( struct golan *golan ) {
+ DBGC (golan ,"\n[%x:%x]rev maj.min.submin = %x.%x.%x cmdif = %x\n",
+ golan->iseg->fw_rev,
+ golan->iseg->cmdif_rev_fw_sub,
+ fw_rev_maj ( golan ), fw_rev_min ( golan ),
+ fw_rev_sub ( golan ), cmdif_rev ( golan));
+
+ if (cmdif_rev ( golan) != PXE_CMDIF_REF) {
+		DBGC (golan ,"CMDIF %d not supported, current is %d\n",
+ cmdif_rev ( golan ), PXE_CMDIF_REF);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void show_out_status(uint32_t *out)
+{
+ DBG("%x\n", be32_to_cpu(out[0]));
+ DBG("%x\n", be32_to_cpu(out[1]));
+ DBG("%x\n", be32_to_cpu(out[2]));
+ DBG("%x\n", be32_to_cpu(out[3]));
+}
+/**
+ * Check if CMD has finished.
+ */
+static inline uint32_t is_command_finished( struct golan *golan, int idx)
+{
+ wmb();
+ return !(get_cmd( golan , idx )->status_own & CMD_OWNER_HW);
+}
+
+/**
+ * Wait for Golan command completion
+ *
+ * @v golan Golan device
+ * @ret rc Return status code
+ */
+static inline int golan_cmd_wait(struct golan *golan, int idx, const char *command)
+{
+ unsigned int wait;
+ int rc = -EBUSY;
+
+ for ( wait = GOLAN_HCR_MAX_WAIT_MS ; wait ; --wait ) {
+ if (is_command_finished(golan, idx)) {
+ rc = CMD_STATUS(golan, idx);
+ rmb();
+ break;
+ } else {
+ mdelay ( 1 );
+ }
+ }
+ if (rc) {
+ DBGC (golan ,"[%s]RC is %s[%x]\n", command, cmd_status_str(rc), rc);
+ }
+
+ golan->cmd_bm &= ~(1 << idx);
+ return rc;
+}
+
+/**
+ * Notify the HW that commands are ready
+ */
+static inline void send_command(struct golan *golan)
+{
+ wmb(); //Make sure the command is visible in "memory".
+ writel(cpu_to_be32(golan->cmd_bm) , &golan->iseg->cmd_dbell);
+}
+
+static inline int send_command_and_wait(struct golan *golan, uint32_t cmd_idx,
+ uint32_t inbox_idx, uint32_t outbox_idx, const char *command)
+{
+ golan_calc_sig(golan, cmd_idx, inbox_idx, outbox_idx);
+ send_command(golan);
+ return golan_cmd_wait(golan, cmd_idx, command);
+}
+
+/**
+ * Prepare a FW command.
+ * In - command idx (must be valid).
+ * Writes the command parameters.
+ */
+static inline struct golan_cmd_layout *write_cmd(struct golan *golan, int idx,
+ uint16_t opcode, uint16_t opmod,
+ uint16_t inbox_idx,
+ uint16_t outbox_idx, uint16_t inlen,
+ uint16_t outlen)
+{
+ struct golan_cmd_layout *cmd = get_cmd(golan , idx);
+ struct golan_inbox_hdr *hdr = (struct golan_inbox_hdr *)cmd->in;
+ static uint8_t token;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->type = GOLAN_PCI_CMD_XPORT;
+ cmd->status_own = CMD_OWNER_HW;
+ cmd->outlen = cpu_to_be32(outlen);
+ cmd->inlen = cpu_to_be32(inlen);
+ hdr->opcode = cpu_to_be16(opcode);
+ hdr->opmod = cpu_to_be16(opmod);
+
+ if (inbox_idx != NO_MBOX) {
+ memset(GET_INBOX(golan, inbox_idx), 0, MAILBOX_SIZE);
+ cmd->in_ptr = VIRT_2_BE64_BUS(GET_INBOX(golan, inbox_idx));
+ cmd->token = ++token;
+ }
+ if (outbox_idx != NO_MBOX) {
+ memset(GET_OUTBOX(golan, outbox_idx), 0, MAILBOX_SIZE);
+ cmd->out_ptr = VIRT_2_BE64_BUS(GET_OUTBOX(golan, outbox_idx));
+ }
+
+ golan->cmd_bm |= 1 << idx;
+
+ assert ( cmd != NULL );
+ return cmd;
+}
+
+static inline int golan_core_enable_hca(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ int rc = 0;
+
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ENABLE_HCA, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_enable_hca_mbox_in),
+ sizeof(struct golan_enable_hca_mbox_out));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ return rc;
+}
+
+static inline void golan_disable_hca(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ int rc;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DISABLE_HCA, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_disable_hca_mbox_in),
+ sizeof(struct golan_disable_hca_mbox_out));
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+}
+
+static inline int golan_set_hca_cap(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ int rc;
+
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_SET_HCA_CAP, 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_cmd_set_hca_cap_mbox_in),
+ sizeof(struct golan_cmd_set_hca_cap_mbox_out));
+
+ golan->caps.flags &= ~GOLAN_DEV_CAP_FLAG_CMDIF_CSUM;
+ DBGC( golan , "%s caps.uar_sz = %d\n", __FUNCTION__, golan->caps.uar_sz);
+ DBGC( golan , "%s caps.log_pg_sz = %d\n", __FUNCTION__, golan->caps.log_pg_sz);
+	DBGC( golan , "%s caps.uar_page_sz = %d\n", __FUNCTION__, be32_to_cpu(golan->caps.uar_page_sz));
+ golan->caps.uar_page_sz = 0;
+
+
+ memcpy(((struct golan_hca_cap *)GET_INBOX(golan, GEN_MBOX)),
+ &(golan->caps),
+ sizeof(struct golan_hca_cap));
+
+ //if command failed we should reset the caps in golan->caps
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ return rc;
+}
+
+static inline int golan_qry_hca_cap(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ int rc = 0;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_CAP, 0x1,
+ NO_MBOX, GEN_MBOX,
+ sizeof(struct golan_cmd_query_hca_cap_mbox_in),
+ sizeof(struct golan_cmd_query_hca_cap_mbox_out));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, GEN_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_hca_cap );
+
+ memcpy(&(golan->caps),
+ ((struct golan_hca_cap *)GET_OUTBOX(golan, GEN_MBOX)),
+ sizeof(struct golan_hca_cap));
+err_query_hca_cap:
+ return rc;
+}
+
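+/* Reclaim pages previously handed to the firmware (GOLAN_PAGES_TAKE); the firmware returns the page addresses in the outbox and each one is ufree()d here */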
+static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16 func_id ) {
+ uint32_t out_num_entries = 0;
+ int size_ibox = sizeof(struct golan_manage_pages_inbox);
+ int size_obox = sizeof(struct golan_manage_pages_outbox);
+ int rc = 0;
+
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ while ( pages > 0 ) {
+ uint32_t pas_num = min(pages, MAX_PASE_MBOX);
+ unsigned i;
+ struct golan_cmd_layout *cmd;
+ struct golan_manage_pages_inbox *in;
+ struct golan_manage_pages_outbox_data *out;
+
+ size_ibox += (pas_num * GOLAN_PAS_SIZE);
+ size_obox += (pas_num * GOLAN_PAS_SIZE);
+
+ cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_TAKE,
+ MEM_MBOX, MEM_MBOX,
+ size_ibox,
+ size_obox);
+
+		in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CAN'T USE THE LAST 2 FIELDS) */
+
+ in->func_id = func_id; /* Already BE */
+ in->num_entries = cpu_to_be32(pas_num);
+
+ if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
+ out = (struct golan_manage_pages_outbox_data *)GET_OUTBOX(golan, MEM_MBOX);
+ out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
+ for (i = 0; i < out_num_entries; ++i) {
+ ufree(BE64_BUS_2_USR(out->pas[i]));
+ }
+ } else {
+ if ( rc == -EBUSY ) {
+ DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
+ } else {
+ DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
+ __FUNCTION__, rc, cmd_status_str(rc),
+ CMD_SYND(golan, MEM_CMD_IDX),
+ get_cmd( golan , MEM_CMD_IDX )->status_own,
+ be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
+ }
+ return rc;
+ }
+
+ pages -= out_num_entries;
+ }
+ DBGC( golan , "%s Pages handled\n", __FUNCTION__);
+ return 0;
+}
+
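+/* Allocate host pages and hand them to the firmware (GOLAN_PAGES_GIVE), passing at most MAX_PASE_MBOX page addresses per command */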
+static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __be16 func_id ) {
+ struct mbox *mailbox;
+ int size_ibox = sizeof(struct golan_manage_pages_inbox);
+ int size_obox = sizeof(struct golan_manage_pages_outbox);
+ int rc = 0;
+
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ while ( pages > 0 ) {
+ uint32_t pas_num = min(pages, MAX_PASE_MBOX);
+ unsigned i, j;
+ struct golan_cmd_layout *cmd;
+ struct golan_manage_pages_inbox *in;
+ userptr_t addr = 0;
+
+ mailbox = GET_INBOX(golan, MEM_MBOX);
+ size_ibox += (pas_num * GOLAN_PAS_SIZE);
+ size_obox += (pas_num * GOLAN_PAS_SIZE);
+
+ cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_GIVE,
+ MEM_MBOX, MEM_MBOX,
+ size_ibox,
+ size_obox);
+
+		in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CAN'T USE THE LAST 2 FIELDS) */
+
+ in->func_id = func_id; /* Already BE */
+ in->num_entries = cpu_to_be32(pas_num);
+
+ for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j ) {
+ if (!(addr = umalloc(GOLAN_PAGE_SIZE))) {
+ rc = -ENOMEM;
+				DBGC (golan ,"Couldn't allocate page\n");
+ goto malloc_dma_failed;
+ }
+ if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
+				DBGC (golan ,"Addr not page aligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
+ }
+ mailbox->mblock.data[j] = USR_2_BE64_BUS(addr);
+ }
+
+ if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
+ pages -= pas_num;
+ golan->total_dma_pages += pas_num;
+ } else {
+ if ( rc == -EBUSY ) {
+ DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
+ } else {
+ DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
+ __FUNCTION__, rc, cmd_status_str(rc),
+ CMD_SYND(golan, MEM_CMD_IDX),
+ get_cmd( golan , MEM_CMD_IDX )->status_own,
+ be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
+ }
+ ufree ( addr );
+ goto err_send_command;
+ }
+ }
+ DBGC( golan , "%s Pages handled\n", __FUNCTION__);
+ return 0;
+
+err_send_command:
+malloc_dma_failed:
+ /* Go over In box and free pages */
+ /* Send Error to FW */
+ /* What is next - Disable HCA? */
+ DBGC (golan ,"%s Failed (rc = 0x%x)\n", __FUNCTION__, rc);
+ return rc;
+}
+
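+/* Query the firmware's page requirements for the requested stage, then either provide new pages or reclaim all previously provided pages according to mode */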
+static inline int golan_handle_pages(struct golan *golan,
+ enum golan_qry_pages_mode qry,
+ enum golan_manage_pages_mode mode)
+{
+ struct golan_cmd_layout *cmd;
+
+ int rc = 0;
+ int32_t pages;
+ uint16_t total_pages;
+ __be16 func_id;
+
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, qry,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_query_pages_inbox),
+ sizeof(struct golan_query_pages_outbox));
+
+ rc = send_command_and_wait(golan, MEM_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_handle_pages_query );
+
+ pages = be32_to_cpu(QRY_PAGES_OUT(golan, MEM_CMD_IDX)->num_pages);
+
+ DBGC( golan , "%s pages needed: %d\n", __FUNCTION__, pages);
+
+ func_id = QRY_PAGES_OUT(golan, MEM_CMD_IDX)->func_id;
+
+ total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));
+
+ if ( mode == GOLAN_PAGES_GIVE ) {
+ rc = golan_provide_pages(golan, total_pages, func_id);
+ } else {
+ rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
+ golan->total_dma_pages = 0;
+ }
+
+ if ( rc ) {
+ DBGC (golan , "Failed to %s pages (rc = %d) - DMA pages allocated = %d\n",
+ ( ( mode == GOLAN_PAGES_GIVE ) ? "give" : "take" ), rc , golan->total_dma_pages );
+ return rc;
+ }
+
+ return 0;
+
+err_handle_pages_query:
+	DBGC (golan ,"%s Query pages failed (rc = 0x%x)\n", __FUNCTION__, rc);
+ return rc;
+}
+
+static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( unused )), uint32_t reg __attribute__ (( unused )))
+{
+#if 0
+ write_cmd(golan, _CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_reg_host_endianess),
+ sizeof(struct golan_reg_host_endianess));
+ in->arg = cpu_to_be32(arg);
+ in->register_id = cpu_to_be16(reg_num);
+#endif
+ DBGC (golan ," %s Not implemented yet\n", __FUNCTION__);
+ return 0;
+}
+
+static inline void golan_cmd_uninit ( struct golan *golan )
+{
+ free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
+ free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+ free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
+}
+
+/**
+ * Initialise Golan Command Q parameters
+ * -- Allocate a 4KB page for the Command Q
+ * -- Read the stride and log num commands available
+ * -- Write the address to cmdq_phy_addr in iseg
+ * @v golan Golan device
+ */
+static inline int golan_cmd_init ( struct golan *golan )
+{
+ int rc = 0;
+ uint32_t addr_l_sz;
+
+ if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+ rc = -ENOMEM;
+ goto malloc_dma_failed;
+ }
+ if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+ rc = -ENOMEM;
+ goto malloc_dma_inbox_failed;
+ }
+ if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+ rc = -ENOMEM;
+ goto malloc_dma_outbox_failed;
+ }
+ addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
+
+ golan->cmd.log_stride = addr_l_sz & 0xf;
+ golan->cmd.size = 1 << (( addr_l_sz >> 4 ) & 0xf);
+
+ addr_l_sz = virt_to_bus(golan->cmd.addr);
+ writel(0 /* cpu_to_be32(golan->cmd.addr) >> 32 */, &golan->iseg->cmdq_addr_h);
+ writel(cpu_to_be32(addr_l_sz), &golan->iseg->cmdq_addr_l_sz);
+ wmb(); //Make sure the addr is visible in "memory".
+
+ addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
+
+ DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
+ return 0;
+
+malloc_dma_outbox_failed:
+ free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+malloc_dma_inbox_failed:
+ free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
+malloc_dma_failed:
+ DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
+ __FUNCTION__, rc);
+ return rc;
+}
+
+static inline int golan_hca_init(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ int rc = 0;
+
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_INIT_HCA, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_cmd_init_hca_mbox_in),
+ sizeof(struct golan_cmd_init_hca_mbox_out));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ return rc;
+}
+
+static inline void golan_teardown_hca(struct golan *golan, enum golan_teardown op_mod)
+{
+ struct golan_cmd_layout *cmd;
+ int rc;
+
+ DBGC (golan, "%s in\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_TEARDOWN_HCA, op_mod,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_cmd_teardown_hca_mbox_in),
+ sizeof(struct golan_cmd_teardown_hca_mbox_out));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+
+	DBGC (golan, "%s HCA teardown completed\n", __FUNCTION__);
+}
+
+static inline int golan_alloc_uar(struct golan *golan)
+{
+ struct golan_uar *uar = &golan->uar;
+ struct golan_cmd_layout *cmd;
+ struct golan_alloc_uar_mbox_out *out;
+ int rc;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_UAR, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_alloc_uar_mbox_in),
+ sizeof(struct golan_alloc_uar_mbox_out));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_uar_cmd );
+ out = (struct golan_alloc_uar_mbox_out *) ( cmd->out );
+
+ uar->index = be32_to_cpu(out->uarn) & 0xffffff;
+
+ uar->phys = (pci_bar_start(golan->pci, GOLAN_HCA_BAR) + (uar->index << GOLAN_PAGE_SHIFT));
+ uar->virt = (void *)(ioremap(uar->phys, GOLAN_PAGE_SIZE));
+
+ DBGC( golan , "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index);
+ return 0;
+
+err_alloc_uar_cmd:
+ DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
+static void golan_dealloc_uar(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ uint32_t uar_index = golan->uar.index;
+ int rc;
+
+ DBGC (golan, "%s in\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_UAR, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_free_uar_mbox_in),
+ sizeof(struct golan_free_uar_mbox_out));
+
+ ((struct golan_free_uar_mbox_in *)(cmd->in))->uarn = cpu_to_be32(uar_index);
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ golan->uar.index = 0;
+
+ DBGC (golan, "%s UAR (0x%x) was destroyed\n", __FUNCTION__, uar_index);
+}
+
+static void golan_eq_update_ci(struct golan_event_queue *eq, int arm)
+{
+ __be32 *addr = eq->doorbell + (arm ? 0 : 2);
+ u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+ writel(cpu_to_be32(val) , addr);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ wmb();
+}
+
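+/* Create the event queue: a page from the umalloc pool holds the EQEs, which are set to HW ownership before the CREATE_EQ command is posted; only port-change events are requested */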
+static int golan_create_eq(struct golan *golan)
+{
+ struct golan_event_queue *eq = &golan->eq;
+ struct golan_create_eq_mbox_in_data *in;
+ struct golan_cmd_layout *cmd;
+ struct golan_create_eq_mbox_out *out;
+ int rc, i;
+ userptr_t addr;
+
+ eq->cons_index = 0;
+ eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
+ addr = golan_get_page ( &golan->pages );
+ if (!addr) {
+ rc = -ENOMEM;
+ goto err_create_eq_eqe_alloc;
+ }
+ eq->eqes = (struct golan_eqe *)user_to_virt(addr, 0);
+
+ /* Set EQEs ownership bit to HW ownership */
+ for (i = 0; i < GOLAN_NUM_EQES; ++i) {
+ eq->eqes[i].owner = GOLAN_EQE_HW_OWNERSHIP;
+ }
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_EQ, 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_create_eq_mbox_in) + GOLAN_PAS_SIZE,
+ sizeof(struct golan_create_eq_mbox_out));
+
+ in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
+
+ /* Fill the physical address of the page */
+ in->pas[0] = USR_2_BE64_BUS(addr);
+ in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
+ DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
+ in->events_mask = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_eq_cmd );
+ out = (struct golan_create_eq_mbox_out *)cmd->out;
+
+ eq->eqn = out->eq_number;
+ eq->doorbell = ((void *)golan->uar.virt) + GOLAN_EQ_DOORBELL_OFFSET;
+
+ /* EQs are created in ARMED state */
+ golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
+
+ DBGC( golan , "%s: Event queue created (EQN = 0x%x)\n", __FUNCTION__, eq->eqn);
+ return 0;
+
+err_create_eq_cmd:
+ ufree(virt_to_user(golan->eq.eqes));
+err_create_eq_eqe_alloc:
+ DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
+static void golan_destory_eq(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ uint8_t eqn = golan->eq.eqn;
+ int rc;
+
+ DBGC (golan, "%s in\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_EQ, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_destroy_eq_mbox_in),
+ sizeof(struct golan_destroy_eq_mbox_out));
+
+ ((struct golan_destroy_eq_mbox_in *)(cmd->in))->eqn = eqn;
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+
+ ufree(virt_to_user(golan->eq.eqes));
+ golan->eq.eqn = 0;
+
+ DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
+}
+
+static int golan_alloc_pd(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ struct golan_alloc_pd_mbox_out *out;
+ int rc;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_PD, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_alloc_pd_mbox_in),
+ sizeof(struct golan_alloc_pd_mbox_out));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_pd_cmd );
+ out = (struct golan_alloc_pd_mbox_out *) ( cmd->out );
+
+ golan->pdn = (be32_to_cpu(out->pdn) & 0xffffff);
+ DBGC( golan , "%s: Protection domain created (PDN = 0x%x)\n", __FUNCTION__,
+ golan->pdn);
+ return 0;
+
+err_alloc_pd_cmd:
+ DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
+static void golan_dealloc_pd(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ uint32_t pdn = golan->pdn;
+ int rc;
+
+ DBGC (golan,"%s in\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_PD, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_alloc_pd_mbox_in),
+ sizeof(struct golan_alloc_pd_mbox_out));
+
+ ((struct golan_dealloc_pd_mbox_in *)(cmd->in))->pdn = cpu_to_be32(pdn);
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ golan->pdn = 0;
+
+ DBGC (golan ,"%s Protection domain (0x%x) was destroyed\n", __FUNCTION__, pdn);
+}
+
+static int golan_create_mkey(struct golan *golan)
+{
+ struct golan_create_mkey_mbox_in_data *in;
+ struct golan_cmd_layout *cmd;
+ struct golan_create_mkey_mbox_out *out;
+ int rc;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_MKEY, 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_create_mkey_mbox_in),
+ sizeof(struct golan_create_mkey_mbox_out));
+
+ in = (struct golan_create_mkey_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
+
+ in->seg.flags = GOLAN_IB_ACCESS_LOCAL_WRITE | GOLAN_IB_ACCESS_LOCAL_READ;
+ in->seg.flags_pd = cpu_to_be32(golan->pdn | GOLAN_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << GOLAN_CREATE_MKEY_SEG_QPN_BIT);
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_mkey_cmd );
+ out = (struct golan_create_mkey_mbox_out *) ( cmd->out );
+
+ golan->mkey = ((be32_to_cpu(out->mkey) & 0xffffff) << 8);
+ DBGC( golan , "%s: Got DMA Key for local access read/write (MKEY = 0x%x)\n",
+ __FUNCTION__, golan->mkey);
+ return 0;
+err_create_mkey_cmd:
+ DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
+static void golan_destroy_mkey(struct golan *golan)
+{
+ struct golan_cmd_layout *cmd;
+ u32 mkey = golan->mkey;
+ int rc;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_MKEY, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_destroy_mkey_mbox_in),
+ sizeof(struct golan_destroy_mkey_mbox_out));
+ ((struct golan_destroy_mkey_mbox_in *)(cmd->in))->mkey = cpu_to_be32(mkey >> 8);
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ golan->mkey = 0;
+
+ DBGC( golan , "%s DMA Key (0x%x) for local access write was destroyed\n"
+ , __FUNCTION__, mkey);
+}
+
+
+/**
+ * Initialise Golan PCI parameters
+ *
+ * @v golan Golan device
+ */
+static inline void golan_pci_init(struct golan *golan)
+{
+ struct pci_device *pci = golan->pci;
+
+ /* Fix up PCI device */
+ adjust_pci_device ( pci );
+
+ /* Get HCA BAR */
+ golan->iseg = ioremap ( pci_bar_start ( pci, GOLAN_HCA_BAR),
+ GOLAN_PCI_CONFIG_BAR_SIZE );
+}
+
+static inline struct golan *golan_alloc()
+{
+ void *golan = zalloc(sizeof(struct golan));
+ if ( !golan )
+ goto err_zalloc;
+
+ return golan;
+
+err_zalloc:
+ return NULL;
+}
+
+/**
+ * Create completion queue
+ *
+ * @v ibdev Infiniband device
+ * @v cq Completion queue
+ * @ret rc Return status code
+ */
+static int golan_create_cq(struct ib_device *ibdev,
+ struct ib_completion_queue *cq)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_completion_queue *golan_cq;
+ struct golan_cmd_layout *cmd;
+ struct golan_create_cq_mbox_in_data *in;
+ struct golan_create_cq_mbox_out *out;
+ int rc;
+ unsigned int i;
+ userptr_t addr;
+
+ golan_cq = zalloc(sizeof(*golan_cq));
+ if (!golan_cq) {
+ rc = -ENOMEM;
+ goto err_create_cq;
+ }
+ golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
+ golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE,
+ GOLAN_CQ_DB_RECORD_SIZE);
+ if (!golan_cq->doorbell_record) {
+ rc = -ENOMEM;
+ goto err_create_cq_db_alloc;
+ }
+
+ addr = golan_get_page ( &golan->pages );
+ if (!addr) {
+ rc = -ENOMEM;
+ goto err_create_cq_cqe_alloc;
+ }
+ golan_cq->cqes = (struct golan_cqe64 *)user_to_virt(addr, 0);
+
+ /* Set CQEs ownership bit to HW ownership */
+ for (i = 0; i < cq->num_cqes; ++i) {
+ golan_cq->cqes[i].op_own = ((GOLAN_CQE_OPCODE_NOT_VALID <<
+ GOLAN_CQE_OPCODE_BIT) |
+ GOLAN_CQE_HW_OWNERSHIP);
+ }
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_CQ, 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_create_cq_mbox_in) + GOLAN_PAS_SIZE,
+ sizeof(struct golan_create_cq_mbox_out));
+
+ in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
+
+ /* Fill the physical address of the page */
+ in->pas[0] = USR_2_BE64_BUS(addr);
+ in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5;
+ in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
+ in->ctx.c_eqn = cpu_to_be16(golan->eq.eqn);
+ in->ctx.db_record_addr = VIRT_2_BE64_BUS(golan_cq->doorbell_record);
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_cq_cmd );
+ out = (struct golan_create_cq_mbox_out *) ( cmd->out );
+
+ cq->cqn = (be32_to_cpu(out->cqn) & 0xffffff);
+
+ ib_cq_set_drvdata(cq, golan_cq);
+
+ DBGC( golan , "%s CQ created successfully (CQN = 0x%lx)\n", __FUNCTION__, cq->cqn);
+ return 0;
+
+err_create_cq_cmd:
+ ufree(virt_to_user(golan_cq->cqes));
+err_create_cq_cqe_alloc:
+ free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+err_create_cq_db_alloc:
+ free ( golan_cq );
+err_create_cq:
+ DBGC (golan ,"%s out rc = 0x%x\n", __FUNCTION__, rc);
+ return rc;
+}
+
+/**
+ * Destroy completion queue
+ *
+ * @v ibdev Infiniband device
+ * @v cq Completion queue
+ */
+static void golan_destroy_cq(struct ib_device *ibdev,
+ struct ib_completion_queue *cq)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
+ struct golan_cmd_layout *cmd;
+ uint32_t cqn = cq->cqn;
+ int rc;
+
+ DBGC (golan, "%s in\n", __FUNCTION__);
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_CQ, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_destroy_cq_mbox_in),
+ sizeof(struct golan_destroy_cq_mbox_out));
+ ((struct golan_destroy_cq_mbox_in *)(cmd->in))->cqn = cpu_to_be32(cqn);
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ cq->cqn = 0;
+
+ ib_cq_set_drvdata(cq, NULL);
+ ufree(virt_to_user(golan_cq->cqes));
+ free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+ free(golan_cq);
+
+ DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
+}
+
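+/**
+ * Poll a completion queue to drain any outstanding completions
+ *
+ * @v cq		Completion queue
+ */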
+static void golan_cq_clean(struct ib_completion_queue *cq)
+{
+ ib_poll_cq(cq->ibdev, cq);
+}
+
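+/**
+ * Convert queue pair type to Golan service type
+ *
+ * @v type		Queue pair type
+ * @ret st		Golan service type, or negative error
+ */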
+static int golan_qp_type_to_st(enum ib_queue_pair_type type)
+{
+ int qpt = type;
+
+ switch (qpt) {
+ case IB_QPT_RC:
+ return GOLAN_QP_ST_RC;
+ case IB_QPT_UD:
+ return GOLAN_QP_ST_UD;
+ case IB_QPT_SMI:
+ return GOLAN_QP_ST_QP0;
+ case IB_QPT_GSI:
+ return GOLAN_QP_ST_QP1;
+ case IB_QPT_ETH:
+ default:
+ return -EINVAL;
+ }
+}
+#if 0
+static int golan_is_special_qp(enum ib_queue_pair_type type)
+{
+ return (type == IB_QPT_GSI || type == IB_QPT_SMI);
+}
+#endif
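+
+/**
+ * Create queue pair (implementation)
+ *
+ * @v ibdev		Infiniband device
+ * @v qp		Queue pair
+ * @v qpn		Queue pair number to fill in
+ * @ret rc		Return status code
+ */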
+static int golan_create_qp_aux(struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ int *qpn)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_queue_pair *golan_qp;
+ struct golan_create_qp_mbox_in_data *in;
+ struct golan_cmd_layout *cmd;
+ struct golan_wqe_data_seg *data;
+ struct golan_create_qp_mbox_out *out;
+ userptr_t addr;
+ uint32_t wqe_size_in_bytes;
+ uint32_t max_qp_size_in_wqes;
+ unsigned int i;
+ int rc;
+
+ golan_qp = zalloc(sizeof(*golan_qp));
+ if (!golan_qp) {
+ rc = -ENOMEM;
+ goto err_create_qp;
+ }
+
+ if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
+ ( qp->type == IB_QPT_UD ) ) {
+ golan_qp->rq.grh_size = ( qp->recv.num_wqes *
+ sizeof ( golan_qp->rq.grh[0] ));
+ }
+
+ /* Calculate receive queue size */
+ golan_qp->rq.size = qp->recv.num_wqes * GOLAN_RECV_WQE_SIZE;
+ if (GOLAN_RECV_WQE_SIZE > be16_to_cpu(golan->caps.max_wqe_sz_rq)) {
+ DBGC (golan ,"%s receive wqe size [%zd] > max wqe size [%d]\n", __FUNCTION__,
+ GOLAN_RECV_WQE_SIZE, be16_to_cpu(golan->caps.max_wqe_sz_rq));
+ rc = -EINVAL;
+ goto err_create_qp_rq_size;
+ }
+
+ wqe_size_in_bytes = sizeof(golan_qp->sq.wqes[0]);
+ /* Calculate send queue size */
+ if (wqe_size_in_bytes > be16_to_cpu(golan->caps.max_wqe_sz_sq)) {
+ DBGC (golan ,"%s send WQE size [%d] > max WQE size [%d]\n", __FUNCTION__,
+ wqe_size_in_bytes,
+ be16_to_cpu(golan->caps.max_wqe_sz_sq));
+ rc = -EINVAL;
+ goto err_create_qp_sq_wqe_size;
+ }
+ golan_qp->sq.size = (qp->send.num_wqes * wqe_size_in_bytes);
+ max_qp_size_in_wqes = (1 << ((uint32_t)(golan->caps.log_max_qp_sz)));
+ if (qp->send.num_wqes > max_qp_size_in_wqes) {
+ DBGC (golan ,"%s send wq size [%d] > max wq size [%d]\n", __FUNCTION__,
+ golan_qp->sq.size, max_qp_size_in_wqes);
+ rc = -EINVAL;
+ goto err_create_qp_sq_size;
+ }
+
+ golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
+
+	/* Allocate DMA memory for the WQEs (a single page is enough for now; should be revisited) */
+ addr = golan_get_page ( &golan->pages );
+ if (!addr) {
+ rc = -ENOMEM;
+ goto err_create_qp_wqe_alloc;
+ }
+ golan_qp->wqes = user_to_virt(addr, 0);
+ golan_qp->rq.wqes = golan_qp->wqes;
+ golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
+ //(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);
+
+ if ( golan_qp->rq.grh_size ) {
+ golan_qp->rq.grh = ( golan_qp->wqes +
+ golan_qp->sq.size +
+ golan_qp->rq.size );
+ }
+
+ /* Invalidate all WQEs */
+ data = &golan_qp->rq.wqes[0].data[0];
+ for ( i = 0 ; i < ( golan_qp->rq.size / sizeof ( *data ) ); i++ ){
+ data->lkey = cpu_to_be32 ( GOLAN_INVALID_LKEY );
+ data++;
+ }
+
+ golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
+ sizeof(struct golan_qp_db));
+ if (!golan_qp->doorbell_record) {
+ rc = -ENOMEM;
+ goto err_create_qp_db_alloc;
+ }
+ memset(golan_qp->doorbell_record, 0, sizeof(struct golan_qp_db));
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_QP, 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_create_qp_mbox_in) + GOLAN_PAS_SIZE,
+ sizeof(struct golan_create_qp_mbox_out));
+
+ in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
+
+ /* Fill the physical address of the page */
+ in->pas[0] = USR_2_BE64_BUS(addr);
+ in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
+
+ in->ctx.flags_pd = cpu_to_be32(golan->pdn);
+ in->ctx.flags = cpu_to_be32((golan_qp_type_to_st(qp->type)
+ << GOLAN_QP_CTX_ST_BIT) |
+ (GOLAN_QP_PM_MIGRATED <<
+ GOLAN_QP_CTX_PM_STATE_BIT));
+// cgs set to 0, initially.
+// atomic mode
+ in->ctx.rq_size_stride = ((ilog2(qp->recv.num_wqes) <<
+ GOLAN_QP_CTX_RQ_SIZE_BIT) |
+ (sizeof(golan_qp->rq.wqes[0]) / GOLAN_RECV_WQE_SIZE));
+ in->ctx.sq_crq_size = cpu_to_be16(ilog2(golan_qp->sq.size / GOLAN_SEND_WQE_BB_SIZE)
+ << GOLAN_QP_CTX_SQ_SIZE_BIT);
+ in->ctx.cqn_send = cpu_to_be32(qp->send.cq->cqn);
+ in->ctx.cqn_recv = cpu_to_be32(qp->recv.cq->cqn);
+ in->ctx.db_rec_addr = VIRT_2_BE64_BUS(golan_qp->doorbell_record);
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_qp_cmd );
+ out = (struct golan_create_qp_mbox_out *)cmd->out;
+
+ *qpn = (be32_to_cpu(out->qpn) & 0xffffff);
+ /*
+ * Hardware wants QPN written in big-endian order (after
+ * shifting) for send doorbell. Precompute this value to save
+ * a little bit when posting sends.
+ */
+ golan_qp->doorbell_qpn = cpu_to_be32(*qpn << 8);
+ golan_qp->state = GOLAN_IB_QPS_RESET;
+
+ ib_qp_set_drvdata(qp, golan_qp);
+
+ return 0;
+
+err_create_qp_cmd:
+ free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+err_create_qp_db_alloc:
+ ufree((userptr_t)golan_qp->wqes);
+err_create_qp_wqe_alloc:
+err_create_qp_sq_size:
+err_create_qp_sq_wqe_size:
+err_create_qp_rq_size:
+ free ( golan_qp );
+err_create_qp:
+ return rc;
+}
+
+/**
+ * Create queue pair
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @ret rc Return status code
+ */
+static int golan_create_qp(struct ib_device *ibdev,
+ struct ib_queue_pair *qp)
+{
+ int rc, qpn = -1;
+
+ switch (qp->type) {
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ rc = golan_create_qp_aux(ibdev, qp, &qpn);
+ if (rc) {
+ DBG ( "%s Failed to create QP (rc = 0x%x)\n", __FUNCTION__, rc);
+ return rc;
+ }
+ qp->qpn = qpn;
+
+ break;
+ case IB_QPT_ETH:
+ case IB_QPT_RC:
+ default:
+ DBG ( "%s unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int golan_modify_qp_rst_to_init(struct ib_device *ibdev,
+				       struct ib_queue_pair *qp,
+ struct golan_modify_qp_mbox_in_data *in)
+{
+ int rc = 0;
+
+ in->ctx.qkey = cpu_to_be32((uint32_t)(qp->qkey));
+
+ in->ctx.pri_path.port = ibdev->port;
+ in->ctx.flags |= cpu_to_be32(GOLAN_QP_PM_MIGRATED << GOLAN_QP_CTX_PM_STATE_BIT);
+ in->ctx.pri_path.pkey_index = 0; /* default index */
+ /* QK is 0 */
+ /* QP cntr set 0 */
+ return rc;
+}
+
+static int golan_modify_qp_init_to_rtr(struct ib_device *ibdev __unused,
+ struct ib_queue_pair *qp __unused,
+ struct golan_modify_qp_mbox_in_data *in)
+{
+ int rc = 0;
+
+ in->optparam = 0;
+ return rc;
+}
+
+static int golan_modify_qp_rtr_to_rts(struct ib_device *ibdev __unused,
+ struct ib_queue_pair *qp __unused,
+ struct golan_modify_qp_mbox_in_data *in __unused)
+{
+ int rc = 0;
+
+ in->optparam = 0;
+	/* In the good flow, the PSN is 0 */
+ return rc;
+}
+
+static int golan_modify_qp_to_rst(struct ib_device *ibdev,
+ struct ib_queue_pair *qp)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
+ struct golan_cmd_layout *cmd;
+ int rc;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_2RST_QP, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_modify_qp_mbox_in),
+ sizeof(struct golan_modify_qp_mbox_out));
+ ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_2rst_cmd );
+
+ golan_qp->state = GOLAN_IB_QPS_RESET;
+ DBGC( golan , "%s QP number 0x%lx was modified to RESET\n",
+ __FUNCTION__, qp->qpn);
+
+ return 0;
+
+err_modify_qp_2rst_cmd:
+ DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
+ __FUNCTION__, qp->qpn, rc);
+ return rc;
+}
+
+static int (*golan_modify_qp_methods[])(struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct golan_modify_qp_mbox_in_data *in) = {
+
+ [GOLAN_IB_QPS_RESET] = golan_modify_qp_rst_to_init,
+ [GOLAN_IB_QPS_INIT] = golan_modify_qp_init_to_rtr,
+ [GOLAN_IB_QPS_RTR] = golan_modify_qp_rtr_to_rts
+};
+
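+/**
+ * Modify queue pair (bring it from its current state up to RTS)
+ *
+ * @v ibdev		Infiniband device
+ * @v qp		Queue pair
+ * @ret rc		Return status code
+ */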
+static int golan_modify_qp(struct ib_device *ibdev,
+ struct ib_queue_pair *qp)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
+ struct golan_modify_qp_mbox_in_data *in;
+ struct golan_cmd_layout *cmd;
+ enum golan_ib_qp_state prev_state;
+ int rc;
+ int modify_cmd[] = {GOLAN_CMD_OP_RST2INIT_QP,
+ GOLAN_CMD_OP_INIT2RTR_QP,
+ GOLAN_CMD_OP_RTR2RTS_QP};
+
+ while (golan_qp->state < GOLAN_IB_QPS_RTS) {
+ prev_state = golan_qp->state;
+ cmd = write_cmd(golan, DEF_CMD_IDX, modify_cmd[golan_qp->state], 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_modify_qp_mbox_in),
+ sizeof(struct golan_modify_qp_mbox_out));
+
+ in = (struct golan_modify_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
+ ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
+ rc = golan_modify_qp_methods[golan_qp->state](ibdev, qp, in);
+ if (rc) {
+ goto err_modify_qp_fill_inbox;
+ }
+// in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_cmd );
+
+ ++(golan_qp->state);
+
+ DBGC( golan , "%s QP number 0x%lx was modified from %s to %s\n",
+ __FUNCTION__, qp->qpn, golan_qp_state_as_string[prev_state],
+ golan_qp_state_as_string[golan_qp->state]);
+ }
+
+ DBGC( golan , "%s QP number 0x%lx is ready to receive/send packets.\n",
+ __FUNCTION__, qp->qpn);
+ return 0;
+
+err_modify_qp_cmd:
+err_modify_qp_fill_inbox:
+ DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
+ __FUNCTION__, qp->qpn, rc);
+ return rc;
+}
+
+/**
+ * Destroy queue pair
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ */
+static void golan_destroy_qp(struct ib_device *ibdev,
+ struct ib_queue_pair *qp)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
+ struct golan_cmd_layout *cmd;
+ unsigned long qpn = qp->qpn;
+ int rc;
+
+ DBGC (golan, "%s in\n", __FUNCTION__);
+
+ if (golan_qp->state != GOLAN_IB_QPS_RESET) {
+ if (golan_modify_qp_to_rst(ibdev, qp)) {
+ DBGC (golan ,"%s Failed to modify QP 0x%lx to RESET\n", __FUNCTION__,
+ qp->qpn);
+ }
+ }
+
+ if (qp->recv.cq) {
+ golan_cq_clean(qp->recv.cq);
+ }
+ if (qp->send.cq && (qp->send.cq != qp->recv.cq)) {
+ golan_cq_clean(qp->send.cq);
+ }
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_QP, 0x0,
+ NO_MBOX, NO_MBOX,
+ sizeof(struct golan_destroy_qp_mbox_in),
+ sizeof(struct golan_destroy_qp_mbox_out));
+ ((struct golan_destroy_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qpn);
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+ qp->qpn = 0;
+
+ ib_qp_set_drvdata(qp, NULL);
+ free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+ ufree((userptr_t)golan_qp->wqes);
+ free(golan_qp);
+
+ DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
+}
+
+/**
+ * Calculate transmission rate
+ *
+ * @v rate		Infiniband rate
+ * @ret golan_rate Golan rate
+ */
+static unsigned int golan_rate(enum ib_rate rate) {
+ return (((rate >= IB_RATE_2_5) && (rate <= IB_RATE_120)) ? (rate + 5) : 0);
+}
+
+/**
+ * Post send work queue entry
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v av Address vector
+ * @v iobuf I/O buffer
+ * @ret rc Return status code
+ */
+static int golan_post_send(struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct ib_address_vector *av,
+ struct io_buffer *iobuf)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
+ struct golan_send_wqe_ud *wqe = NULL;
+ struct golan_av *datagram = NULL;
+ unsigned long wqe_idx_mask;
+ unsigned long wqe_idx;
+ struct golan_wqe_data_seg *data = NULL;
+ struct golan_wqe_ctrl_seg *ctrl = NULL;
+// static uint8_t toggle = 0;
+
+
+ wqe_idx_mask = (qp->send.num_wqes - 1);
+ wqe_idx = (qp->send.next_idx & wqe_idx_mask);
+ if (qp->send.iobufs[wqe_idx]) {
+ DBGC (golan ,"%s Send queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
+ return -ENOMEM;
+ }
+
+ qp->send.iobufs[wqe_idx] = iobuf;
+
+ // change to this
+ //wqe_size_in_octa_words = golan_qp->sq.wqe_size_in_wqebb >> 4;
+
+ wqe = &golan_qp->sq.wqes[wqe_idx].ud;
+
+ //CHECK HW OWNERSHIP BIT ???
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ ctrl = &wqe->ctrl;
+ ctrl->opmod_idx_opcode = cpu_to_be32(GOLAN_SEND_OPCODE |
+ ((u32)(golan_qp->sq.next_idx) <<
+ GOLAN_WQE_CTRL_WQE_IDX_BIT));
+ ctrl->qpn_ds = cpu_to_be32(GOLAN_SEND_UD_WQE_SIZE >> 4) |
+ golan_qp->doorbell_qpn;
+	ctrl->fm_ce_se = 0x8;	/* ce = 2 : always generate a CQE (fm = 0, se = 0) */
+ data = &wqe->data;
+ data->byte_count = cpu_to_be32(iob_len(iobuf));
+ data->lkey = cpu_to_be32(golan->mkey);
+ data->addr = VIRT_2_BE64_BUS(iobuf->data);
+
+ datagram = &wqe->datagram;
+ datagram->key.qkey.qkey = cpu_to_be32(av->qkey);
+ datagram->dqp_dct = cpu_to_be32((1 << 31) | av->qpn);
+ datagram->stat_rate_sl = ((golan_rate(av->rate) << 4) | av->sl);
+ datagram->fl_mlid = (ibdev->lid & 0x007f); /* take only the 7 low bits of the LID */
+ datagram->rlid = cpu_to_be16(av->lid);
+ datagram->grh_gid_fl = cpu_to_be32(av->gid_present << 30);
+ memcpy(datagram->rgid, av->gid.bytes, 16 /* sizeof(datagram->rgid) */);
+
+ /*
+ * Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ ++(qp->send.next_idx);
+ golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
+ golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
+ wmb();
+ writeq(*((__be64 *)ctrl), golan->uar.virt + 0x800);// +
+// ((toggle++ & 0x1) ? 0x100 : 0x0));
+ return 0;
+}
+
+/**
+ * Post receive work queue entry
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v iobuf I/O buffer
+ * @ret rc Return status code
+ */
+static int golan_post_recv(struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ struct io_buffer *iobuf)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
+ struct ib_work_queue *wq = &qp->recv;
+ struct golan_recv_wqe_ud *wqe;
+ struct ib_global_route_header *grh;
+ struct golan_wqe_data_seg *data;
+ unsigned int wqe_idx_mask;
+
+ /* Allocate work queue entry */
+ wqe_idx_mask = (wq->num_wqes - 1);
+ if (wq->iobufs[wq->next_idx & wqe_idx_mask]) {
+ DBGC (golan ,"%s Receive queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
+ return -ENOMEM;
+ }
+
+ wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
+ wqe = & golan_qp->rq.wqes[wq->next_idx & wqe_idx_mask];
+
+ memset(wqe, 0, sizeof(*wqe));
+ data = &wqe->data[0];
+ if ( golan_qp->rq.grh ) {
+ grh = &golan_qp->rq.grh[wq->next_idx & wqe_idx_mask];
+ data->byte_count = cpu_to_be32 ( sizeof ( *grh ) );
+ data->lkey = cpu_to_be32 ( golan->mkey );
+ data->addr = VIRT_2_BE64_BUS ( grh );
+ data++;
+ }
+
+ data->byte_count = cpu_to_be32(iob_tailroom(iobuf));
+ data->lkey = cpu_to_be32(golan->mkey);
+ data->addr = VIRT_2_BE64_BUS(iobuf->data);
+
+ ++wq->next_idx;
+
+ /*
+ * Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+ golan_qp->doorbell_record->recv_db = cpu_to_be16(qp->recv.next_idx & 0xffff);
+
+ return 0;
+}
+
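+/**
+ * Query HCA vport context (node GUID, LID, SM LID and port state)
+ *
+ * @v ibdev		Infiniband device
+ * @ret rc		Return status code
+ */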
+static int golan_query_vport_context ( struct ib_device *ibdev ) {
+ struct golan *golan = ib_get_drvdata ( ibdev );
+ struct golan_cmd_layout *cmd;
+ struct golan_query_hca_vport_context_data *context_data;
+ int rc;
+
+ cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT,
+ 0x0, GEN_MBOX, GEN_MBOX,
+ sizeof(struct golan_query_hca_vport_context_inbox),
+ sizeof(struct golan_query_hca_vport_context_outbox) );
+
+ ((struct golan_query_hca_vport_context_inbox *)(cmd->in))->port_num = (u8)ibdev->port;
+
+ rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_context_cmd );
+
+ context_data = (struct golan_query_hca_vport_context_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
+
+ ibdev->node_guid.dwords[0] = context_data->node_guid[0];
+ ibdev->node_guid.dwords[1] = context_data->node_guid[1];
+ ibdev->lid = be16_to_cpu( context_data->lid );
+ ibdev->sm_lid = be16_to_cpu( context_data->sm_lid );
+ ibdev->sm_sl = context_data->sm_sl;
+ ibdev->port_state = context_data->port_state;
+
+ return 0;
+err_query_vport_context_cmd:
+ DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
+
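+/**
+ * Query HCA vport GID
+ *
+ * @v ibdev		Infiniband device
+ * @ret rc		Return status code
+ */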
+static int golan_query_vport_gid ( struct ib_device *ibdev ) {
+ struct golan *golan = ib_get_drvdata( ibdev );
+ struct golan_cmd_layout *cmd;
+ union ib_gid *ib_gid;
+ int rc;
+
+ cmd = write_cmd( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_GID,
+ 0x0, GEN_MBOX, GEN_MBOX,
+ sizeof(struct golan_query_hca_vport_gid_inbox),
+ sizeof(struct golan_query_hca_vport_gid_outbox) );
+
+ ((struct golan_query_hca_vport_gid_inbox *)(cmd->in))->port_num = (u8)ibdev->port;
+ ((struct golan_query_hca_vport_gid_inbox *)(cmd->in))->gid_index = 0;
+ rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_gid_cmd );
+
+ ib_gid = (union ib_gid *)( GET_OUTBOX ( golan, GEN_MBOX ) );
+
+ memcpy ( &ibdev->gid, ib_gid, sizeof(ibdev->gid) );
+
+ return 0;
+err_query_vport_gid_cmd:
+ DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
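+/**
+ * Query HCA vport partition key table
+ *
+ * @v ibdev		Infiniband device
+ * @ret rc		Return status code
+ */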
+static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
+ struct golan *golan = ib_get_drvdata ( ibdev );
+ struct golan_cmd_layout *cmd;
+ //struct golan_query_hca_vport_pkey_data *pkey_table;
+ int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
+ int rc;
+
+ cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY,
+ 0x0, GEN_MBOX, GEN_MBOX,
+ sizeof(struct golan_query_hca_vport_pkey_inbox),
+ sizeof(struct golan_outbox_hdr) + 8 +
+ sizeof(struct golan_query_hca_vport_pkey_data) * pkey_table_size_in_entries );
+
+ ((struct golan_query_hca_vport_pkey_inbox *)(cmd->in))->port_num = (u8)ibdev->port;
+ ((struct golan_query_hca_vport_pkey_inbox *)(cmd->in))->pkey_index = 0xffff;
+ rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
+
+ //pkey_table = (struct golan_query_hca_vport_pkey_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
+
+ return 0;
+err_query_vport_pkey_cmd:
+ DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
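+/**
+ * Retrieve Infiniband parameters (vport context, GID and PKey table)
+ *
+ * @v ibdev		Infiniband device
+ * @ret rc		Return status code
+ */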
+static int golan_get_ib_info ( struct ib_device *ibdev ) {
+ int rc;
+
+ rc = golan_query_vport_context ( ibdev );
+ if ( rc != 0 ) {
+ DBG ( "golan_get_ib_info: golan_query_vport_context Failed (rc = %d)\n",rc );
+ goto err_query_vport_context;
+ }
+
+ rc = golan_query_vport_gid ( ibdev );
+ if ( rc != 0 ) {
+ DBG ( "golan_get_ib_info: golan_query_vport_gid Failed (rc = %d)\n",rc );
+ goto err_query_vport_gid;
+ }
+
+ rc = golan_query_vport_pkey ( ibdev );
+ if ( rc != 0 ) {
+ DBG ( "golan_get_ib_info: golan_query_vport_pkey Failed (rc = %d)\n",rc );
+ goto err_query_vport_pkey;
+ }
+ return rc;
+err_query_vport_pkey:
+err_query_vport_gid:
+err_query_vport_context:
+ DBG ( "%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
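+/**
+ * Handle a single completion queue entry
+ *
+ * @v ibdev		Infiniband device
+ * @v cq		Completion queue
+ * @v cqe64		Completion queue entry
+ * @ret rc		Return status code
+ */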
+static int golan_complete(struct ib_device *ibdev,
+ struct ib_completion_queue *cq,
+ struct golan_cqe64 *cqe64)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct ib_work_queue *wq;
+ struct golan_queue_pair *golan_qp;
+ struct ib_queue_pair *qp;
+ struct io_buffer *iobuf = NULL;
+ struct ib_address_vector recv_dest;
+ struct ib_address_vector recv_source;
+ struct ib_global_route_header *grh;
+ struct golan_err_cqe *err_cqe64;
+ int gid_present, idx;
+ u16 wqe_ctr;
+ uint8_t opcode;
+ static int error_state;
+ uint32_t qpn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
+ int is_send = 0;
+ size_t len;
+
+ opcode = cqe64->op_own >> GOLAN_CQE_OPCODE_BIT;
+ DBGC2( golan , "%s completion with opcode 0x%x\n", __FUNCTION__, opcode);
+
+ if (opcode == GOLAN_CQE_REQ || opcode == GOLAN_CQE_REQ_ERR) {
+ is_send = 1;
+ } else {
+ is_send = 0;
+ }
+ if (opcode == GOLAN_CQE_REQ_ERR || opcode == GOLAN_CQE_RESP_ERR) {
+ err_cqe64 = (struct golan_err_cqe *)cqe64;
+ int i = 0;
+ if (!error_state++) {
+ DBGC (golan ,"\n");
+ for ( i = 0 ; i < 16 ; i += 2 ) {
+ DBGC (golan ,"%x %x\n",
+ be32_to_cpu(((uint32_t *)(err_cqe64))[i]),
+ be32_to_cpu(((uint32_t *)(err_cqe64))[i + 1]));
+ }
+ DBGC (golan ,"CQE with error: Syndrome(0x%x), VendorSynd(0x%x), HW_SYN(0x%x)\n",
+ err_cqe64->syndrome, err_cqe64->vendor_err_synd,
+ err_cqe64->hw_syndrom);
+ }
+ }
+ /* Identify work queue */
+ wq = ib_find_wq(cq, qpn, is_send);
+ if (!wq) {
+ DBGC (golan ,"%s unknown %s QPN 0x%x in CQN 0x%lx\n",
+ __FUNCTION__, (is_send ? "send" : "recv"), qpn, cq->cqn);
+ return -EINVAL;
+ }
+
+ qp = wq->qp;
+ golan_qp = ib_qp_get_drvdata ( qp );
+
+ wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+ if (is_send) {
+ wqe_ctr &= ((GOLAN_WQEBBS_PER_SEND_UD_WQE * wq->num_wqes) - 1);
+ idx = wqe_ctr / GOLAN_WQEBBS_PER_SEND_UD_WQE;
+ } else {
+ idx = wqe_ctr & (wq->num_wqes - 1);
+ }
+
+ iobuf = wq->iobufs[idx];
+ if (!iobuf) {
+ DBGC (golan ,"%s IO Buffer 0x%x not found in QPN 0x%x\n",
+ __FUNCTION__, idx, qpn);
+ return -EINVAL;
+ }
+ wq->iobufs[idx] = NULL;
+
+ if (is_send) {
+ ib_complete_send(ibdev, qp, iobuf, (opcode == GOLAN_CQE_REQ_ERR));
+ } else {
+ len = be32_to_cpu(cqe64->byte_cnt);
+ memset(&recv_dest, 0, sizeof(recv_dest));
+ recv_dest.qpn = qpn;
+ /* Construct address vector */
+ memset(&recv_source, 0, sizeof(recv_source));
+ switch (qp->type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ /* Locate corresponding GRH */
+ assert ( golan_qp->rq.grh != NULL );
+ grh = &golan_qp->rq.grh[ idx ];
+
+ recv_source.qpn = be32_to_cpu(cqe64->flags_rqpn) & 0xffffff;
+ recv_source.lid = be16_to_cpu(cqe64->slid);
+ recv_source.sl = (be32_to_cpu(cqe64->flags_rqpn) >> 24) & 0xf;
+ gid_present = (be32_to_cpu(cqe64->flags_rqpn) >> 28) & 3;
+ if (!gid_present) {
+ recv_dest.gid_present = recv_source.gid_present = 0;
+ } else {
+ recv_dest.gid_present = recv_source.gid_present = 1;
+ //if (recv_source.gid_present == 0x1) {
+ memcpy(&recv_source.gid, &grh->sgid, sizeof(recv_source.gid));
+ memcpy(&recv_dest.gid, &grh->dgid, sizeof(recv_dest.gid));
+ //} else { // recv_source.gid_present = 0x3
+ /* GRH is located in the upper 64 byte of the CQE128
+ * currently not supported */
+ //;
+ //}
+ }
+ len -= sizeof ( *grh );
+ break;
+ case IB_QPT_RC:
+ case IB_QPT_ETH:
+ default:
+ DBGC (golan ,"%s Unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
+ return -EINVAL;
+ }
+ assert(len <= iob_tailroom(iobuf));
+ iob_put(iobuf, len);
+ ib_complete_recv(ibdev, qp, &recv_dest, &recv_source, iobuf, (opcode == GOLAN_CQE_RESP_ERR));
+ }
+ return 0;
+}
+
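+/**
+ * Check completion queue entry ownership
+ *
+ * @v cq		Completion queue
+ * @v cqe64		Completion queue entry
+ * @ret owned		Non-zero if the entry is still owned by hardware
+ */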
+static int golan_is_hw_ownership(struct ib_completion_queue *cq,
+ struct golan_cqe64 *cqe64)
+{
+ return ((cqe64->op_own & GOLAN_CQE_OWNER_MASK) !=
+ ((cq->next_idx >> ilog2(cq->num_cqes)) & 1));
+}
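+
+/**
+ * Poll completion queue
+ *
+ * @v ibdev		Infiniband device
+ * @v cq		Completion queue
+ */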
+static void golan_poll_cq(struct ib_device *ibdev,
+ struct ib_completion_queue *cq)
+{
+ unsigned int i;
+ int rc = 0;
+ unsigned int cqe_idx_mask;
+ struct golan_cqe64 *cqe64;
+ struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
+ struct golan *golan = ib_get_drvdata(ibdev);
+
+ for (i = 0; i < cq->num_cqes; ++i) {
+ /* Look for completion entry */
+ cqe_idx_mask = (cq->num_cqes - 1);
+ cqe64 = &golan_cq->cqes[cq->next_idx & cqe_idx_mask];
+ /* temporary valid only for 64 byte CQE */
+ if (golan_is_hw_ownership(cq, cqe64) ||
+ ((cqe64->op_own >> GOLAN_CQE_OPCODE_BIT) ==
+ GOLAN_CQE_OPCODE_NOT_VALID)) {
+ break; /* HW ownership */
+ }
+
+ DBGC2( golan , "%s CQN 0x%lx [%ld] \n", __FUNCTION__, cq->cqn, cq->next_idx);
+ /*
+ * Make sure we read CQ entry contents after we've checked the
+ * ownership bit. (PRM - 6.5.3.2)
+ */
+ rmb();
+ rc = golan_complete(ibdev, cq, cqe64);
+ if (rc != 0) {
+ DBGC (golan ,"%s CQN 0x%lx failed to complete\n", __FUNCTION__, cq->cqn);
+ }
+
+ /* Update completion queue's index */
+ cq->next_idx++;
+
+ /* Update doorbell record */
+ *(golan_cq->doorbell_record) = cpu_to_be32(cq->next_idx & 0xffffff);
+ }
+}
+
+static const char *golan_eqe_type_str(u8 type)
+{
+ switch (type) {
+ case GOLAN_EVENT_TYPE_COMP:
+ return "GOLAN_EVENT_TYPE_COMP";
+ case GOLAN_EVENT_TYPE_PATH_MIG:
+ return "GOLAN_EVENT_TYPE_PATH_MIG";
+ case GOLAN_EVENT_TYPE_COMM_EST:
+ return "GOLAN_EVENT_TYPE_COMM_EST";
+ case GOLAN_EVENT_TYPE_SQ_DRAINED:
+ return "GOLAN_EVENT_TYPE_SQ_DRAINED";
+ case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
+ return "GOLAN_EVENT_TYPE_SRQ_LAST_WQE";
+ case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
+ return "GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT";
+ case GOLAN_EVENT_TYPE_CQ_ERROR:
+ return "GOLAN_EVENT_TYPE_CQ_ERROR";
+ case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
+ return "GOLAN_EVENT_TYPE_WQ_CATAS_ERROR";
+ case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
+ return "GOLAN_EVENT_TYPE_PATH_MIG_FAILED";
+ case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ return "GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
+ case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
+ return "GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR";
+ case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
+ return "GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR";
+ case GOLAN_EVENT_TYPE_INTERNAL_ERROR:
+ return "GOLAN_EVENT_TYPE_INTERNAL_ERROR";
+ case GOLAN_EVENT_TYPE_PORT_CHANGE:
+ return "GOLAN_EVENT_TYPE_PORT_CHANGE";
+ case GOLAN_EVENT_TYPE_GPIO_EVENT:
+ return "GOLAN_EVENT_TYPE_GPIO_EVENT";
+ case GOLAN_EVENT_TYPE_REMOTE_CONFIG:
+ return "GOLAN_EVENT_TYPE_REMOTE_CONFIG";
+ case GOLAN_EVENT_TYPE_DB_BF_CONGESTION:
+ return "GOLAN_EVENT_TYPE_DB_BF_CONGESTION";
+ case GOLAN_EVENT_TYPE_STALL_EVENT:
+ return "GOLAN_EVENT_TYPE_STALL_EVENT";
+ case GOLAN_EVENT_TYPE_CMD:
+ return "GOLAN_EVENT_TYPE_CMD";
+ case GOLAN_EVENT_TYPE_PAGE_REQUEST:
+ return "GOLAN_EVENT_TYPE_PAGE_REQUEST";
+ default:
+ return "Unrecognized event";
+ }
+}
+
+static const char *golan_eqe_port_subtype_str(u8 subtype)
+{
+ switch (subtype) {
+ case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
+ return "GOLAN_PORT_CHANGE_SUBTYPE_DOWN";
+ case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
+ return "GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE";
+ case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
+ return "GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED";
+ case GOLAN_PORT_CHANGE_SUBTYPE_LID:
+ return "GOLAN_PORT_CHANGE_SUBTYPE_LID";
+ case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
+ return "GOLAN_PORT_CHANGE_SUBTYPE_PKEY";
+ case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
+ return "GOLAN_PORT_CHANGE_SUBTYPE_GUID";
+ case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ return "GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG";
+ default:
+ return "Unrecognized event";
+ }
+}
+
+/**
+ * Update Infiniband parameters using Commands
+ *
+ * @v ibdev Infiniband device
+ * @ret rc Return status code
+ */
+static int golan_ib_update ( struct ib_device *ibdev ) {
+ int rc;
+
+ /* Get IB parameters */
+ if ( ( rc = golan_get_ib_info ( ibdev ) ) != 0 )
+ return rc;
+
+ /* Notify Infiniband core of potential link state change */
+ ib_link_state_changed ( ibdev );
+
+ return 0;
+}
+
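+/**
+ * Handle port state change event
+ *
+ * @v golan		Golan device
+ * @v eqe		Event queue entry
+ */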
+static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe)
+{
+ struct ib_device *ibdev;
+ u8 port;
+
+ port = (eqe->data.port.port >> 4) & 0xf;
+ ibdev = golan->ports[port - 1].ibdev;
+
+ if ( ! ib_is_open ( ibdev ) )
+ return;
+
+ switch (eqe->sub_type) {
+ case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
+		golan_ib_update ( ibdev );
+		/* Fall through to log the event */
+ case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
+ case GOLAN_PORT_CHANGE_SUBTYPE_LID:
+ case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
+ case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
+ case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
+ DBGC( golan , "%s event %s(%d) (sub event %s(%d))arrived on port %d\n",
+ __FUNCTION__, golan_eqe_type_str(eqe->type), eqe->type,
+ golan_eqe_port_subtype_str(eqe->sub_type),
+ eqe->sub_type, port);
+ break;
+ default:
+ DBGC (golan ,"%s Port event with unrecognized subtype: port %d, sub_type %d\n",
+ __FUNCTION__, port, eqe->sub_type);
+ }
+}
+
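+/**
+ * Get next software-owned event queue entry
+ *
+ * @v eq		Event queue
+ * @ret eqe		Event queue entry, or NULL if none is available
+ */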
+static struct golan_eqe *golan_next_eqe_sw(struct golan_event_queue *eq)
+{
+ uint32_t entry = (eq->cons_index & (GOLAN_NUM_EQES - 1));
+ struct golan_eqe *eqe = &(eq->eqes[entry]);
+ return ((eqe->owner != ((eq->cons_index >> ilog2(GOLAN_NUM_EQES)) & 1)) ? NULL : eqe);
+}
+
+
+/**
+ * Poll event queue
+ *
+ * @v ibdev Infiniband device
+ */
+static void golan_poll_eq(struct ib_device *ibdev)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_event_queue *eq = &(golan->eq);
+ struct golan_eqe *eqe;
+ u32 cqn;
+ int counter = 0;
+
+ while ((eqe = golan_next_eqe_sw(eq)) && (counter < GOLAN_NUM_EQES)) {
+ /*
+ * Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ rmb();
+
+ DBGC( golan , "%s eqn %d, eqe type %s\n", __FUNCTION__, eq->eqn,
+ golan_eqe_type_str(eqe->type));
+ switch (eqe->type) {
+ case GOLAN_EVENT_TYPE_COMP:
+		/* We don't need to handle completion events since we
+ * poll all the CQs after polling the EQ */
+ break;
+ case GOLAN_EVENT_TYPE_PATH_MIG:
+ case GOLAN_EVENT_TYPE_COMM_EST:
+ case GOLAN_EVENT_TYPE_SQ_DRAINED:
+ case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
+ case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
+ case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
+ case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
+ DBGC( golan , "%s event %s(%d) arrived\n", __FUNCTION__,
+ golan_eqe_type_str(eqe->type), eqe->type);
+ break;
+ case GOLAN_EVENT_TYPE_CMD:
+// golan_cmd_comp_handler(be32_to_cpu(eqe->data.cmd.vector));
+ break;
+ case GOLAN_EVENT_TYPE_PORT_CHANGE:
+ golan_handle_port_event(golan, eqe);
+ break;
+ case GOLAN_EVENT_TYPE_CQ_ERROR:
+ cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+ DBGC (golan ,"CQ error on CQN 0x%x, syndrom 0x%x\n",
+ cqn, eqe->data.cq_err.syndrome);
+// mlx5_cq_event(dev, cqn, eqe->type);
+ break;
+ case GOLAN_EVENT_TYPE_PAGE_REQUEST:
+ {
+			/* We should check whether we get this event while we
+			 * are waiting for a command */
+ u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+ s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+
+ DBGC (golan ,"%s page request for func 0x%x, napges %d\n",
+ __FUNCTION__, func_id, npages);
+ golan_provide_pages(golan, npages, func_id);
+ }
+ break;
+ default:
+ DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
+ eqe->type, eq->eqn);
+ break;
+ }
+
+ ++eq->cons_index;
+ golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
+ ++counter;
+ }
+}
+
+/**
+ * Attach to multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ * @ret rc Return status code
+ */
+static int golan_mcast_attach(struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ union ib_gid *gid)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_cmd_layout *cmd;
+ int rc;
+
+ if ( qp == NULL ) {
+ DBGC( golan, "%s: Invalid pointer, could not attach QPN to MCG\n",
+ __FUNCTION__ );
+ return -EFAULT;
+ }
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ATTACH_TO_MCG, 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_attach_mcg_mbox_in),
+ sizeof(struct golan_attach_mcg_mbox_out));
+ ((struct golan_attach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
+
+ memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_CHECK_RC_AND_CMD_STATUS( err_attach_to_mcg_cmd );
+
+ DBGC( golan , "%s: QPN 0x%lx was attached to MCG\n", __FUNCTION__, qp->qpn);
+ return 0;
+err_attach_to_mcg_cmd:
+ DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
+ return rc;
+}
+
+/**
+ * Detach from multicast group
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v gid Multicast GID
+ */
+static void golan_mcast_detach(struct ib_device *ibdev,
+ struct ib_queue_pair *qp,
+ union ib_gid *gid)
+{
+ struct golan *golan = ib_get_drvdata(ibdev);
+ struct golan_cmd_layout *cmd;
+ int rc;
+
+ cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DETACH_FROM_MCG, 0x0,
+ GEN_MBOX, NO_MBOX,
+ sizeof(struct golan_detach_mcg_mbox_in),
+ sizeof(struct golan_detach_mcg_mbox_out));
+ ((struct golan_detach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
+
+ memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
+
+ rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
+ GOLAN_PRINT_RC_AND_CMD_STATUS;
+
+ DBGC( golan , "%s: QPN 0x%lx was detached from MCG\n", __FUNCTION__, qp->qpn);
+}
+
+/**
+ * Inform embedded subnet management agent of a received MAD
+ *
+ * @v ibdev Infiniband device
+ * @v mad MAD
+ * @ret rc Return status code
+ */
+static int golan_inform_sma(struct ib_device *ibdev,
+ union ib_mad *mad)
+{
+ if (!ibdev || !mad) {
+ return 1;
+ }
+
+ return 0;
+}
+
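+/**
+ * Register Infiniband device and record its IPoIB network device
+ *
+ * @v port		Golan port
+ * @ret rc		Return status code
+ */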
+static int golan_register_ibdev(struct golan_port *port)
+{
+ struct ib_device *ibdev = port->ibdev;
+ int rc;
+
+ golan_get_ib_info ( ibdev );
+ /* Register Infiniband device */
+ if ((rc = register_ibdev(ibdev)) != 0) {
+ DBG ( "%s port %d could not register IB device: (rc = %d)\n",
+ __FUNCTION__, ibdev->port, rc);
+ return rc;
+ }
+
+ port->netdev = ipoib_netdev( ibdev );
+
+ return 0;
+}
+
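+/**
+ * Bring down Golan device (release firmware and hardware resources)
+ *
+ * @v golan		Golan device
+ */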
+static inline void golan_bring_down(struct golan *golan)
+{
+
+ DBGC(golan, "%s: start\n", __FUNCTION__);
+
+ if (~golan->flags & GOLAN_OPEN) {
+ DBGC(golan, "%s: end (already closed)\n", __FUNCTION__);
+ return;
+ }
+
+ golan_destroy_mkey(golan);
+ golan_dealloc_pd(golan);
+ golan_destory_eq(golan);
+ golan_dealloc_uar(golan);
+ golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
+ golan_handle_pages(golan, GOLAN_REG_PAGES , GOLAN_PAGES_TAKE);
+ golan_disable_hca(golan);
+ golan_cmd_uninit(golan);
+ golan->flags &= ~GOLAN_OPEN;
+ DBGC(golan, "%s: end\n", __FUNCTION__);
+}
+
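+/**
+ * Set Infiniband link speed on all ports
+ *
+ * @v golan		Golan device
+ * @ret status		Return status code
+ */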
+static int golan_set_link_speed ( struct golan *golan ){
+ mlx_utils utils;
+ mlx_status status;
+ int i = 0;
+
+ memset ( &utils, 0, sizeof ( utils ) );
+
+ status = mlx_utils_init ( &utils, golan->pci );
+ MLX_CHECK_STATUS ( golan->pci, status, utils_init_err, "mlx_utils_init failed" );
+
+ status = mlx_pci_gw_init ( &utils );
+ MLX_CHECK_STATUS ( golan->pci, status, pci_gw_init_err, "mlx_pci_gw_init failed" );
+
+ for ( i = 0; i < golan->caps.num_ports; ++i ) {
+ status = mlx_set_link_speed( &utils, i + 1, LINK_SPEED_IB, LINK_SPEED_SDR );
+ MLX_CHECK_STATUS ( golan->pci, status, set_link_speed_err, "mlx_set_link_speed failed" );
+ }
+
+set_link_speed_err:
+ mlx_pci_gw_teardown( &utils );
+pci_gw_init_err:
+utils_init_err:
+ return status;
+}
+
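+/**
+ * Bring up Golan device (initialise firmware, pages, UAR, EQ, PD and MKey)
+ *
+ * @v golan		Golan device
+ * @ret rc		Return status code
+ */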
+static inline int golan_bring_up(struct golan *golan)
+{
+ int rc = 0;
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ if (golan->flags & GOLAN_OPEN)
+ return 0;
+
+ if (( rc = golan_cmd_init(golan) ))
+ goto out;
+
+ if (( rc = golan_core_enable_hca(golan) ))
+ goto cmd_uninit;
+
+ /* Query for need for boot pages */
+ if (( rc = golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_GIVE) ))
+ goto disable;
+
+ if (( rc = golan_qry_hca_cap(golan) ))
+ goto pages;
+
+ if (( rc = golan_set_hca_cap(golan) ))
+ goto pages;
+
+ if (( rc = golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_GIVE) ))
+ goto pages;
+
+ if (( rc = golan_set_link_speed ( golan ) ))
+ goto pages_teardown;
+
+ //Reg Init?
+ if (( rc = golan_hca_init(golan) ))
+ goto pages_2;
+
+ if (( rc = golan_alloc_uar(golan) ))
+ goto teardown;
+
+ if (( rc = golan_create_eq(golan) ))
+ goto de_uar;
+
+ if (( rc = golan_alloc_pd(golan) ))
+ goto de_eq;
+
+ if (( rc = golan_create_mkey(golan) ))
+ goto de_pd;
+
+ golan->flags |= GOLAN_OPEN;
+ return 0;
+
+ golan_destroy_mkey(golan);
+de_pd:
+ golan_dealloc_pd(golan);
+de_eq:
+ golan_destory_eq(golan);
+de_uar:
+ golan_dealloc_uar(golan);
+teardown:
+ golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
+pages_2:
+pages_teardown:
+ golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_TAKE);
+pages:
+ golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_TAKE);
+disable:
+ golan_disable_hca(golan);
+cmd_uninit:
+ golan_cmd_uninit(golan);
+out:
+ return rc;
+}
+
+/**
+ * Close Infiniband link
+ *
+ * @v ibdev Infiniband device
+ */
+static void golan_ib_close ( struct ib_device *ibdev __unused ) {}
+
+/**
+ * Initialise Infiniband link
+ *
+ * @v ibdev Infiniband device
+ * @ret rc Return status code
+ */
+static int golan_ib_open ( struct ib_device *ibdev ) {
+ DBG ( "%s start\n", __FUNCTION__ );
+
+ if ( ! ibdev )
+ return -EINVAL;
+
+ golan_ib_update ( ibdev );
+
+ DBG ( "%s end\n", __FUNCTION__ );
+ return 0;
+}
+
+/** Golan Infiniband operations */
+static struct ib_device_operations golan_ib_operations = {
+ .create_cq = golan_create_cq,
+ .destroy_cq = golan_destroy_cq,
+ .create_qp = golan_create_qp,
+ .modify_qp = golan_modify_qp,
+ .destroy_qp = golan_destroy_qp,
+ .post_send = golan_post_send,
+ .post_recv = golan_post_recv,
+ .poll_cq = golan_poll_cq,
+ .poll_eq = golan_poll_eq,
+ .open = golan_ib_open,
+ .close = golan_ib_close,
+ .mcast_attach = golan_mcast_attach,
+ .mcast_detach = golan_mcast_detach,
+ .set_port_info = golan_inform_sma,
+ .set_pkey_table = golan_inform_sma,
+};
+
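+/**
+ * Probe PCI device using the normal (non-NODNIC) driver
+ *
+ * @v pci		PCI device
+ * @ret rc		Return status code
+ */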
+static int golan_probe_normal ( struct pci_device *pci ) {
+ struct golan *golan;
+ struct ib_device *ibdev;
+ struct golan_port *port;
+ int i;
+ int rc = 0;
+
+ golan = golan_alloc();
+ if ( !golan ) {
+ rc = -ENOMEM;
+ goto err_golan_alloc;
+ }
+
+ if ( golan_init_pages( &golan->pages ) ) {
+ rc = -ENOMEM;
+ goto err_golan_golan_init_pages;
+ }
+
+ /* Setup PCI bus and HCA BAR */
+ pci_set_drvdata( pci, golan );
+ golan->pci = pci;
+ golan_pci_init( golan );
+ /* config command queues */
+ if ( fw_ver_and_cmdif( golan ) ) {
+ rc = -1;
+ goto err_fw_ver_cmdif;
+ }
+
+ if ( golan_bring_up( golan ) ) {
+ DBGC (golan ,"golan bringup failed\n");
+ rc = -1;
+ goto err_golan_bringup;
+ }
+
+ /* Allocate Infiniband devices */
+ for (i = 0; i < golan->caps.num_ports; ++i) {
+ ibdev = alloc_ibdev( 0 );
+ if ( !ibdev ) {
+ rc = -ENOMEM;
+ goto err_golan_probe_alloc_ibdev;
+ }
+ golan->ports[i].ibdev = ibdev;
+ golan->ports[i].vep_number = 0;
+ ibdev->op = &golan_ib_operations;
+ ibdev->dev = &pci->dev;
+ ibdev->port = (GOLAN_PORT_BASE + i);
+ ib_set_drvdata( ibdev, golan );
+ }
+
+ /* Register devices */
+ for ( i = 0; i < golan->caps.num_ports; ++i ) {
+ port = &golan->ports[i];
+ if ((rc = golan_register_ibdev ( port ) ) != 0 )
+ goto err_golan_probe_register_ibdev;
+ }
+
+ return 0;
+
+ i = golan->caps.num_ports;
+err_golan_probe_register_ibdev:
+ for ( i-- ; ( signed int ) i >= 0 ; i-- )
+ unregister_ibdev ( golan->ports[i].ibdev );
+
+ i = golan->caps.num_ports;
+err_golan_probe_alloc_ibdev:
+ for ( i-- ; ( signed int ) i >= 0 ; i-- )
+ ibdev_put ( golan->ports[i].ibdev );
+
+ golan_bring_down ( golan );
+err_golan_bringup:
+err_fw_ver_cmdif:
+ golan_free_pages( &golan->pages );
+err_golan_golan_init_pages:
+ free ( golan );
+err_golan_alloc:
+ DBGC (golan ,"%s rc = %d\n", __FUNCTION__, rc);
+ return rc;
+}
+
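+/**
+ * Remove PCI device probed by the normal (non-NODNIC) driver
+ *
+ * @v pci		PCI device
+ */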
+static void golan_remove_normal ( struct pci_device *pci ) {
+ struct golan *golan = pci_get_drvdata(pci);
+ struct golan_port *port;
+ int i;
+
+ DBGC(golan, "%s\n", __FUNCTION__);
+
+ for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
+ port = &golan->ports[i];
+ unregister_ibdev ( port->ibdev );
+ }
+ for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
+ netdev_nullify ( golan->ports[i].netdev );
+ netdev_put ( golan->ports[i].netdev );
+ }
+ for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
+ ibdev_put ( golan->ports[i].ibdev );
+ }
+
+ golan_bring_down(golan);
+
+ golan_free_pages( &golan->pages );
+ free(golan);
+}
+
+/***************************************************************************
+ * NODNIC operations
+ **************************************************************************/
+static mlx_status shomron_fill_eth_send_wqe ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp, struct ib_address_vector *av __unused,
+ struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb,
+ unsigned long wqe_index ) {
+ mlx_status status = MLX_SUCCESS;
+ struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
+ struct shomron_nodnic_eth_send_wqe *eth_wqe = NULL;
+ struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
+ struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp =
+ ib_qp_get_drvdata ( qp );
+ nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
+ struct nodnic_send_ring *send_ring = &nodnic_qp->send;
+ mlx_uint32 qpn = 0;
+
+ eth_wqe = (struct shomron_nodnic_eth_send_wqe *)wqbb;
+	memset ( eth_wqe, 0, sizeof ( *eth_wqe ) );
+
+ status = nodnic_port_get_qpn(&port->port_priv, &send_ring->nodnic_ring,
+ &qpn);
+ if ( status != MLX_SUCCESS ) {
+ DBG("nodnic_port_get_qpn failed\n");
+ goto err;
+ }
+
+#define SHOMRON_GENERATE_CQE 0x3
+#define SHOMRON_INLINE_HEADERS_SIZE 18
+#define SHOMRON_INLINE_HEADERS_OFFSET 32
+ MLX_FILL_2 ( &eth_wqe->ctrl, 0, opcode, FLEXBOOT_NODNIC_OPCODE_SEND,
+ wqe_index, wqe_index & 0xFFFF);
+ MLX_FILL_2 ( &eth_wqe->ctrl, 1, ds, 0x4 , qpn, qpn );
+ MLX_FILL_1 ( &eth_wqe->ctrl, 2,
+ ce, SHOMRON_GENERATE_CQE /* generate completion */
+ );
+ MLX_FILL_2 ( &eth_wqe->ctrl, 7,
+ inline_headers1,
+ cpu_to_be16(*(mlx_uint16 *)iobuf->data),
+ inline_headers_size, SHOMRON_INLINE_HEADERS_SIZE
+ );
+ memcpy((void *)&eth_wqe->ctrl + SHOMRON_INLINE_HEADERS_OFFSET,
+ iobuf->data + 2, SHOMRON_INLINE_HEADERS_SIZE - 2);
+ iob_pull(iobuf, SHOMRON_INLINE_HEADERS_SIZE);
+ MLX_FILL_1 ( &eth_wqe->data[0], 0,
+ byte_count, iob_len ( iobuf ) );
+ MLX_FILL_1 ( &eth_wqe->data[0], 1, l_key,
+ flexboot_nodnic->device_priv.lkey );
+ MLX_FILL_H ( &eth_wqe->data[0], 2,
+ local_address_h, virt_to_bus ( iobuf->data ) );
+ MLX_FILL_1 ( &eth_wqe->data[0], 3,
+ local_address_l, virt_to_bus ( iobuf->data ) );
+err:
+ return status;
+}
+
+static mlx_status shomron_fill_completion( void *cqe, struct cqe_data *cqe_data ) {
+ union shomronprm_completion_entry *cq_entry;
+ uint32_t opcode;
+
+ cq_entry = (union shomronprm_completion_entry *)cqe;
+ cqe_data->owner = MLX_GET ( &cq_entry->normal, owner );
+ opcode = MLX_GET ( &cq_entry->normal, opcode );
+#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND 0
+#define FLEXBOOT_NODNIC_OPCODE_CQ_RECV 2
+#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR 13
+#define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR 14
+ cqe_data->is_error =
+ ( opcode >= FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR);
+ if ( cqe_data->is_error ) {
+ cqe_data->syndrome = MLX_GET ( &cq_entry->error, syndrome );
+ cqe_data->vendor_err_syndrome =
+ MLX_GET ( &cq_entry->error, vendor_error_syndrome );
+ cqe_data->is_send =
+ (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR);
+ } else {
+ cqe_data->is_send =
+ (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND);
+ cqe_data->wqe_counter = MLX_GET ( &cq_entry->normal, wqe_counter );
+ cqe_data->byte_cnt = MLX_GET ( &cq_entry->normal, byte_cnt );
+
+ }
+ if ( cqe_data->is_send == TRUE )
+ cqe_data->qpn = MLX_GET ( &cq_entry->normal, qpn );
+ else
+ cqe_data->qpn = MLX_GET ( &cq_entry->normal, srqn );
+
+ return 0;
+}
+
+static mlx_status shomron_cqe_set_owner ( void *cq, unsigned int num_cqes ) {
+ unsigned int i = 0;
+ union shomronprm_completion_entry *cq_list;
+
+ cq_list = (union shomronprm_completion_entry *)cq;
+ for ( ; i < num_cqes ; i++ )
+ MLX_FILL_1 ( &cq_list[i].normal, 15, owner, 1 );
+ return 0;
+}
+
+static mlx_size shomron_get_cqe_size ( void ) {
+ return sizeof ( union shomronprm_completion_entry );
+}
+
+struct flexboot_nodnic_callbacks shomron_nodnic_callbacks = {
+ .get_cqe_size = shomron_get_cqe_size,
+ .fill_send_wqe[IB_QPT_ETH] = shomron_fill_eth_send_wqe,
+ .fill_completion = shomron_fill_completion,
+ .cqe_set_owner = shomron_cqe_set_owner,
+ .irq = flexboot_nodnic_eth_irq,
+};
+
+static int shomron_nodnic_supported = 0;
+
+static int shomron_nodnic_is_supported ( struct pci_device *pci ) {
+ if ( pci->device == 0x1011 )
+ return 0;
+
+ return flexboot_nodnic_is_supported ( pci );
+}
+/**************************************************************************/
+
+static int golan_probe ( struct pci_device *pci ) {
+ int rc = -ENOTSUP;
+
+ DBG ( "%s: start\n", __FUNCTION__ );
+
+ if ( ! pci ) {
+ DBG ( "%s: PCI is NULL\n", __FUNCTION__ );
+ rc = -EINVAL;
+ goto probe_done;
+ }
+
+ shomron_nodnic_supported = shomron_nodnic_is_supported ( pci );
+ if ( shomron_nodnic_supported ) {
+ rc = flexboot_nodnic_probe ( pci, &shomron_nodnic_callbacks, NULL );
+ if ( rc == 0 ) {
+ DBG ( "%s: Using NODNIC driver\n", __FUNCTION__ );
+ goto probe_done;
+ }
+ shomron_nodnic_supported = 0;
+ }
+
+ if ( ! shomron_nodnic_supported ) {
+ DBG ( "%s: Using normal driver\n", __FUNCTION__ );
+ rc = golan_probe_normal ( pci );
+ }
+
+probe_done:
+ DBG ( "%s: rc = %d\n", __FUNCTION__, rc );
+ return rc;
+}
+
+static void golan_remove ( struct pci_device *pci ) {
+ DBG ( "%s: start\n", __FUNCTION__ );
+
+ if ( ! shomron_nodnic_supported ) {
+ DBG ( "%s: Using normal driver remove\n", __FUNCTION__ );
+ golan_remove_normal ( pci );
+ return;
+ }
+
+ DBG ( "%s: Using NODNIC driver remove\n", __FUNCTION__ );
+
+ flexboot_nodnic_remove ( pci );
+
+ DBG ( "%s: end\n", __FUNCTION__ );
+}
+
+static struct pci_device_id golan_nics[] = {
+ PCI_ROM ( 0x15b3, 0x1011, "ConnectIB", "ConnectIB HCA driver: DevID 4113", 0 ),
+ PCI_ROM ( 0x15b3, 0x1013, "ConnectX-4", "ConnectX-4 HCA driver, DevID 4115", 0 ),
+ PCI_ROM ( 0x15b3, 0x1015, "ConnectX-4Lx", "ConnectX-4Lx HCA driver, DevID 4117", 0 ),
+};
+
+struct pci_driver golan_driver __pci_driver = {
+ .ids = golan_nics,
+ .id_count = (sizeof(golan_nics) / sizeof(golan_nics[0])),
+ .probe = golan_probe,
+ .remove = golan_remove,
+};
diff --git a/src/drivers/infiniband/golan.h b/src/drivers/infiniband/golan.h
new file mode 100755
index 00000000..6e96f750
--- /dev/null
+++ b/src/drivers/infiniband/golan.h
@@ -0,0 +1,319 @@
+#ifndef _GOLAN_H_
+#define _GOLAN_H_
+
+/*
+ * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include <byteswap.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <ipxe/io.h>
+#include <ipxe/pci.h>
+#include <ipxe/pcibackup.h>
+#include "CIB_PRM.h"
+
+#define GOLAN_PCI_CONFIG_BAR_SIZE 0x100000//HERMON_PCI_CONFIG_BAR_SIZE //TODO: What is the BAR size?
+
+#define GOLAN_PAS_SIZE sizeof(uint64_t)
+
+#define GOLAN_INVALID_LKEY 0x00000100UL
+
+#define GOLAN_MAX_PORTS 2
+#define GOLAN_PORT_BASE 1
+
+#define MELLANOX_VID 0x15b3
+#define GOLAN_HCA_BAR PCI_BASE_ADDRESS_0 //BAR 0
+
+#define GOLAN_HCR_MAX_WAIT_MS 10000
+
+#define min(a,b) ((a)<(b)?(a):(b))
+
+#define GOLAN_PAGE_SHIFT 12
+#define GOLAN_PAGE_SIZE (1 << GOLAN_PAGE_SHIFT)
+#define GOLAN_PAGE_MASK (GOLAN_PAGE_SIZE - 1)
+
+#define MAX_MBOX ( GOLAN_PAGE_SIZE / MAILBOX_STRIDE )
+#define DEF_CMD_IDX 1
+#define MEM_CMD_IDX 0
+#define NO_MBOX 0xffff
+#define MEM_MBOX MEM_CMD_IDX
+#define GEN_MBOX DEF_CMD_IDX
+
+#define CMD_IF_REV 4
+
+#define MAX_PASE_MBOX ((GOLAN_CMD_PAS_CNT) - 2)
+
+#define CMD_STATUS( golan , idx ) ((struct golan_outbox_hdr *)(get_cmd( (golan) , (idx) )->out))->status
+#define CMD_SYND( golan , idx ) ((struct golan_outbox_hdr *)(get_cmd( (golan) , (idx) )->out))->syndrome
+#define QRY_PAGES_OUT( golan, idx ) ((struct golan_query_pages_outbox *)(get_cmd( (golan) , (idx) )->out))
+
+#define VIRT_2_BE64_BUS( addr ) cpu_to_be64(((unsigned long long )virt_to_bus(addr)))
+#define BE64_BUS_2_VIRT( addr ) bus_to_virt(be64_to_cpu(addr))
+#define USR_2_BE64_BUS( addr ) cpu_to_be64(((unsigned long long )user_to_phys(addr, 0)))
+#define BE64_BUS_2_USR( addr ) be64_to_cpu(phys_to_user(addr))
+
+#define GET_INBOX(golan, idx) (&(((struct mbox *)(golan->mboxes.inbox))[idx]))
+#define GET_OUTBOX(golan, idx) (&(((struct mbox *)(golan->mboxes.outbox))[idx]))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+/* Fw status fields */
+typedef enum {
+ NO_ERRORS = 0x0,
+ SIGNATURE_ERROR = 0x1,
+ TOKEN_ERROR = 0x2,
+ BAD_BLOCK_NUMBER = 0x3,
+	BAD_OUTPUT_POINTER		= 0x4, // pointer not aligned to mailbox size
+	BAD_INPUT_POINTER		= 0x5, // pointer not aligned to mailbox size
+ INTERNAL_ERROR = 0x6,
+ INPUT_LEN_ERROR = 0x7, // input length less than 0x8.
+ OUTPUT_LEN_ERROR = 0x8, // output length less than 0x8.
+ RESERVE_NOT_ZERO = 0x9,
+ BAD_CMD_TYPE = 0x10,
+} return_hdr_t;
+
+struct golan_cmdq_md {
+ void *addr;
+ u16 log_stride;
+ u16 size;
+};
+
+struct golan_uar {
+ uint32_t index;
+ void *virt;
+ unsigned long phys;
+};
+
+/* Queue Pair */
+#define GOLAN_SEND_WQE_BB_SIZE 64
+#define GOLAN_SEND_UD_WQE_SIZE sizeof(struct golan_send_wqe_ud)
+#define GOLAN_RECV_WQE_SIZE sizeof(struct golan_recv_wqe_ud)
+#define GOLAN_WQEBBS_PER_SEND_UD_WQE DIV_ROUND_UP(GOLAN_SEND_UD_WQE_SIZE, GOLAN_SEND_WQE_BB_SIZE)
+#define GOLAN_SEND_OPCODE 0x0a
+#define GOLAN_WQE_CTRL_WQE_IDX_BIT 8
+
+enum golan_ib_qp_state {
+ GOLAN_IB_QPS_RESET,
+ GOLAN_IB_QPS_INIT,
+ GOLAN_IB_QPS_RTR,
+ GOLAN_IB_QPS_RTS,
+ GOLAN_IB_QPS_SQD,
+ GOLAN_IB_QPS_SQE,
+ GOLAN_IB_QPS_ERR
+};
+
+struct golan_send_wqe_ud {
+ struct golan_wqe_ctrl_seg ctrl;
+ struct golan_av datagram;
+ struct golan_wqe_data_seg data;
+};
+
+union golan_send_wqe {
+ struct golan_send_wqe_ud ud;
+ uint8_t pad[GOLAN_WQEBBS_PER_SEND_UD_WQE * GOLAN_SEND_WQE_BB_SIZE];
+};
+
+struct golan_recv_wqe_ud {
+ struct golan_wqe_data_seg data[2];
+};
+
+struct golan_recv_wq {
+ struct golan_recv_wqe_ud *wqes;
+ /* WQ size in bytes */
+ int size;
+	/* In the SQ, this is increased by wqe_size (the number of WQEBBs per WQE) */
+ u16 next_idx;
+ /** GRH buffers (if applicable) */
+ struct ib_global_route_header *grh;
+ /** Size of GRH buffers */
+ size_t grh_size;
+};
+
+struct golan_send_wq {
+ union golan_send_wqe *wqes;
+ /* WQ size in bytes */
+ int size;
+	/* In the SQ, this is increased by wqe_size (the number of WQEBBs per WQE) */
+ u16 next_idx;
+};
+
+struct golan_queue_pair {
+ void *wqes;
+ int size;
+ struct golan_recv_wq rq;
+ struct golan_send_wq sq;
+ struct golan_qp_db *doorbell_record;
+ u32 doorbell_qpn;
+ enum golan_ib_qp_state state;
+};
+
+/* Completion Queue */
+#define GOLAN_CQE_OPCODE_NOT_VALID 0x0f
+#define GOLAN_CQE_OPCODE_BIT 4
+#define GOLAN_CQ_DB_RECORD_SIZE sizeof(uint64_t)
+#define GOLAN_CQE_OWNER_MASK 1
+
+#define MANAGE_PAGES_PSA_OFFSET 0
+#define PXE_CMDIF_REF 5
+
+enum {
+ GOLAN_CQE_SW_OWNERSHIP = 0x0,
+ GOLAN_CQE_HW_OWNERSHIP = 0x1
+};
+
+enum {
+ GOLAN_CQE_SIZE_64 = 0,
+ GOLAN_CQE_SIZE_128 = 1
+};
+
+struct golan_completion_queue {
+ struct golan_cqe64 *cqes;
+ int size;
+ __be64 *doorbell_record;
+};
+
+
+/* Event Queue */
+#define GOLAN_EQE_SIZE sizeof(struct golan_eqe)
+#define GOLAN_NUM_EQES 8
+#define GOLAN_EQ_DOORBELL_OFFSET 0x40
+
+#define GOLAN_EQ_MAP_ALL_EVENTS \
+ ((1 << GOLAN_EVENT_TYPE_PATH_MIG )| \
+ (1 << GOLAN_EVENT_TYPE_COMM_EST )| \
+ (1 << GOLAN_EVENT_TYPE_SQ_DRAINED )| \
+ (1 << GOLAN_EVENT_TYPE_SRQ_LAST_WQE )| \
+ (1 << GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT )| \
+ (1 << GOLAN_EVENT_TYPE_CQ_ERROR )| \
+ (1 << GOLAN_EVENT_TYPE_WQ_CATAS_ERROR )| \
+ (1 << GOLAN_EVENT_TYPE_PATH_MIG_FAILED )| \
+ (1 << GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR )| \
+ (1 << GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR )| \
+ (1 << GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR )| \
+ (1 << GOLAN_EVENT_TYPE_INTERNAL_ERROR )| \
+ (1 << GOLAN_EVENT_TYPE_PORT_CHANGE )| \
+ (1 << GOLAN_EVENT_TYPE_GPIO_EVENT )| \
+ (1 << GOLAN_EVENT_TYPE_CLIENT_RE_REGISTER )| \
+ (1 << GOLAN_EVENT_TYPE_REMOTE_CONFIG )| \
+ (1 << GOLAN_EVENT_TYPE_DB_BF_CONGESTION )| \
+ (1 << GOLAN_EVENT_TYPE_STALL_EVENT )| \
+ (1 << GOLAN_EVENT_TYPE_PACKET_DROPPED )| \
+ (1 << GOLAN_EVENT_TYPE_CMD )| \
+ (1 << GOLAN_EVENT_TYPE_PAGE_REQUEST ))
+
+enum golan_event {
+ GOLAN_EVENT_TYPE_COMP = 0x0,
+
+ GOLAN_EVENT_TYPE_PATH_MIG = 0x01,
+ GOLAN_EVENT_TYPE_COMM_EST = 0x02,
+ GOLAN_EVENT_TYPE_SQ_DRAINED = 0x03,
+ GOLAN_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
+ GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
+
+ GOLAN_EVENT_TYPE_CQ_ERROR = 0x04,
+ GOLAN_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
+ GOLAN_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
+ GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
+ GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
+ GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+
+ GOLAN_EVENT_TYPE_INTERNAL_ERROR = 0x08,
+ GOLAN_EVENT_TYPE_PORT_CHANGE = 0x09,
+ GOLAN_EVENT_TYPE_GPIO_EVENT = 0x15,
+// GOLAN_EVENT_TYPE_CLIENT_RE_REGISTER = 0x16,
+ GOLAN_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+
+ GOLAN_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
+ GOLAN_EVENT_TYPE_STALL_EVENT = 0x1b,
+
+ GOLAN_EVENT_TYPE_PACKET_DROPPED = 0x1f,
+
+ GOLAN_EVENT_TYPE_CMD = 0x0a,
+ GOLAN_EVENT_TYPE_PAGE_REQUEST = 0x0b,
+ GOLAN_EVENT_TYPE_PAGE_FAULT = 0x0C,
+};
+
+enum golan_port_sub_event {
+ GOLAN_PORT_CHANGE_SUBTYPE_DOWN = 1,
+ GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
+ GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
+ GOLAN_PORT_CHANGE_SUBTYPE_LID = 6,
+ GOLAN_PORT_CHANGE_SUBTYPE_PKEY = 7,
+ GOLAN_PORT_CHANGE_SUBTYPE_GUID = 8,
+ GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9
+};
+
+
+enum {
+ GOLAN_EQE_SW_OWNERSHIP = 0x0,
+ GOLAN_EQE_HW_OWNERSHIP = 0x1
+};
+
+enum {
+ GOLAN_EQ_UNARMED = 0,
+ GOLAN_EQ_ARMED = 1,
+};
+
+struct golan_event_queue {
+ uint8_t eqn;
+ uint64_t mask;
+ struct golan_eqe *eqes;
+ int size;
+ __be32 *doorbell;
+ uint32_t cons_index;
+};
+
+struct golan_port {
+ /** Infiniband device */
+ struct ib_device *ibdev;
+ /** Network device */
+ struct net_device *netdev;
+ /** VEP number */
+ u8 vep_number;
+};
+
+struct golan_mboxes {
+ void *inbox;
+ void *outbox;
+};
+
+#define GOLAN_OPEN 0x1
+
+struct golan {
+ struct pci_device *pci;
+ struct golan_hca_init_seg *iseg;
+ struct golan_cmdq_md cmd;
+	struct golan_hca_cap		caps;	/* stored as big endian */
+ struct golan_mboxes mboxes;
+ struct list_head pages;
+ uint32_t cmd_bm;
+ uint32_t total_dma_pages;
+ struct golan_uar uar;
+ struct golan_event_queue eq;
+ uint32_t pdn;
+ u32 mkey;
+ u32 flags;
+
+ struct golan_port ports[GOLAN_MAX_PORTS];
+};
+
+#endif /* _GOLAN_H_*/
diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_cmd.h b/src/drivers/infiniband/mlx_nodnic/include/mlx_cmd.h
new file mode 100644
index 00000000..e1e89b4c
--- /dev/null
+++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_cmd.h
@@ -0,0 +1,43 @@
+#ifndef NODNIC_CMD_H_
+#define NODNIC_CMD_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_nodnic_data_structures.h"
+#include "../../mlx_utils/include/public/mlx_utils.h"
+#include "../../mlx_utils/include/public/mlx_pci_gw.h"
+
+mlx_status
+nodnic_cmd_read(
+ IN nodnic_device_priv *device_priv,
+ IN mlx_uint32 address,
+ OUT mlx_pci_gw_buffer *buffer
+ );
+
+mlx_status
+nodnic_cmd_write(
+ IN nodnic_device_priv *device_priv,
+ IN mlx_uint32 address,
+ IN mlx_pci_gw_buffer buffer
+ );
+
+#endif /* NODNIC_CMD_H_ */
diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_device.h b/src/drivers/infiniband/mlx_nodnic/include/mlx_device.h
new file mode 100644
index 00000000..b0cc7f72
--- /dev/null
+++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_device.h
@@ -0,0 +1,80 @@
+#ifndef NODNIC_DEVICE_H_
+#define NODNIC_DEVICE_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_nodnic_data_structures.h"
+
+#define NODIC_SUPPORTED_REVISION 1
+//Initialization segment
+#define NODNIC_CMDQ_PHY_ADDR_HIGH_OFFSET 0x10
+#define NODNIC_CMDQ_PHY_ADDR_LOW_OFFSET 0x14
+#define NODNIC_NIC_INTERFACE_OFFSET 0x14
+#define NODNIC_INITIALIZING_OFFSET 0x1fc
+#define NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET 0x1fc
+#define NODNIC_LOCATION_OFFSET 0x240
+
+#define NODNIC_CMDQ_PHY_ADDR_LOW_MASK 0xFFFFE000
+#define NODNIC_NIC_INTERFACE_SUPPORTED_MASK 0x4000000
+
+#define NODNIC_NIC_INTERFACE_BIT 9
+#define NODNIC_DISABLE_INTERFACE_BIT 8
+#define NODNIC_NIC_INTERFACE_SUPPORTED_BIT 26
+#define NODNIC_INITIALIZING_BIT 31
+
+#define NODNIC_NIC_DISABLE_INT_OFFSET 0x100c
+
+//nodnic segment
+#define NODNIC_REVISION_OFFSET 0x0
+#define NODNIC_HARDWARE_FORMAT_OFFSET 0x0
+
+
+
+mlx_status
+nodnic_device_init(
+ IN nodnic_device_priv *device_priv
+ );
+
+mlx_status
+nodnic_device_teardown(
+ IN nodnic_device_priv *device_priv
+ );
+
+
+mlx_status
+nodnic_device_get_cap(
+ IN nodnic_device_priv *device_priv
+ );
+
+mlx_status
+nodnic_device_clear_int (
+ IN nodnic_device_priv *device_priv
+ );
+
+mlx_status
+nodnic_device_get_fw_version(
+ IN nodnic_device_priv *device_priv,
+ OUT mlx_uint16 *fw_ver_minor,
+ OUT mlx_uint16 *fw_ver_sub_minor,
+ OUT mlx_uint16 *fw_ver_major
+ );
+#endif /* NODNIC_DEVICE_H_ */
diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_nodnic_data_structures.h b/src/drivers/infiniband/mlx_nodnic/include/mlx_nodnic_data_structures.h
new file mode 100644
index 00000000..f58213b9
--- /dev/null
+++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_nodnic_data_structures.h
@@ -0,0 +1,201 @@
+#ifndef NODNIC_NODNICDATASTRUCTURES_H_
+#define NODNIC_NODNICDATASTRUCTURES_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_utils/include/public/mlx_utils.h"
+
+/* todo: fix coding convention */
+#define NODNIC_MEMORY_ALIGN 0x1000
+
+#define NODNIC_MAX_MAC_FILTERS 5
+#define NODNIC_MAX_MGID_FILTERS 4
+
+typedef struct _nodnic_device_priv nodnic_device_priv;
+typedef struct _nodnic_port_priv nodnic_port_priv;
+typedef struct _nodnic_device_capabilites nodnic_device_capabilites;
+typedef struct _nodnic_qp nodnic_qp;
+typedef struct _nodnic_cq nodnic_cq;
+typedef struct _nodnic_eq nodnic_eq;
+
+/* NODNIC Port states
+ * Bit 0 - port open/close
+ * Bit 1 - port is [not] in disabling DMA
+ * 0 - closed and not disabling DMA
+ * 1 - opened and not disabling DMA
+ * 3 - opened and disabling DMA
+ */
+#define NODNIC_PORT_OPENED 0b00000001
+#define NODNIC_PORT_DISABLING_DMA 0b00000010
+
+typedef enum {
+ ConnectX3 = 0,
+ Connectx4
+}nodnic_hardware_format;
+
+
+typedef enum {
+ NODNIC_QPT_SMI,
+ NODNIC_QPT_GSI,
+ NODNIC_QPT_UD,
+ NODNIC_QPT_RC,
+ NODNIC_QPT_ETH,
+}nodnic_queue_pair_type;
+typedef enum {
+ NODNIC_PORT_TYPE_IB = 0,
+ NODNIC_PORT_TYPE_ETH,
+ NODNIC_PORT_TYPE_UNKNOWN,
+}nodnic_port_type;
+
+
+#define RECV_WQE_SIZE 16
+#define NODNIC_WQBB_SIZE 64
+/** A nodnic send wqbb */
+struct nodnic_send_wqbb {
+ mlx_uint8 force_align[NODNIC_WQBB_SIZE];
+};
+struct nodnic_ring {
+ mlx_uint32 offset;
+ /** Work queue entries */
+ /* TODO: add to memory entity */
+ mlx_physical_address wqe_physical;
+ mlx_void *map;
+ /** Size of work queue */
+ mlx_size wq_size;
+ mlx_uint32 num_wqes;
+ mlx_uint32 qpn;
+ /** Next work queue entry index
+ *
+ * This is the index of the next entry to be filled (i.e. the
+ * first empty entry). This value is not bounded by num_wqes;
+ * users must logical-AND with (num_wqes-1) to generate an
+ * array index.
+ */
+ mlx_uint32 next_idx;
+ mlx_uint32 ring_pi;
+};
+
+struct nodnic_send_ring{
+ struct nodnic_ring nodnic_ring;
+ struct nodnic_send_wqbb *wqe_virt;
+};
+
+
+struct nodnic_recv_ring{
+ struct nodnic_ring nodnic_ring;
+ void *wqe_virt;
+};
+struct _nodnic_qp{
+ nodnic_queue_pair_type type;
+ struct nodnic_send_ring send;
+ struct nodnic_recv_ring receive;
+};
+
+struct _nodnic_cq{
+ /** cq entries */
+ mlx_void *cq_virt;
+ mlx_physical_address cq_physical;
+ mlx_void *map;
+ /** cq */
+ mlx_size cq_size;
+};
+
+struct _nodnic_eq{
+ mlx_void *eq_virt;
+ mlx_physical_address eq_physical;
+ mlx_void *map;
+ mlx_size eq_size;
+};
+struct _nodnic_device_capabilites{
+ mlx_boolean support_mac_filters;
+ mlx_boolean support_promisc_filter;
+ mlx_boolean support_promisc_multicast_filter;
+ mlx_uint8 log_working_buffer_size;
+ mlx_uint8 log_pkey_table_size;
+ mlx_boolean num_ports; // number of ports (1 or 2), derived from the dual-port capability bit
+ mlx_uint8 log_max_ring_size;
+#ifdef DEVICE_CX3
+ mlx_uint8 crspace_doorbells;
+#endif
+};
+
+#ifdef DEVICE_CX3
+/* This is the structure of the data in the scratchpad
+ * Read/Write data from/to its field using PCI accesses only */
+typedef struct _nodnic_port_data_flow_gw nodnic_port_data_flow_gw;
+struct _nodnic_port_data_flow_gw {
+ mlx_uint32 send_doorbell;
+ mlx_uint32 recv_doorbell;
+ mlx_uint32 reserved2[2];
+ mlx_uint32 armcq_cq_ci_dword;
+ mlx_uint32 dma_en;
+} __attribute__ ((packed));
+#endif
+
+struct _nodnic_device_priv{
+ mlx_boolean is_initiailzied;
+ mlx_utils *utils;
+
+ //nodnic structure offset in init segment
+ mlx_uint32 device_offset;
+
+ nodnic_device_capabilites device_cap;
+
+ mlx_uint8 nodnic_revision;
+ nodnic_hardware_format hardware_format;
+ mlx_uint32 pd;
+ mlx_uint32 lkey;
+ mlx_uint64 device_guid;
+ nodnic_port_priv *ports;
+#ifdef DEVICE_CX3
+ mlx_void *crspace_clear_int;
+#endif
+};
+
+struct _nodnic_port_priv{
+ nodnic_device_priv *device;
+ mlx_uint32 port_offset;
+ mlx_uint8 port_state;
+ mlx_boolean network_state;
+ mlx_boolean dma_state;
+ nodnic_port_type port_type;
+ mlx_uint8 port_num;
+ nodnic_eq eq;
+ mlx_mac_address mac_filters[NODNIC_MAX_MAC_FILTERS];
+ mlx_status (*send_doorbell)(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring,
+ IN mlx_uint16 index);
+ mlx_status (*recv_doorbell)(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring,
+ IN mlx_uint16 index);
+ mlx_status (*set_dma)(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value);
+#ifdef DEVICE_CX3
+ nodnic_port_data_flow_gw *data_flow_gw;
+#endif
+};
+
+
+#endif /* NODNIC_NODNICDATASTRUCTURES_H_ */
diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_port.h b/src/drivers/infiniband/mlx_nodnic/include/mlx_port.h
new file mode 100644
index 00000000..4fd96a6d
--- /dev/null
+++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_port.h
@@ -0,0 +1,229 @@
+#ifndef NODNIC_PORT_H_
+#define NODNIC_PORT_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_nodnic_data_structures.h"
+
+#define NODNIC_PORT_MAC_FILTERS_OFFSET 0x10
+
+typedef enum {
+ nodnic_port_option_link_type = 0,
+ nodnic_port_option_mac_low,
+ nodnic_port_option_mac_high,
+ nodnic_port_option_log_cq_size,
+ nodnic_port_option_reset_needed,
+ nodnic_port_option_mac_filters_en,
+ nodnic_port_option_port_state,
+ nodnic_port_option_network_en,
+ nodnic_port_option_dma_en,
+ nodnic_port_option_eq_addr_low,
+ nodnic_port_option_eq_addr_high,
+ nodnic_port_option_cq_addr_low,
+ nodnic_port_option_cq_addr_high,
+ nodnic_port_option_port_management_change_event,
+ nodnic_port_option_port_promisc_en,
+ nodnic_port_option_arm_cq,
+ nodnic_port_option_port_promisc_multicast_en,
+#ifdef DEVICE_CX3
+ nodnic_port_option_crspace_en,
+#endif
+}nodnic_port_option;
+
+struct nodnic_port_data_entry{
+ nodnic_port_option option;
+ mlx_uint32 offset;
+ mlx_uint8 align;
+ mlx_uint32 mask;
+};
+
+struct nodnic_qp_data_entry{
+ nodnic_queue_pair_type type;
+ mlx_uint32 send_offset;
+ mlx_uint32 recv_offset;
+};
+
+
+typedef enum {
+ nodnic_port_state_down = 0,
+ nodnic_port_state_initialize,
+ nodnic_port_state_armed,
+ nodnic_port_state_active,
+}nodnic_port_state;
+
+mlx_status
+nodnic_port_get_state(
+ IN nodnic_port_priv *port_priv,
+ OUT nodnic_port_state *state
+ );
+
+mlx_status
+nodnic_port_get_type(
+ IN nodnic_port_priv *port_priv,
+ OUT nodnic_port_type *type
+ );
+
+mlx_status
+nodnic_port_query(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_port_option option,
+ OUT mlx_uint32 *out
+ );
+
+mlx_status
+nodnic_port_set(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_port_option option,
+ IN mlx_uint32 in
+ );
+
+mlx_status
+nodnic_port_create_cq(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_size cq_size,
+ OUT nodnic_cq **cq
+ );
+
+mlx_status
+nodnic_port_destroy_cq(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_cq *cq
+ );
+
+mlx_status
+nodnic_port_create_qp(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_queue_pair_type type,
+ IN mlx_size send_wq_size,
+ IN mlx_uint32 send_wqe_num,
+ IN mlx_size receive_wq_size,
+ IN mlx_uint32 recv_wqe_num,
+ OUT nodnic_qp **qp
+ );
+
+mlx_status
+nodnic_port_destroy_qp(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_queue_pair_type type,
+ IN nodnic_qp *qp
+ );
+mlx_status
+nodnic_port_get_qpn(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring,
+ OUT mlx_uint32 *qpn
+ );
+mlx_status
+nodnic_port_update_ring_doorbell(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring,
+ IN mlx_uint16 index
+ );
+mlx_status
+nodnic_port_get_cq_size(
+ IN nodnic_port_priv *port_priv,
+ OUT mlx_uint64 *cq_size
+ );
+
+mlx_status
+nodnic_port_allocate_eq(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_uint8 log_eq_size
+ );
+mlx_status
+nodnic_port_free_eq(
+ IN nodnic_port_priv *port_priv
+ );
+
+mlx_status
+nodnic_port_add_mac_filter(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_mac_address mac
+ );
+
+mlx_status
+nodnic_port_remove_mac_filter(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_mac_address mac
+ );
+mlx_status
+nodnic_port_add_mgid_filter(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_mac_address mac
+ );
+
+mlx_status
+nodnic_port_remove_mgid_filter(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_mac_address mac
+ );
+mlx_status
+nodnic_port_thin_init(
+ IN nodnic_device_priv *device_priv,
+ IN nodnic_port_priv *port_priv,
+ IN mlx_uint8 port_index
+ );
+
+mlx_status
+nodnic_port_set_promisc(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ );
+
+mlx_status
+nodnic_port_set_promisc_multicast(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ );
+
+mlx_status
+nodnic_port_init(
+ IN nodnic_port_priv *port_priv
+ );
+
+mlx_status
+nodnic_port_close(
+ IN nodnic_port_priv *port_priv
+ );
+
+mlx_status
+nodnic_port_enable_dma(
+ IN nodnic_port_priv *port_priv
+ );
+
+mlx_status
+nodnic_port_disable_dma(
+ IN nodnic_port_priv *port_priv
+ );
+
+mlx_status
+nodnic_port_read_reset_needed(
+ IN nodnic_port_priv *port_priv,
+ OUT mlx_boolean *reset_needed
+ );
+
+mlx_status
+nodnic_port_read_port_management_change_event(
+ IN nodnic_port_priv *port_priv,
+ OUT mlx_boolean *change_event
+ );
+#endif /* NODNIC_PORT_H_ */
diff --git a/src/drivers/infiniband/mlx_nodnic/src/mlx_cmd.c b/src/drivers/infiniband/mlx_nodnic/src/mlx_cmd.c
new file mode 100644
index 00000000..69f85358
--- /dev/null
+++ b/src/drivers/infiniband/mlx_nodnic/src/mlx_cmd.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../include/mlx_cmd.h"
+#include "../../mlx_utils/include/public/mlx_pci_gw.h"
+#include "../../mlx_utils/include/public/mlx_bail.h"
+#include "../../mlx_utils/include/public/mlx_pci.h"
+#include "../../mlx_utils/include/public/mlx_logging.h"
+
+mlx_status
+nodnic_cmd_read(
+ IN nodnic_device_priv *device_priv,
+ IN mlx_uint32 address,
+ OUT mlx_pci_gw_buffer *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_utils *utils = NULL;
+
+ if ( device_priv == NULL || buffer == NULL ) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ utils = device_priv->utils;
+
+ status = mlx_pci_gw_read(utils, PCI_GW_SPACE_NODNIC, address, buffer);
+ MLX_CHECK_STATUS(device_priv, status, read_error,"mlx_pci_gw_read failed");
+
+read_error:
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_cmd_write(
+ IN nodnic_device_priv *device_priv,
+ IN mlx_uint32 address,
+ IN mlx_pci_gw_buffer buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_utils *utils = NULL;
+
+
+ if ( device_priv == NULL ) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ utils = device_priv->utils;
+
+
+ status = mlx_pci_gw_write(utils, PCI_GW_SPACE_NODNIC, address, buffer);
+ MLX_CHECK_STATUS(device_priv, status, write_error,"mlx_pci_gw_write failed");
+write_error:
+bad_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_nodnic/src/mlx_device.c b/src/drivers/infiniband/mlx_nodnic/src/mlx_device.c
new file mode 100644
index 00000000..4acc94fa
--- /dev/null
+++ b/src/drivers/infiniband/mlx_nodnic/src/mlx_device.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../include/mlx_device.h"
+#include "../include/mlx_cmd.h"
+#include "../../mlx_utils/include/public/mlx_bail.h"
+#include "../../mlx_utils/include/public/mlx_pci.h"
+#include "../../mlx_utils/include/public/mlx_memory.h"
+#include "../../mlx_utils/include/public/mlx_logging.h"
+
+#define CHECK_BIT(field, offset) (((field) & ((mlx_uint32)1 << (offset))) != 0)
+
+static
+mlx_status
+check_nodnic_interface_supported(
+ IN nodnic_device_priv* device_priv,
+ OUT mlx_boolean *out
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 output = 0;
+ status = nodnic_cmd_read(device_priv, NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET,
+ &output);
+ MLX_FATAL_CHECK_STATUS(status, read_error, "failed to read nic_interface_supported");
+ *out = CHECK_BIT(output, NODNIC_NIC_INTERFACE_SUPPORTED_BIT);
+read_error:
+ return status;
+}
+
+static
+mlx_status
+wait_for_device_initialization(
+ IN nodnic_device_priv* device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint8 try = 0;
+ mlx_uint32 buffer = 0;
+
+#define CHECK_DEVICE_INIT_TRIES 10
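+ /* Poll the initializing bit until firmware clears it (CHECK_DEVICE_INIT_TRIES polls, 100ms apart) */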
+ for( ; try < CHECK_DEVICE_INIT_TRIES ; try++){
+ status = nodnic_cmd_read(device_priv, NODNIC_INITIALIZING_OFFSET, &buffer);
+ MLX_CHECK_STATUS(device_priv, status, read_error, "failed to read initializing");
+ if( !CHECK_BIT(buffer, NODNIC_INITIALIZING_BIT)){
+ goto init_done;
+ }
+ mlx_utils_delay_in_ms(100);
+ }
+ status = MLX_FAILED;
+read_error:
+init_done:
+ return status;
+}
+
+static
+mlx_status
+disable_nodnic_inteface(
+ IN nodnic_device_priv *device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = 0;
+
+ buffer = (1 << NODNIC_DISABLE_INTERFACE_BIT);
+ status = nodnic_cmd_write(device_priv, NODNIC_CMDQ_PHY_ADDR_LOW_OFFSET, buffer);
+ MLX_FATAL_CHECK_STATUS(status, write_err, "failed to write cmdq_phy_addr + nic_interface");
+
+ status = wait_for_device_initialization(device_priv);
+ MLX_FATAL_CHECK_STATUS(status, init_err, "failed to initialize device");
+init_err:
+write_err:
+ return status;
+}
+static
+mlx_status
+nodnic_device_start_nodnic(
+ IN nodnic_device_priv *device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = 0;
+ mlx_boolean nodnic_supported = 0;
+
+ status = wait_for_device_initialization(device_priv);
+ MLX_FATAL_CHECK_STATUS(status, wait_for_fw_err, "failed to initialize device");
+
+ status = check_nodnic_interface_supported(device_priv, &nodnic_supported);
+ MLX_FATAL_CHECK_STATUS(status, read_err,"failed to check nic_interface_supported");
+
+ if( nodnic_supported == 0 ){
+ status = MLX_UNSUPPORTED;
+ goto nodnic_unsupported;
+ }
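+ /* Request the nodnic (NIC) interface and wait for the device to re-initialize */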
+ buffer = (1 << NODNIC_NIC_INTERFACE_BIT);
+ status = nodnic_cmd_write(device_priv, NODNIC_NIC_INTERFACE_OFFSET, buffer);
+ MLX_FATAL_CHECK_STATUS(status, write_err, "failed to write cmdq_phy_addr + nic_interface");
+
+ status = wait_for_device_initialization(device_priv);
+ MLX_FATAL_CHECK_STATUS(status, init_err, "failed to initialize device");
+init_err:
+read_err:
+write_err:
+nodnic_unsupported:
+wait_for_fw_err:
+ return status;
+}
+
+static
+mlx_status
+nodnic_device_get_nodnic_data(
+ IN nodnic_device_priv *device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = 0;
+
+ status = nodnic_cmd_read(device_priv, NODNIC_LOCATION_OFFSET, &device_priv->device_offset);
+ MLX_FATAL_CHECK_STATUS(status, nodnic_offset_read_err, "failed to read nodnic offset");
+
+ status = nodnic_cmd_read(device_priv,
+ device_priv->device_offset + NODNIC_REVISION_OFFSET, &buffer);
+ MLX_FATAL_CHECK_STATUS(status, nodnic_revision_read_err, "failed to read nodnic revision");
+
+ device_priv->nodnic_revision = (buffer >> 24) & 0xFF;
+ if( device_priv->nodnic_revision != NODIC_SUPPORTED_REVISION ){
+ MLX_DEBUG_ERROR(device_priv, "nodnic revision not supported\n");
+ status = MLX_UNSUPPORTED;
+ goto unsupported_revision;
+ }
+
+ status = nodnic_cmd_read(device_priv,
+ device_priv->device_offset + NODNIC_HARDWARE_FORMAT_OFFSET, &buffer);
+ MLX_FATAL_CHECK_STATUS(status, nodnic_hardware_format_read_err, "failed to read nodnic hardware format");
+ device_priv->hardware_format = (buffer >> 16) & 0xFF;
+
+ return status;
+
+unsupported_revision:
+nodnic_hardware_format_read_err:
+nodnic_offset_read_err:
+nodnic_revision_read_err:
+ disable_nodnic_inteface(device_priv);
+ return status;
+}
+
+mlx_status
+nodnic_device_clear_int (
+ IN nodnic_device_priv *device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 disable = 1;
+#ifndef DEVICE_CX3
+ status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable);
+ MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit");
+#else
+ mlx_utils *utils = device_priv->utils;
+ mlx_uint64 clear_int = (mlx_uint64)(device_priv->crspace_clear_int);
+ mlx_uint32 swapped = 0;
+
+ if (device_priv->device_cap.crspace_doorbells == 0) {
+ status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable);
+ MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit");
+ } else {
+ /* Write the new index and update FW that new data was submitted */
+ disable = 0x80000000;
+ mlx_memory_cpu_to_be32(utils, disable, &swapped);
+ mlx_pci_mem_write (utils, MlxPciWidthUint32, 0, clear_int, 1, &swapped);
+ mlx_pci_mem_read (utils, MlxPciWidthUint32, 0, clear_int, 1, &swapped);
+ }
+#endif
+clear_int_done:
+ return status;
+}
+
+mlx_status
+nodnic_device_init(
+ IN nodnic_device_priv *device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if( device_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto parm_err;
+ }
+ status = nodnic_device_start_nodnic(device_priv);
+ MLX_FATAL_CHECK_STATUS(status, start_nodnic_err, "nodnic_device_start_nodnic failed");
+
+ status = nodnic_device_get_nodnic_data(device_priv);
+ MLX_FATAL_CHECK_STATUS(status, data_err, "nodnic_device_get_nodnic_data failed");
+ return status;
+data_err:
+start_nodnic_err:
+parm_err:
+ return status;
+}
+
+mlx_status
+nodnic_device_teardown(
+ IN nodnic_device_priv *device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ status = disable_nodnic_inteface(device_priv);
+ MLX_FATAL_CHECK_STATUS(status, disable_failed, "failed to disable nodnic interface");
+disable_failed:
+ return status;
+}
+
+mlx_status
+nodnic_device_get_cap(
+ IN nodnic_device_priv *device_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_capabilites *device_cap = NULL;
+ mlx_uint32 buffer = 0;
+ mlx_uint64 guid_l = 0;
+ mlx_uint64 guid_h = 0;
+ if( device_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto parm_err;
+ }
+
+ device_cap = &device_priv->device_cap;
+
+ //get device capabilities
+ status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x0, &buffer);
+ MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic first dword");
+
+#define NODNIC_DEVICE_SUPPORT_MAC_FILTERS_OFFSET 15
+#define NODNIC_DEVICE_SUPPORT_PROMISC_FILTER_OFFSET 14
+#define NODNIC_DEVICE_SUPPORT_PROMISC_MULT_FILTER_OFFSET 13
+#define NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_OFFSET 8
+#define NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_MASK 0x7
+#define NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_OFFSET 4
+#define NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_MASK 0xF
+#define NODNIC_DEVICE_NUM_PORTS_OFFSET 0
+ device_cap->support_mac_filters = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_MAC_FILTERS_OFFSET);
+
+ device_cap->support_promisc_filter = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_PROMISC_FILTER_OFFSET);
+
+ device_cap->support_promisc_multicast_filter = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_PROMISC_MULT_FILTER_OFFSET);
+
+ device_cap->log_working_buffer_size =
+ (buffer >> NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_OFFSET) & NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_MASK;
+
+ device_cap->log_pkey_table_size =
+ (buffer >> NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_OFFSET) & NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_MASK;
+
+ device_cap->num_ports = CHECK_BIT(buffer, NODNIC_DEVICE_NUM_PORTS_OFFSET) + 1;
+
+#ifdef DEVICE_CX3
+#define NODNIC_DEVICE_CRSPACE_DB_OFFSET 12
+ device_cap->crspace_doorbells = CHECK_BIT(buffer, NODNIC_DEVICE_CRSPACE_DB_OFFSET);
+#endif
+
+ status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x4, &buffer);
+ MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic second dword");
+
+#define NODNIC_DEVICE_LOG_MAX_RING_SIZE_OFFSET 24
+#define NODNIC_DEVICE_LOG_MAX_RING_SIZE_MASK 0x3F
+#define NODNIC_DEVICE_PD_MASK 0xFFFFFF
+ device_cap->log_max_ring_size =
+ (buffer >> NODNIC_DEVICE_LOG_MAX_RING_SIZE_OFFSET) & NODNIC_DEVICE_LOG_MAX_RING_SIZE_MASK;
+
+ //get device magic numbers
+ device_priv->pd = buffer & NODNIC_DEVICE_PD_MASK;
+
+ status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x8, &buffer);
+ MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic third dword");
+ device_priv->lkey = buffer;
+
+#ifdef DEVICE_CX3
+ if ( device_cap->crspace_doorbells ) {
+ status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x18, &buffer);
+ MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic_crspace_clear_int address");
+ device_priv->crspace_clear_int = device_priv->utils->config + buffer;
+ }
+#endif
+
+ status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x10, (mlx_uint32*)&guid_h);
+ MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic guid_h");
+ status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x14, (mlx_uint32*)&guid_l);
+ MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic guid_l");
+ device_priv->device_guid = guid_l | (guid_h << 32);
+read_err:
+parm_err:
+ return status;
+}
+
+mlx_status
+nodnic_device_get_fw_version(
+ IN nodnic_device_priv *device_priv,
+ OUT mlx_uint16 *fw_ver_minor,
+ OUT mlx_uint16 *fw_ver_sub_minor,
+ OUT mlx_uint16 *fw_ver_major
+ ){
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = 0;
+
+ if( device_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto parm_err;
+ }
+
+ status = nodnic_cmd_read(device_priv, 0x0, &buffer);
+ MLX_CHECK_STATUS(device_priv, status, read_err, "failed to read fw revision major and minor");
+
+ *fw_ver_minor = (mlx_uint16)(buffer >> 16);
+ *fw_ver_major = (mlx_uint16)buffer;
+
+ status = nodnic_cmd_read(device_priv, 0x4, &buffer);
+ MLX_CHECK_STATUS(device_priv, status, read_err, "failed to read fw revision sub minor");
+
+ *fw_ver_sub_minor = (mlx_uint16)buffer;
+read_err:
+parm_err:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_nodnic/src/mlx_port.c b/src/drivers/infiniband/mlx_nodnic/src/mlx_port.c
new file mode 100644
index 00000000..a7afdab6
--- /dev/null
+++ b/src/drivers/infiniband/mlx_nodnic/src/mlx_port.c
@@ -0,0 +1,1038 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../include/mlx_port.h"
+#include "../include/mlx_cmd.h"
+#include "../../mlx_utils/include/public/mlx_memory.h"
+#include "../../mlx_utils/include/public/mlx_pci.h"
+#include "../../mlx_utils/include/public/mlx_bail.h"
+
+#define PortDataEntry( _option, _offset, _align, _mask) { \
+ .option = _option, \
+ .offset = _offset, \
+ .align = _align, \
+ .mask = _mask, \
+ }
+
+#define QpDataEntry( _type, _send_offset, _recv_offset) { \
+ .type = _type, \
+ .send_offset = _send_offset, \
+ .recv_offset = _recv_offset, \
+ }
+
+
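+/* Location table for the port options: each option is a field within the dword at
+ * (port_offset + offset), shifted by 'align' bits and masked with 'mask' */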
+struct nodnic_port_data_entry nodnic_port_data_table[] = {
+ PortDataEntry(nodnic_port_option_link_type, 0x0, 4, 0x1),
+ PortDataEntry(nodnic_port_option_mac_low, 0xc, 0, 0xFFFFFFFF),
+ PortDataEntry(nodnic_port_option_mac_high, 0x8, 0, 0xFFFF),
+ PortDataEntry(nodnic_port_option_log_cq_size, 0x6c, 0, 0x3F),
+ PortDataEntry(nodnic_port_option_reset_needed, 0x0, 31, 0x1),
+ PortDataEntry(nodnic_port_option_mac_filters_en, 0x4, 0, 0x1F),
+ PortDataEntry(nodnic_port_option_port_state, 0x0, 0, 0xF),
+ PortDataEntry(nodnic_port_option_network_en, 0x4, 31, 0x1),
+ PortDataEntry(nodnic_port_option_dma_en, 0x4, 30, 0x1),
+ PortDataEntry(nodnic_port_option_eq_addr_low, 0x74, 0, 0xFFFFFFFF),
+ PortDataEntry(nodnic_port_option_eq_addr_high, 0x70, 0, 0xFFFFFFFF),
+ PortDataEntry(nodnic_port_option_cq_addr_low, 0x6c, 12, 0xFFFFF),
+ PortDataEntry(nodnic_port_option_cq_addr_high, 0x68, 0, 0xFFFFFFFF),
+ PortDataEntry(nodnic_port_option_port_management_change_event, 0x0, 30, 0x1),
+ PortDataEntry(nodnic_port_option_port_promisc_en, 0x4, 29, 0x1),
+ PortDataEntry(nodnic_port_option_arm_cq, 0x78, 8, 0xffff),
+ PortDataEntry(nodnic_port_option_port_promisc_multicast_en, 0x4, 28, 0x1),
+#ifdef DEVICE_CX3
+ PortDataEntry(nodnic_port_option_crspace_en, 0x4, 27, 0x1),
+#endif
+};
+
+#define MAX_QP_DATA_ENTRIES 5
+struct nodnic_qp_data_entry nodnic_qp_data_teable[MAX_QP_DATA_ENTRIES] = {
+ QpDataEntry(NODNIC_QPT_SMI, 0, 0),
+ QpDataEntry(NODNIC_QPT_GSI, 0, 0),
+ QpDataEntry(NODNIC_QPT_UD, 0, 0),
+ QpDataEntry(NODNIC_QPT_RC, 0, 0),
+ QpDataEntry(NODNIC_QPT_ETH, 0x80, 0xC0),
+};
+
+#define MAX_NODNIC_PORTS 2
+int nodnic_port_offset_table[MAX_NODNIC_PORTS] = {
+ 0x100, //port 1 offset
+ 0x280, //port 2 offset
+};
+
+mlx_status
+nodnic_port_get_state(
+ IN nodnic_port_priv *port_priv,
+ OUT nodnic_port_state *state
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 out = 0;
+
+ status = nodnic_port_query(port_priv,
+ nodnic_port_option_port_state, &out);
+ MLX_CHECK_STATUS(port_priv->device, status, query_err,
+ "nodnic_port_query failed");
+ *state = (nodnic_port_state)out;
+query_err:
+ return status;
+}
+mlx_status
+nodnic_port_get_type(
+ IN nodnic_port_priv *port_priv,
+ OUT nodnic_port_type *type
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 out = 0;
+
+ if ( port_priv->port_type == NODNIC_PORT_TYPE_UNKNOWN){
+ status = nodnic_port_query(port_priv,
+ nodnic_port_option_link_type, &out);
+ MLX_FATAL_CHECK_STATUS(status, query_err,
+ "nodnic_port_query failed");
+ port_priv->port_type = (nodnic_port_type)out;
+ }
+ *type = port_priv->port_type;
+query_err:
+ return status;
+}
+
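+/* Read a port option: fetch the dword that holds it from the port's
+ * configuration space, then shift and mask it per nodnic_port_data_table */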
+mlx_status
+nodnic_port_query(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_port_option option,
+ OUT mlx_uint32 *out
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = NULL;
+ struct nodnic_port_data_entry *data_entry;
+ mlx_uint32 buffer = 0;
+ if( port_priv == NULL || out == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+ device_priv = port_priv->device;
+
+ data_entry = &nodnic_port_data_table[option];
+
+ status = nodnic_cmd_read(device_priv,
+ port_priv->port_offset + data_entry->offset , &buffer);
+ MLX_CHECK_STATUS(device_priv, status, read_err,
+ "nodnic_cmd_read failed");
+ *out = (buffer >> data_entry->align) & data_entry->mask;
+read_err:
+invalid_parm:
+ return status;
+}
+
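+/* Write a port option with read-modify-write: clear the field in the current
+ * dword, OR in the new value at the field's alignment, and write it back */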
+mlx_status
+nodnic_port_set(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_port_option option,
+ IN mlx_uint32 in
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = NULL;
+ struct nodnic_port_data_entry *data_entry;
+ mlx_uint32 buffer = 0;
+
+ if( port_priv == NULL ){
+ MLX_DEBUG_FATAL_ERROR("port_priv is NULL\n");
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+ device_priv = port_priv->device;
+ data_entry = &nodnic_port_data_table[option];
+
+ if( in > data_entry->mask ){
+ MLX_DEBUG_FATAL_ERROR("in > data_entry->mask (%d > %d)\n",
+ in, data_entry->mask);
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+ status = nodnic_cmd_read(device_priv,
+ port_priv->port_offset + data_entry->offset, &buffer);
+ MLX_FATAL_CHECK_STATUS(status, read_err,
+ "nodnic_cmd_read failed");
+ buffer = buffer & ~(data_entry->mask << data_entry->align);
+ buffer = buffer | (in << data_entry->align);
+ status = nodnic_cmd_write(device_priv,
+ port_priv->port_offset + data_entry->offset, buffer);
+ MLX_FATAL_CHECK_STATUS(status, write_err,
+ "nodnic_cmd_write failed");
+write_err:
+read_err:
+invalid_parm:
+ return status;
+}
+
+mlx_status
+nodnic_port_read_reset_needed(
+ IN nodnic_port_priv *port_priv,
+ OUT mlx_boolean *reset_needed
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 out = 0;
+ status = nodnic_port_query(port_priv,
+ nodnic_port_option_reset_needed, &out);
+ MLX_CHECK_STATUS(port_priv->device, status, query_err,
+ "nodnic_port_query failed");
+ *reset_needed = (mlx_boolean)out;
+query_err:
+ return status;
+}
+
+mlx_status
+nodnic_port_read_port_management_change_event(
+ IN nodnic_port_priv *port_priv,
+ OUT mlx_boolean *change_event
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 out = 0;
+ status = nodnic_port_query(port_priv,
+ nodnic_port_option_port_management_change_event, &out);
+ MLX_CHECK_STATUS(port_priv->device, status, query_err,
+ "nodnic_port_query failed");
+ *change_event = (mlx_boolean)out;
+query_err:
+ return status;
+}
+
+mlx_status
+nodnic_port_create_cq(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_size cq_size,
+ OUT nodnic_cq **cq
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = NULL;
+ mlx_uint64 address = 0;
+ if( port_priv == NULL || cq == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+
+ device_priv = port_priv->device;
+
+ status = mlx_memory_zalloc(device_priv->utils,
+ sizeof(nodnic_cq),(mlx_void **)cq);
+ MLX_FATAL_CHECK_STATUS(status, alloc_err,
+ "cq priv allocation error");
+
+ (*cq)->cq_size = cq_size;
+ status = mlx_memory_alloc_dma(device_priv->utils,
+ (*cq)->cq_size, NODNIC_MEMORY_ALIGN,
+ &(*cq)->cq_virt);
+ MLX_FATAL_CHECK_STATUS(status, dma_alloc_err,
+ "cq allocation error");
+
+ status = mlx_memory_map_dma(device_priv->utils,
+ (*cq)->cq_virt,
+ (*cq)->cq_size,
+ &(*cq)->cq_physical,
+ &(*cq)->map);
+ MLX_FATAL_CHECK_STATUS(status, cq_map_err,
+ "cq map error");
+
+ /* update cq address */
+#define NODIC_CQ_ADDR_HIGH 0x68
+#define NODIC_CQ_ADDR_LOW 0x6c
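+ /* The CQ base is 4KB aligned: bits 12..31 go to the low field, bits 32..63 to the high field */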
+ address = (mlx_uint64)(*cq)->cq_physical;
+ nodnic_port_set(port_priv, nodnic_port_option_cq_addr_low,
+ (mlx_uint32)(address >> 12));
+ address = address >> 32;
+ nodnic_port_set(port_priv, nodnic_port_option_cq_addr_high,
+ (mlx_uint32)address);
+
+ return status;
+cq_map_err:
+ mlx_memory_free_dma(device_priv->utils, (*cq)->cq_size,
+ (void **)&((*cq)->cq_virt));
+dma_alloc_err:
+ mlx_memory_free(device_priv->utils, (void **)cq);
+alloc_err:
+invalid_parm:
+ return status;
+}
+
+mlx_status
+nodnic_port_destroy_cq(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_cq *cq
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = NULL;
+
+ if( port_priv == NULL || cq == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+ device_priv = port_priv->device;
+
+ mlx_memory_ummap_dma(device_priv->utils, cq->map);
+
+ mlx_memory_free_dma(device_priv->utils, cq->cq_size,
+ (void **)&(cq->cq_virt));
+
+ mlx_memory_free(device_priv->utils, (void **)&cq);
+invalid_parm:
+ return status;
+}
+mlx_status
+nodnic_port_create_qp(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_queue_pair_type type,
+ IN mlx_size send_wq_size,
+ IN mlx_uint32 send_wqe_num,
+ IN mlx_size receive_wq_size,
+ IN mlx_uint32 recv_wqe_num,
+ OUT nodnic_qp **qp
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = NULL;
+ mlx_uint32 max_ring_size = 0;
+ mlx_uint64 address = 0;
+ mlx_uint32 log_size = 0;
+ if( port_priv == NULL || qp == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+
+ device_priv = port_priv->device;
+ max_ring_size = (1 << device_priv->device_cap.log_max_ring_size);
+ if( send_wq_size > max_ring_size ||
+ receive_wq_size > max_ring_size ){
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+
+ status = mlx_memory_zalloc(device_priv->utils,
+ sizeof(nodnic_qp),(mlx_void **)qp);
+ MLX_FATAL_CHECK_STATUS(status, alloc_err,
+ "qp allocation error");
+
+ if( nodnic_qp_data_teable[type].send_offset == 0 ||
+ nodnic_qp_data_teable[type].recv_offset == 0){
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_type;
+ }
+
+ (*qp)->send.nodnic_ring.offset = port_priv->port_offset +
+ nodnic_qp_data_teable[type].send_offset;
+ (*qp)->receive.nodnic_ring.offset = port_priv->port_offset +
+ nodnic_qp_data_teable[type].recv_offset;
+
+ status = mlx_memory_alloc_dma(device_priv->utils,
+ send_wq_size, NODNIC_MEMORY_ALIGN,
+ (void*)&(*qp)->send.wqe_virt);
+ MLX_FATAL_CHECK_STATUS(status, send_alloc_err,
+ "send wq allocation error");
+
+ status = mlx_memory_alloc_dma(device_priv->utils,
+ receive_wq_size, NODNIC_MEMORY_ALIGN,
+ &(*qp)->receive.wqe_virt);
+ MLX_FATAL_CHECK_STATUS(status, receive_alloc_err,
+ "receive wq allocation error");
+
+ status = mlx_memory_map_dma(device_priv->utils,
+ (*qp)->send.wqe_virt,
+ send_wq_size,
+ &(*qp)->send.nodnic_ring.wqe_physical,
+ &(*qp)->send.nodnic_ring.map);
+ MLX_FATAL_CHECK_STATUS(status, send_map_err,
+ "send wq map error");
+
+ status = mlx_memory_map_dma(device_priv->utils,
+ (*qp)->receive.wqe_virt,
+ receive_wq_size,
+ &(*qp)->receive.nodnic_ring.wqe_physical,
+ &(*qp)->receive.nodnic_ring.map);
+ MLX_FATAL_CHECK_STATUS(status, receive_map_err,
+ "receive wq map error");
+
+ (*qp)->send.nodnic_ring.wq_size = send_wq_size;
+ (*qp)->send.nodnic_ring.num_wqes = send_wqe_num;
+ (*qp)->receive.nodnic_ring.wq_size = receive_wq_size;
+ (*qp)->receive.nodnic_ring.num_wqes = recv_wqe_num;
+
+ /* Set Ownership bit in Send/receive queue (0 - recv ; 1 - send) */
+ mlx_memory_set(device_priv->utils, (*qp)->send.wqe_virt, 0xff, send_wq_size );
+ mlx_memory_set(device_priv->utils, (*qp)->receive.wqe_virt, 0, recv_wqe_num );
+
+ /* update send ring */
+#define NODIC_RING_QP_ADDR_HIGH 0x0
+#define NODIC_RING_QP_ADDR_LOW 0x4
+ address = (mlx_uint64)(*qp)->send.nodnic_ring.wqe_physical;
+ status = nodnic_cmd_write(device_priv, (*qp)->send.nodnic_ring.offset +
+ NODIC_RING_QP_ADDR_HIGH,
+ (mlx_uint32)(address >> 32));
+ MLX_FATAL_CHECK_STATUS(status, write_send_addr_err,
+ "send address write error 1");
+ mlx_utils_ilog2((*qp)->send.nodnic_ring.wq_size, &log_size);
+ address = address | log_size;
+ status = nodnic_cmd_write(device_priv, (*qp)->send.nodnic_ring.offset +
+ NODIC_RING_QP_ADDR_LOW,
+ (mlx_uint32)address);
+ MLX_FATAL_CHECK_STATUS(status, write_send_addr_err,
+ "send address write error 2");
+ /* update receive ring */
+ address = (mlx_uint64)(*qp)->receive.nodnic_ring.wqe_physical;
+ status = nodnic_cmd_write(device_priv, (*qp)->receive.nodnic_ring.offset +
+ NODIC_RING_QP_ADDR_HIGH,
+ (mlx_uint32)(address >> 32));
+ MLX_FATAL_CHECK_STATUS(status, write_recv_addr_err,
+ "receive address write error 1");
+ mlx_utils_ilog2((*qp)->receive.nodnic_ring.wq_size, &log_size);
+ address = address | log_size;
+ status = nodnic_cmd_write(device_priv, (*qp)->receive.nodnic_ring.offset +
+ NODIC_RING_QP_ADDR_LOW,
+ (mlx_uint32)address);
+ MLX_FATAL_CHECK_STATUS(status, write_recv_addr_err,
+ "receive address write error 2");
+
+ return status;
+write_recv_addr_err:
+write_send_addr_err:
+ mlx_memory_ummap_dma(device_priv->utils, (*qp)->receive.nodnic_ring.map);
+receive_map_err:
+ mlx_memory_ummap_dma(device_priv->utils, (*qp)->send.nodnic_ring.map);
+send_map_err:
+ mlx_memory_free_dma(device_priv->utils, receive_wq_size,
+ &((*qp)->receive.wqe_virt));
+receive_alloc_err:
+ mlx_memory_free_dma(device_priv->utils, send_wq_size,
+ (void **)&((*qp)->send.wqe_virt));
+send_alloc_err:
+invalid_type:
+ mlx_memory_free(device_priv->utils, (void **)qp);
+alloc_err:
+invalid_parm:
+ return status;
+}
+
+mlx_status
+nodnic_port_destroy_qp(
+ IN nodnic_port_priv *port_priv,
+ IN nodnic_queue_pair_type type __attribute__((unused)),
+ IN nodnic_qp *qp
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = port_priv->device;
+
+ status = mlx_memory_ummap_dma(device_priv->utils,
+ qp->receive.nodnic_ring.map);
+ if( status != MLX_SUCCESS){
+ MLX_DEBUG_ERROR(device_priv, "mlx_memory_ummap_dma failed (Status = %d)\n", status);
+ }
+
+ status = mlx_memory_ummap_dma(device_priv->utils, qp->send.nodnic_ring.map);
+ if( status != MLX_SUCCESS){
+ MLX_DEBUG_ERROR(device_priv, "mlx_memory_ummap_dma failed (Status = %d)\n", status);
+ }
+
+ status = mlx_memory_free_dma(device_priv->utils,
+ qp->receive.nodnic_ring.wq_size,
+ (void **)&(qp->receive.wqe_virt));
+ if( status != MLX_SUCCESS){
+ MLX_DEBUG_ERROR(device_priv, "mlx_memory_free_dma failed (Status = %d)\n", status);
+ }
+ status = mlx_memory_free_dma(device_priv->utils,
+ qp->send.nodnic_ring.wq_size,
+ (void **)&(qp->send.wqe_virt));
+ if( status != MLX_SUCCESS){
+ MLX_DEBUG_ERROR(device_priv, "mlx_memory_free_dma failed (Status = %d)\n", status);
+ }
+ status = mlx_memory_free(device_priv->utils, (void **)&qp);
+ if( status != MLX_SUCCESS){
+ MLX_DEBUG_ERROR(device_priv, "mlx_memory_free failed (Status = %d)\n", status);
+ }
+ return status;
+}
+
+mlx_status
+nodnic_port_get_qpn(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring,
+ OUT mlx_uint32 *qpn
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = 0;
+ if( ring == NULL || qpn == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ if( ring->qpn != 0 ){
+ *qpn = ring->qpn;
+ goto success;
+ }
+#define NODNIC_RING_QPN_OFFSET 0xc
+#define NODNIC_RING_QPN_MASK 0xFFFFFF
+ status = nodnic_cmd_read(port_priv->device,
+ ring->offset + NODNIC_RING_QPN_OFFSET,
+ &buffer);
+ MLX_FATAL_CHECK_STATUS(status, read_err,
+ "nodnic_cmd_read failed");
+ ring->qpn = buffer & NODNIC_RING_QPN_MASK;
+ *qpn = ring->qpn;
+read_err:
+success:
+bad_param:
+ return status;
+}
+
+#ifdef DEVICE_CX3
+static
+mlx_status
+nodnic_port_send_db_connectx3(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring __attribute__((unused)),
+ IN mlx_uint16 index
+ )
+{
+ nodnic_port_data_flow_gw *ptr = port_priv->data_flow_gw;
+ mlx_uint32 index32 = index;
+ mlx_pci_mem_write(port_priv->device->utils, MlxPciWidthUint32, 0,
+ (mlx_uint64)&(ptr->send_doorbell), 1, &index32);
+ return MLX_SUCCESS;
+}
+
+static
+mlx_status
+nodnic_port_recv_db_connectx3(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring __attribute__((unused)),
+ IN mlx_uint16 index
+ )
+{
+ nodnic_port_data_flow_gw *ptr = port_priv->data_flow_gw;
+ mlx_uint32 index32 = index;
+ mlx_pci_mem_write(port_priv->device->utils, MlxPciWidthUint32, 0,
+ (mlx_uint64)&(ptr->recv_doorbell), 1, &index32);
+ return MLX_SUCCESS;
+}
+#endif
+
+mlx_status
+nodnic_port_update_ring_doorbell(
+ IN nodnic_port_priv *port_priv,
+ IN struct nodnic_ring *ring,
+ IN mlx_uint16 index
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = 0;
+ if( ring == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+#define NODNIC_RING_RING_OFFSET 0x8
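+ /* The ring doorbell takes the 16-bit producer index in bits 8..23 */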
+ buffer = (mlx_uint32)((index & 0xFFFF)<< 8);
+ status = nodnic_cmd_write(port_priv->device,
+ ring->offset + NODNIC_RING_RING_OFFSET,
+ buffer);
+ MLX_CHECK_STATUS(port_priv->device, status, write_err,
+ "nodnic_cmd_write failed");
+write_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_port_get_cq_size(
+ IN nodnic_port_priv *port_priv,
+ OUT mlx_uint64 *cq_size
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 out = 0;
+ status = nodnic_port_query(port_priv, nodnic_port_option_log_cq_size, &out);
+ MLX_FATAL_CHECK_STATUS(status, query_err,
+ "nodnic_port_query failed");
+ *cq_size = 1 << out;
+query_err:
+ return status;
+}
+
+mlx_status
+nodnic_port_allocate_eq(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_uint8 log_eq_size
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = NULL;
+ mlx_uint64 address = 0;
+
+ if( port_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ device_priv = port_priv->device;
+ port_priv->eq.eq_size = ( ( 1 << log_eq_size ) * 1024 ); /* Size is in KB */
+ status = mlx_memory_alloc_dma(device_priv->utils,
+ port_priv->eq.eq_size,
+ NODNIC_MEMORY_ALIGN,
+ &port_priv->eq.eq_virt);
+ MLX_FATAL_CHECK_STATUS(status, alloc_err,
+ "eq allocation error");
+
+ status = mlx_memory_map_dma(device_priv->utils,
+ port_priv->eq.eq_virt,
+ port_priv->eq.eq_size,
+ &port_priv->eq.eq_physical,
+ &port_priv->eq.map);
+ MLX_FATAL_CHECK_STATUS(status, map_err,
+ "eq map error");
+
+ address = port_priv->eq.eq_physical;
+ status = nodnic_port_set(port_priv, nodnic_port_option_eq_addr_low,
+ (mlx_uint32)address);
+ MLX_FATAL_CHECK_STATUS(status, set_err,
+ "failed to set eq addr low");
+ address = (address >> 32);
+ status = nodnic_port_set(port_priv, nodnic_port_option_eq_addr_high,
+ (mlx_uint32)address);
+ MLX_FATAL_CHECK_STATUS(status, set_err,
+ "failed to set eq addr high");
+ return status;
+set_err:
+ mlx_memory_ummap_dma(device_priv->utils, port_priv->eq.map);
+map_err:
+ mlx_memory_free_dma(device_priv->utils,
+ port_priv->eq.eq_size,
+ (void **)&(port_priv->eq.eq_virt));
+alloc_err:
+bad_param:
+ return status;
+}
+mlx_status
+nodnic_port_free_eq(
+ IN nodnic_port_priv *port_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device_priv = NULL;
+
+ if( port_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ device_priv = port_priv->device;
+ mlx_memory_ummap_dma(device_priv->utils, port_priv->eq.map);
+
+ mlx_memory_free_dma(device_priv->utils,
+ port_priv->eq.eq_size,
+ (void **)&(port_priv->eq.eq_virt));
+
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_port_add_mac_filter(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_mac_address mac
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device = NULL;
+ mlx_uint8 index = 0;
+ mlx_uint32 out = 0;
+ mlx_uint32 mac_filters_en = 0;
+ mlx_uint32 address = 0;
+ mlx_mac_address zero_mac;
+ mlx_utils *utils = NULL;
+
+ if( port_priv == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ memset(&zero_mac, 0, sizeof(zero_mac));
+
+ device = port_priv->device;
+ utils = device->utils;
+
+ /* check if mac already exists */
+ for( ; index < NODNIC_MAX_MAC_FILTERS ; index ++) {
+ mlx_memory_cmp(utils, &port_priv->mac_filters[index], &mac,
+ sizeof(mac), &out);
+ if ( out == 0 ){
+ status = MLX_FAILED;
+ goto already_exists;
+ }
+ }
+
+ /* search for an available mac filter slot */
+ for (index = 0 ; index < NODNIC_MAX_MAC_FILTERS ; index ++) {
+ mlx_memory_cmp(utils, &port_priv->mac_filters[index], &zero_mac,
+ sizeof(zero_mac), &out);
+ if ( out == 0 ){
+ break;
+ }
+ }
+ if ( index >= NODNIC_MAX_MAC_FILTERS ){
+ status = MLX_FAILED;
+ goto mac_list_full;
+ }
+
+ status = nodnic_port_query(port_priv, nodnic_port_option_mac_filters_en,
+ &mac_filters_en);
+ MLX_CHECK_STATUS(device, status , query_err,
+ "nodnic_port_query failed");
+ if(mac_filters_en & (1 << index)){
+ status = MLX_FAILED;
+ goto mac_list_full;
+ }
+ port_priv->mac_filters[index] = mac;
+
+ // set mac filter
+ address = port_priv->port_offset + NODNIC_PORT_MAC_FILTERS_OFFSET +
+ (0x8 * index);
+
+ status = nodnic_cmd_write(device, address, mac.high );
+ MLX_CHECK_STATUS(device, status, write_err, "set mac high failed");
+ status = nodnic_cmd_write(device, address + 0x4, mac.low );
+ MLX_CHECK_STATUS(device, status, write_err, "set mac low failed");
+
+ // enable mac filter
+ mac_filters_en = mac_filters_en | (1 << index);
+ status = nodnic_port_set(port_priv, nodnic_port_option_mac_filters_en,
+ mac_filters_en);
+ MLX_CHECK_STATUS(device, status , set_err,
+ "nodnic_port_set failed");
+set_err:
+write_err:
+query_err:
+mac_list_full:
+already_exists:
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_port_remove_mac_filter(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_mac_address mac
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ nodnic_device_priv *device = NULL;
+ mlx_uint8 index = 0;
+ mlx_uint32 out = 0;
+ mlx_uint32 mac_filters_en = 0;
+ mlx_mac_address zero_mac;
+ mlx_utils *utils = NULL;
+
+ if( port_priv == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ memset(&zero_mac, 0, sizeof(zero_mac));
+
+ device = port_priv->device;
+ utils = device->utils;
+
+ /* search for the mac filter */
+ for( ; index < NODNIC_MAX_MAC_FILTERS ; index ++) {
+ mlx_memory_cmp(utils, &port_priv->mac_filters[index], &mac,
+ sizeof(mac), &out);
+ if ( out == 0 ){
+ break;
+ }
+ }
+ if ( index == NODNIC_MAX_MAC_FILTERS ){
+ status = MLX_FAILED;
+ goto mac_not_found;
+ }
+
+ status = nodnic_port_query(port_priv, nodnic_port_option_mac_filters_en,
+ &mac_filters_en);
+ MLX_CHECK_STATUS(device, status , query_err,
+ "nodnic_port_query failed");
+ if((mac_filters_en & (1 << index)) == 0){
+ status = MLX_FAILED;
+ goto mac_not_en;
+ }
+ port_priv->mac_filters[index] = zero_mac;
+
+ // disable mac filter
+ mac_filters_en = mac_filters_en & ~(1 << index);
+ status = nodnic_port_set(port_priv, nodnic_port_option_mac_filters_en,
+ mac_filters_en);
+ MLX_CHECK_STATUS(device, status , set_err,
+ "nodnic_port_set failed");
+set_err:
+query_err:
+mac_not_en:
+mac_not_found:
+bad_param:
+ return status;
+}
+
+static
+mlx_status
+nodnic_port_set_network(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ /*mlx_uint32 network_valid = 0;
+ mlx_uint8 try = 0;*/
+
+ status = nodnic_port_set(port_priv, nodnic_port_option_network_en, value);
+ MLX_CHECK_STATUS(port_priv->device, status, set_err,
+ "nodnic_port_set failed");
+ port_priv->network_state = value;
+set_err:
+ return status;
+}
+
+#ifdef DEVICE_CX3
+static
+mlx_status
+nodnic_port_set_dma_connectx3(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ )
+{
+ mlx_utils *utils = port_priv->device->utils;
+ nodnic_port_data_flow_gw *ptr = port_priv->data_flow_gw;
+ mlx_uint32 data = (value ? 0xffffffff : 0x0);
+ mlx_pci_mem_write(utils, MlxPciWidthUint32, 0,
+ (mlx_uint64)&(ptr->dma_en), 1, &data);
+ return MLX_SUCCESS;
+}
+#endif
+
+static
+mlx_status
+nodnic_port_set_dma(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ )
+{
+ return nodnic_port_set(port_priv, nodnic_port_option_dma_en, value);
+}
+
+static
+mlx_status
+nodnic_port_check_and_set_dma(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( port_priv->dma_state == value ) {
+ MLX_DEBUG_WARN(port_priv->device,
+ "nodnic_port_check_and_set_dma: already %s\n",
+ (value ? "enabled" : "disabled"));
+ status = MLX_SUCCESS;
+ goto set_out;
+ }
+
+ status = port_priv->set_dma(port_priv, value);
+ MLX_CHECK_STATUS(port_priv->device, status, set_err,
+ "nodnic_port_set failed");
+ port_priv->dma_state = value;
+set_err:
+set_out:
+ return status;
+}
+
+
+mlx_status
+nodnic_port_set_promisc(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ ){
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = value;
+
+ status = nodnic_port_set(port_priv, nodnic_port_option_port_promisc_en, buffer);
+ MLX_CHECK_STATUS(port_priv->device, status, set_err,
+ "nodnic_port_set failed");
+set_err:
+ return status;
+}
+
+mlx_status
+nodnic_port_set_promisc_multicast(
+ IN nodnic_port_priv *port_priv,
+ IN mlx_boolean value
+ ){
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer = value;
+
+ status = nodnic_port_set(port_priv, nodnic_port_option_port_promisc_multicast_en, buffer);
+ MLX_CHECK_STATUS(port_priv->device, status, set_err,
+ "nodnic_port_set failed");
+set_err:
+ return status;
+}
+
+mlx_status
+nodnic_port_init(
+ IN nodnic_port_priv *port_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if( port_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = nodnic_port_set_network(port_priv, TRUE);
+ MLX_FATAL_CHECK_STATUS(status, set_err,
+ "nodnic_port_set_network failed");
+set_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_port_close(
+ IN nodnic_port_priv *port_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if( port_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = nodnic_port_set_network(port_priv, FALSE);
+ MLX_FATAL_CHECK_STATUS(status, set_err,
+ "nodnic_port_set_network failed");
+set_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_port_enable_dma(
+ IN nodnic_port_priv *port_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if( port_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = nodnic_port_check_and_set_dma(port_priv, TRUE);
+ MLX_CHECK_STATUS(port_priv->device, status, set_err,
+ "nodnic_port_check_and_set_dma failed");
+set_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_port_disable_dma(
+ IN nodnic_port_priv *port_priv
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if( port_priv == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = nodnic_port_check_and_set_dma(port_priv, FALSE);
+ MLX_CHECK_STATUS(port_priv->device, status, set_err,
+ "nodnic_port_check_and_set_dma failed");
+set_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+nodnic_port_thin_init(
+ IN nodnic_device_priv *device_priv,
+ IN nodnic_port_priv *port_priv,
+ IN mlx_uint8 port_index
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_boolean reset_needed = 0;
+#ifdef DEVICE_CX3
+ mlx_uint32 offset;
+#endif
+
+ if( device_priv == NULL || port_priv == NULL || port_index > 1){
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_parm;
+ }
+
+ port_priv->device = device_priv;
+
+ port_priv->port_offset = device_priv->device_offset +
+ nodnic_port_offset_table[port_index];
+
+ port_priv->port_num = port_index + 1;
+
+ port_priv->send_doorbell = nodnic_port_update_ring_doorbell;
+ port_priv->recv_doorbell = nodnic_port_update_ring_doorbell;
+ port_priv->set_dma = nodnic_port_set_dma;
+#ifdef DEVICE_CX3
+ if (device_priv->device_cap.crspace_doorbells) {
+ status = nodnic_cmd_read(device_priv, (port_priv->port_offset + 0x100),
+ &offset);
+ if (status != MLX_SUCCESS) {
+ return status;
+ } else {
+ port_priv->data_flow_gw = (nodnic_port_data_flow_gw *)
+ (device_priv->utils->config + offset);
+ }
+ if ( nodnic_port_set ( port_priv, nodnic_port_option_crspace_en, 1 ) ) {
+ return MLX_FAILED;
+ }
+ port_priv->send_doorbell = nodnic_port_send_db_connectx3;
+ port_priv->recv_doorbell = nodnic_port_recv_db_connectx3;
+ port_priv->set_dma = nodnic_port_set_dma_connectx3;
+ }
+#endif
+ /* clear reset_needed */
+ nodnic_port_read_reset_needed(port_priv, &reset_needed);
+
+ port_priv->port_type = NODNIC_PORT_TYPE_UNKNOWN;
+invalid_parm:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/include/private/mlx_memory_priv.h b/src/drivers/infiniband/mlx_utils/include/private/mlx_memory_priv.h
new file mode 100644
index 00000000..1f8ba89e
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/private/mlx_memory_priv.h
@@ -0,0 +1,113 @@
+#ifndef MLXUTILS_INCLUDE_PRIVATE_MEMORYPRIV_H_
+#define MLXUTILS_INCLUDE_PRIVATE_MEMORYPRIV_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../../mlx_utils/include/public/mlx_utils.h"
+
+mlx_status
+mlx_memory_alloc_priv(
+ IN mlx_utils *utils,
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ );
+
+mlx_status
+mlx_memory_zalloc_priv(
+ IN mlx_utils *utils,
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ );
+
+mlx_status
+mlx_memory_free_priv(
+ IN mlx_utils *utils,
+ IN mlx_void *ptr
+ );
+mlx_status
+mlx_memory_alloc_dma_priv(
+ IN mlx_utils *utils,
+ IN mlx_size size ,
+ IN mlx_size align,
+ OUT mlx_void **ptr
+ );
+
+mlx_status
+mlx_memory_free_dma_priv(
+ IN mlx_utils *utils,
+ IN mlx_size size ,
+ IN mlx_void *ptr
+ );
+mlx_status
+mlx_memory_map_dma_priv(
+ IN mlx_utils *utils,
+ IN mlx_void *addr ,
+ IN mlx_size number_of_bytes,
+ OUT mlx_physical_address *phys_addr,
+ OUT mlx_void **mapping
+ );
+
+mlx_status
+mlx_memory_ummap_dma_priv(
+ IN mlx_utils *utils,
+ IN mlx_void *mapping
+ );
+
+mlx_status
+mlx_memory_cmp_priv(
+ IN mlx_utils *utils,
+ IN mlx_void *first_block,
+ IN mlx_void *second_block,
+ IN mlx_size size,
+ OUT mlx_uint32 *out
+ );
+
+mlx_status
+mlx_memory_set_priv(
+ IN mlx_utils *utils,
+ IN mlx_void *block,
+ IN mlx_int32 value,
+ IN mlx_size size
+ );
+
+mlx_status
+mlx_memory_cpy_priv(
+ IN mlx_utils *utils,
+ OUT mlx_void *destination_buffer,
+ IN mlx_void *source_buffer,
+ IN mlx_size length
+ );
+
+mlx_status
+mlx_memory_cpu_to_be32_priv(
+ IN mlx_utils *utils,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ );
+
+mlx_status
+mlx_memory_be32_to_cpu_priv(
+ IN mlx_utils *utils,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ );
+#endif /* MLXUTILS_INCLUDE_PRIVATE_MEMORYPRIV_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/private/mlx_pci_priv.h b/src/drivers/infiniband/mlx_utils/include/private/mlx_pci_priv.h
new file mode 100644
index 00000000..89cad75e
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/private/mlx_pci_priv.h
@@ -0,0 +1,72 @@
+#ifndef STUB_MLXUTILS_INCLUDE_PRIVATE_PCIPRIV_H_
+#define STUB_MLXUTILS_INCLUDE_PRIVATE_PCIPRIV_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../include/public/mlx_pci.h"
+#include "../../include/public/mlx_utils.h"
+
+mlx_status
+mlx_pci_init_priv(
+ IN mlx_utils *utils
+ );
+
+mlx_status
+mlx_pci_read_priv(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ OUT mlx_void *buffer
+ );
+
+mlx_status
+mlx_pci_write_priv(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ IN mlx_void *buffer
+ );
+
+mlx_status
+mlx_pci_mem_read_priv(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint8 bar_index,
+ IN mlx_uint64 offset,
+ IN mlx_uintn count,
+ OUT mlx_void *buffer
+ );
+
+mlx_status
+mlx_pci_mem_write_priv(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint8 bar_index,
+ IN mlx_uint64 offset,
+ IN mlx_uintn count,
+ IN mlx_void *buffer
+ );
+
+
+#endif /* STUB_MLXUTILS_INCLUDE_PRIVATE_PCIPRIV_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/private/mlx_utils_priv.h b/src/drivers/infiniband/mlx_utils/include/private/mlx_utils_priv.h
new file mode 100644
index 00000000..268b76fa
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/private/mlx_utils_priv.h
@@ -0,0 +1,68 @@
+#ifndef SRC_DRIVERS_INFINIBAND_MLX_UTILS_INCLUDE_PRIVATE_MLX_UTILS_PRIV_H_
+#define SRC_DRIVERS_INFINIBAND_MLX_UTILS_INCLUDE_PRIVATE_MLX_UTILS_PRIV_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../include/public/mlx_utils.h"
+
+mlx_status
+mlx_utils_delay_in_ms_priv(
+ IN mlx_uint32 msecs
+ );
+
+mlx_status
+mlx_utils_delay_in_us_priv(
+ IN mlx_uint32 usecs
+ );
+
+mlx_status
+mlx_utils_ilog2_priv(
+ IN mlx_uint32 i,
+ OUT mlx_uint32 *log
+ );
+
+mlx_status
+mlx_utils_init_lock_priv(
+ OUT void **lock
+ );
+
+mlx_status
+mlx_utils_free_lock_priv(
+ IN void *lock
+ );
+
+mlx_status
+mlx_utils_acquire_lock_priv (
+ IN void *lock
+ );
+
+mlx_status
+mlx_utils_release_lock_priv (
+ IN void *lock
+ );
+
+mlx_status
+mlx_utils_rand_priv (
+ IN mlx_utils *utils,
+ OUT mlx_uint32 *rand_num
+ );
+#endif /* SRC_DRIVERS_INFINIBAND_MLX_UTILS_INCLUDE_PRIVATE_MLX_UTILS_PRIV_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_bail.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_bail.h
new file mode 100644
index 00000000..a4f4b37b
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_bail.h
@@ -0,0 +1,47 @@
+#ifndef INCLUDE_PUBLIC_MLXBAIL_H_
+#define INCLUDE_PUBLIC_MLXBAIL_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_types.h"
+
+#define MLX_BAIL_ERROR(id, status,message) MLX_CHECK_STATUS(id, status, bail, message)
+
+#define MLX_FATAL_CHECK_STATUS(status, label, message) \
+ do { \
+ if (status != MLX_SUCCESS) { \
+ MLX_DEBUG_FATAL_ERROR(message " (Status = %d)\n", status); \
+ goto label; \
+ } \
+ } while (0)
+
+#define MLX_CHECK_STATUS(id, status, label, message) \
+ do { \
+ if (status != MLX_SUCCESS) { \
+ MLX_DEBUG_ERROR(id, message " (Status = %d)\n", status);\
+ goto label; \
+ } \
+ } while (0)
+
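+/*
+ * Both macros jump to 'label' when status != MLX_SUCCESS, logging via
+ * MLX_DEBUG_ERROR (with a device id) or MLX_DEBUG_FATAL_ERROR respectively.
+ * Illustrative usage (label name is arbitrary):
+ *
+ *	status = mlx_pci_gw_read ( utils, space, address, &buffer );
+ *	MLX_CHECK_STATUS ( utils, status, read_err, "mlx_pci_gw_read failed" );
+ *	...
+ * read_err:
+ *	return status;
+ */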
+
+
+#endif /* INCLUDE_PUBLIC_MLXBAIL_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_icmd.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_icmd.h
new file mode 100644
index 00000000..1ed423da
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_icmd.h
@@ -0,0 +1,63 @@
+#ifndef MLXUTILS_INCLUDE_PUBLIC_MLX_ICMD_H_
+#define MLXUTILS_INCLUDE_PUBLIC_MLX_ICMD_H_
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_utils.h"
+
+#define MLX_ICMD_MB_ADDR 0x100000
+#define MLX_ICMD_MB_SIZE_ADDR 0x1000
+#define MLX_ICMD_CTRL_ADDR 0x0
+
+#define MLX_ICMD_SEMAPHORE_ADDR 0x0
+
+#define MLX_ICMD_SEMAPHORE_ID 1234
+
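+/* ICMD opcodes, passed as the 'opcode' argument to mlx_icmd_send_command() */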
+enum {
+ FLASH_REG_ACCESS = 0x9001,
+ GET_FW_INFO = 0x8007,
+ QUERY_VIRTUAL_MAC = 0x9003,
+ SET_VIRTUAL_MAC = 0x9004,
+ QUERY_WOL_ROL = 0x9005,
+ SET_WOL_ROL = 0x9006,
+ OCBB_INIT = 0x9007,
+ OCBB_QUERY_HEADER_STATUS = 0x9008,
+ OCBB_QUERY_ETOC_STATUS = 0x9009,
+ OCBB_QUERY_SET_EVENT = 0x900A,
+ OCSD_INIT = 0xf004,
+};
+
+struct mlx_icmd_ocsd {
+ mlx_uint32 reserved;
+ mlx_uint64 address;
+};
+
+mlx_status
+mlx_icmd_send_command(
+ IN mlx_utils *utils,
+ IN mlx_uint16 opcode,
+ IN OUT mlx_void* data,
+ IN mlx_uint32 write_data_size,
+ IN mlx_uint32 read_data_size
+ );
+
+#endif /* MLXUTILS_INCLUDE_PUBLIC_MLX_ICMD_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_logging.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_logging.h
new file mode 100644
index 00000000..7b7b852d
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_logging.h
@@ -0,0 +1,46 @@
+#ifndef PUBLIC_INCLUDE_MLX_LOGGER_H_
+#define PUBLIC_INCLUDE_MLX_LOGGER_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../../mlx_utils_flexboot/include/mlx_logging_priv.h"
+
+#define MLX_DEBUG_FATAL_ERROR(...) MLX_DEBUG_FATAL_ERROR_PRIVATE(__VA_ARGS__)
+#define MLX_DEBUG_ERROR(...) MLX_DEBUG_ERROR_PRIVATE(__VA_ARGS__)
+#define MLX_DEBUG_WARN(...) MLX_DEBUG_WARN_PRIVATE(__VA_ARGS__)
+#define MLX_DEBUG_INFO1(...) MLX_DEBUG_INFO1_PRIVATE(__VA_ARGS__)
+#define MLX_DEBUG_INFO2(...) MLX_DEBUG_INFO2_PRIVATE(__VA_ARGS__)
+#define MLX_DBG_ERROR(...) MLX_DBG_ERROR_PRIVATE(__VA_ARGS__)
+#define MLX_DBG_WARN(...) MLX_DBG_WARN_PRIVATE(__VA_ARGS__)
+#define MLX_DBG_INFO1(...) MLX_DBG_INFO1_PRIVATE(__VA_ARGS__)
+#define MLX_DBG_INFO2(...) MLX_DBG_INFO2_PRIVATE(__VA_ARGS__)
+
+#define MLX_TRACE_1_START() MLX_DBG_INFO1_PRIVATE("Start\n")
+#define MLX_TRACE_1_END() MLX_DBG_INFO1_PRIVATE("End\n")
+#define MLX_TRACE_1_END_STATUS(status) MLX_DBG_INFO1_PRIVATE("End (%s=%d)\n", #status,status)
+#define MLX_TRACE_2_START() MLX_DBG_INFO2_PRIVATE("Start\n")
+#define MLX_TRACE_2_END() MLX_DBG_INFO2_PRIVATE("End\n")
+#define MLX_TRACE_2_END_STATUS(status) MLX_DBG_INFO2_PRIVATE("End (%s=%d)\n", #status,status)
+
+
+
+#endif /* PUBLIC_INCLUDE_MLX_LOGGER_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_memory.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_memory.h
new file mode 100644
index 00000000..05675666
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_memory.h
@@ -0,0 +1,115 @@
+#ifndef MLXUTILS_INCLUDE_PUBLIC_MEMORY_H_
+#define MLXUTILS_INCLUDE_PUBLIC_MEMORY_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_utils.h"
+
+
+mlx_status
+mlx_memory_alloc(
+ IN mlx_utils *utils,
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ );
+
+mlx_status
+mlx_memory_zalloc(
+ IN mlx_utils *utils,
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ );
+
+mlx_status
+mlx_memory_free(
+ IN mlx_utils *utils,
+ IN mlx_void **ptr
+ );
+mlx_status
+mlx_memory_alloc_dma(
+ IN mlx_utils *utils,
+ IN mlx_size size ,
+ IN mlx_size align,
+ OUT mlx_void **ptr
+ );
+
+mlx_status
+mlx_memory_free_dma(
+ IN mlx_utils *utils,
+ IN mlx_size size ,
+ IN mlx_void **ptr
+ );
+mlx_status
+mlx_memory_map_dma(
+ IN mlx_utils *utils,
+ IN mlx_void *Addr ,
+ IN mlx_size NumberOfBytes,
+ OUT mlx_physical_address *PhysAddr,
+ OUT mlx_void **Mapping
+ );
+
+mlx_status
+mlx_memory_ummap_dma(
+ IN mlx_utils *utils,
+ IN mlx_void *Mapping
+ );
+
+mlx_status
+mlx_memory_cmp(
+ IN mlx_utils *utils,
+ IN mlx_void *first_block,
+ IN mlx_void *second_block,
+ IN mlx_size size,
+ OUT mlx_uint32 *out
+ );
+
+mlx_status
+mlx_memory_set(
+ IN mlx_utils *utils,
+ IN mlx_void *block,
+ IN mlx_int32 value,
+ IN mlx_size size
+ );
+
+mlx_status
+mlx_memory_cpy(
+ IN mlx_utils *utils,
+ OUT mlx_void *destination_buffer,
+ IN mlx_void *source_buffer,
+ IN mlx_size length
+ );
+
+mlx_status
+mlx_memory_cpu_to_be32(
+ IN mlx_utils *utils,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ );
+
+mlx_status
+mlx_memory_be32_to_cpu(
+ IN mlx_utils *utils,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ );
+
+#endif /* MLXUTILS_INCLUDE_PUBLIC_MEMORY_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_pci.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci.h
new file mode 100644
index 00000000..416bdb66
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci.h
@@ -0,0 +1,78 @@
+#ifndef STUB_MLXUTILS_INCLUDE_PUBLIC_PCI_H_
+#define STUB_MLXUTILS_INCLUDE_PUBLIC_PCI_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_utils.h"
+
+typedef enum {
+ MlxPciWidthUint8 = 0,
+ MlxPciWidthUint16,
+ MlxPciWidthUint32,
+ MlxPciWidthUint64,
+} mlx_pci_width;
+
+mlx_status
+mlx_pci_init(
+ IN mlx_utils *utils
+ );
+
+mlx_status
+mlx_pci_read(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ OUT mlx_void *buffer
+ );
+
+mlx_status
+mlx_pci_write(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ IN mlx_void *buffer
+ );
+
+mlx_status
+mlx_pci_mem_read(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint8 bar_index,
+ IN mlx_uint64 offset,
+ IN mlx_uintn count,
+ OUT mlx_void *buffer
+ );
+
+mlx_status
+mlx_pci_mem_write(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint8 bar_index,
+ IN mlx_uint64 offset,
+ IN mlx_uintn count,
+ IN mlx_void *buffer
+ );
+
+
+#endif /* STUB_MLXUTILS_INCLUDE_PUBLIC_PCI_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_pci_gw.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci_gw.h
new file mode 100644
index 00000000..c074a22e
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci_gw.h
@@ -0,0 +1,81 @@
+#ifndef INCLUDE_PUBLIC_MLX_PCI_GW_H_
+#define INCLUDE_PUBLIC_MLX_PCI_GW_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_utils.h"
+
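+/*
+ * Layout of the vendor-specific PCI capability ("gateway") through which
+ * device address spaces (NODNIC, ICMD, CR space, semaphore) are read and
+ * written using PCI configuration cycles.
+ */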
+#define PCI_GW_FIRST_CAPABILITY_POINTER_OFFSET 0x34
+
+#define PCI_GW_CAPABILITY_ID 0x9
+
+#define PCI_GW_CAPABILITY_ID_OFFSET 0x0
+#define PCI_GW_CAPABILITY_NEXT_POINTER_OFFSET 0x1
+#define PCI_GW_CAPABILITY_SPACE_OFFSET 0x4
+#define PCI_GW_CAPABILITY_STATUS_OFFSET 0x7
+#define PCI_GW_CAPABILITY_COUNTER_OFFSET 0x8
+#define PCI_GW_CAPABILITY_SEMAPHORE_OFFSET 0xC
+#define PCI_GW_CAPABILITY_ADDRESS_OFFSET 0x10
+#define PCI_GW_CAPABILITY_FLAG_OFFSET 0x10
+#define PCI_GW_CAPABILITY_DATA_OFFSET 0x14
+
+#define PCI_GW_SEMPHORE_TRIES 3000000
+#define PCI_GW_GET_OWNERSHIP_TRIES 5000
+#define PCI_GW_READ_FLAG_TRIES 3000000
+
+#define PCI_GW_WRITE_FLAG 0x80000000
+
+#define PCI_GW_SPACE_NODNIC 0x4
+#define PCI_GW_SPACE_ALL_ICMD 0x3
+#define PCI_GW_SPACE_SEMAPHORE 0xa
+#define PCI_GW_SPACE_CR0 0x2
+
+typedef mlx_uint32 mlx_pci_gw_buffer;
+
+
+mlx_status
+mlx_pci_gw_init(
+ IN mlx_utils *utils
+ );
+mlx_status
+mlx_pci_gw_teardown(
+ IN mlx_utils *utils
+ );
+mlx_status
+mlx_pci_gw_read(
+ IN mlx_utils *utils,
+ IN mlx_pci_gw_space space,
+ IN mlx_uint32 address,
+ OUT mlx_pci_gw_buffer *buffer
+ );
+
+mlx_status
+mlx_pci_gw_write(
+ IN mlx_utils *utils,
+ IN mlx_pci_gw_space space,
+ IN mlx_uint32 address,
+ IN mlx_pci_gw_buffer buffer
+ );
+
+
+
+#endif /* INCLUDE_PUBLIC_MLX_PCI_GW_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_types.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_types.h
new file mode 100644
index 00000000..9c66567a
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_types.h
@@ -0,0 +1,27 @@
+#ifndef INCLUDE_PUBLIC_MLXTYPES_H_
+#define INCLUDE_PUBLIC_MLXTYPES_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../../mlx_utils_flexboot/include/mlx_types_priv.h"
+
+#endif /* INCLUDE_PUBLIC_MLXTYPES_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_utils.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_utils.h
new file mode 100644
index 00000000..46ad97c3
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_utils.h
@@ -0,0 +1,106 @@
+#ifndef MLXUTILS_INCLUDE_PUBLIC_MLXUTILS_H_
+#define MLXUTILS_INCLUDE_PUBLIC_MLXUTILS_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_logging.h"
+#include "mlx_types.h"
+
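+/* Parameter direction annotations; documentation only, they expand to nothing */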
+#define IN
+#define OUT
+
+typedef mlx_uint16 mlx_pci_gw_space;
+
+typedef struct{
+ mlx_uint32 pci_cmd_offset;
+ mlx_pci_gw_space space;
+} __attribute__ (( packed )) mlx_pci_gw;
+
+typedef struct {
+ mlx_boolean icmd_opened;
+ mlx_boolean took_semaphore;
+ mlx_uint32 max_cmd_size;
+} __attribute__ (( packed )) mlx_icmd ;
+
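+/* Per-device utility context shared by the mlx_utils helpers */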
+typedef struct{
+ mlx_pci *pci;
+ mlx_pci_gw pci_gw;
+ mlx_icmd icmd;
+ void *lock;
+#ifdef DEVICE_CX3
+ /* ACCESS to BAR0 */
+ void *config;
+#endif
+} __attribute__ (( packed )) mlx_utils;
+
+mlx_status
+mlx_utils_init(
+ IN mlx_utils *utils,
+ IN mlx_pci *pci
+ );
+
+mlx_status
+mlx_utils_teardown(
+ IN mlx_utils *utils
+ );
+mlx_status
+mlx_utils_delay_in_ms(
+ IN mlx_uint32 msecs
+ );
+
+mlx_status
+mlx_utils_delay_in_us(
+ IN mlx_uint32 usecs
+ );
+
+mlx_status
+mlx_utils_ilog2(
+ IN mlx_uint32 i,
+ OUT mlx_uint32 *log
+ );
+
+mlx_status
+mlx_utils_init_lock(
+ IN OUT mlx_utils *utils
+ );
+
+mlx_status
+mlx_utils_free_lock(
+ IN OUT mlx_utils *utils
+ );
+
+mlx_status
+mlx_utils_acquire_lock (
+ IN OUT mlx_utils *utils
+ );
+
+mlx_status
+mlx_utils_release_lock (
+ IN OUT mlx_utils *utils
+ );
+
+mlx_status
+mlx_utils_rand (
+ IN mlx_utils *utils,
+ OUT mlx_uint32 *rand_num
+ );
+#endif /* MLXUTILS_INCLUDE_PUBLIC_MLXUTILS_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.c
new file mode 100644
index 00000000..ba56e72f
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_blink_leds/mlx_blink_leds.h"
+#include "../../include/public/mlx_memory.h"
+#include "../../include/public/mlx_bail.h"
+
+mlx_status
+mlx_blink_leds(
+ IN mlx_utils *utils,
+ IN mlx_uint16 secs
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_led_control led_control;
+ mlx_uint32 reg_status;
+
+ if (utils == NULL ) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ mlx_memory_set(utils, &led_control, 0, sizeof(led_control));
+ led_control.beacon_duration = secs;
+ status = mlx_reg_access(utils, REG_ID_MLCR, REG_ACCESS_WRITE, &led_control, sizeof(led_control),
+ &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+reg_err:
+bad_param:
+ return status;
+}
+
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.h
new file mode 100644
index 00000000..886645fe
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.h
@@ -0,0 +1,46 @@
+#ifndef MLX_BLINK_LEDS_H_
+#define MLX_BLINK_LEDS_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h"
+#include "../../include/public/mlx_utils.h"
+
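+/* Layout of the MLCR (LED control) register used to set the beacon duration */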
+struct mlx_led_control {
+ mlx_uint32 reserved1 :16;
+ mlx_uint32 port :8;
+ mlx_uint32 bla :8;
+/* -------------- */
+ mlx_uint32 beacon_duration :16;
+ mlx_uint32 reserved2 :16;
+/* -------------- */
+ mlx_uint32 beacon_remain :16;
+ mlx_uint32 reserved3 :16;
+};
+
+mlx_status
+mlx_blink_leds(
+ IN mlx_utils *utils,
+ IN mlx_uint16 secs
+ );
+
+#endif /* MLX_BLINK_LEDS_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.c
new file mode 100644
index 00000000..d3155302
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_link_speed/mlx_link_speed.h"
+#include "../../include/public/mlx_memory.h"
+#include "../../include/public/mlx_bail.h"
+
+mlx_status
+mlx_set_link_speed(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port_num,
+ IN LINK_SPEED_TYPE type,
+ IN LINK_SPEED speed
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_link_speed link_speed;
+ mlx_uint32 reg_status;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_memory_set(utils, &link_speed, 0, sizeof(link_speed));
+
+ link_speed.loacl_port = port_num;
+ link_speed.proto_mask = 1 << type;
+
+ status = mlx_reg_access(utils, REG_ID_PTYS, REG_ACCESS_READ, &link_speed,
+ sizeof(link_speed), &reg_status);
+
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
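+	/* Restrict the admin (enabled) protocols to the requested speed,
+	 * limited to what the port reports as supported
+	 */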
+ switch (speed) {
+ case LINK_SPEED_1GB:
+ link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_1GB_MASK;
+ break;
+ case LINK_SPEED_10GB:
+ link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_10GB_MASK;
+ break;
+ case LINK_SPEED_40GB:
+ link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_40GB_MASK;
+ break;
+ case LINK_SPEED_100GB:
+ link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_100GB_MASK;
+ break;
+ case LINK_SPEED_SDR:
+ link_speed.ib_proto_admin = link_speed.ib_proto_capability & LINK_SPEED_SDR_MASK;
+ break;
+ case LINK_SPEED_DEFAULT:
+ if (type == LINK_SPEED_ETH) {
+ link_speed.eth_proto_admin = link_speed.eth_proto_capability;
+ } else {
+ link_speed.ib_proto_admin = link_speed.ib_proto_capability;
+ }
+ break;
+ }
+ status = mlx_reg_access(utils, REG_ID_PTYS, REG_ACCESS_WRITE, &link_speed,
+ sizeof(link_speed), &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+reg_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_get_max_speed(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port_num,
+ IN LINK_SPEED_TYPE type,
+ OUT mlx_uint64 *speed
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_link_speed link_speed;
+ mlx_uint32 reg_status;
+ mlx_uint64 speed_giga = 0;
+ mlx_uint8 lanes_number = 1;
+
+ *speed = 0;
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_memory_set(utils, &link_speed, 0, sizeof(link_speed));
+
+ link_speed.loacl_port = port_num;
+ link_speed.proto_mask = 1 << type;
+
+ status = mlx_reg_access(utils, REG_ID_PTYS, REG_ACCESS_READ, &link_speed,
+ sizeof(link_speed), &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+
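+	/* Pick the fastest supported rate; for IB, scale the per-lane rate
+	 * by the widest supported link width
+	 */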
+ if ( type == LINK_SPEED_ETH ) {
+ if ( link_speed.eth_proto_capability & LINK_SPEED_100GB_MASK ) {
+ speed_giga = 100;
+ } else if ( link_speed.eth_proto_capability & LINK_SPEED_56GB_MASK ) {
+ speed_giga = 56;
+ } else if ( link_speed.eth_proto_capability & LINK_SPEED_50GB_MASK ) {
+ speed_giga = 50;
+ } else if ( link_speed.eth_proto_capability & LINK_SPEED_40GB_MASK ) {
+ speed_giga = 40;
+ } else if (link_speed.eth_proto_capability & LINK_SPEED_25GB_MASK) {
+ speed_giga = 25;
+ } else if ( link_speed.eth_proto_capability & LINK_SPEED_20GB_MASK ) {
+ speed_giga = 20;
+ } else if ( link_speed.eth_proto_capability & LINK_SPEED_10GB_MASK) {
+ speed_giga = 10;
+ } else if ( link_speed.eth_proto_capability & LINK_SPEED_1GB_MASK ) {
+ speed_giga = 1;
+ }
+ } else {
+ if ( link_speed.ib_proto_capability & LINK_SPEED_EDR_MASK ) {
+ speed_giga = 25;
+ } else if ( link_speed.ib_proto_capability & LINK_SPEED_EDR20_MASK ) {
+ speed_giga = 20;
+ } else if ( link_speed.ib_proto_capability & LINK_SPEED_FDR_MASK ) {
+ speed_giga = 14;
+ } else if ( link_speed.ib_proto_capability & LINK_SPEED_QDR_MASK ) {
+ speed_giga = 10;
+ } else if ( link_speed.ib_proto_capability & LINK_SPEED_DDR_MASK ) {
+ speed_giga = 5;
+ } else if ( link_speed.ib_proto_capability & LINK_SPEED_SDR_MASK ) {
+			speed_giga = 2.5; /* SDR is 2.5 Gb/s per lane; truncated to 2 by the integer type */
+ }
+ if ( link_speed.ib_link_width_capability & LINK_SPEED_WITDH_12_MASK ) {
+ lanes_number = 12;
+ } else if ( link_speed.ib_link_width_capability & LINK_SPEED_WITDH_8_MASK ) {
+ lanes_number = 8;
+ } else if (link_speed.ib_link_width_capability & LINK_SPEED_WITDH_4_MASK ) {
+ lanes_number = 4;
+ } else if (link_speed.ib_link_width_capability & LINK_SPEED_WITDH_2_MASK ) {
+ lanes_number = 2;
+ } else if (link_speed.ib_link_width_capability & LINK_SPEED_WITDH_1_MASK ) {
+ lanes_number = 1;
+ }
+ speed_giga = speed_giga * lanes_number;
+ }
+	// Return the maximum supported speed in bits per second
+ *speed = speed_giga * GIGA_TO_BIT;
+reg_err:
+bad_param:
+ return status;
+}
+
+
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h
new file mode 100644
index 00000000..15b28f57
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h
@@ -0,0 +1,145 @@
+#ifndef MLX_LINK_SPEED_H_
+#define MLX_LINK_SPEED_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h"
+#include "../../include/public/mlx_utils.h"
+
+#define LINK_SPEED_100GB_MASK (ETH_SPEED_ENABLE_MASK_100GBASECR4 | ETH_SPEED_ENABLE_MASK_100GBASESR4 | ETH_SPEED_ENABLE_MASK_100GBASEKR4 | ETH_SPEED_ENABLE_MASK_100GBASELR4)
+#define LINK_SPEED_56GB_MASK (ETH_SPEED_ENABLE_MASK_56GBASER4)
+#define LINK_SPEED_50GB_MASK (ETH_SPEED_ENABLE_MASK_50GBASECR2 | ETH_SPEED_ENABLE_MASK_50GBASEKR2)
+#define LINK_SPEED_40GB_MASK (ETH_SPEED_ENABLE_MASK_40GBASECR4 | ETH_SPEED_ENABLE_MASK_40GBASEKR4 | ETH_SPEED_ENABLE_MASK_40GBASESR4 | ETH_SPEED_ENABLE_MASK_40GBASELR4)
+#define LINK_SPEED_25GB_MASK (ETH_SPEED_ENABLE_MASK_25GBASECR | ETH_SPEED_ENABLE_MASK_25GBASEKR | ETH_SPEED_ENABLE_MASK_25GBASESR)
+#define LINK_SPEED_20GB_MASK (ETH_SPEED_ENABLE_MASK_20GBASER2)
+#define LINK_SPEED_10GB_MASK (ETH_SPEED_ENABLE_MASK_10GBASECR | ETH_SPEED_ENABLE_MASK_10GBASESR | ETH_SPEED_ENABLE_MASK_10GBASELR | ETH_SPEED_ENABLE_MASK_10GBASEKR)
+#define LINK_SPEED_1GB_MASK (ETH_SPEED_ENABLE_MASK_1000BASECX | ETH_SPEED_ENABLE_MASK_1000BASEKX | ETH_SPEED_ENABLE_MASK_100BaseTX | ETH_SPEED_ENABLE_MASK_1000BASET)
+
+#define LINK_SPEED_SDR_MASK 0x1
+#define LINK_SPEED_DDR_MASK 0x2
+#define LINK_SPEED_QDR_MASK 0xC
+#define LINK_SPEED_FDR_MASK 0x10
+#define LINK_SPEED_EDR20_MASK 0x200
+#define LINK_SPEED_EDR_MASK 0x20
+
+#define LINK_SPEED_WITDH_1_MASK 0x1
+#define LINK_SPEED_WITDH_2_MASK 0x2
+#define LINK_SPEED_WITDH_4_MASK 0x4
+#define LINK_SPEED_WITDH_8_MASK 0x8
+#define LINK_SPEED_WITDH_12_MASK 0x10
+
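+/* Conversion factor from Gb to bits (binary scaling, 2^30) */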
+#define GIGA_TO_BIT 0x40000000
+
+enum {
+ ETH_SPEED_ENABLE_MASK_1000BASECX = 0x0001,
+ ETH_SPEED_ENABLE_MASK_1000BASEKX = 0x0002,
+ ETH_SPEED_ENABLE_MASK_10GBASECX4 = 0x0004,
+ ETH_SPEED_ENABLE_MASK_10GBASEKX4 = 0x0008,
+ ETH_SPEED_ENABLE_MASK_10GBASEKR = 0x0010,
+ ETH_SPEED_ENABLE_MASK_20GBASER2 = 0x0020,
+ ETH_SPEED_ENABLE_MASK_40GBASECR4 = 0x0040,
+ ETH_SPEED_ENABLE_MASK_40GBASEKR4 = 0x0080,
+ ETH_SPEED_ENABLE_MASK_56GBASER4 = 0x0100,
+ ETH_SPEED_ENABLE_MASK_10GBASECR = 0x1000,
+ ETH_SPEED_ENABLE_MASK_10GBASESR = 0x2000,
+ ETH_SPEED_ENABLE_MASK_10GBASELR = 0x4000,
+ ETH_SPEED_ENABLE_MASK_40GBASESR4 = 0x8000,
+ ETH_SPEED_ENABLE_MASK_40GBASELR4 = 0x10000,
+ ETH_SPEED_ENABLE_MASK_50GBASEKR4 = 0x80000,
+ ETH_SPEED_ENABLE_MASK_100GBASECR4 = 0x100000,
+ ETH_SPEED_ENABLE_MASK_100GBASESR4 = 0x200000,
+ ETH_SPEED_ENABLE_MASK_100GBASEKR4 = 0x400000,
+ ETH_SPEED_ENABLE_MASK_100GBASELR4 = 0x800000,
+ ETH_SPEED_ENABLE_MASK_100BaseTX = 0x1000000,
+ ETH_SPEED_ENABLE_MASK_1000BASET = 0x2000000,
+ ETH_SPEED_ENABLE_MASK_10GBASET = 0x4000000,
+ ETH_SPEED_ENABLE_MASK_25GBASECR = 0x8000000,
+ ETH_SPEED_ENABLE_MASK_25GBASEKR = 0x10000000,
+ ETH_SPEED_ENABLE_MASK_25GBASESR = 0x20000000,
+ ETH_SPEED_ENABLE_MASK_50GBASECR2 = 0x40000000,
+ ETH_SPEED_ENABLE_MASK_50GBASEKR2 = 0x80000000,
+ ETH_SPEED_ENABLE_MASK_BAD = 0xffff,
+};
+
+
+typedef enum {
+ LINK_SPEED_IB = 0,
+ LINK_SPEED_FC,
+ LINK_SPEED_ETH,
+} LINK_SPEED_TYPE;
+
+typedef enum {
+ LINK_SPEED_1GB = 0,
+ LINK_SPEED_10GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_SDR,
+ LINK_SPEED_DEFAULT,
+} LINK_SPEED;
+
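+/* Layout of the PTYS (port type and speed) register */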
+struct mlx_link_speed {
+ mlx_uint32 proto_mask :3;
+ mlx_uint32 reserved1 :13;
+ mlx_uint32 loacl_port :8;
+ mlx_uint32 reserved2 :8;
+ /* -------------- */
+ mlx_uint32 reserved3 :32;
+ /* -------------- */
+ mlx_uint32 reserved4 :32;
+ /* -------------- */
+ mlx_uint32 eth_proto_capability :32;
+ /* -------------- */
+ mlx_uint32 ib_proto_capability :16;
+ mlx_uint32 ib_link_width_capability :16;
+ /* -------------- */
+ mlx_uint32 reserved5 :32;
+ /* -------------- */
+ mlx_uint32 eth_proto_admin :32;
+ /* -------------- */
+ mlx_uint32 ib_proto_admin :16;
+ mlx_uint32 ib_link_width_admin :16;
+ /* -------------- */
+ mlx_uint32 reserved6 :32;
+ /* -------------- */
+ mlx_uint32 eth_proto_oper :32;
+ /* -------------- */
+ mlx_uint32 ib_proto_oper :16;
+ mlx_uint32 ib_link_width_oper :16;
+};
+
+mlx_status
+mlx_set_link_speed(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port_num,
+ IN LINK_SPEED_TYPE type,
+ IN LINK_SPEED speed
+ );
+
+mlx_status
+mlx_get_max_speed(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port_num,
+ IN LINK_SPEED_TYPE type,
+ OUT mlx_uint64 *speed
+ );
+
+#endif /* MLX_LINK_SPEED_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c
new file mode 100644
index 00000000..75573028
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_mtu.h"
+#include "mlx_memory.h"
+#include "mlx_bail.h"
+
+mlx_status
+mlx_get_max_mtu(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port_num,
+ OUT mlx_uint32 *max_mtu
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_mtu mtu;
+ mlx_uint32 reg_status;
+ *max_mtu = 0;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_memory_set(utils, &mtu, 0, sizeof(mtu));
+
+ mtu.local_port = port_num;
+
+ status = mlx_reg_access(utils, REG_ID_PMTU, REG_ACCESS_READ, &mtu,
+ sizeof(mtu), &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+	// PMTU reports the MTU in bytes; return it in bits
+ *max_mtu = mtu.max_mtu * BYTE_TO_BIT;
+reg_err:
+bad_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h
new file mode 100644
index 00000000..c6222625
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h
@@ -0,0 +1,52 @@
+#ifndef MLX_MTU_H_
+#define MLX_MTU_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_reg_access.h"
+#include "mlx_utils.h"
+
+#define BYTE_TO_BIT 0x8
+
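+/* Layout of the PMTU (port MTU) register */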
+struct mlx_mtu {
+ mlx_uint32 reserved1 :16;
+ mlx_uint32 local_port :8;
+ mlx_uint32 reserved2 :8;
+ /* -------------- */
+ mlx_uint32 reserved3 :16;
+ mlx_uint32 max_mtu :16;
+ /* -------------- */
+ mlx_uint32 reserved4 :16;
+ mlx_uint32 admin_mtu :16;
+ /* -------------- */
+ mlx_uint32 reserved5 :16;
+ mlx_uint32 oper_mtu :16;
+};
+
+mlx_status
+mlx_get_max_mtu(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port_num,
+ OUT mlx_uint32 *max_mtu
+ );
+
+#endif /* MLX_MTU_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c
new file mode 100644
index 00000000..2277e0c7
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_nvconfig/mlx_nvconfig.h"
+#include "../../include/public/mlx_memory.h"
+#include "../../include/public/mlx_bail.h"
+
+#define TlvMappingEntry( _tlv_type, _real_tlv_type, _class_code, _fw_reset_needed) { \
+ .tlv_type = _tlv_type, \
+ .real_tlv_type = _real_tlv_type, \
+ .class_code = _class_code, \
+ .fw_reset_needed = _fw_reset_needed, \
+ }
+
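+/*
+ * Maps each driver-visible TLV identifier to the TLV type actually written
+ * to the device, its NVRAM class code, and whether changing it requires a
+ * firmware reset.  The all-zero entry terminates the table.
+ */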
+struct nvconfig_tlv_mapping nvconfig_tlv_mapping[] = {
+ TlvMappingEntry(0x10, 0x10, NVRAM_TLV_CLASS_HOST, TRUE),
+ TlvMappingEntry(0x12, 0x12, NVRAM_TLV_CLASS_PHYSICAL_PORT, TRUE),
+ TlvMappingEntry(0x80, 0x80, NVRAM_TLV_CLASS_GLOBAL, TRUE),
+ TlvMappingEntry(0x81, 0x81, NVRAM_TLV_CLASS_GLOBAL, TRUE),
+ TlvMappingEntry(0x100, 0x100, NVRAM_TLV_CLASS_GLOBAL, TRUE),
+ TlvMappingEntry(0x2001, 0x195, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2010, 0x210, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2011, 0x211, NVRAM_TLV_CLASS_GLOBAL, FALSE),
+ TlvMappingEntry(0x2020, 0x2020, NVRAM_TLV_CLASS_PHYSICAL_PORT, FALSE),
+ TlvMappingEntry(0x2021, 0x221, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2023, 0x223, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2100, 0x230, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2101, 0x231, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2102, 0x232, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2103, 0x233, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2104, 0x234, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2105, 0x235, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2106, 0x236, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2107, 0x237, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2108, 0x238, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2109, 0x239, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x210A, 0x23A, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2200, 0x240, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2201, 0x241, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2202, 0x242, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2203, 0x243, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2204, 0x244, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2205, 0x245, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0x2207, 0x247, NVRAM_TLV_CLASS_HOST, FALSE),
+ TlvMappingEntry(0, 0, 0, 0),
+};
+
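+/*
+ * If the given TLV requires a firmware reset to take effect, request a
+ * warm-reboot reset level by writing the MFRL register.
+ */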
+static
+mlx_status
+nvconfig_set_fw_reset_level(
+ IN mlx_utils *utils,
+ IN mlx_uint16 tlv_type
+ )
+{
+#define WARM_REBOOT_RESET ((mlx_uint64)0x1 << 38)
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 reg_status;
+ mlx_uint64 mfrl = WARM_REBOOT_RESET ;
+ mlx_uint8 index = 0;
+ mlx_boolean reset_needed = FALSE;
+
+ for (index = 0 ; nvconfig_tlv_mapping[index].tlv_type != 0 ; index++) {
+ if (nvconfig_tlv_mapping[index].tlv_type == tlv_type) {
+ reset_needed = nvconfig_tlv_mapping[index].fw_reset_needed;
+ }
+ }
+
+ if (reset_needed == FALSE) {
+ goto no_fw_reset_needed;
+ }
+ status = mlx_reg_access(utils, REG_ID_MFRL, REG_ACCESS_WRITE, &mfrl, sizeof(mfrl),
+ &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"nvconfig_set_fw_reset_level failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+reg_err:
+no_fw_reset_needed:
+ return status;
+}
+
+
+static
+mlx_status
+nvconfig_get_tlv_type_and_class(
+ IN mlx_uint16 tlv_type,
+ OUT mlx_uint16 *real_tlv_type,
+ OUT NVRAM_CLASS_CODE *class_code
+ )
+{
+ mlx_uint8 index = 0;
+ for ( ; nvconfig_tlv_mapping[index].tlv_type != 0 ; index ++) {
+ if ( nvconfig_tlv_mapping[index].tlv_type == tlv_type) {
+ *real_tlv_type = nvconfig_tlv_mapping[index].real_tlv_type;
+ *class_code = nvconfig_tlv_mapping[index].class_code;
+ return MLX_SUCCESS;
+ }
+ }
+ return MLX_NOT_FOUND;
+}
+static
+void
+nvconfig_fill_tlv_type(
+ IN mlx_uint8 port,
+ IN NVRAM_CLASS_CODE class_code,
+ IN mlx_uint16 tlv_type,
+ OUT union nvconfig_tlv_type *nvconfig_tlv_type
+ )
+{
+ switch (class_code) {
+ case NVRAM_TLV_CLASS_GLOBAL:
+ nvconfig_tlv_type->global.param_class = NVRAM_TLV_CLASS_GLOBAL;
+ nvconfig_tlv_type->global.param_idx = tlv_type;
+ break;
+ case NVRAM_TLV_CLASS_HOST:
+ nvconfig_tlv_type->per_host.param_class = NVRAM_TLV_CLASS_HOST;
+ nvconfig_tlv_type->per_host.param_idx = tlv_type;
+ break;
+ case NVRAM_TLV_CLASS_PHYSICAL_PORT:
+ nvconfig_tlv_type->per_port.param_class = NVRAM_TLV_CLASS_PHYSICAL_PORT;
+ nvconfig_tlv_type->per_port.param_idx = tlv_type;
+ nvconfig_tlv_type->per_port.port = port;
+ break;
+ }
+}
+mlx_status
+nvconfig_query_capability(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ IN mlx_uint16 tlv_type,
+ OUT mlx_boolean *read_supported,
+ OUT mlx_boolean *write_supported
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct nvconfig_nvqc nvqc;
+ mlx_uint32 reg_status;
+ NVRAM_CLASS_CODE class_code;
+ mlx_uint16 real_tlv_type;
+
+ if (utils == NULL || read_supported == NULL || write_supported == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = nvconfig_get_tlv_type_and_class(tlv_type, &real_tlv_type, &class_code);
+ MLX_CHECK_STATUS(utils, status, tlv_not_supported, "tlv not supported");
+
+ mlx_memory_set(utils, &nvqc, 0, sizeof(nvqc));
+ nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvqc.tlv_type);
+
+ status = mlx_reg_access(utils, REG_ID_NVQC, REG_ACCESS_READ, &nvqc, sizeof(nvqc),
+ &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+ *read_supported = nvqc.support_rd;
+ *write_supported = nvqc.support_wr;
+reg_err:
+tlv_not_supported:
+bad_param:
+ return status;
+}
+
+mlx_status
+nvconfig_nvdata_invalidate(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ IN mlx_uint16 tlv_type
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct nvconfig_header nv_header;
+ mlx_uint32 reg_status;
+ NVRAM_CLASS_CODE class_code;
+ mlx_uint16 real_tlv_type;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = nvconfig_get_tlv_type_and_class(tlv_type, &real_tlv_type, &class_code);
+ MLX_CHECK_STATUS(utils, status, tlv_not_supported, "tlv not supported");
+
+ mlx_memory_set(utils, &nv_header, 0, sizeof(nv_header));
+ nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nv_header.tlv_type);
+
+ status = mlx_reg_access(utils, REG_ID_NVDI, REG_ACCESS_WRITE, &nv_header, sizeof(nv_header),
+ &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+reg_err:
+tlv_not_supported:
+bad_param:
+ return status;
+}
+
+mlx_status
+nvconfig_nvdata_access(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ IN mlx_uint16 tlv_type,
+ IN REG_ACCESS_OPT opt,
+ IN mlx_size data_size,
+ IN NV_DEFAULT_OPT def_en,
+ IN OUT mlx_uint8 *version,
+ IN OUT mlx_void *data
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct nvconfig_nvda nvda;
+ mlx_uint32 reg_status;
+ mlx_uint32 real_size_to_read;
+ mlx_uint32 index;
+ NVRAM_CLASS_CODE class_code;
+ mlx_uint16 real_tlv_type;
+ mlx_size data_size_align_to_dword;
+
+ if (utils == NULL || data == NULL || data_size > NVCONFIG_MAX_TLV_SIZE) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = nvconfig_get_tlv_type_and_class(tlv_type, &real_tlv_type, &class_code);
+ MLX_CHECK_STATUS(utils, status, tlv_not_supported, "tlv not supported");
+
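+	/* The NVDA register carries the TLV payload as 32-bit words, so the
+	 * data length is rounded up to a whole number of dwords
+	 */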
+ data_size_align_to_dword = ((data_size + 3) / sizeof(mlx_uint32)) * sizeof(mlx_uint32);
+ mlx_memory_set(utils, &nvda, 0, sizeof(nvda));
+ nvda.nv_header.length = data_size_align_to_dword;
+ nvda.nv_header.rd_en = 0;
+ nvda.nv_header.def_en = def_en;
+ nvda.nv_header.over_en = 1;
+ nvda.nv_header.version = *version;
+
+ nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvda.nv_header.tlv_type);
+
+ mlx_memory_cpy(utils, nvda.data, data, data_size);
+ for (index = 0 ; index * 4 < NVCONFIG_MAX_TLV_SIZE ; index++) {
+ mlx_memory_be32_to_cpu(utils,(((mlx_uint32 *)nvda.data)[index]), ((mlx_uint32 *)nvda.data) + index);
+ }
+ status = mlx_reg_access(utils, REG_ID_NVDA, opt, &nvda,
+ data_size_align_to_dword + sizeof(nvda.nv_header), &reg_status);
+ MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
+ if (reg_status != 0) {
+ MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
+ status = MLX_FAILED;
+ goto reg_err;
+ }
+ for (index = 0 ; index * 4 < NVCONFIG_MAX_TLV_SIZE ; index++) {
+ mlx_memory_cpu_to_be32(utils,(((mlx_uint32 *)nvda.data)[index]), ((mlx_uint32 *)nvda.data) + index);
+ }
+ if (opt == REG_ACCESS_READ) {
+ real_size_to_read = (nvda.nv_header.length > data_size) ? data_size :
+ nvda.nv_header.length;
+ mlx_memory_cpy(utils, data, nvda.data, real_size_to_read);
+ *version = nvda.nv_header.version;
+ } else {
+ nvconfig_set_fw_reset_level(utils, tlv_type);
+ }
+reg_err:
+tlv_not_supported:
+bad_param:
+ return status;
+}
+
+
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h
new file mode 100644
index 00000000..8333e836
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h
@@ -0,0 +1,140 @@
+#ifndef MLX_NVCONFIG_H_
+#define MLX_NVCONFIG_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../mlx_reg_access/mlx_reg_access.h"
+#include "../../include/public/mlx_utils.h"
+
+typedef enum {
+ NVRAM_TLV_CLASS_GLOBAL = 0,
+ NVRAM_TLV_CLASS_PHYSICAL_PORT = 1,
+ NVRAM_TLV_CLASS_HOST = 3,
+} NVRAM_CLASS_CODE;
+
+struct nvconfig_tlv_type_per_port {
+ mlx_uint32 param_idx :16;
+ mlx_uint32 port :8;
+ mlx_uint32 param_class :8;
+};
+
+struct nvconfig_tlv_type_per_host {
+ mlx_uint32 param_idx :10;
+ mlx_uint32 function :8;
+ mlx_uint32 host :6;
+ mlx_uint32 param_class :8;
+};
+
+struct nvconfig_tlv_type_global {
+ mlx_uint32 param_idx :24;
+ mlx_uint32 param_class :8;
+};
+
+struct nvconfig_tlv_mapping{
+ mlx_uint16 tlv_type;
+ mlx_uint16 real_tlv_type;
+ NVRAM_CLASS_CODE class_code;
+ mlx_boolean fw_reset_needed;
+};
+
+union nvconfig_tlv_type {
+ struct nvconfig_tlv_type_per_port per_port;
+ struct nvconfig_tlv_type_per_host per_host;
+ struct nvconfig_tlv_type_global global;
+};
+
+
+struct nvconfig_nvqc {
+ union nvconfig_tlv_type tlv_type;
+/* -------------- */
+ mlx_uint32 support_rd :1; /*the configuration item is supported and can be read */
+ mlx_uint32 support_wr :1; /*the configuration item is supported and can be updated */
+ mlx_uint32 reserved1 :2;
+ mlx_uint32 version :4; /*The maximum version of the configuration item currently supported by the firmware. */
+ mlx_uint32 reserved2 :24;
+};
+
+
+struct nvconfig_header {
+ mlx_uint32 length :9; /*Size of configuration item data in bytes between 0..256 */
+ mlx_uint32 reserved0 :3;
+ mlx_uint32 version :4; /* Configuration item version */
+ mlx_uint32 reserved1 :7;
+
+ mlx_uint32 def_en :1; /*Choose whether to access the default value or the user-defined value.
+ 0x0 Read or write the user-defined value.
+ 0x1 Read the default value (only valid for reads).*/
+
+ mlx_uint32 rd_en :1; /*enables reading the TLV by lower priorities
+ 0 - TLV can be read by the subsequent lifecycle priorities.
+ 1 - TLV cannot be read by the subsequent lifecycle priorities. */
+ mlx_uint32 over_en :1; /*enables overwriting the TLV by lower priorities
+ 0 - Can only be overwritten by the current lifecycle priority
+ 1 - Allowed to be overwritten by subsequent lifecycle priorities */
+ mlx_uint32 header_type :2;
+ mlx_uint32 priority :2;
+ mlx_uint32 valid :2;
+/* -------------- */
+	union nvconfig_tlv_type tlv_type;
+/* -------------- */
+ mlx_uint32 crc :16;
+ mlx_uint32 reserved :16;
+};
+
+#define NVCONFIG_MAX_TLV_SIZE 256
+
+struct nvconfig_nvda {
+ struct nvconfig_header nv_header;
+ mlx_uint8 data[NVCONFIG_MAX_TLV_SIZE];
+};
+
+
+mlx_status
+nvconfig_query_capability(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ IN mlx_uint16 tlv_type,
+ OUT mlx_boolean *read_supported,
+ OUT mlx_boolean *write_supported
+ );
+
+
+mlx_status
+nvconfig_nvdata_invalidate(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ IN mlx_uint16 tlv_type
+ );
+
+mlx_status
+nvconfig_nvdata_access(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ IN mlx_uint16 tlv_type,
+ IN REG_ACCESS_OPT opt,
+ IN mlx_size data_size,
+ IN NV_DEFAULT_OPT def_en,
+ IN OUT mlx_uint8 *version,
+ IN OUT mlx_void *data
+ );
+
+#endif /* MLX_NVCONFIG_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c
new file mode 100644
index 00000000..77eda8a5
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_nvconfig/mlx_nvconfig.h"
+#include "../../include/public/mlx_memory.h"
+#include "../../include/public/mlx_bail.h"
+#include "../../mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
+
+struct tlv_default {
+ mlx_uint16 tlv_type;
+ mlx_size data_size;
+ mlx_status (*set_defaults)( IN void *data, IN int status,
+ OUT void *def_struct);
+};
+
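+/* Build one tlv_default table entry; _data_size is the TLV union type,
+ * from which the entry's data_size is taken via sizeof().
+ */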
+#define TlvDefaultEntry( _tlv_type, _data_size, _set_defaults) { \
+ .tlv_type = _tlv_type, \
+ .data_size = sizeof ( _data_size ), \
+ .set_defaults = _set_defaults, \
+ }
+
+static
+mlx_status
+nvconfig_get_boot_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_nic_boot_conf *nic_boot_conf =
+ (union mlx_nvconfig_nic_boot_conf *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ /* boot_option_rom_en is deprecated - enabled always */
+ port_conf_def->boot_option_rom_en = DEFAULT_OPTION_ROM_EN;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "TLV not found. Using hard-coded defaults ");
+ port_conf_def->boot_vlan = nic_boot_conf->vlan_id;
+ port_conf_def->boot_protocol = nic_boot_conf->legacy_boot_prot;
+ port_conf_def->boot_retry_count = nic_boot_conf->boot_retry_count;
+ port_conf_def->boot_vlan_en = nic_boot_conf->en_vlan;
+
+ return MLX_SUCCESS;
+
+nvdata_access_err:
+ port_conf_def->boot_vlan = DEFAULT_BOOT_VLAN;
+ port_conf_def->boot_protocol = DEFAULT_BOOT_PROTOCOL;
+
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_boot_ext_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_nic_boot_ext_conf *nic_boot_ext_conf =
+ (union mlx_nvconfig_nic_boot_ext_conf *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "TLV not found. Using hard-coded defaults ");
+ port_conf_def->linkup_timeout = nic_boot_ext_conf->linkup_timeout;
+ port_conf_def->ip_ver = nic_boot_ext_conf->ip_ver;
+
+ return MLX_SUCCESS;
+
+nvdata_access_err:
+ port_conf_def->linkup_timeout = DEFAULT_BOOT_LINK_UP_TO;
+ port_conf_def->ip_ver = DEFAULT_BOOT_IP_VER;
+
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_iscsi_init_dhcp_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_iscsi_init_dhcp_conf *iscsi_init_dhcp_conf =
+ (union mlx_nvconfig_iscsi_init_dhcp_conf *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "TLV not found. Using hard-coded defaults ");
+ port_conf_def->iscsi_dhcp_params_en = iscsi_init_dhcp_conf->dhcp_iscsi_en;
+ port_conf_def->iscsi_ipv4_dhcp_en = iscsi_init_dhcp_conf->ipv4_dhcp_en;
+
+ return MLX_SUCCESS;
+
+nvdata_access_err:
+ port_conf_def->iscsi_dhcp_params_en = DEFAULT_ISCSI_DHCP_PARAM_EN;
+ port_conf_def->iscsi_ipv4_dhcp_en = DEFAULT_ISCSI_IPV4_DHCP_EN;
+
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_ib_boot_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_nic_ib_boot_conf *ib_boot_conf =
+ (union mlx_nvconfig_nic_ib_boot_conf *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "nvconfig_nvdata_default_access failed ");
+ port_conf_def->boot_pkey = ib_boot_conf->boot_pkey;
+
+nvdata_access_err:
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_wol_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_wol_conf *wol_conf = (union mlx_nvconfig_wol_conf *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "nvconfig_nvdata_default_access failed ");
+ port_conf_def->en_wol_magic = wol_conf->en_wol_magic;
+
+nvdata_access_err:
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_iscsi_gen_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct)
+{
+ union mlx_nvconfig_iscsi_general *iscsi_gen =
+ (union mlx_nvconfig_iscsi_general *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "nvconfig_nvdata_default_access failed ");
+ port_conf_def->iscsi_boot_to_target = iscsi_gen->boot_to_target;
+ port_conf_def->iscsi_vlan_en = iscsi_gen->vlan_en;
+ port_conf_def->iscsi_tcp_timestamps_en = iscsi_gen->tcp_timestamps_en;
+ port_conf_def->iscsi_chap_mutual_auth_en = iscsi_gen->chap_mutual_auth_en;
+ port_conf_def->iscsi_chap_auth_en = iscsi_gen->chap_auth_en;
+ port_conf_def->iscsi_lun_busy_retry_count = iscsi_gen->lun_busy_retry_count;
+ port_conf_def->iscsi_link_up_delay_time = iscsi_gen->link_up_delay_time;
+
+nvdata_access_err:
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_ib_dhcp_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_ib_dhcp_conf *ib_dhcp =
+ (union mlx_nvconfig_ib_dhcp_conf *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "nvconfig_nvdata_default_access failed ");
+ port_conf_def->client_identifier = ib_dhcp->client_identifier;
+ port_conf_def->mac_admin_bit = ib_dhcp->mac_admin_bit;
+
+nvdata_access_err:
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_ocsd_ocbb_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_ocsd_ocbb_conf *ocsd_ocbb =
+ (union mlx_nvconfig_ocsd_ocbb_conf *) data;
+ struct mlx_nvconfig_conf_defaults *conf_def =
+ (struct mlx_nvconfig_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "TLV not found. Using hard-coded defaults ");
+ conf_def->ocsd_ocbb_en = ocsd_ocbb->ocsd_ocbb_en;
+
+ return MLX_SUCCESS;
+
+nvdata_access_err:
+ conf_def->ocsd_ocbb_en = DEFAULT_OCSD_OCBB_EN;
+
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_vpi_link_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_vpi_link_conf *vpi_link =
+ (union mlx_nvconfig_vpi_link_conf *) data;
+ struct mlx_nvconfig_port_conf_defaults *port_conf_def =
+ (struct mlx_nvconfig_port_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "nvconfig_nvdata_default_access failed ");
+ port_conf_def->network_link_type = vpi_link->network_link_type;
+ port_conf_def->default_link_type = vpi_link->default_link_type;
+
+nvdata_access_err:
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_rom_banner_to_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_rom_banner_timeout_conf *rom_banner_timeout_conf =
+ (union mlx_nvconfig_rom_banner_timeout_conf *) data;
+ struct mlx_nvconfig_conf_defaults *conf_def =
+ (struct mlx_nvconfig_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "TLV not found. Using hard-coded defaults ");
+ conf_def->flexboot_menu_to = rom_banner_timeout_conf->rom_banner_to;
+
+ return MLX_SUCCESS;
+
+nvdata_access_err:
+ conf_def->flexboot_menu_to = DEFAULT_FLEXBOOT_MENU_TO;
+
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_nv_virt_caps_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_virt_caps *nv_virt_caps =
+ (union mlx_nvconfig_virt_caps *) data;
+ struct mlx_nvconfig_conf_defaults *conf_def =
+ (struct mlx_nvconfig_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "TLV not found. Using hard-coded defaults ");
+ conf_def->max_vfs = nv_virt_caps->max_vfs_per_pf;
+
+ return MLX_SUCCESS;
+
+nvdata_access_err:
+ conf_def->max_vfs = DEFAULT_MAX_VFS;
+
+ return status;
+}
+
+static
+mlx_status
+nvconfig_get_nv_virt_default_conf(
+ IN void *data,
+ IN int status,
+ OUT void *def_struct
+ )
+{
+ union mlx_nvconfig_virt_conf *nv_virt_conf =
+ (union mlx_nvconfig_virt_conf *) data;
+ struct mlx_nvconfig_conf_defaults *conf_def =
+ (struct mlx_nvconfig_conf_defaults *) def_struct;
+
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "nvconfig_nvdata_default_access failed ");
+ conf_def->total_vfs = nv_virt_conf->num_of_vfs;
+ conf_def->sriov_en = nv_virt_conf->virt_mode;
+
+nvdata_access_err:
+ return status;
+}
+
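+/* Per-port TLV types and the handlers that translate each TLV (or its
+ * hard-coded fallback) into mlx_nvconfig_port_conf_defaults fields.
+ */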
+static struct tlv_default tlv_port_defaults[] = {
+ TlvDefaultEntry(BOOT_SETTINGS_TYPE, union mlx_nvconfig_nic_boot_conf, &nvconfig_get_boot_default_conf),
+ TlvDefaultEntry(BOOT_SETTINGS_EXT_TYPE, union mlx_nvconfig_nic_boot_ext_conf, &nvconfig_get_boot_ext_default_conf),
+ TlvDefaultEntry(ISCSI_INITIATOR_DHCP_CONF_TYPE, union mlx_nvconfig_iscsi_init_dhcp_conf, &nvconfig_get_iscsi_init_dhcp_default_conf),
+ TlvDefaultEntry(IB_BOOT_SETTING_TYPE, union mlx_nvconfig_nic_ib_boot_conf, &nvconfig_get_ib_boot_default_conf),
+ TlvDefaultEntry(WAKE_ON_LAN_TYPE, union mlx_nvconfig_wol_conf, &nvconfig_get_wol_default_conf),
+ TlvDefaultEntry(ISCSI_GENERAL_SETTINGS_TYPE, union mlx_nvconfig_iscsi_general, &nvconfig_get_iscsi_gen_default_conf),
+ TlvDefaultEntry(IB_DHCP_SETTINGS_TYPE, union mlx_nvconfig_ib_dhcp_conf, &nvconfig_get_ib_dhcp_default_conf),
+ TlvDefaultEntry(VPI_LINK_TYPE, union mlx_nvconfig_vpi_link_conf, &nvconfig_get_vpi_link_default_conf),
+};
+
+static struct tlv_default tlv_general_defaults[] = {
+ TlvDefaultEntry(BANNER_TO_TYPE, union mlx_nvconfig_rom_banner_timeout_conf, &nvconfig_get_rom_banner_to_default_conf),
+ TlvDefaultEntry(GLOPAL_PCI_CAPS_TYPE, union mlx_nvconfig_virt_caps, &nvconfig_get_nv_virt_caps_default_conf),
+ TlvDefaultEntry(GLOPAL_PCI_SETTINGS_TYPE, union mlx_nvconfig_virt_conf, &nvconfig_get_nv_virt_default_conf),
+ TlvDefaultEntry(OCSD_OCBB_TYPE, union mlx_nvconfig_ocsd_ocbb_conf, &nvconfig_get_ocsd_ocbb_default_conf),
+};
+
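+/* Read the default (factory) value of a TLV via nvconfig_nvdata_access()
+ * with TLV_ACCESS_DEFAULT_EN, then swap each dword from big-endian to
+ * CPU order in place.
+ */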
+static
+mlx_status
+nvconfig_nvdata_default_access(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ IN mlx_uint16 tlv_type,
+ IN mlx_size data_size,
+ OUT mlx_void *data
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 index;
+ mlx_uint8 version = 0;
+
+ status = nvconfig_nvdata_access(utils, port, tlv_type, REG_ACCESS_READ,
+ data_size, TLV_ACCESS_DEFAULT_EN, &version, data);
+ MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
+ "nvconfig_nvdata_access failed ");
+ for (index = 0; index * 4 < data_size; index++) {
+ mlx_memory_be32_to_cpu(utils, (((mlx_uint32 *) data)[index]),
+ ((mlx_uint32 *) data) + index);
+ }
+
+nvdata_access_err:
+ return status;
+}
+
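+/* Allocate a scratch buffer for one TLV, read its default value and hand
+ * the result (including any read failure) to the entry's set_defaults
+ * callback, which falls back to hard-coded defaults where applicable.
+ */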
+static
+mlx_status
+nvconfig_nvdata_read_default_value(
+ IN mlx_utils *utils,
+ IN mlx_uint8 modifier,
+ IN struct tlv_default *def,
+ OUT void *def_struct
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ void *data = NULL;
+
+ status = mlx_memory_zalloc(utils, def->data_size,&data);
+ MLX_CHECK_STATUS(utils, status, memory_err,
+ "mlx_memory_zalloc failed ");
+ status = nvconfig_nvdata_default_access(utils, modifier, def->tlv_type,
+ def->data_size, data);
+ def->set_defaults(data, status, def_struct);
+ mlx_memory_free(utils, &data);
+
+memory_err:
+ return status;
+}
+
+static
+void
+nvconfig_nvdata_read_default_values(
+ IN mlx_utils *utils,
+ IN mlx_uint8 modifier,
+ IN struct tlv_default defaults_table[],
+ IN mlx_uint8 defaults_table_size,
+ OUT void *def_strct
+ )
+{
+ struct tlv_default *defs;
+ unsigned int i;
+
+ for (i = 0; i < defaults_table_size; i++) {
+ defs = &defaults_table[i];
+ nvconfig_nvdata_read_default_value(utils, modifier, defs, def_strct);
+ }
+}
+
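+/* Zero the caller's structure, then walk tlv_port_defaults and fill it
+ * with the default value of every per-port TLV for the given port.
+ */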
+mlx_status
+nvconfig_read_port_default_values(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ OUT struct mlx_nvconfig_port_conf_defaults *port_conf_def
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if (utils == NULL || port_conf_def == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ MLX_DEBUG_ERROR(utils,"bad params.");
+ goto bad_param;
+ }
+ mlx_memory_set(utils, port_conf_def, 0, sizeof(*port_conf_def));
+ nvconfig_nvdata_read_default_values(utils, port, tlv_port_defaults,
+ (sizeof(tlv_port_defaults)/sizeof(tlv_port_defaults[0])),
+ port_conf_def);
+
+bad_param:
+ return status;
+}
+
+mlx_status
+nvconfig_read_general_default_values(
+ IN mlx_utils *utils,
+ OUT struct mlx_nvconfig_conf_defaults *conf_def
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if (utils == NULL || conf_def == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ MLX_DEBUG_ERROR(utils,"bad params.");
+ goto bad_param;
+ }
+ mlx_memory_set(utils, conf_def, 0, sizeof(*conf_def));
+ nvconfig_nvdata_read_default_values(utils, 0, tlv_general_defaults,
+ (sizeof(tlv_general_defaults)/sizeof(tlv_general_defaults[0])),
+ conf_def);
+
+bad_param:
+ return status;
+}
+
+mlx_status
+nvconfig_read_rom_ini_values(
+ IN mlx_utils *utils,
+ OUT struct mlx_nvcofnig_romini *rom_ini
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if (utils == NULL || rom_ini == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ MLX_DEBUG_ERROR(utils,"bad params.");
+ goto bad_param;
+ }
+ mlx_memory_set(utils, rom_ini, 0, sizeof(*rom_ini));
+
+ status = nvconfig_nvdata_default_access(utils, 0, GLOBAL_ROM_INI_TYPE,
+ sizeof(*rom_ini), rom_ini);
+bad_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h
new file mode 100644
index 00000000..163c2a35
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h
@@ -0,0 +1,94 @@
+#ifndef MLX_NVCONFIG_DEFAULTS_H_
+#define MLX_NVCONFIG_DEFAULTS_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+#include "mlx_nvconfig_prm.h"
+/*
+ * Default values
+ */
+#define DEFAULT_FLEXBOOT_MENU_TO 4
+#define DEFAULT_MAX_VFS 8
+#define DEFAULT_BOOT_PROTOCOL 1
+#define DEFAULT_OPTION_ROM_EN 1
+#define DEFAULT_BOOT_VLAN 1
+#define DEFAULT_ISCSI_DHCP_PARAM_EN 1
+#define DEFAULT_ISCSI_IPV4_DHCP_EN 1
+#define DEFAULT_OCSD_OCBB_EN 1
+#define DEFAULT_BOOT_IP_VER 0
+#define DEFAULT_BOOT_LINK_UP_TO 0
+
+struct mlx_nvconfig_port_conf_defaults {
+ mlx_uint8 pptx;
+ mlx_uint8 pprx;
+ mlx_boolean boot_option_rom_en;
+ mlx_boolean boot_vlan_en;
+ mlx_uint8 boot_retry_count;
+ mlx_uint8 boot_protocol;
+ mlx_uint8 boot_vlan;
+ mlx_uint8 boot_pkey;
+ mlx_boolean en_wol_magic;
+ mlx_uint8 network_link_type;
+ mlx_uint8 iscsi_boot_to_target;
+ mlx_boolean iscsi_vlan_en;
+ mlx_boolean iscsi_tcp_timestamps_en;
+ mlx_boolean iscsi_chap_mutual_auth_en;
+ mlx_boolean iscsi_chap_auth_en;
+ mlx_boolean iscsi_dhcp_params_en;
+ mlx_boolean iscsi_ipv4_dhcp_en;
+ mlx_uint8 iscsi_lun_busy_retry_count;
+ mlx_uint8 iscsi_link_up_delay_time;
+ mlx_uint8 client_identifier;
+ mlx_uint8 mac_admin_bit;
+ mlx_uint8 default_link_type;
+ mlx_uint8 linkup_timeout;
+ mlx_uint8 ip_ver;
+};
+
+struct mlx_nvconfig_conf_defaults {
+ mlx_uint8 max_vfs;
+ mlx_uint8 total_vfs;
+ mlx_uint8 sriov_en;
+ mlx_uint8 maximum_uar_bar_size;
+ mlx_uint8 uar_bar_size;
+ mlx_uint8 flexboot_menu_to;
+ mlx_boolean ocsd_ocbb_en;
+};
+
+mlx_status
+nvconfig_read_port_default_values(
+ IN mlx_utils *utils,
+ IN mlx_uint8 port,
+ OUT struct mlx_nvconfig_port_conf_defaults *port_conf_def
+ );
+
+mlx_status
+nvconfig_read_general_default_values(
+ IN mlx_utils *utils,
+ OUT struct mlx_nvconfig_conf_defaults *conf_def
+ );
+
+mlx_status
+nvconfig_read_rom_ini_values(
+ IN mlx_utils *utils,
+ OUT struct mlx_nvcofnig_romini *rom_ini
+ );
+#endif /* MLX_NVCONFIG_DEFAULTS_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_prm.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_prm.h
new file mode 100644
index 00000000..5b3af1e7
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_prm.h
@@ -0,0 +1,259 @@
+#ifndef MLX_NVCONFIG_PRM_H_
+#define MLX_NVCONFIG_PRM_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../include/public/mlx_types.h"
+
+enum {
+ WAKE_ON_LAN_TYPE = 0x10,
+ VIRTUALIZATION_TYPE = 0x11,
+ VPI_LINK_TYPE = 0x12,
+ BOOT_SETTINGS_EXT_TYPE = 0x2001,
+ BANNER_TO_TYPE = 0x2010,
+ OCSD_OCBB_TYPE = 0x2011,
+ FLOW_CONTROL_TYPE = 0x2020,
+ BOOT_SETTINGS_TYPE = 0x2021,
+ ISCSI_GENERAL_SETTINGS_TYPE = 0x2100,
+ IB_BOOT_SETTING_TYPE = 0x2022,
+ IB_DHCP_SETTINGS_TYPE = 0x2023,
+ GLOPAL_PCI_SETTINGS_TYPE = 0x80,
+ GLOPAL_PCI_CAPS_TYPE = 0x81,
+ GLOBAL_ROM_INI_TYPE = 0x100,
+
+ // Types for iSCSI strings
+ DHCP_VEND_ID = 0x2101,
+ ISCSI_INITIATOR_IPV4_ADDR = 0x2102,
+ ISCSI_INITIATOR_SUBNET = 0x2103,
+ ISCSI_INITIATOR_IPV4_GATEWAY = 0x2104,
+ ISCSI_INITIATOR_IPV4_PRIM_DNS = 0x2105,
+ ISCSI_INITIATOR_IPV4_SECDNS = 0x2106,
+ ISCSI_INITIATOR_NAME = 0x2107,
+ ISCSI_INITIATOR_CHAP_ID = 0x2108,
+ ISCSI_INITIATOR_CHAP_PWD = 0x2109,
+ ISCSI_INITIATOR_DHCP_CONF_TYPE = 0x210a,
+
+ CONNECT_FIRST_TGT = 0x2200,
+ FIRST_TGT_IP_ADDRESS = 0x2201,
+ FIRST_TGT_TCP_PORT = 0x2202,
+ FIRST_TGT_BOOT_LUN = 0x2203,
+ FIRST_TGT_ISCSI_NAME = 0x2204,
+ FIRST_TGT_CHAP_ID = 0x2205,
+ FIRST_TGT_CHAP_PWD = 0x2207,
+};
+
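+/* The unions below mirror the TLV layouts: a bitfield view of each
+ * field alongside a raw dword view of the same data.
+ */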
+union mlx_nvconfig_nic_boot_conf {
+ struct {
+ mlx_uint32 vlan_id : 12;
+ mlx_uint32 link_speed : 4;
+ mlx_uint32 legacy_boot_prot : 8;
+ mlx_uint32 boot_retry_count : 3;
+ mlx_uint32 boot_strap_type : 3;
+ mlx_uint32 en_vlan : 1;
+ mlx_uint32 en_option_rom : 1;
+ };
+ mlx_uint32 dword;
+};
+
+union mlx_nvconfig_nic_boot_ext_conf {
+ struct {
+ mlx_uint32 linkup_timeout : 8;
+ mlx_uint32 ip_ver : 2;
+ mlx_uint32 reserved0 : 22;
+ };
+ mlx_uint32 dword;
+};
+
+union mlx_nvconfig_rom_banner_timeout_conf {
+ struct {
+ mlx_uint32 rom_banner_to : 4;
+ mlx_uint32 reserved : 28;
+ };
+ mlx_uint32 dword;
+};
+
+union mlx_nvconfig_virt_conf {
+ struct {
+ mlx_uint32 reserved0 :24;
+ mlx_uint32 pf_bar_size_valid :1;
+ mlx_uint32 vf_bar_size_valid :1;
+ mlx_uint32 num_pf_msix_valid :1;
+ mlx_uint32 num_vf_msix_valid :1;
+ mlx_uint32 num_pfs_valid :1;
+ mlx_uint32 fpp_valid :1;
+ mlx_uint32 full_vf_qos_valid :1;
+ mlx_uint32 sriov_valid :1;
+ /*-------------------*/
+ mlx_uint32 num_of_vfs :16;
+ mlx_uint32 num_of_pfs :4;
+ mlx_uint32 reserved1 :9;
+ mlx_uint32 fpp_en :1;
+ mlx_uint32 full_vf_qos :1;
+ mlx_uint32 virt_mode :1; //sriov_en
+ /*-------------------*/
+ mlx_uint32 log_pf_uar_bar_size :6;
+ mlx_uint32 log_vf_uar_bar_size :6;
+ mlx_uint32 num_pf_msix :10;
+ mlx_uint32 num_vf_msix :10;
+ };
+ mlx_uint32 dword[3];
+};
+
+union mlx_nvconfig_virt_caps {
+ struct {
+ mlx_uint32 reserved0 :24;
+ mlx_uint32 max_vfs_per_pf_valid :1;
+ mlx_uint32 max_total_msix_valid :1;
+ mlx_uint32 max_total_bar_valid :1;
+ mlx_uint32 num_pfs_supported :1;
+ mlx_uint32 num_vf_msix_supported :1;
+ mlx_uint32 num_pf_msix_supported :1;
+ mlx_uint32 vf_bar_size_supported :1;
+ mlx_uint32 pf_bar_size_supported :1;
+ /*-------------------*/
+ mlx_uint32 max_vfs_per_pf :16;
+ mlx_uint32 max_num_pfs :4;
+ mlx_uint32 reserved1 :9;
+ mlx_uint32 fpp_support :1;
+ mlx_uint32 vf_qos_control_support :1;
+ mlx_uint32 sriov_support :1;
+ /*-------------------*/
+ mlx_uint32 max_log_pf_uar_bar_size :6;
+ mlx_uint32 max_log_vf_uar_bar_size :6;
+ mlx_uint32 max_num_pf_msix :10;
+ mlx_uint32 max_num_vf_msix :10;
+ /*-------------------*/
+ mlx_uint32 max_total_msix;
+ /*-------------------*/
+ mlx_uint32 max_total_bar;
+ };
+ mlx_uint32 dword[5];
+};
+
+union mlx_nvconfig_iscsi_init_dhcp_conf {
+ struct {
+ mlx_uint32 reserved0 :30;
+ mlx_uint32 dhcp_iscsi_en :1;
+ mlx_uint32 ipv4_dhcp_en :1;
+
+ };
+ mlx_uint32 dword;
+};
+
+union mlx_nvconfig_nic_ib_boot_conf {
+ struct {
+ mlx_uint32 boot_pkey : 16;
+ mlx_uint32 reserved0 : 16;
+ };
+ mlx_uint32 dword;
+};
+
+union mlx_nvconfig_wol_conf {
+ struct {
+ mlx_uint32 reserved0 :9;
+ mlx_uint32 en_wol_passwd :1;
+ mlx_uint32 en_wol_magic :1;
+ mlx_uint32 reserved1 :21;
+ mlx_uint32 reserved2 :32;
+ };
+ mlx_uint32 dword[2];
+};
+
+union mlx_nvconfig_iscsi_general {
+ struct {
+ mlx_uint32 reserved0 :22;
+ mlx_uint32 boot_to_target :2;
+ mlx_uint32 reserved1 :2;
+ mlx_uint32 vlan_en :1;
+ mlx_uint32 tcp_timestamps_en :1;
+ mlx_uint32 chap_mutual_auth_en :1;
+ mlx_uint32 chap_auth_en :1;
+ mlx_uint32 reserved2 :2;
+ /*-------------------*/
+ mlx_uint32 vlan :12;
+ mlx_uint32 reserved3 :20;
+ /*-------------------*/
+ mlx_uint32 lun_busy_retry_count:8;
+ mlx_uint32 link_up_delay_time :8;
+ mlx_uint32 reserved4 :16;
+ };
+ mlx_uint32 dword[3];
+};
+
+union mlx_nvconfig_ib_dhcp_conf {
+ struct {
+ mlx_uint32 reserved :24;
+ mlx_uint32 client_identifier :4;
+ mlx_uint32 mac_admin_bit :4;
+ };
+ mlx_uint32 dword;
+};
+
+union mlx_nvconfig_ocsd_ocbb_conf {
+ struct {
+ mlx_uint32 reserved :31;
+ mlx_uint32 ocsd_ocbb_en :1;
+ };
+ mlx_uint32 dword;
+};
+
+union mlx_nvconfig_vpi_link_conf {
+ struct {
+ mlx_uint32 network_link_type :2;
+ mlx_uint32 default_link_type :2;
+ mlx_uint32 reserved :28;
+ };
+ mlx_uint32 dword;
+};
+
+struct mlx_nvcofnig_romini {
+ mlx_uint32 reserved0 :1;
+ mlx_uint32 shared_memory_en :1;
+ mlx_uint32 hii_vpi_en :1;
+ mlx_uint32 tech_enum :1;
+ mlx_uint32 reserved1 :4;
+ mlx_uint32 static_component_name_string :1;
+ mlx_uint32 hii_iscsi_configuration :1;
+ mlx_uint32 hii_ibm_aim :1;
+ mlx_uint32 hii_platform_setup :1;
+ mlx_uint32 hii_bdf_decimal :1;
+ mlx_uint32 hii_read_only :1;
+ mlx_uint32 reserved2 :10;
+ mlx_uint32 mac_enum :1;
+ mlx_uint32 port_enum :1;
+ mlx_uint32 flash_en :1;
+ mlx_uint32 fmp_en :1;
+ mlx_uint32 bofm_en :1;
+ mlx_uint32 platform_to_driver_en :1;
+ mlx_uint32 hii_en :1;
+ mlx_uint32 undi_en :1;
+ /* -------------- */
+ mlx_uint64 dhcp_user_class;
+ /* -------------- */
+ mlx_uint32 reserved3 :22;
+ mlx_uint32 uri_boot_retry_delay :4;
+ mlx_uint32 uri_boot_retry :4;
+ mlx_uint32 option_rom_debug :1;
+ mlx_uint32 promiscuous_vlan :1;
+};
+
+#endif /* MLX_NVCONFIG_PRM_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.c
new file mode 100644
index 00000000..3852efbf
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_ocbb.h"
+#include "mlx_icmd.h"
+#include "mlx_bail.h"
+
+mlx_status
+mlx_ocbb_init (
+ IN mlx_utils *utils,
+ IN mlx_uint64 address
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_ocbb_init ocbb_init;
+ ocbb_init.address_hi = (mlx_uint32)(address >> 32);
+ ocbb_init.address_lo = (mlx_uint32)address;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = mlx_icmd_send_command(
+ utils,
+ OCBB_INIT,
+ &ocbb_init,
+ sizeof(ocbb_init),
+ 0
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+icmd_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_ocbb_query_header_status (
+ IN mlx_utils *utils,
+ OUT mlx_uint8 *ocbb_status
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_ocbb_query_status ocbb_query_status;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = mlx_icmd_send_command(
+ utils,
+ OCBB_QUERY_HEADER_STATUS,
+ &ocbb_query_status,
+ 0,
+ sizeof(ocbb_query_status)
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+ *ocbb_status = ocbb_query_status.status;
+icmd_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_ocbb_query_etoc_status (
+ IN mlx_utils *utils,
+ OUT mlx_uint8 *ocbb_status
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_ocbb_query_status ocbb_query_status;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = mlx_icmd_send_command(
+ utils,
+ OCBB_QUERY_ETOC_STATUS,
+ &ocbb_query_status,
+ 0,
+ sizeof(ocbb_query_status)
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+ *ocbb_status = ocbb_query_status.status;
+icmd_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_ocbb_set_event (
+ IN mlx_utils *utils,
+ IN mlx_uint64 event_data,
+ IN mlx_uint8 event_number,
+ IN mlx_uint8 event_length,
+ IN mlx_uint8 data_length,
+ IN mlx_uint8 data_start_offset
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_ocbb_set_event ocbb_event;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ ocbb_event.data_length = data_length;
+ ocbb_event.data_start_offset = data_start_offset;
+ ocbb_event.event_number = event_number;
+ ocbb_event.event_data = event_data;
+ ocbb_event.event_length = event_length;
+ status = mlx_icmd_send_command(
+ utils,
+ OCBB_QUERY_SET_EVENT,
+ &ocbb_event,
+ sizeof(ocbb_event),
+ 0
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+icmd_err:
+bad_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.h
new file mode 100644
index 00000000..49312b98
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_ocbb/mlx_ocbb.h
@@ -0,0 +1,73 @@
+#ifndef MLX_OCBB_H_
+#define MLX_OCBB_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_utils.h"
+
+#define MLX_OCBB_EVENT_DATA_SIZE 2
+struct mlx_ocbb_init {
+ mlx_uint32 address_hi;
+ mlx_uint32 address_lo;
+};
+
+struct mlx_ocbb_query_status {
+ mlx_uint32 reserved :24;
+ mlx_uint32 status :8;
+};
+
+struct mlx_ocbb_set_event {
+ mlx_uint64 event_data;
+ mlx_uint32 event_number :8;
+ mlx_uint32 event_length :8;
+ mlx_uint32 data_length :8;
+ mlx_uint32 data_start_offset :8;
+};
+
+mlx_status
+mlx_ocbb_init (
+ IN mlx_utils *utils,
+ IN mlx_uint64 address
+ );
+
+mlx_status
+mlx_ocbb_query_header_status (
+ IN mlx_utils *utils,
+ OUT mlx_uint8 *ocbb_status
+ );
+
+mlx_status
+mlx_ocbb_query_etoc_status (
+ IN mlx_utils *utils,
+ OUT mlx_uint8 *ocbb_status
+ );
+
+mlx_status
+mlx_ocbb_set_event (
+ IN mlx_utils *utils,
+ IN mlx_uint64 EventData,
+ IN mlx_uint8 EventNumber,
+ IN mlx_uint8 EventLength,
+ IN mlx_uint8 DataLength,
+ IN mlx_uint8 DataStartOffset
+ );
+#endif /* MLX_OCBB_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.c
new file mode 100644
index 00000000..143ab1b0
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h"
+#include "../../include/public/mlx_icmd.h"
+#include "../../include/public/mlx_bail.h"
+#include "../../include/public/mlx_memory.h"
+
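+/* Fill the operation TLV that precedes the register TLV in the ICMD
+ * mailbox: operation type, register-access MAD class, length, access
+ * method (read/write) and register ID.
+ */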
+static
+mlx_status
+init_operation_tlv(
+ IN struct mail_box_tlv *mail_box_tlv,
+ IN mlx_uint16 reg_id,
+ IN REG_ACCESS_OPT reg_opt
+ )
+{
+#define TLV_OPERATION 1
+ mail_box_tlv->operation_tlv.Type = TLV_OPERATION;
+#define MAD_CLASS_REG_ACCESS 1
+ mail_box_tlv->operation_tlv.cls = MAD_CLASS_REG_ACCESS;
+#define TLV_OPERATION_SIZE 4
+ mail_box_tlv->operation_tlv.len = TLV_OPERATION_SIZE;
+ mail_box_tlv->operation_tlv.method = reg_opt;
+ mail_box_tlv->operation_tlv.register_id = reg_id;
+ return MLX_SUCCESS;
+}
+
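+/* Access a register through the ICMD mailbox: build the operation and
+ * register TLVs around the caller's data, send FLASH_REG_ACCESS, copy
+ * the returned register data back and report the operation status.
+ */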
+mlx_status
+mlx_reg_access(
+ IN mlx_utils *utils,
+ IN mlx_uint16 reg_id,
+ IN REG_ACCESS_OPT reg_opt,
+ IN OUT mlx_void *reg_data,
+ IN mlx_size reg_size,
+ OUT mlx_uint32 *reg_status
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mail_box_tlv mail_box_tlv;
+
+ if (utils == NULL || reg_data == NULL || reg_status == NULL
+ || reg_size > REG_ACCESS_MAX_REG_SIZE) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_memory_set(utils, &mail_box_tlv, 0, sizeof(mail_box_tlv));
+
+ init_operation_tlv(&mail_box_tlv, reg_id, reg_opt);
+
+#define REG_ACCESS_TLV_REG 3
+#define REG_TLV_HEADER_LEN 4
+#define OP_TLV_SIZE 16
+ mail_box_tlv.reg_tlv.Type = REG_ACCESS_TLV_REG;
+ mail_box_tlv.reg_tlv.len = ((reg_size + REG_TLV_HEADER_LEN + 3) >> 2); // length is in dwords round up
+ mlx_memory_cpy(utils, &mail_box_tlv.reg_tlv.data, reg_data, reg_size);
+
+ reg_size += OP_TLV_SIZE + REG_TLV_HEADER_LEN;
+
+ status = mlx_icmd_send_command(utils, FLASH_REG_ACCESS, &mail_box_tlv, reg_size, reg_size);
+ MLX_CHECK_STATUS(utils, status, icmd_err, "failed to send icmd");
+
+ mlx_memory_cpy(utils, reg_data, &mail_box_tlv.reg_tlv.data,
+ reg_size - (OP_TLV_SIZE + REG_TLV_HEADER_LEN));
+
+ *reg_status = mail_box_tlv.operation_tlv.status;
+icmd_err:
+bad_param:
+ return status;
+}
+
+
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h
new file mode 100644
index 00000000..9fbf5163
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#ifndef MLX_REG_ACCESS_H_
+#define MLX_REG_ACCESS_H_
+
+#include "../../include/public/mlx_icmd.h"
+
+#define REG_ACCESS_MAX_REG_SIZE 236
+
+typedef enum {
+ REG_ACCESS_READ = 1,
+ REG_ACCESS_WRITE = 2,
+} REG_ACCESS_OPT;
+
+typedef enum {
+ TLV_ACCESS_DEFAULT_DIS = 0,
+ TLV_ACCESS_DEFAULT_EN = 1,
+} NV_DEFAULT_OPT;
+
+#define REG_ID_NVDA 0x9024
+#define REG_ID_NVDI 0x9025
+#define REG_ID_NVIA 0x9029
+#define REG_ID_MLCR 0x902b
+#define REG_ID_NVQC 0x9030
+#define REG_ID_MFRL 0x9028
+#define REG_ID_PTYS 0x5004
+#define REG_ID_PMTU 0x5003
+
+struct operation_tlv {
+ mlx_uint32 reserved0 :8; /* bit_offset:0 */ /* element_size: 8 */
+ mlx_uint32 status :7; /* bit_offset:8 */ /* element_size: 7 */
+ mlx_uint32 dr :1; /* bit_offset:15 */ /* element_size: 1 */
+ mlx_uint32 len :11; /* bit_offset:16 */ /* element_size: 11 */
+ mlx_uint32 Type :5; /* bit_offset:27 */ /* element_size: 5 */
+ mlx_uint32 cls :8; /* bit_offset:32 */ /* element_size: 8 */
+ mlx_uint32 method :7; /* bit_offset:40 */ /* element_size: 7 */
+ mlx_uint32 r :1; /* bit_offset:47 */ /* element_size: 1 */
+ mlx_uint32 register_id :16; /* bit_offset:48 */ /* element_size: 16 */
+ mlx_uint64 tid ; /* bit_offset:64 */ /* element_size: 64 */
+};
+
+struct reg_tlv {
+ mlx_uint32 reserved0 :16; /* bit_offset:0 */ /* element_size: 16 */
+ mlx_uint32 len :11; /* bit_offset:16 */ /* element_size: 11 */
+ mlx_uint32 Type :5; /* bit_offset:27 */ /* element_size: 5 */
+ mlx_uint8 data[REG_ACCESS_MAX_REG_SIZE];
+};
+
+struct mail_box_tlv {
+ struct operation_tlv operation_tlv;
+ struct reg_tlv reg_tlv;
+};
+mlx_status
+mlx_reg_access(
+ IN mlx_utils *utils,
+ IN mlx_uint16 reg_id,
+ IN REG_ACCESS_OPT reg_opt,
+ IN OUT mlx_void *reg_data,
+ IN mlx_size reg_size,
+ OUT mlx_uint32 *reg_status
+ );
+
+#endif /* MLX_REG_ACCESS_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.c
new file mode 100644
index 00000000..65d04c96
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_vmac/mlx_vmac.h"
+#include "../../include/public/mlx_icmd.h"
+#include "../../include/public/mlx_bail.h"
+
+mlx_status
+mlx_vmac_query_virt_mac (
+ IN mlx_utils *utils,
+ OUT struct mlx_vmac_query_virt_mac *virt_mac
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if (utils == NULL || virt_mac == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = mlx_icmd_send_command(
+ utils,
+ QUERY_VIRTUAL_MAC,
+ virt_mac,
+ 0,
+ sizeof(*virt_mac)
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+icmd_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_vmac_set_virt_mac (
+ IN mlx_utils *utils,
+ OUT struct mlx_vmac_set_virt_mac *virt_mac
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if (utils == NULL || virt_mac == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ status = mlx_icmd_send_command(
+ utils,
+ SET_VIRTUAL_MAC,
+ virt_mac,
+ sizeof(*virt_mac),
+ 0
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+icmd_err:
+bad_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h
new file mode 100644
index 00000000..2214d918
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h
@@ -0,0 +1,60 @@
+#ifndef MLX_VMAC_H_
+#define MLX_VMAC_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../include/public/mlx_utils.h"
+
+struct mlx_vmac_query_virt_mac {
+ mlx_uint32 reserved0 :30;
+ mlx_uint32 mac_aux_v :1;
+ mlx_uint32 virtual_mac_en :1;
+ mlx_uint32 parmanent_mac_high :16;
+ mlx_uint32 reserved1 :16;
+ mlx_uint32 parmanent_mac_low :32;
+ mlx_uint32 virtual_mac_high :16;
+ mlx_uint32 Reserved2 :16;
+ mlx_uint32 virtual_mac_low :32;
+};
+
+struct mlx_vmac_set_virt_mac {
+ mlx_uint32 Reserved0 :30;
+ mlx_uint32 mac_aux_v :1;
+ mlx_uint32 virtual_mac_en :1;
+ mlx_uint32 reserved1 :32;
+ mlx_uint32 reserved2 :32;
+ mlx_uint32 virtual_mac_high;
+ mlx_uint32 virtual_mac_low;
+};
+
+mlx_status
+mlx_vmac_query_virt_mac (
+ IN mlx_utils *utils,
+ OUT struct mlx_vmac_query_virt_mac *virt_mac
+ );
+
+mlx_status
+mlx_vmac_set_virt_mac (
+ IN mlx_utils *utils,
+ OUT struct mlx_vmac_set_virt_mac *virt_mac
+ );
+#endif /* MLX_VMAC_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.c
new file mode 100644
index 00000000..a6c23c4a
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "mlx_wol_rol.h"
+#include "mlx_icmd.h"
+#include "mlx_memory.h"
+#include "mlx_bail.h"
+
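+/* Program the wake-on-LAN mode: send SET_WOL_ROL over ICMD with
+ * wol_mode_valid set and wol_mode taken from the caller's mask.
+ */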
+mlx_status
+mlx_set_wol (
+ IN mlx_utils *utils,
+ IN mlx_uint8 wol_mask
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_wol_rol wol_rol;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
+ wol_rol.wol_mode_valid = TRUE;
+ wol_rol.wol_mode = wol_mask;
+ status = mlx_icmd_send_command(
+ utils,
+ SET_WOL_ROL,
+ &wol_rol,
+ sizeof(wol_rol),
+ 0
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+icmd_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_query_wol (
+ IN mlx_utils *utils,
+ OUT mlx_uint8 *wol_mask
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ struct mlx_wol_rol wol_rol;
+
+ if (utils == NULL || wol_mask == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
+ status = mlx_icmd_send_command(
+ utils,
+ QUERY_WOL_ROL,
+ &wol_rol,
+ 0,
+ sizeof(wol_rol)
+ );
+ MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
+ *wol_mask = wol_rol.wol_mode;
+icmd_err:
+bad_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.h
new file mode 100644
index 00000000..610419d5
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_wol_rol/mlx_wol_rol.h
@@ -0,0 +1,61 @@
+#ifndef MLX_WOL_ROL_H_
+#define MLX_WOL_ROL_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+
+#include "mlx_utils.h"
+
+typedef enum {
+ WOL_MODE_DISABLE = 0x0,
+ WOL_MODE_SECURE = 0x2,
+ WOL_MODE_MAGIC = 0x4,
+ WOL_MODE_ARP = 0x8,
+ WOL_MODE_BC = 0x10,
+ WOL_MODE_MC = 0x20,
+ WOL_MODE_UC = 0x40,
+ WOL_MODE_PHY = 0x80,
+} WOL_MODE;
+
+struct mlx_wol_rol {
+ mlx_uint32 reserved0 :32;
+ mlx_uint32 reserved1 :32;
+ mlx_uint32 wol_mode :8;
+ mlx_uint32 rol_mode :8;
+ mlx_uint32 reserved3 :14;
+ mlx_uint32 wol_mode_valid :1;
+ mlx_uint32 rol_mode_valid :1;
+};
+
+mlx_status
+mlx_set_wol (
+ IN mlx_utils *utils,
+ IN mlx_uint8 wol_mask
+ );
+
+mlx_status
+mlx_query_wol (
+ IN mlx_utils *utils,
+ OUT mlx_uint8 *wol_mask
+ );
+
+#endif /* MLX_WOL_ROL_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/src/private/uefi/mlx_logging_impl.c b/src/drivers/infiniband/mlx_utils/src/private/uefi/mlx_logging_impl.c
new file mode 100644
index 00000000..4386ad9b
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/src/private/uefi/mlx_logging_impl.c
@@ -0,0 +1,9 @@
+MlxDebugLogImpl()
+{
+ DBGC((DEBUG),"");
+}
+
+MlxInfoLogImpl()
+{
+ DBGC((INFO),"");
+}
diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_icmd.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_icmd.c
new file mode 100644
index 00000000..e4206739
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_icmd.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../include/public/mlx_bail.h"
+#include "../../include/public/mlx_icmd.h"
+#include "../../include/public/mlx_pci_gw.h"
+#include "../../include/public/mlx_utils.h"
+
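+/* Acquire the ICMD semaphore: write a random ID through the PCI gateway
+ * semaphore space and read it back, retrying (with 10ms delays) up to
+ * ICMD_GET_SEMAPHORE_TRIES times until ownership is confirmed.
+ */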
+static
+mlx_status
+mlx_icmd_get_semaphore(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 retries = 0;
+ mlx_uint32 semaphore_id;
+ mlx_uint32 buffer;
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+
+ status = mlx_utils_rand(utils, &semaphore_id);
+ MLX_CHECK_STATUS(utils, status, rand_err, "failed to get random number");
+#define ICMD_GET_SEMAPHORE_TRIES 2560
+ for (retries = 0 ; retries < ICMD_GET_SEMAPHORE_TRIES ; retries++) {
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_SEMAPHORE,
+ MLX_ICMD_SEMAPHORE_ADDR, &buffer);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd semaphore");
+ if (buffer != 0) {
+ mlx_utils_delay_in_ms(10);
+ continue;
+ }
+ mlx_pci_gw_write( utils, PCI_GW_SPACE_SEMAPHORE,
+ MLX_ICMD_SEMAPHORE_ADDR, semaphore_id);
+ MLX_CHECK_STATUS(utils, status, set_err, "failed to set icmd semaphore");
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_SEMAPHORE,
+ MLX_ICMD_SEMAPHORE_ADDR, &buffer);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd semaphore");
+ if (semaphore_id == buffer) {
+ status = MLX_SUCCESS;
+ utils->icmd.took_semaphore = TRUE;
+ break;
+ }
+ mlx_utils_delay_in_ms(10);
+ }
+ if (semaphore_id != buffer) {
+ status = MLX_FAILED;
+ }
+read_err:
+set_err:
+rand_err:
+invalid_param:
+ return status;
+}
+static
+mlx_status
+mlx_icmd_clear_semaphore(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+
+ if (utils->icmd.took_semaphore == FALSE) {
+ goto semaphore_not_taken;
+ }
+ status = mlx_pci_gw_write( utils, PCI_GW_SPACE_SEMAPHORE,
+ MLX_ICMD_SEMAPHORE_ADDR, 0);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to clear icmd semaphore");
+
+ utils->icmd.took_semaphore = FALSE;
+read_err:
+semaphore_not_taken:
+invalid_param:
+ return status;
+}
+
+static
+mlx_status
+mlx_icmd_init(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+ if (utils->icmd.icmd_opened == TRUE) {
+ goto already_opened;
+ }
+
+ utils->icmd.took_semaphore = FALSE;
+
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_MB_SIZE_ADDR, &utils->icmd.max_cmd_size);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd mail box size");
+
+ utils->icmd.icmd_opened = TRUE;
+read_err:
+already_opened:
+invalid_param:
+ return status;
+}
+
+static
+mlx_status
+mlx_icmd_set_opcode(
+ IN mlx_utils *utils,
+ IN mlx_uint16 opcode
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_CTRL_ADDR, &buffer);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl");
+
+#define MLX_ICMD_OPCODE_ALIGN 16
+#define MLX_ICMD_OPCODE_MASK 0xffff
+
+ buffer = buffer & ~(MLX_ICMD_OPCODE_MASK << MLX_ICMD_OPCODE_ALIGN);
+ buffer = buffer | (opcode << MLX_ICMD_OPCODE_ALIGN);
+
+ status = mlx_pci_gw_write( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_CTRL_ADDR, buffer);
+ MLX_CHECK_STATUS(utils, status, write_err, "failed to write icmd ctrl");
+write_err:
+read_err:
+invalid_param:
+ return status;
+}
+
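+/* Trigger command execution: set the busy bit in the ICMD control
+ * register and poll (10ms per iteration, up to
+ * MLX_ICMD_BUSY_MAX_ITERATIONS) until firmware clears it.
+ */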
+static
+mlx_status
+mlx_icmd_go(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer;
+ mlx_uint32 busy;
+ mlx_uint32 wait_iteration = 0;
+
+ if (utils == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_CTRL_ADDR, &buffer);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl");
+
+#define MLX_ICMD_BUSY_ALIGN 0
+#define MLX_ICMD_BUSY_MASK 0x1
+
+ busy = (buffer >> MLX_ICMD_BUSY_ALIGN) & MLX_ICMD_BUSY_MASK;
+ if (busy != 0) {
+ status = MLX_FAILED;
+ goto already_busy;
+ }
+
+ buffer = buffer | (1 << MLX_ICMD_BUSY_ALIGN);
+
+ status = mlx_pci_gw_write( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_CTRL_ADDR, buffer);
+ MLX_CHECK_STATUS(utils, status, write_err, "failed to write icmd ctrl");
+
+#define MLX_ICMD_BUSY_MAX_ITERATIONS 1024
+ do {
+ if (++wait_iteration > MLX_ICMD_BUSY_MAX_ITERATIONS) {
+ status = MLX_FAILED;
+ MLX_DEBUG_ERROR(utils, "ICMD time out");
+ goto busy_timeout;
+ }
+
+ mlx_utils_delay_in_ms(10);
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_CTRL_ADDR, &buffer);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl");
+ busy = (buffer >> MLX_ICMD_BUSY_ALIGN) & MLX_ICMD_BUSY_MASK;
+ } while (busy != 0);
+
+busy_timeout:
+write_err:
+already_busy:
+read_err:
+invalid_param:
+ return status;
+}
+
+static
+mlx_status
+mlx_icmd_get_status(
+ IN mlx_utils *utils,
+ OUT mlx_uint32 *out_status
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 buffer;
+
+ if (utils == NULL || out_status == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_CTRL_ADDR, &buffer);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl");
+
+#define MLX_ICMD_STATUS_ALIGN 8
+#define MLX_ICMD_STATUS_MASK 0xff
+
+ *out_status = (buffer >> MLX_ICMD_STATUS_ALIGN) & MLX_ICMD_STATUS_MASK;
+
+read_err:
+invalid_param:
+ return status;
+}
+
+static
+mlx_status
+mlx_icmd_write_buffer(
+ IN mlx_utils *utils,
+ IN mlx_void* data,
+ IN mlx_uint32 data_size
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 data_offset = 0;
+ mlx_size dword_size = sizeof(mlx_uint32);
+
+ if (utils == NULL || data == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+
+ for (data_offset = 0 ; data_offset*dword_size < data_size ; data_offset++) {
+ status = mlx_pci_gw_write( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_MB_ADDR + data_offset*dword_size,
+ ((mlx_uint32*)data)[data_offset]);
+ MLX_CHECK_STATUS(utils, status, write_err, "failed to write icmd MB");
+ }
+write_err:
+invalid_param:
+ return status;
+}
+
+
+static
+mlx_status
+mlx_icmd_read_buffer(
+ IN mlx_utils *utils,
+ OUT mlx_void* data,
+ IN mlx_uint32 data_size
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 data_offset = 0;
+ mlx_size dword_size = sizeof(mlx_uint32);
+
+ if (utils == NULL || data == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+
+ for (data_offset = 0 ; data_offset*dword_size < data_size ; data_offset++) {
+ status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD,
+ MLX_ICMD_MB_ADDR + data_offset*dword_size,
+ (mlx_uint32*)data + data_offset);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd MB");
+ }
+read_err:
+invalid_param:
+ return status;
+}
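+
+/* Run one ICMD transaction: initialise the interface, take the
+ * semaphore, set the opcode, write the request to the mailbox, trigger
+ * execution, check the returned status and read back the response
+ * before releasing the semaphore.
+ */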
+mlx_status
+mlx_icmd_send_command(
+ IN mlx_utils *utils,
+ IN mlx_uint16 opcode,
+ IN OUT mlx_void* data,
+ IN mlx_uint32 write_data_size,
+ IN mlx_uint32 read_data_size
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 icmd_status;
+
+ if (utils == NULL || data == NULL) {
+ status = MLX_INVALID_PARAMETER;
+ goto invalid_param;
+ }
+ status = mlx_icmd_init(utils);
+ MLX_CHECK_STATUS(utils, status, open_err, "failed to open icmd");
+
+ if (write_data_size > utils->icmd.max_cmd_size ||
+ read_data_size > utils->icmd.max_cmd_size) {
+ status = MLX_INVALID_PARAMETER;
+ goto size_err;
+ }
+
+ status = mlx_icmd_get_semaphore(utils);
+ MLX_CHECK_STATUS(utils, status, semaphore_err, "failed to get icmd semaphore");
+
+ status = mlx_icmd_set_opcode(utils, opcode);
+ MLX_CHECK_STATUS(utils, status, opcode_err, "failed to set icmd opcode");
+
+ if (write_data_size != 0) {
+ status = mlx_icmd_write_buffer(utils, data, write_data_size);
+ MLX_CHECK_STATUS(utils, status, opcode_err, "failed to write icmd MB");
+ }
+
+ status = mlx_icmd_go(utils);
+ MLX_CHECK_STATUS(utils, status, go_err, "failed to activate icmd");
+
+ status = mlx_icmd_get_status(utils, &icmd_status);
+ MLX_CHECK_STATUS(utils, status, get_status_err, "failed to get icmd status");
+
+ if (icmd_status != 0) {
+ MLX_DEBUG_ERROR(utils, "icmd failed with status = %d\n", icmd_status);
+ status = MLX_FAILED;
+ goto icmd_failed;
+ }
+ if (read_data_size != 0) {
+ status = mlx_icmd_read_buffer(utils, data, read_data_size);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd MB");
+ }
+read_err:
+icmd_failed:
+get_status_err:
+go_err:
+opcode_err:
+ mlx_icmd_clear_semaphore(utils);
+semaphore_err:
+size_err:
+open_err:
+invalid_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_memory.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_memory.c
new file mode 100644
index 00000000..5aa5a53d
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_memory.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include <stddef.h>
+#include "../../include/private/mlx_memory_priv.h"
+#include "../../include/public/mlx_memory.h"
+
+mlx_status
+mlx_memory_alloc(
+ IN mlx_utils *utils,
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || size == 0 || ptr == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ *ptr = NULL;
+ status = mlx_memory_alloc_priv(utils, size, ptr);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_zalloc(
+ IN mlx_utils *utils,
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || size == 0 || ptr == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ *ptr = NULL;
+ status = mlx_memory_zalloc_priv(utils, size, ptr);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_free(
+ IN mlx_utils *utils,
+ IN mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || ptr == NULL || *ptr == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_free_priv(utils, *ptr);
+ *ptr = NULL;
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_alloc_dma(
+ IN mlx_utils *utils,
+ IN mlx_size size ,
+ IN mlx_size align,
+ OUT mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || size == 0 || ptr == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ *ptr = NULL;
+ status = mlx_memory_alloc_dma_priv(utils, size, align, ptr);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_free_dma(
+ IN mlx_utils *utils,
+ IN mlx_size size ,
+ IN mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || size == 0 || ptr == NULL || *ptr == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_free_dma_priv(utils, size, *ptr);
+ *ptr = NULL;
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_map_dma(
+ IN mlx_utils *utils,
+ IN mlx_void *addr ,
+ IN mlx_size number_of_bytes,
+ OUT mlx_physical_address *phys_addr,
+ OUT mlx_void **mapping
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || phys_addr == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_map_dma_priv(utils, addr, number_of_bytes, phys_addr, mapping);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_ummap_dma(
+ IN mlx_utils *utils,
+ IN mlx_void *mapping
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_ummap_dma_priv(utils, mapping);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_cmp(
+ IN mlx_utils *utils,
+ IN mlx_void *first_block,
+ IN mlx_void *second_block,
+ IN mlx_size size,
+ OUT mlx_uint32 *out
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || first_block == NULL || second_block == NULL ||
+ out == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_cmp_priv(utils, first_block, second_block, size, out);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_set(
+ IN mlx_utils *utils,
+ IN mlx_void *block,
+ IN mlx_int32 value,
+ IN mlx_size size
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || block == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_set_priv(utils, block, value, size);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_cpy(
+ IN mlx_utils *utils,
+ OUT mlx_void *destination_buffer,
+ IN mlx_void *source_buffer,
+ IN mlx_size length
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || destination_buffer == NULL || source_buffer == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_cpy_priv(utils, destination_buffer, source_buffer, length);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_cpu_to_be32(
+ IN mlx_utils *utils,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || destination == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_cpu_to_be32_priv(utils, source, destination);
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_memory_be32_to_cpu(
+ IN mlx_utils *utils,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if ( utils == NULL || destination == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+ status = mlx_memory_be32_to_cpu_priv(utils, source, destination);
+bad_param:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c
new file mode 100644
index 00000000..91c44d99
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include <stddef.h>
+#include "../../include/private/mlx_pci_priv.h"
+#include "../../include/public/mlx_pci.h"
+
+mlx_status
+mlx_pci_init(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if( utils == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bail;
+ }
+ status = mlx_pci_init_priv(utils);
+bail:
+ return status;
+}
+
+mlx_status
+mlx_pci_read(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ OUT mlx_void *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if( utils == NULL || count == 0){
+ status = MLX_INVALID_PARAMETER;
+ goto bail;
+ }
+ status = mlx_pci_read_priv(utils, width, offset, count, buffer);
+bail:
+ return status;
+}
+
+mlx_status
+mlx_pci_write(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ IN mlx_void *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if( utils == NULL || count == 0){
+ status = MLX_INVALID_PARAMETER;
+ goto bail;
+ }
+ status = mlx_pci_write_priv(utils, width, offset, count, buffer);
+bail:
+ return status;
+}
+
+mlx_status
+mlx_pci_mem_read(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint8 bar_index,
+ IN mlx_uint64 offset,
+ IN mlx_uintn count,
+ OUT mlx_void *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if( utils == NULL || count == 0){
+ status = MLX_INVALID_PARAMETER;
+ goto bail;
+ }
+ status = mlx_pci_mem_read_priv(utils, width, bar_index, offset, count, buffer);
+bail:
+ return status;
+}
+
+mlx_status
+mlx_pci_mem_write(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint8 bar_index,
+ IN mlx_uint64 offset,
+ IN mlx_uintn count,
+ IN mlx_void *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if( utils == NULL || count == 0){
+ status = MLX_INVALID_PARAMETER;
+ goto bail;
+ }
+ status = mlx_pci_mem_write_priv(utils, width, bar_index, offset, count, buffer);
+bail:
+ return status;
+}
diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_pci_gw.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci_gw.c
new file mode 100644
index 00000000..30c1e644
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci_gw.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../include/public/mlx_pci_gw.h"
+#include "../../include/public/mlx_bail.h"
+#include "../../include/public/mlx_pci.h"
+#include "../../include/public/mlx_logging.h"
+
+/* Lock/unlock GW on each VSEC access */
+#undef VSEC_DEBUG
+
+static
+mlx_status
+mlx_pci_gw_check_capability_id(
+ IN mlx_utils *utils,
+ IN mlx_uint8 cap_pointer,
+ OUT mlx_boolean *bool
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint8 offset = cap_pointer + PCI_GW_CAPABILITY_ID_OFFSET;
+ mlx_uint8 id = 0;
+ status = mlx_pci_read(utils, MlxPciWidthUint8, offset,
+ 1, &id);
+ MLX_CHECK_STATUS(utils, status, read_err,"failed to read capability id");
+ *bool = ( id == PCI_GW_CAPABILITY_ID );
+read_err:
+ return status;
+}
+
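+/*
+ * Acquire the gateway semaphore: poll until it reads zero, then claim it
+ * by writing the current ticket counter and checking that the value read
+ * back matches.
+ */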
+static
+mlx_status
+mlx_pci_gw_get_ownership(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset;
+ mlx_uint32 semaphore = 0;
+ mlx_uint32 counter = 0;
+ mlx_uint32 get_semaphore_try = 0;
+ mlx_uint32 get_ownership_try = 0;
+
+ for( ; get_ownership_try < PCI_GW_GET_OWNERSHIP_TRIES; get_ownership_try ++){
+ for( ; get_semaphore_try <= PCI_GW_SEMPHORE_TRIES ; get_semaphore_try++){
+ status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_SEMAPHORE_OFFSET,
+ 1, &semaphore);
+ MLX_CHECK_STATUS(utils, status, read_err,"failed to read semaphore");
+ if( semaphore == 0 ){
+ break;
+ }
+ mlx_utils_delay_in_us(10);
+ }
+ if( semaphore != 0 ){
+ status = MLX_FAILED;
+ goto semaphore_err;
+ }
+
+ status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_COUNTER_OFFSET,
+ 1, &counter);
+ MLX_CHECK_STATUS(utils, status, read_err, "failed to read counter");
+
+ status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_SEMAPHORE_OFFSET,
+ 1, &counter);
+ MLX_CHECK_STATUS(utils, status, write_err,"failed to write semaphore");
+
+ status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_SEMAPHORE_OFFSET,
+ 1, &semaphore);
+ MLX_CHECK_STATUS(utils, status, read_err,"failed to read semaphore");
+ if( counter == semaphore ){
+ break;
+ }
+ }
+ if( counter != semaphore ){
+ status = MLX_FAILED;
+ }
+write_err:
+read_err:
+semaphore_err:
+ return status;
+}
+
+static
+mlx_status
+mlx_pci_gw_free_ownership(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset;
+ mlx_uint32 value = 0;
+
+ status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_SEMAPHORE_OFFSET,
+ 1, &value);
+ MLX_CHECK_STATUS(utils, status, write_err,"failed to write semaphore");
+write_err:
+ return status;
+}
+
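+/*
+ * Select the address space used for subsequent gateway accesses and
+ * verify, via the capability status bit, that the device supports it.
+ */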
+static
+mlx_status
+mlx_pci_gw_set_space(
+ IN mlx_utils *utils,
+ IN mlx_pci_gw_space space
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset;
+ mlx_uint8 space_status = 0;
+
+ /* select the requested address space */
+ status = mlx_pci_write(utils, MlxPciWidthUint16, cap_offset + PCI_GW_CAPABILITY_SPACE_OFFSET, 1, &space);
+ MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability space");
+
+ status = mlx_pci_read(utils, MlxPciWidthUint8, cap_offset + PCI_GW_CAPABILITY_STATUS_OFFSET, 1, &space_status);
+ MLX_CHECK_STATUS(utils, status, read_error,"failed to read capability status");
+ if( (space_status & 0x20) == 0){
+ status = MLX_FAILED;
+ goto space_unsupported;
+ }
+read_error:
+space_unsupported:
+ return status;
+}
+
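+/*
+ * Poll the gateway flag (bit 31 of the flag register) until it reaches the
+ * requested value, giving up after PCI_GW_READ_FLAG_TRIES attempts.
+ */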
+static
+mlx_status
+mlx_pci_gw_wait_for_flag_value(
+ IN mlx_utils *utils,
+ IN mlx_boolean value
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint32 try = 0;
+ mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset;
+ mlx_uint32 flag = 0;
+
+ for(; try < PCI_GW_READ_FLAG_TRIES ; try ++ ) {
+ status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_FLAG_OFFSET, 1, &flag);
+ MLX_CHECK_STATUS(utils, status, read_error, "failed to read capability flag");
+ if( ((flag & 0x80000000) != 0) == value ){
+ goto flag_valid;
+ }
+ mlx_utils_delay_in_us(10);
+ }
+ status = MLX_FAILED;
+flag_valid:
+read_error:
+ return status;
+}
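+
+/*
+ * Walk the PCI capability list and return the offset of the first
+ * capability whose ID matches PCI_GW_CAPABILITY_ID.
+ */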
+static
+mlx_status
+mlx_pci_gw_search_capability(
+ IN mlx_utils *utils,
+ OUT mlx_uint32 *cap_offset
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint8 cap_pointer = 0;
+ mlx_boolean is_capability = FALSE;
+
+ if( cap_offset == NULL || utils == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ //get first capability pointer
+ status = mlx_pci_read(utils, MlxPciWidthUint8, PCI_GW_FIRST_CAPABILITY_POINTER_OFFSET,
+ 1, &cap_pointer);
+ MLX_CHECK_STATUS(utils, status, read_err,
+ "failed to read capability pointer");
+
+ //search the right capability
+ while( cap_pointer != 0 ){
+ status = mlx_pci_gw_check_capability_id(utils, cap_pointer, &is_capability);
+ MLX_CHECK_STATUS(utils, status, check_err,
+ "failed to check capability id");
+
+ if( is_capability == TRUE ){
+ *cap_offset = cap_pointer;
+ break;
+ }
+
+ status = mlx_pci_read(utils, MlxPciWidthUint8, cap_pointer +
+ PCI_GW_CAPABILITY_NEXT_POINTER_OFFSET ,
+ 1, &cap_pointer);
+ MLX_CHECK_STATUS(utils, status, read_err,
+ "failed to read capability pointer");
+ }
+ if( is_capability != TRUE ){
+ status = MLX_NOT_FOUND;
+ }
+check_err:
+read_err:
+bad_param:
+ return status;
+}
+
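+/*
+ * Locate the gateway capability and, unless VSEC_DEBUG per-access locking
+ * is enabled, take ownership of the gateway once at initialisation.
+ */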
+mlx_status
+mlx_pci_gw_init(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_pci_gw *pci_gw = NULL;
+
+ if( utils == NULL){
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ pci_gw = &utils->pci_gw;
+
+ status = mlx_pci_gw_search_capability(utils, &pci_gw->pci_cmd_offset);
+ MLX_CHECK_STATUS(utils, status, cap_err,
+ "mlx_pci_gw_search_capability failed");
+
+#if ! defined ( VSEC_DEBUG )
+ status = mlx_pci_gw_get_ownership(utils);
+ MLX_CHECK_STATUS(utils, status, ownership_err,"failed to get ownership");
+ownership_err:
+#endif
+cap_err:
+bad_param:
+ return status;
+}
+
+mlx_status
+mlx_pci_gw_teardown(
+ IN mlx_utils *utils __attribute__ ((unused))
+ )
+{
+#if ! defined ( VSEC_DEBUG )
+ mlx_pci_gw_free_ownership(utils);
+#endif
+ return MLX_SUCCESS;
+}
+
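+/*
+ * Read one dword through the gateway: select the address space if it has
+ * changed, write the target address, wait for the flag bit to be set and
+ * then read the data register.  The whole sequence runs under the utils
+ * lock.
+ */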
+mlx_status
+mlx_pci_gw_read(
+ IN mlx_utils *utils,
+ IN mlx_pci_gw_space space,
+ IN mlx_uint32 address,
+ OUT mlx_pci_gw_buffer *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_pci_gw *pci_gw = NULL;
+ mlx_uint32 cap_offset = 0;
+
+ if (utils == NULL || buffer == NULL || utils->pci_gw.pci_cmd_offset == 0) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_utils_acquire_lock(utils);
+
+ pci_gw = &utils->pci_gw;
+ cap_offset = pci_gw->pci_cmd_offset;
+
+#if ! defined ( VSEC_DEBUG )
+ if (pci_gw->space != space) {
+ status = mlx_pci_gw_set_space(utils, space);
+ MLX_CHECK_STATUS(utils, status, space_error,"failed to set space");
+ pci_gw->space = space;
+ }
+#else
+ status = mlx_pci_gw_get_ownership(utils);
+ MLX_CHECK_STATUS(utils, status, ownership_err,"failed to get ownership");
+
+ status = mlx_pci_gw_set_space(utils, space);
+ MLX_CHECK_STATUS(utils, status, space_error,"failed to set space");
+ pci_gw->space = space;
+#endif
+
+ status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_ADDRESS_OFFSET, 1, &address);
+ MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability address");
+
+#if defined ( DEVICE_CX3 )
+ /* WA for PCI issue (race) */
+ mlx_utils_delay_in_us ( 10 );
+#endif
+
+ status = mlx_pci_gw_wait_for_flag_value(utils, TRUE);
+ MLX_CHECK_STATUS(utils, status, read_error, "flag failed to change");
+
+ status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_DATA_OFFSET, 1, buffer);
+ MLX_CHECK_STATUS(utils, status, read_error,"failed to read capability data");
+
+#if defined ( VSEC_DEBUG )
+ status = mlx_pci_gw_free_ownership(utils);
+ MLX_CHECK_STATUS(utils, status, free_err,
+ "mlx_pci_gw_free_ownership failed");
+free_err:
+ mlx_utils_release_lock(utils);
+ return status;
+#endif
+read_error:
+space_error:
+#if defined ( VSEC_DEBUG )
+ mlx_pci_gw_free_ownership(utils);
+ownership_err:
+#endif
+ mlx_utils_release_lock(utils);
+bad_param:
+ return status;
+}
+
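+/*
+ * Write one dword through the gateway: write the data register, write the
+ * address with the write flag set, then wait for the flag bit to clear.
+ * The whole sequence runs under the utils lock.
+ */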
+mlx_status
+mlx_pci_gw_write(
+ IN mlx_utils *utils,
+ IN mlx_pci_gw_space space,
+ IN mlx_uint32 address,
+ IN mlx_pci_gw_buffer buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_pci_gw *pci_gw = NULL;
+ mlx_uint32 cap_offset = 0;
+ mlx_uint32 fixed_address = address | PCI_GW_WRITE_FLAG;
+
+ if (utils == NULL || utils->pci_gw.pci_cmd_offset == 0) {
+ status = MLX_INVALID_PARAMETER;
+ goto bad_param;
+ }
+
+ mlx_utils_acquire_lock(utils);
+
+ pci_gw = &utils->pci_gw;
+ cap_offset = pci_gw->pci_cmd_offset;
+
+#if ! defined ( VSEC_DEBUG )
+ if (pci_gw->space != space) {
+ status = mlx_pci_gw_set_space(utils, space);
+ MLX_CHECK_STATUS(utils, status, space_error,"failed to set space");
+ pci_gw->space = space;
+ }
+#else
+ status = mlx_pci_gw_get_ownership(utils);
+ MLX_CHECK_STATUS(utils, status, ownership_err,"failed to get ownership");
+
+ status = mlx_pci_gw_set_space(utils, space);
+ MLX_CHECK_STATUS(utils, status, space_error,"failed to set space");
+ pci_gw->space = space;
+#endif
+ status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_DATA_OFFSET, 1, &buffer);
+ MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability data");
+
+ status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_ADDRESS_OFFSET, 1, &fixed_address);
+ MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability address");
+
+ status = mlx_pci_gw_wait_for_flag_value(utils, FALSE);
+ MLX_CHECK_STATUS(utils, status, read_error, "flag failed to change");
+#if defined ( VSEC_DEBUG )
+ status = mlx_pci_gw_free_ownership(utils);
+ MLX_CHECK_STATUS(utils, status, free_err,
+ "mlx_pci_gw_free_ownership failed");
+free_err:
+ mlx_utils_release_lock(utils);
+ return status;
+#endif
+read_error:
+space_error:
+#if defined ( VSEC_DEBUG )
+ mlx_pci_gw_free_ownership(utils);
+ownership_err:
+#endif
+ mlx_utils_release_lock(utils);
+bad_param:
+ return status;
+}
+
+
+
diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_utils.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_utils.c
new file mode 100644
index 00000000..c824b17e
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_utils.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include <stddef.h>
+#include "../../include/private/mlx_utils_priv.h"
+#include "../../include/public/mlx_pci.h"
+#include "../../include/public/mlx_utils.h"
+
+mlx_status
+mlx_utils_init(
+ IN mlx_utils *utils,
+ IN mlx_pci *pci
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if( pci == NULL || utils == NULL ){
+ status = MLX_INVALID_PARAMETER;
+ goto bail;
+ }
+ utils->pci = pci;
+ status = mlx_pci_init(utils);
+ if( status != MLX_SUCCESS ){
+ goto bail;
+ }
+ status = mlx_utils_init_lock(utils);
+bail:
+ return status;
+}
+
+mlx_status
+mlx_utils_teardown(
+ IN mlx_utils *utils __attribute__ ((unused))
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_utils_free_lock(utils);
+ return status;
+}
+
+mlx_status
+mlx_utils_delay_in_ms(
+ IN mlx_uint32 msecs
+ )
+{
+ mlx_utils_delay_in_ms_priv(msecs);
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_delay_in_us(
+ IN mlx_uint32 usecs
+ )
+{
+ mlx_utils_delay_in_us_priv(usecs);
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_ilog2(
+ IN mlx_uint32 i,
+ OUT mlx_uint32 *log
+ )
+{
+ mlx_utils_ilog2_priv(i, log);
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_init_lock(
+ IN OUT mlx_utils *utils
+ )
+{
+ return mlx_utils_init_lock_priv(&(utils->lock));
+
+}
+
+mlx_status
+mlx_utils_free_lock(
+ IN OUT mlx_utils *utils
+ )
+{
+ return mlx_utils_free_lock_priv(utils->lock);
+}
+
+mlx_status
+mlx_utils_acquire_lock (
+ IN OUT mlx_utils *utils
+ )
+{
+ return mlx_utils_acquire_lock_priv(utils->lock);
+}
+
+mlx_status
+mlx_utils_release_lock (
+ IN OUT mlx_utils *utils
+ )
+{
+ return mlx_utils_release_lock_priv(utils->lock);
+}
+
+mlx_status
+mlx_utils_rand (
+ IN mlx_utils *utils,
+ OUT mlx_uint32 *rand_num
+ )
+{
+ return mlx_utils_rand_priv(utils, rand_num);
+}
diff --git a/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_logging_priv.h b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_logging_priv.h
new file mode 100644
index 00000000..af7e86f4
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_logging_priv.h
@@ -0,0 +1,61 @@
+/*
+ * DebugPriv.h
+ *
+ * Created on: Jan 19, 2015
+ * Author: maord
+ */
+
+#ifndef STUB_MLXUTILS_INCLUDE_PRIVATE_FLEXBOOT_DEBUG_H_
+#define STUB_MLXUTILS_INCLUDE_PRIVATE_FLEXBOOT_DEBUG_H_
+
+#include <stdio.h>
+#include <compiler.h>
+
+#define MLX_DEBUG_FATAL_ERROR_PRIVATE(...) do { \
+ DBG("%s: ",__func__); \
+ DBG(__VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DEBUG_ERROR_PRIVATE(id, ...) do { \
+ DBGC(id, "%s: ",__func__); \
+ DBGC(id, __VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DEBUG_WARN_PRIVATE(id, ...) do { \
+ DBGC(id, "%s: ",__func__); \
+ DBGC(id, __VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DEBUG_INFO1_PRIVATE(id, ...) do { \
+ DBGC(id, "%s: ",__func__); \
+ DBGC(id, __VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DEBUG_INFO2_PRIVATE(id, ...) do { \
+ DBGC2(id, "%s: ",__func__); \
+ DBGC2(id, __VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DBG_ERROR_PRIVATE(...) do { \
+ DBG("%s: ",__func__); \
+ DBG(__VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DBG_WARN_PRIVATE(...) do { \
+ DBG("%s: ",__func__); \
+ DBG(__VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DBG_INFO1_PRIVATE(...) do { \
+ DBG("%s: ",__func__); \
+ DBG(__VA_ARGS__); \
+ } while ( 0 )
+
+#define MLX_DBG_INFO2_PRIVATE(...) do { \
+ DBG2("%s: ",__func__); \
+ DBG2(__VA_ARGS__); \
+ } while ( 0 )
+
+
+
+#endif /* STUB_MLXUTILS_INCLUDE_PRIVATE_FLEXBOOT_DEBUG_H_ */
diff --git a/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_types_priv.h b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_types_priv.h
new file mode 100644
index 00000000..feaeae67
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_types_priv.h
@@ -0,0 +1,60 @@
+/*
+ * types.h
+ *
+ * Created on: Jan 18, 2015
+ * Author: maord
+ */
+
+#ifndef A_MLXUTILS_INCLUDE_PUBLIC_TYPES_H_
+#define A_MLXUTILS_INCLUDE_PUBLIC_TYPES_H_
+#include <stdint.h>
+//#include <errno.h>
+#include <ipxe/pci.h>
+
+#define MLX_SUCCESS 0
+#define MLX_OUT_OF_RESOURCES (-1)
+//(-ENOMEM)
+#define MLX_INVALID_PARAMETER (-2)
+//(-EINVAL)
+#define MLX_UNSUPPORTED (-3)
+//(-ENOSYS)
+#define MLX_NOT_FOUND (-4)
+
+#define MLX_FAILED (-5)
+
+#undef TRUE
+#define TRUE 1
+#undef FALSE
+#define FALSE !TRUE
+
+typedef int mlx_status;
+
+typedef uint8_t mlx_uint8;
+typedef uint16_t mlx_uint16;
+typedef uint32_t mlx_uint32;
+typedef uint64_t mlx_uint64;
+typedef uint32_t mlx_uintn;
+
+typedef int8_t mlx_int8;
+typedef int16_t mlx_int16;
+typedef int32_t mlx_int32;
+typedef int64_t mlx_int64;
+typedef uint8_t mlx_boolean;
+
+typedef struct pci_device mlx_pci;
+
+typedef size_t mlx_size;
+
+typedef void mlx_void;
+
+#define MAC_ADDR_LEN 6
+typedef unsigned long mlx_physical_address;
+typedef union {
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } __attribute__ (( packed ));
+ uint8_t addr[MAC_ADDR_LEN];
+} mlx_mac_address;
+
+#endif /* A_MLXUTILS_INCLUDE_PUBLIC_TYPES_H_ */
diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c
new file mode 100644
index 00000000..cb9e759b
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c
@@ -0,0 +1,172 @@
+/*
+ * MemoryPriv.c
+ *
+ * Created on: Jan 21, 2015
+ * Author: maord
+ */
+
+#include <ipxe/malloc.h>
+#include <stddef.h>
+#include <byteswap.h>
+#include <ipxe/io.h>
+#include "../../mlx_utils/include/private/mlx_memory_priv.h"
+
+
+mlx_status
+mlx_memory_alloc_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ *ptr = malloc(size);
+ if(*ptr == NULL){
+ status = MLX_OUT_OF_RESOURCES;
+ }
+ return status;
+}
+
+mlx_status
+mlx_memory_zalloc_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_size size,
+ OUT mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ *ptr = zalloc(size);
+ if(*ptr == NULL){
+ status = MLX_OUT_OF_RESOURCES;
+ }
+ return status;
+}
+
+mlx_status
+mlx_memory_free_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_void *ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ free(ptr);
+ return status;
+}
+
+mlx_status
+mlx_memory_alloc_dma_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_size size ,
+ IN mlx_size align,
+ OUT mlx_void **ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ *ptr = malloc_dma(size, align);
+ if (*ptr == NULL) {
+ status = MLX_OUT_OF_RESOURCES;
+ } else {
+ memset(*ptr, 0, size);
+ }
+ return status;
+}
+
+mlx_status
+mlx_memory_free_dma_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_size size ,
+ IN mlx_void *ptr
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ free_dma(ptr, size);
+ return status;
+}
+
+mlx_status
+mlx_memory_map_dma_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_void *addr ,
+ IN mlx_size number_of_bytes __attribute__ ((unused)),
+ OUT mlx_physical_address *phys_addr,
+ OUT mlx_void **mapping __attribute__ ((unused))
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ *phys_addr = virt_to_bus(addr);
+ return status;
+}
+
+mlx_status
+mlx_memory_ummap_dma_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_void *mapping __attribute__ ((unused))
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ return status;
+}
+
+mlx_status
+mlx_memory_cmp_priv(
+ IN mlx_utils *utils __unused,
+ IN mlx_void *first_block,
+ IN mlx_void *second_block,
+ IN mlx_size size,
+ OUT mlx_uint32 *out
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ *out = memcmp(first_block, second_block, size);
+ return status;
+}
+
+mlx_status
+mlx_memory_set_priv(
+ IN mlx_utils *utils __unused,
+ IN mlx_void *block,
+ IN mlx_int32 value,
+ IN mlx_size size
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ memset(block, value, size);
+ return status;
+}
+
+mlx_status
+mlx_memory_cpy_priv(
+ IN mlx_utils *utils __unused,
+ OUT mlx_void *destination_buffer,
+ IN mlx_void *source_buffer,
+ IN mlx_size length
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ memcpy(destination_buffer, source_buffer, length);
+ return status;
+}
+
+mlx_status
+mlx_memory_cpu_to_be32_priv(
+ IN mlx_utils *utils __unused,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ *destination = cpu_to_be32(source);
+ return status;
+}
+
+
+mlx_status
+mlx_memory_be32_to_cpu_priv(
+ IN mlx_utils *utils __unused,
+ IN mlx_uint32 source,
+ IN mlx_uint32 *destination
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ *destination = be32_to_cpu(source);
+ return status;
+}
+
diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c
new file mode 100644
index 00000000..f8caefdc
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c
@@ -0,0 +1,182 @@
+/*
+ * MlxPciPriv.c
+ *
+ * Created on: Jan 21, 2015
+ * Author: maord
+ */
+
+#include <ipxe/pci.h>
+#include "../../mlx_utils/include/private/mlx_pci_priv.h"
+
+
+static
+mlx_status
+mlx_pci_config_byte(
+ IN mlx_utils *utils,
+ IN mlx_boolean read,
+ IN mlx_uint32 offset,
+ IN OUT mlx_uint8 *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if (read) {
+ status = pci_read_config_byte(utils->pci, offset, buffer);
+ }else {
+ status = pci_write_config_byte(utils->pci, offset, *buffer);
+ }
+ return status;
+}
+
+static
+mlx_status
+mlx_pci_config_word(
+ IN mlx_utils *utils,
+ IN mlx_boolean read,
+ IN mlx_uint32 offset,
+ IN OUT mlx_uint16 *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if (read) {
+ status = pci_read_config_word(utils->pci, offset, buffer);
+ }else {
+ status = pci_write_config_word(utils->pci, offset, *buffer);
+ }
+ return status;
+}
+
+static
+mlx_status
+mlx_pci_config_dword(
+ IN mlx_utils *utils,
+ IN mlx_boolean read,
+ IN mlx_uint32 offset,
+ IN OUT mlx_uint32 *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ if (read) {
+ status = pci_read_config_dword(utils->pci, offset, buffer);
+ }else {
+ status = pci_write_config_dword(utils->pci, offset, *buffer);
+ }
+ return status;
+}
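+
+/*
+ * Dispatch a PCI configuration space access of the requested width, one
+ * element at a time; 64-bit accesses are split into two 32-bit accesses.
+ */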
+static
+mlx_status
+mlx_pci_config(
+ IN mlx_utils *utils,
+ IN mlx_boolean read,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ IN OUT mlx_void *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ mlx_uint8 *tmp = (mlx_uint8*)buffer;
+ mlx_uintn iteration = 0;
+ if( width == MlxPciWidthUint64) {
+ width = MlxPciWidthUint32;
+ count = count * 2;
+ }
+
+ for(;iteration < count ; iteration++) {
+ switch (width){
+ case MlxPciWidthUint8:
+ status = mlx_pci_config_byte(utils, read , offset++, tmp++);
+ break;
+ case MlxPciWidthUint16:
+ status = mlx_pci_config_word(utils, read , offset, (mlx_uint16*)tmp);
+ tmp += 2;
+ offset += 2;
+ break;
+ case MlxPciWidthUint32:
+ status = mlx_pci_config_dword(utils, read , offset, (mlx_uint32*)tmp);
+ tmp += 4;
+ offset += 4;
+ break;
+ default:
+ status = MLX_INVALID_PARAMETER;
+ }
+ if(status != MLX_SUCCESS) {
+ goto config_error;
+ }
+ }
+config_error:
+ return status;
+}
+mlx_status
+mlx_pci_init_priv(
+ IN mlx_utils *utils
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ adjust_pci_device ( utils->pci );
+#ifdef DEVICE_CX3
+ utils->config = ioremap ( pci_bar_start ( utils->pci, PCI_BASE_ADDRESS_0),
+ 0x100000 );
+#endif
+ return status;
+}
+
+mlx_status
+mlx_pci_read_priv(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ OUT mlx_void *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ status = mlx_pci_config(utils, TRUE, width, offset, count, buffer);
+ return status;
+}
+
+mlx_status
+mlx_pci_write_priv(
+ IN mlx_utils *utils,
+ IN mlx_pci_width width,
+ IN mlx_uint32 offset,
+ IN mlx_uintn count,
+ IN mlx_void *buffer
+ )
+{
+ mlx_status status = MLX_SUCCESS;
+ status = mlx_pci_config(utils, FALSE, width, offset, count, buffer);
+ return status;
+}
+
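+/*
+ * BAR memory accesses map directly onto iPXE's readl()/writel(); this
+ * backend supports only 32-bit accesses and ignores bar_index and count.
+ */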
+mlx_status
+mlx_pci_mem_read_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_pci_width width __attribute__ ((unused)),
+ IN mlx_uint8 bar_index __attribute__ ((unused)),
+ IN mlx_uint64 offset,
+ IN mlx_uintn count __attribute__ ((unused)),
+ OUT mlx_void *buffer
+ )
+{
+ if (buffer == NULL || width != MlxPciWidthUint32)
+ return MLX_INVALID_PARAMETER;
+ *((mlx_uint32 *)buffer) = readl(offset);
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_pci_mem_write_priv(
+ IN mlx_utils *utils __attribute__ ((unused)),
+ IN mlx_pci_width width __attribute__ ((unused)),
+ IN mlx_uint8 bar_index __attribute__ ((unused)),
+ IN mlx_uint64 offset,
+ IN mlx_uintn count __attribute__ ((unused)),
+ IN mlx_void *buffer
+ )
+{
+ if (buffer == NULL || width != MlxPciWidthUint32)
+ return MLX_INVALID_PARAMETER;
+ barrier();
+ writel(*((mlx_uint32 *)buffer), offset);
+ return MLX_SUCCESS;
+}
diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_utils_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_utils_priv.c
new file mode 100644
index 00000000..5fca406f
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_utils_priv.c
@@ -0,0 +1,83 @@
+/*
+ * MlxUtilsPriv.c
+ *
+ * Created on: Jan 25, 2015
+ * Author: maord
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <strings.h>
+#include "../../mlx_utils/include/private/mlx_utils_priv.h"
+
+mlx_status
+mlx_utils_delay_in_ms_priv(
+ IN mlx_uint32 msecs
+ )
+{
+ mdelay(msecs);
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_delay_in_us_priv(
+ IN mlx_uint32 usecs
+ )
+{
+ udelay(usecs);
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_ilog2_priv(
+ IN mlx_uint32 i,
+ OUT mlx_uint32 *log
+ )
+{
+ *log = ( fls ( i ) - 1 );
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_init_lock_priv(
+ OUT void **lock __unused
+ )
+{
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_free_lock_priv(
+ IN void *lock __unused
+ )
+{
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_acquire_lock_priv (
+ IN void *lock __unused
+ )
+{
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_release_lock_priv (
+ IN void *lock __unused
+ )
+{
+ return MLX_SUCCESS;
+}
+
+mlx_status
+mlx_utils_rand_priv (
+ IN mlx_utils *utils __unused,
+ OUT mlx_uint32 *rand_num
+ )
+{
+ do {
+ *rand_num = rand();
+ } while ( *rand_num == 0 );
+ return MLX_SUCCESS;
+}
diff --git a/src/drivers/infiniband/nodnic_prm.h b/src/drivers/infiniband/nodnic_prm.h
new file mode 100644
index 00000000..5e0fa989
--- /dev/null
+++ b/src/drivers/infiniband/nodnic_prm.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#ifndef SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_PRM_H_
+#define SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_PRM_H_
+
+#include "mlx_bitops.h"
+
+struct nodnic_wqe_segment_data_ptr_st { /* Little Endian */
+ pseudo_bit_t byte_count[0x0001f];
+ pseudo_bit_t always0[0x00001];
+/* -------------- */
+ pseudo_bit_t l_key[0x00020];
+/* -------------- */
+ pseudo_bit_t local_address_h[0x00020];
+/* -------------- */
+ pseudo_bit_t local_address_l[0x00020];
+/* -------------- */
+};
+
+struct MLX_DECLARE_STRUCT ( nodnic_wqe_segment_data_ptr );
+
+#define HERMON_MAX_SCATTER 1
+
+struct nodnic_recv_wqe {
+ struct nodnic_wqe_segment_data_ptr data[HERMON_MAX_SCATTER];
+} __attribute__ (( packed ));
+
+#endif /* SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_PRM_H_ */
diff --git a/src/drivers/infiniband/nodnic_shomron_prm.h b/src/drivers/infiniband/nodnic_shomron_prm.h
new file mode 100644
index 00000000..85cd9718
--- /dev/null
+++ b/src/drivers/infiniband/nodnic_shomron_prm.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#ifndef SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_SHOMRON_PRM_H_
+#define SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_SHOMRON_PRM_H_
+
+
+
+#include "nodnic_prm.h"
+
+
+#define SHOMRON_MAX_GATHER 1
+
+/* Send wqe segment ctrl */
+
+struct shomronprm_wqe_segment_ctrl_send_st { /* Little Endian */
+ pseudo_bit_t opcode[0x00008];
+ pseudo_bit_t wqe_index[0x00010];
+ pseudo_bit_t reserved1[0x00008];
+/* -------------- */
+ pseudo_bit_t ds[0x00006]; /* descriptor (wqe) size in 16bytes chunk */
+ pseudo_bit_t reserved2[0x00002];
+ pseudo_bit_t qpn[0x00018];
+/* -------------- */
+ pseudo_bit_t reserved3[0x00002];
+ pseudo_bit_t ce[0x00002];
+ pseudo_bit_t reserved4[0x0001c];
+/* -------------- */
+ pseudo_bit_t reserved5[0x00040];
+/* -------------- */
+ pseudo_bit_t mss[0x0000e];
+ pseudo_bit_t reserved6[0x0000e];
+ pseudo_bit_t cs13_inner[0x00001];
+ pseudo_bit_t cs14_inner[0x00001];
+ pseudo_bit_t cs13[0x00001];
+ pseudo_bit_t cs14[0x00001];
+/* -------------- */
+ pseudo_bit_t reserved7[0x00020];
+/* -------------- */
+ pseudo_bit_t inline_headers1[0x00010];
+ pseudo_bit_t inline_headers_size[0x0000a]; //sum size of inline_hdr1+inline_hdrs (0x10)
+ pseudo_bit_t reserved8[0x00006];
+/* -------------- */
+ pseudo_bit_t inline_headers2[0x00020];
+/* -------------- */
+ pseudo_bit_t inline_headers3[0x00020];
+/* -------------- */
+ pseudo_bit_t inline_headers4[0x00020];
+/* -------------- */
+ pseudo_bit_t inline_headers5[0x00020];
+};
+
+
+
+/* Completion Queue Entry Format #### michal - fixed by gdror */
+
+struct shomronprm_completion_queue_entry_st { /* Little Endian */
+
+ pseudo_bit_t reserved1[0x00080];
+/* -------------- */
+ pseudo_bit_t reserved2[0x00010];
+ pseudo_bit_t ml_path[0x00007];
+ pseudo_bit_t reserved3[0x00009];
+/* -------------- */
+ pseudo_bit_t slid[0x00010];
+ pseudo_bit_t reserved4[0x00010];
+/* -------------- */
+ pseudo_bit_t rqpn[0x00018];
+ pseudo_bit_t sl[0x00004];
+ pseudo_bit_t l3_hdr[0x00002];
+ pseudo_bit_t reserved5[0x00002];
+/* -------------- */
+ pseudo_bit_t reserved10[0x00020];
+/* -------------- */
+ pseudo_bit_t srqn[0x00018];
+ pseudo_bit_t reserved11[0x00008];
+/* -------------- */
+ pseudo_bit_t pkey_index[0x00020];
+/* -------------- */
+ pseudo_bit_t reserved6[0x00020];
+/* -------------- */
+ pseudo_bit_t byte_cnt[0x00020];
+/* -------------- */
+ pseudo_bit_t reserved7[0x00040];
+/* -------------- */
+ pseudo_bit_t qpn[0x00018];
+ pseudo_bit_t rx_drop_counter[0x00008];
+/* -------------- */
+ pseudo_bit_t owner[0x00001];
+ pseudo_bit_t reserved8[0x00003];
+ pseudo_bit_t opcode[0x00004];
+ pseudo_bit_t reserved9[0x00008];
+ pseudo_bit_t wqe_counter[0x00010];
+};
+
+
+/* Completion with Error CQE #### michal - gdror fixed */
+
+struct shomronprm_completion_with_error_st { /* Little Endian */
+ pseudo_bit_t reserved1[0x001a0];
+ /* -------------- */
+ pseudo_bit_t syndrome[0x00008];
+ pseudo_bit_t vendor_error_syndrome[0x00008];
+ pseudo_bit_t reserved2[0x00010];
+ /* -------------- */
+ pseudo_bit_t reserved3[0x00040];
+};
+
+
+struct MLX_DECLARE_STRUCT ( shomronprm_wqe_segment_ctrl_send );
+struct MLX_DECLARE_STRUCT ( shomronprm_completion_queue_entry );
+struct MLX_DECLARE_STRUCT ( shomronprm_completion_with_error );
+
+struct shomron_nodnic_eth_send_wqe {
+ struct shomronprm_wqe_segment_ctrl_send ctrl;
+ struct nodnic_wqe_segment_data_ptr data[SHOMRON_MAX_GATHER];
+} __attribute__ (( packed ));
+
+union shomronprm_completion_entry {
+ struct shomronprm_completion_queue_entry normal;
+ struct shomronprm_completion_with_error error;
+} __attribute__ (( packed ));
+
+
+#endif /* SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_SHOMRON_PRM_H_ */
diff --git a/src/include/ipxe/errfile.h b/src/include/ipxe/errfile.h
index 63893157..338ebddc 100644
--- a/src/include/ipxe/errfile.h
+++ b/src/include/ipxe/errfile.h
@@ -186,6 +186,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#define ERRFILE_smsc95xx ( ERRFILE_DRIVER | 0x007a0000 )
#define ERRFILE_acm ( ERRFILE_DRIVER | 0x007b0000 )
#define ERRFILE_eoib ( ERRFILE_DRIVER | 0x007c0000 )
+#define ERRFILE_golan ( ERRFILE_DRIVER | 0x007d0000 )
+#define ERRFILE_flexboot_nodnic ( ERRFILE_DRIVER | 0x007e0000 )
#define ERRFILE_aoe ( ERRFILE_NET | 0x00000000 )
#define ERRFILE_arp ( ERRFILE_NET | 0x00010000 )