path: root/src/drivers
author     Michael Brown    2008-04-22 02:37:00 +0200
committer  Michael Brown    2008-04-22 02:37:00 +0200
commit     c9fb012d4fdbd397cef65a8dbf5bb9759228588a (patch)
tree       108011c1902f4a2370c882ac770be7a085f745c4 /src/drivers
parent     [Infiniband] Move event-queue process from driver to Infiniband core (diff)
[Infiniband] Add multiport support for Arbel cards
Diffstat (limited to 'src/drivers')
-rw-r--r--  src/drivers/infiniband/arbel.c  | 251
-rw-r--r--  src/drivers/infiniband/arbel.h  |  66
-rw-r--r--  src/drivers/infiniband/hermon.h |   5
3 files changed, 304 insertions(+), 18 deletions(-)
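The functional core of this patch is the new event queue: port-state-change events from either port are delivered through it by arbel_poll_eq(), which walks a small power-of-two ring of entries guarded by an ownership bit. The stand-alone sketch below is an editorial illustration, not code from this commit; the fake_eqe type, hw_post_event() and poll_eq() are invented stand-ins that model the same ownership-bit polling scheme in plain C.

#include <stdio.h>

#define NUM_EQES 4			/* ring size; must be a power of two */

struct fake_eqe {
	unsigned int owner;		/* 1 = owned by hardware */
	unsigned int event_type;
};

static struct fake_eqe ring[NUM_EQES];
static unsigned long next_idx;		/* free-running consumer index */
static unsigned long doorbell;		/* stand-in for the EQ consumer doorbell */

/* "Hardware" side: post one event into the ring */
static void hw_post_event ( unsigned long prod_idx, unsigned int type ) {
	struct fake_eqe *eqe = &ring[prod_idx & ( NUM_EQES - 1 )];

	eqe->event_type = type;
	eqe->owner = 0;			/* hand the entry to software */
}

/* Software side: same shape as the arbel_poll_eq() loop in the diff */
static void poll_eq ( void ) {
	struct fake_eqe *eqe;

	while ( 1 ) {
		eqe = &ring[next_idx & ( NUM_EQES - 1 )];
		if ( eqe->owner != 0 )
			break;		/* entry still owned by hardware; stop */
		printf ( "event %#x at index %lu\n",
			 eqe->event_type, next_idx );
		eqe->owner = 1;		/* return ownership to hardware */
		next_idx++;
		doorbell = next_idx;	/* "ring" the consumer index doorbell */
	}
}

int main ( void ) {
	unsigned int i;

	/* Ring starts fully hardware-owned, as arbel_create_eq() sets it up */
	for ( i = 0 ; i < NUM_EQES ; i++ )
		ring[i].owner = 1;

	hw_post_event ( 0, 0x09 );	/* e.g. ARBEL_EV_PORT_STATE_CHANGE */
	hw_post_event ( 1, 0x09 );
	poll_eq ();
	printf ( "consumer index %lu, doorbell %lu\n", next_idx, doorbell );
	return 0;
}

As in arbel_create_eq(), the ring starts out fully owned by hardware, and the consumer index is free-running: masking with ( NUM_EQES - 1 ) wraps it into the ring, which is why the ring size must be a power of two.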
diff --git a/src/drivers/infiniband/arbel.c b/src/drivers/infiniband/arbel.c
index 2409e285..d66ce12e 100644
--- a/src/drivers/infiniband/arbel.c
+++ b/src/drivers/infiniband/arbel.c
@@ -276,19 +276,30 @@ arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
}
static inline int
+arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
+ const struct arbelprm_event_mask *mask ) {
+ return arbel_cmd ( arbel,
+ ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
+ 0, sizeof ( *mask ) ),
+ 0, mask, index_map, NULL );
+}
+
+static inline int
arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
- const struct arbelprm_eqc *eqc ) {
+ const struct arbelprm_eqc *eqctx ) {
return arbel_cmd ( arbel,
ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
- 1, sizeof ( *eqc ) ),
- 0, eqc, index, NULL );
+ 1, sizeof ( *eqctx ) ),
+ 0, eqctx, index, NULL );
}
static inline int
-arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index ) {
+arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
+ struct arbelprm_eqc *eqctx ) {
return arbel_cmd ( arbel,
- ARBEL_HCR_VOID_CMD ( ARBEL_HCR_HW2SW_EQ ),
- 1, NULL, index, NULL );
+ ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
+ 1, sizeof ( *eqctx ) ),
+ 1, NULL, index, eqctx );
}
static inline int
@@ -337,6 +348,15 @@ arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
}
static inline int
+arbel_cmd_rts2rts_qp ( struct arbel *arbel, unsigned long qpn,
+ const struct arbelprm_qp_ee_state_transitions *ctx ) {
+ return arbel_cmd ( arbel,
+ ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
+ 1, sizeof ( *ctx ) ),
+ 0, ctx, qpn, NULL );
+}
+
+static inline int
arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
return arbel_cmd ( arbel,
ARBEL_HCR_VOID_CMD ( ARBEL_HCR_2RST_QPEE ),
@@ -847,13 +867,25 @@ static int arbel_modify_qp ( struct ib_device *ibdev,
struct ib_queue_pair *qp,
unsigned long mod_list ) {
struct arbel *arbel = ib_get_drvdata ( ibdev );
+ struct arbelprm_qp_ee_state_transitions qpctx;
+ unsigned long optparammask = 0;
+ int rc;
- /* TODO */
- ( void ) arbel;
- ( void ) qp;
- ( void ) mod_list;
+ /* Construct optparammask */
+ if ( mod_list & IB_MODIFY_QKEY )
+ optparammask |= ARBEL_QPEE_OPT_PARAM_QKEY;
- return -ENOTSUP;
+ /* Issue RTS2RTS_QP */
+ memset ( &qpctx, 0, sizeof ( qpctx ) );
+ MLX_FILL_1 ( &qpctx, 0, opt_param_mask, optparammask );
+ MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
+ if ( ( rc = arbel_cmd_rts2rts_qp ( arbel, qp->qpn, &qpctx ) ) != 0 ){
+ DBGC ( arbel, "Arbel %p RTS2RTS_QP failed: %s\n",
+ arbel, strerror ( rc ) );
+ return rc;
+ }
+
+ return 0;
}
/**
@@ -1231,15 +1263,193 @@ static void arbel_poll_cq ( struct ib_device *ibdev,
*/
/**
+ * Create event queue
+ *
+ * @v arbel Arbel device
+ * @ret rc Return status code
+ */
+static int arbel_create_eq ( struct arbel *arbel ) {
+ struct arbel_event_queue *arbel_eq = &arbel->eq;
+ struct arbelprm_eqc eqctx;
+ struct arbelprm_event_mask mask;
+ unsigned int i;
+ int rc;
+
+ /* Select event queue number */
+ arbel_eq->eqn = arbel->limits.reserved_eqs;
+
+ /* Calculate doorbell address */
+ arbel_eq->doorbell = ( arbel->eq_ci_doorbells +
+ ARBEL_DB_EQ_OFFSET ( arbel_eq->eqn ) );
+
+ /* Allocate event queue itself */
+ arbel_eq->eqe_size =
+ ( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
+ arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
+ sizeof ( arbel_eq->eqe[0] ) );
+ if ( ! arbel_eq->eqe ) {
+ rc = -ENOMEM;
+ goto err_eqe;
+ }
+ memset ( arbel_eq->eqe, 0, arbel_eq->eqe_size );
+ for ( i = 0 ; i < ARBEL_NUM_EQES ; i++ ) {
+ MLX_FILL_1 ( &arbel_eq->eqe[i].generic, 7, owner, 1 );
+ }
+ barrier();
+
+ /* Hand queue over to hardware */
+ memset ( &eqctx, 0, sizeof ( eqctx ) );
+ MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
+ MLX_FILL_1 ( &eqctx, 2,
+ start_address_l, virt_to_phys ( arbel_eq->eqe ) );
+ MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( ARBEL_NUM_EQES - 1 ) );
+ MLX_FILL_1 ( &eqctx, 6, pd, ARBEL_GLOBAL_PD );
+ MLX_FILL_1 ( &eqctx, 7, lkey, arbel->reserved_lkey );
+ if ( ( rc = arbel_cmd_sw2hw_eq ( arbel, arbel_eq->eqn,
+ &eqctx ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p SW2HW_EQ failed: %s\n",
+ arbel, strerror ( rc ) );
+ goto err_sw2hw_eq;
+ }
+
+ /* Map events to this event queue */
+ memset ( &mask, 0, sizeof ( mask ) );
+ MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+ if ( ( rc = arbel_cmd_map_eq ( arbel,
+ ( ARBEL_MAP_EQ | arbel_eq->eqn ),
+ &mask ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p MAP_EQ failed: %s\n",
+ arbel, strerror ( rc ) );
+ goto err_map_eq;
+ }
+
+ DBGC ( arbel, "Arbel %p EQN %#lx ring at [%p,%p])\n",
+ arbel, arbel_eq->eqn, arbel_eq->eqe,
+ ( ( ( void * ) arbel_eq->eqe ) + arbel_eq->eqe_size ) );
+ return 0;
+
+ err_map_eq:
+ arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
+ err_sw2hw_eq:
+ free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+ err_eqe:
+ memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
+ return rc;
+}
+
+/**
+ * Destroy event queue
+ *
+ * @v arbel Arbel device
+ */
+static void arbel_destroy_eq ( struct arbel *arbel ) {
+ struct arbel_event_queue *arbel_eq = &arbel->eq;
+ struct arbelprm_eqc eqctx;
+ struct arbelprm_event_mask mask;
+ int rc;
+
+ /* Unmap events from event queue */
+ memset ( &mask, 0, sizeof ( mask ) );
+ MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+ if ( ( rc = arbel_cmd_map_eq ( arbel,
+ ( ARBEL_UNMAP_EQ | arbel_eq->eqn ),
+ &mask ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p FATAL MAP_EQ failed to unmap: %s\n",
+ arbel, strerror ( rc ) );
+ /* Continue; HCA may die but system should survive */
+ }
+
+ /* Take ownership back from hardware */
+ if ( ( rc = arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn,
+ &eqctx ) ) != 0 ) {
+ DBGC ( arbel, "Arbel %p FATAL HW2SW_EQ failed: %s\n",
+ arbel, strerror ( rc ) );
+ /* Leak memory and return; at least we avoid corruption */
+ return;
+ }
+
+ /* Free memory */
+ free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+ memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
+}
+
+/**
+ * Handle port state event
+ *
+ * @v arbel Arbel device
+ * @v eqe Port state change event queue entry
+ */
+static void arbel_event_port_state_change ( struct arbel *arbel,
+ union arbelprm_event_entry *eqe){
+ unsigned int port;
+ int link_up;
+
+ /* Get port and link status */
+ port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
+ link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
+ DBGC ( arbel, "Arbel %p port %d link %s\n", arbel, ( port + 1 ),
+ ( link_up ? "up" : "down" ) );
+
+ /* Sanity check */
+ if ( port >= ARBEL_NUM_PORTS ) {
+ DBGC ( arbel, "Arbel %p port %d does not exist!\n",
+ arbel, ( port + 1 ) );
+ return;
+ }
+
+ /* Notify Infiniband core of link state change */
+ ib_link_state_changed ( arbel->ibdev[port] );
+}
+
+/**
* Poll event queue
*
* @v ibdev Infiniband device
*/
static void arbel_poll_eq ( struct ib_device *ibdev ) {
struct arbel *arbel = ib_get_drvdata ( ibdev );
+ struct arbel_event_queue *arbel_eq = &arbel->eq;
+ union arbelprm_event_entry *eqe;
+ unsigned int eqe_idx_mask;
+ unsigned int event_type;
- /* TODO */
- ( void ) arbel;
+ while ( 1 ) {
+ /* Look for event entry */
+ eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );
+ eqe = &arbel_eq->eqe[arbel_eq->next_idx & eqe_idx_mask];
+ if ( MLX_GET ( &eqe->generic, owner ) != 0 ) {
+ /* Entry still owned by hardware; end of poll */
+ break;
+ }
+ DBGCP ( arbel, "Arbel %p event:\n", arbel );
+ DBGCP_HD ( arbel, eqe, sizeof ( *eqe ) );
+
+ /* Handle event */
+ event_type = MLX_GET ( &eqe->generic, event_type );
+ switch ( event_type ) {
+ case ARBEL_EV_PORT_STATE_CHANGE:
+ arbel_event_port_state_change ( arbel, eqe );
+ break;
+ default:
+ DBGC ( arbel, "Arbel %p unrecognised event type "
+ "%#x:\n", arbel, event_type );
+ DBGC_HD ( arbel, eqe, sizeof ( *eqe ) );
+ break;
+ }
+
+ /* Return ownership to hardware */
+ MLX_FILL_1 ( &eqe->generic, 7, owner, 1 );
+ barrier();
+
+ /* Update event queue's index */
+ arbel_eq->next_idx++;
+
+ /* Ring doorbell */
+ DBGCP ( arbel, "Ringing doorbell %08lx with %08lx\n",
+ virt_to_phys ( arbel_eq->doorbell ),
+ arbel_eq->next_idx );
+ writel ( arbel_eq->next_idx, arbel_eq->doorbell );
+ }
}
/***************************************************************************
@@ -1473,6 +1683,7 @@ static int arbel_start_firmware ( struct arbel *arbel ) {
unsigned int log2_fw_pages;
size_t fw_size;
physaddr_t fw_base;
+ uint64_t eq_set_ci_base_addr;
int rc;
/* Get firmware parameters */
@@ -1489,6 +1700,10 @@ static int arbel_start_firmware ( struct arbel *arbel ) {
fw_pages = ( 1 << log2_fw_pages );
DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
arbel, ( fw_pages * 4 ) );
+ eq_set_ci_base_addr =
+ ( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) |
+ ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) );
+ arbel->eq_ci_doorbells = ioremap ( eq_set_ci_base_addr, 0x200 );
/* Enable locally-attached memory. Ignore failure; there may
* be no attached memory.
@@ -1591,6 +1806,7 @@ static int arbel_get_limits ( struct arbel *arbel ) {
arbel->limits.reserved_cqs =
( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
+ arbel->limits.reserved_eqs = MLX_GET ( &dev_lim, num_rsvd_eqs );
arbel->limits.reserved_mtts =
( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
@@ -1721,7 +1937,7 @@ static int arbel_alloc_icm ( struct arbel *arbel,
icm_offset += icm_usage ( log_num_rdbs, 32 );
/* Event queue contexts */
- log_num_eqs = 6;
+ log_num_eqs = fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
MLX_FILL_2 ( init_hca, 33,
qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
( icm_offset >> 6 ),
@@ -1950,6 +2166,10 @@ static int arbel_probe ( struct pci_device *pci,
if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
goto err_setup_mpt;
+ /* Set up event queue */
+ if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
+ goto err_create_eq;
+
/* Register Infiniband devices */
for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
@@ -1965,6 +2185,8 @@ static int arbel_probe ( struct pci_device *pci,
err_register_ibdev:
for ( ; i >= 0 ; i-- )
unregister_ibdev ( arbel->ibdev[i] );
+ arbel_destroy_eq ( arbel );
+ err_create_eq:
err_setup_mpt:
arbel_cmd_close_hca ( arbel );
err_init_hca:
@@ -1997,6 +2219,7 @@ static void arbel_remove ( struct pci_device *pci ) {
for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
unregister_ibdev ( arbel->ibdev[i] );
+ arbel_destroy_eq ( arbel );
arbel_cmd_close_hca ( arbel );
arbel_free_icm ( arbel );
arbel_stop_firmware ( arbel );
diff --git a/src/drivers/infiniband/arbel.h b/src/drivers/infiniband/arbel.h
index 94fa67c1..68be8248 100644
--- a/src/drivers/infiniband/arbel.h
+++ b/src/drivers/infiniband/arbel.h
@@ -18,7 +18,7 @@
*/
/* Ports in existence */
-#define ARBEL_NUM_PORTS 1
+#define ARBEL_NUM_PORTS 2
#define ARBEL_PORT_BASE 1
/* PCI BARs */
@@ -57,6 +57,7 @@
#define ARBEL_HCR_RST2INIT_QPEE 0x0019
#define ARBEL_HCR_INIT2RTR_QPEE 0x001a
#define ARBEL_HCR_RTR2RTS_QPEE 0x001b
+#define ARBEL_HCR_RTS2RTS_QPEE 0x001c
#define ARBEL_HCR_2RST_QPEE 0x0021
#define ARBEL_HCR_MAD_IFC 0x0024
#define ARBEL_HCR_READ_MGM 0x0025
@@ -86,6 +87,14 @@
#define ARBEL_PAGE_SIZE 4096
#define ARBEL_DB_POST_SND_OFFSET 0x10
+#define ARBEL_DB_EQ_OFFSET(_eqn) ( 0x08 * (_eqn) )
+
+#define ARBEL_QPEE_OPT_PARAM_QKEY 0x00000020UL
+
+#define ARBEL_MAP_EQ ( 0UL << 31 )
+#define ARBEL_UNMAP_EQ ( 1UL << 31 )
+
+#define ARBEL_EV_PORT_STATE_CHANGE 0x09
/*
* Datatypes that seem to be missing from the autogenerated documentation
@@ -104,6 +113,20 @@ struct arbelprm_scalar_parameter_st {
pseudo_bit_t value[0x00020];
} __attribute__ (( packed ));
+struct arbelprm_event_mask_st {
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ pseudo_bit_t completion[0x00001];
+ pseudo_bit_t reserved1[0x0008];
+ pseudo_bit_t port_state_change[0x00001];
+ pseudo_bit_t reserved2[0x00016];
+} __attribute__ (( packed ));
+
+struct arbelprm_port_state_change_event_st {
+ pseudo_bit_t reserved[0x00020];
+ struct arbelprm_port_state_change_st data;
+} __attribute__ (( packed ));
+
/*
* Wrapper structures for hardware datatypes
*
@@ -115,6 +138,8 @@ struct MLX_DECLARE_STRUCT ( arbelprm_completion_queue_entry );
struct MLX_DECLARE_STRUCT ( arbelprm_completion_with_error );
struct MLX_DECLARE_STRUCT ( arbelprm_cq_arm_db_record );
struct MLX_DECLARE_STRUCT ( arbelprm_cq_ci_db_record );
+struct MLX_DECLARE_STRUCT ( arbelprm_event_mask );
+struct MLX_DECLARE_STRUCT ( arbelprm_event_queue_entry );
struct MLX_DECLARE_STRUCT ( arbelprm_eqc );
struct MLX_DECLARE_STRUCT ( arbelprm_hca_command_register );
struct MLX_DECLARE_STRUCT ( arbelprm_init_hca );
@@ -123,6 +148,7 @@ struct MLX_DECLARE_STRUCT ( arbelprm_mad_ifc );
struct MLX_DECLARE_STRUCT ( arbelprm_mgm_entry );
struct MLX_DECLARE_STRUCT ( arbelprm_mgm_hash );
struct MLX_DECLARE_STRUCT ( arbelprm_mpt );
+struct MLX_DECLARE_STRUCT ( arbelprm_port_state_change_event );
struct MLX_DECLARE_STRUCT ( arbelprm_qp_db_record );
struct MLX_DECLARE_STRUCT ( arbelprm_qp_ee_state_transitions );
struct MLX_DECLARE_STRUCT ( arbelprm_query_dev_lim );
@@ -172,6 +198,11 @@ union arbelprm_completion_entry {
struct arbelprm_completion_with_error error;
} __attribute__ (( packed ));
+union arbelprm_event_entry {
+ struct arbelprm_event_queue_entry generic;
+ struct arbelprm_port_state_change_event port_state_change;
+} __attribute__ (( packed ));
+
union arbelprm_doorbell_record {
struct arbelprm_cq_arm_db_record cq_arm;
struct arbelprm_cq_ci_db_record cq_ci;
@@ -215,6 +246,8 @@ struct arbel_dev_limits {
unsigned int reserved_cqs;
/** CQ context entry size */
size_t cqc_entry_size;
+ /** Number of reserved EQs */
+ unsigned int reserved_eqs;
/** Number of reserved MTTs */
unsigned int reserved_mtts;
/** MTT entry size */
@@ -304,6 +337,33 @@ struct arbel_completion_queue {
size_t cqe_size;
};
+/** Maximum number of allocatable event queues
+ *
+ * This is a policy decision, not a device limit.
+ */
+#define ARBEL_MAX_EQS 64
+
+/** An Arbel event queue */
+struct arbel_event_queue {
+ /** Event queue entries */
+ union arbelprm_event_entry *eqe;
+ /** Size of event queue */
+ size_t eqe_size;
+ /** Event queue number */
+ unsigned long eqn;
+ /** Next event queue entry index */
+ unsigned long next_idx;
+ /** Doorbell register */
+ void *doorbell;
+};
+
+/** Number of event queue entries
+ *
+ * This is a policy decision.
+ */
+#define ARBEL_NUM_EQES 4
+
+
/** An Arbel resource bitmask */
typedef uint32_t arbel_bitmask_t;
@@ -318,6 +378,8 @@ struct arbel {
void *config;
/** PCI user Access Region */
void *uar;
+ /** Event queue consumer index doorbells */
+ void *eq_ci_doorbells;
/** Command input mailbox */
void *mailbox_in;
@@ -333,6 +395,8 @@ struct arbel {
/** ICM area */
userptr_t icm;
+ /** Event queue */
+ struct arbel_event_queue eq;
/** Doorbell records */
union arbelprm_doorbell_record *db_rec;
/** Reserved LKey
diff --git a/src/drivers/infiniband/hermon.h b/src/drivers/infiniband/hermon.h
index 1ae27780..ed39da69 100644
--- a/src/drivers/infiniband/hermon.h
+++ b/src/drivers/infiniband/hermon.h
@@ -456,15 +456,14 @@ struct hermon {
/** ICM area */
userptr_t icm;
+ /** Event queue */
+ struct hermon_event_queue eq;
/** Reserved LKey
*
* Used to get unrestricted memory access.
*/
unsigned long reserved_lkey;
- /** Event queue */
- struct hermon_event_queue eq;
-
/** Completion queue in-use bitmask */
hermon_bitmask_t cq_inuse[ HERMON_BITMASK_SIZE ( HERMON_MAX_CQS ) ];
/** Queue pair in-use bitmask */