From cdb42becdd40eeb320af3f21ac9a34e9d7517516 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:21 -0800 Subject: scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu Currently, nvme and fcp each have their own concept of an io_channel, which is a combination of a wq/cq pair and an associated msix vector. Different cpus would share an io_channel. The driver is now moving to per-cpu wq/cq pairs and msix vectors. The driver will still use separate wq/cq pairs per protocol on each cpu, but the protocols will share the msix vector. Given the elimination of the nvme and fcp io channels, their module parameters will be removed. A new parameter, lpfc_hdw_queue, is added which allows the wq/cq pair allocation per cpu to be overridden and allocated to a lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated will be based on the number of cpus. If non-zero, the parameter specifies the number of queues to allocate. At this time, the maximum non-zero value is 64. To manage this new paradigm, a new hardware queue structure is created to track queue activity and relationships. As msix vector allocation must be known before setting up the relationships, msix allocation now occurs before the queue data structures are allocated. If the number of vectors allocated is less than the desired hardware queues, the hardware queue count will be reduced to the number of vectors. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 6245f442d784..c64a8234d5bd 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -973,7 +973,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, * WQE release CQE */ ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; - wq = phba->sli4_hba.nvme_wq[rsp->hwqid]; + wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq; pring = wq->pring; spin_lock_irqsave(&pring->ring_lock, iflags); list_add_tail(&nvmewqeq->list, &wq->wqfull_list); @@ -1047,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) { lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); - wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx]; + wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq; spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_wqfull_flush(phba, wq, ctxp); return; @@ -1377,7 +1377,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) * allocate + 3, one for cmd, one for rsp and one for this alignment */ lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; - lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; + lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) @@ -1697,8 +1697,8 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) return; if (phba->targetport) { tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { - wq = phba->sli4_hba.nvme_wq[qidx]; + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + wq = phba->sli4_hba.hdwq[qidx].nvme_wq; lpfc_nvmet_wqfull_flush(phba, wq, NULL); } init_completion(&tgtp->tport_unreg_done); -- cgit v1.2.3-55-g7522
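The sizing rules in the message above amount to a simple clamp. A rough standalone C sketch of that policy follows; the helper name, its parameters, and the LPFC_HBA_HDWQ_MAX macro are illustrative stand-ins, not the driver's actual symbols (in the driver the logic is folded into the MSI-X setup path):

    /*
     * Illustrative sketch of the lpfc_hdw_queue sizing policy described
     * above -- not the driver's code. Policy: 0 means "one wq/cq pair
     * per cpu"; a non-zero value requests that many queues (at most 64);
     * and because MSI-X vectors are allocated before the queue data
     * structures, the final count is clamped to the vectors granted.
     */
    #include <stdio.h>

    #define LPFC_HBA_HDWQ_MAX 64    /* stated non-zero maximum */

    static int hdwq_count(int cfg_hdw_queue, int online_cpus,
                          int vectors_granted)
    {
            int cnt;

            if (cfg_hdw_queue == 0)
                    cnt = online_cpus;          /* default: per-cpu pairs */
            else if (cfg_hdw_queue > LPFC_HBA_HDWQ_MAX)
                    cnt = LPFC_HBA_HDWQ_MAX;
            else
                    cnt = cfg_hdw_queue;

            if (cnt > vectors_granted)          /* fewer vectors => fewer queues */
                    cnt = vectors_granted;
            return cnt;
    }

    int main(void)
    {
            /* lpfc_hdw_queue=0, 16 cpus, only 8 vectors granted -> 8 queues */
            printf("%d\n", hdwq_count(0, 16, 8));
            return 0;
    }

So with lpfc_hdw_queue=0 on a 16-cpu host that is granted only 8 MSI-X vectors, the driver ends up with 8 hardware queues, each carrying an fcp and an nvme wq/cq pair on its shared vector.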
From 5e5b511d8bfaf765cb92a695cda336c936cb86dc Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:22 -0800 Subject: scsi: lpfc: Partition XRI buffer list across Hardware Queues Once the IO buffer allocations were made shared, there was a single XRI buffer list shared by all hardware queues. A single list isn't great for performance when shared across the per-cpu hardware queues. Create a separate XRI IO buffer get/put list for each Hardware Queue. As SGLs and associated IO buffers get allocated/posted to the firmware, round-robin their assignment across all available hardware queues so that there is an equitable assignment. Modify SCSI and NVME IO submit code paths to use the Hardware Queue logic for XRI allocation. Add a debugfs interface to display hardware queue statistics. Add a new empty_io_bufs counter to track if a cpu runs out of XRIs. Replace common_ variables/names with io_ to make meanings clearer. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc.h | 8 +- drivers/scsi/lpfc/lpfc_attr.c | 2 +- drivers/scsi/lpfc/lpfc_crtn.h | 10 +- drivers/scsi/lpfc/lpfc_debugfs.c | 141 +++++++++++- drivers/scsi/lpfc/lpfc_debugfs.h | 3 + drivers/scsi/lpfc/lpfc_init.c | 447 +++++++++++++++++++++++++-------------- drivers/scsi/lpfc/lpfc_nvme.c | 90 ++++---- drivers/scsi/lpfc/lpfc_nvme.h | 3 +- drivers/scsi/lpfc/lpfc_nvmet.c | 22 +- drivers/scsi/lpfc/lpfc_scsi.c | 107 ++++++---- drivers/scsi/lpfc/lpfc_scsi.h | 3 +- drivers/scsi/lpfc/lpfc_sli.c | 88 +++----- drivers/scsi/lpfc/lpfc_sli.h | 1 + drivers/scsi/lpfc/lpfc_sli4.h | 36 +++- 14 files changed, 623 insertions(+), 338 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index da12476dd933..19827ce7a4d9 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -965,13 +965,6 @@ struct lpfc_hba { struct list_head lpfc_scsi_buf_list_get; struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; - spinlock_t common_buf_list_get_lock; /* Common buf alloc list lock */ - spinlock_t common_buf_list_put_lock; /* Common buf free list lock */ - struct list_head lpfc_common_buf_list_get; - struct list_head lpfc_common_buf_list_put; - uint32_t total_common_bufs; - uint32_t get_common_bufs; - uint32_t put_common_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; struct list_head active_rrq_list; @@ -1045,6 +1038,7 @@ struct lpfc_hba { struct dentry *debug_nvmeio_trc; struct lpfc_debugfs_nvmeio_trc *nvmeio_trc; + struct dentry *debug_hdwqinfo; atomic_t nvmeio_trc_cnt; uint32_t nvmeio_trc_size; uint32_t nvmeio_trc_output_idx; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c6b1d432dd07..1671d9371d3b 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -337,7 +337,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, "XRI Dist lpfc%d Total %d IO %d ELS %d\n", phba->brd_no, phba->sli4_hba.max_cfg_param.max_xri, - phba->sli4_hba.common_xri_max, + phba->sli4_hba.io_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 6dc427d4228c..a623f6f619cc 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -515,10 +515,12 @@ int lpfc_sli4_read_config(struct lpfc_hba *); void lpfc_sli4_node_prep(struct lpfc_hba *); int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); -int lpfc_sli4_common_sgl_update(struct lpfc_hba *phba); -int lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba, - struct list_head *blist, int xricnt); -int lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc); +int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); +int lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf); +int lpfc_sli4_io_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, + struct list_head *blist, int xricnt); +int lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index fc5ec99bfde2..a1b7263bfe2a 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -378,6 +378,73 @@ skipit: return len; } +static int lpfc_debugfs_last_hdwq; + +/** + * lpfc_debugfs_hdwqinfo_data - Dump Hardware Queue info to a buffer + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the Hardware Queue info from the @phba to @buf up to + * @size number of bytes. A header that describes the current hdwq state will be + * dumped to @buf first and then info on each hdwq entry will be dumped to @buf + * until @size bytes have been dumped or all the hdwq info has been dumped. + * + * Notes: + * This routine will rotate through each configured Hardware Queue each + * time called. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
+ **/ +static int +lpfc_debugfs_hdwqinfo_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_sli4_hdw_queue *qp; + int len = 0; + int i, out; + unsigned long iflag; + + if (phba->sli_rev != LPFC_SLI_REV4) + return 0; + + if (!phba->sli4_hba.hdwq) + return 0; + + for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (len > (LPFC_HDWQINFO_SIZE - 80)) + break; + qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_hdwq]; + + len += snprintf(buf + len, size - len, "HdwQ %d Info ", i); + spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); + spin_lock(&qp->abts_nvme_buf_list_lock); + spin_lock(&qp->io_buf_list_get_lock); + spin_lock(&qp->io_buf_list_put_lock); + out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs + + qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs); + len += snprintf(buf + len, size - len, + "tot:%d get:%d put:%d mt:%d " + "ABTS scsi:%d nvme:%d Out:%d\n", + qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs, + qp->empty_io_bufs, qp->abts_scsi_io_bufs, + qp->abts_nvme_io_bufs, out); + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->io_buf_list_get_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); + spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); + + lpfc_debugfs_last_hdwq++; + if (lpfc_debugfs_last_hdwq >= phba->cfg_hdw_queue) + lpfc_debugfs_last_hdwq = 0; + } + + return len; +} + static int lpfc_debugfs_last_hba_slim_off; /** @@ -863,17 +930,17 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) len += snprintf(buf + len, size - len, "\n"); cnt = 0; - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { cnt++; } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); if (cnt) { len += snprintf(buf + len, size - len, "ABORT: %d ctx entries\n", cnt); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { @@ -885,7 +952,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) ctxp->oxid, ctxp->state, ctxp->flag); } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); } /* Calculate outstanding IOs */ @@ -1619,6 +1686,48 @@ out: return rc; } +/** + * lpfc_debugfs_hdwqinfo_open - Open the hdwqinfo debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. 
+ **/ +static int +lpfc_debugfs_hdwqinfo_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_HDWQINFO_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_hdwqinfo_data(phba, debug->buffer, + LPFC_HBQINFO_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + /** * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer * @inode: The inode pointer that contains a vport pointer. @@ -4819,6 +4928,15 @@ static const struct file_operations lpfc_debugfs_op_hbqinfo = { .release = lpfc_debugfs_release, }; +#undef lpfc_debugfs_op_hdwqinfo +static const struct file_operations lpfc_debugfs_op_hdwqinfo = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_hdwqinfo_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + #undef lpfc_debugfs_op_dumpHBASlim static const struct file_operations lpfc_debugfs_op_dumpHBASlim = { .owner = THIS_MODULE, @@ -5244,6 +5362,18 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) phba->hba_debugfs_root, phba, &lpfc_debugfs_op_hbqinfo); + /* Setup hdwqinfo */ + snprintf(name, sizeof(name), "hdwqinfo"); + phba->debug_hdwqinfo = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_hdwqinfo); + if (!phba->debug_hdwqinfo) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0411 Cant create debugfs hdwqinfo\n"); + goto debug_failed; + } + /* Setup dumpHBASlim */ if (phba->sli_rev < LPFC_SLI_REV4) { snprintf(name, sizeof(name), "dumpHBASlim"); @@ -5630,6 +5760,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ phba->debug_hbqinfo = NULL; + debugfs_remove(phba->debug_hdwqinfo); /* hdwqinfo */ + phba->debug_hdwqinfo = NULL; + debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ phba->debug_dumpHBASlim = NULL; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 2c5bc494b247..c904fa74dfbc 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -284,6 +284,9 @@ struct lpfc_idiag { #endif +/* hdwqinfo output buffer size */ +#define LPFC_HDWQINFO_SIZE 8192 + enum { DUMP_FCP, DUMP_NVME, diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index c4acd9ee55b1..ed0318b9003e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1039,12 +1039,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_scsi_buf *psb, *psb_next; struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; + struct lpfc_sli4_hdw_queue *qp; LIST_HEAD(aborts); LIST_HEAD(nvme_aborts); LIST_HEAD(nvmet_aborts); - unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; - int cnt; + int cnt, idx; lpfc_sli_hbqbuf_free_all(phba); @@ -1071,57 +1071,65 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) spin_unlock(&phba->sli4_hba.sgl_list_lock); - /* abts_scsi_buf_list_lock required because worker thread uses this + + /* abts_xxxx_buf_list_lock required because worker thread uses this * list. 
*/ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, - &aborts); - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); - } - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, - &nvme_aborts); - list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, - &nvmet_aborts); - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); - } + cnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; - spin_unlock_irq(&phba->hbalock); + spin_lock(&qp->abts_scsi_buf_list_lock); + list_splice_init(&qp->lpfc_abts_scsi_buf_list, + &aborts); - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { list_for_each_entry_safe(psb, psb_next, &aborts, list) { psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; + cnt++; } - spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag); - list_splice(&aborts, &phba->lpfc_common_buf_list_put); - spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag); - } + spin_lock(&qp->io_buf_list_put_lock); + list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); + qp->put_io_bufs += qp->abts_scsi_io_bufs; + qp->abts_scsi_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->abts_scsi_buf_list_lock); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + spin_lock(&qp->abts_nvme_buf_list_lock); + list_splice_init(&qp->lpfc_abts_nvme_buf_list, + &nvme_aborts); + list_for_each_entry_safe(psb, psb_next, &nvme_aborts, + list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + cnt++; + } + spin_lock(&qp->io_buf_list_put_lock); + qp->put_io_bufs += qp->abts_nvme_io_bufs; + qp->abts_nvme_io_bufs = 0; + list_splice_init(&nvme_aborts, + &qp->lpfc_io_buf_list_put); + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - cnt = 0; - list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { - psb->pCmd = NULL; - psb->status = IOSTAT_SUCCESS; - cnt++; } - spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag); - phba->put_common_bufs += cnt; - list_splice(&nvme_aborts, &phba->lpfc_common_buf_list_put); - spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag); + } + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + &nvmet_aborts); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); } } + spin_unlock_irq(&phba->hbalock); lpfc_sli4_free_sp_events(phba); - return 0; + return cnt; } /** @@ -3126,18 +3134,6 @@ lpfc_online(struct lpfc_hba *phba) "6132 NVME restore reg failed " "on nvmei error x%x\n", error); } - /* Don't post more new bufs if repost already recovered - * the nvme sgls. 
- */ - if (phba->sli4_hba.common_xri_cnt == 0) { - i = lpfc_new_common_buf(phba, - phba->sli4_hba.common_xri_max); - if (i == 0) { - lpfc_unblock_mgmt_io(phba); - return 1; - } - phba->total_common_bufs += i; - } } else { lpfc_sli_queue_init(phba); if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ @@ -3372,7 +3368,7 @@ lpfc_scsi_free(struct lpfc_hba *phba) } /** - * lpfc_common_free - Free all the IO buffers and IOCBs from driver lists + * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists * @phba: pointer to lpfc hba data structure. * * This routine is to free all the IO buffers and IOCBs from the driver @@ -3380,36 +3376,44 @@ lpfc_scsi_free(struct lpfc_hba *phba) * the internal resources before the device is removed from the system. **/ static void -lpfc_common_free(struct lpfc_hba *phba) +lpfc_io_free(struct lpfc_hba *phba) { struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; + struct lpfc_sli4_hdw_queue *qp; + int idx; spin_lock_irq(&phba->hbalock); - /* Release all the lpfc_nvme_bufs maintained by this host. */ - spin_lock(&phba->common_buf_list_put_lock); - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_common_buf_list_put, list) { - list_del(&lpfc_ncmd->list); - phba->put_common_bufs--; - dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, - lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - phba->total_common_bufs--; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + /* Release all the lpfc_nvme_bufs maintained by this host. */ + spin_lock(&qp->io_buf_list_put_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_put, + list) { + list_del(&lpfc_ncmd->list); + qp->put_io_bufs--; + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + qp->total_io_bufs--; + } + spin_unlock(&qp->io_buf_list_put_lock); + + spin_lock(&qp->io_buf_list_get_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_get, + list) { + list_del(&lpfc_ncmd->list); + qp->get_io_bufs--; + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + qp->total_io_bufs--; + } + spin_unlock(&qp->io_buf_list_get_lock); } - spin_unlock(&phba->common_buf_list_put_lock); - spin_lock(&phba->common_buf_list_get_lock); - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_common_buf_list_get, list) { - list_del(&lpfc_ncmd->list); - phba->get_common_bufs--; - dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, - lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - phba->total_common_bufs--; - } - spin_unlock(&phba->common_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } @@ -3654,8 +3658,101 @@ out_free_mem: return rc; } +int +lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) +{ + LIST_HEAD(blist); + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_scsi_buf *iobufp, *prev_iobufp; + int idx, cnt, xri, inserted; + + cnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_irq(&qp->io_buf_list_get_lock); + spin_lock(&qp->io_buf_list_put_lock); + + /* Take everything off the get and put lists */ + list_splice_init(&qp->lpfc_io_buf_list_get, &blist); + list_splice(&qp->lpfc_io_buf_list_put, &blist); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + cnt += qp->get_io_bufs + qp->put_io_bufs; + qp->get_io_bufs = 0; + qp->put_io_bufs = 0; + 
qp->total_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock_irq(&qp->io_buf_list_get_lock); + } + + /* + * Take IO buffers off blist and put on cbuf sorted by XRI. + * This is because POST_SGL takes a sequential range of XRIs + * to post to the firmware. + */ + for (idx = 0; idx < cnt; idx++) { + list_remove_head(&blist, lpfc_cmd, struct lpfc_scsi_buf, list); + if (!lpfc_cmd) + return cnt; + if (idx == 0) { + list_add_tail(&lpfc_cmd->list, cbuf); + continue; + } + xri = lpfc_cmd->cur_iocbq.sli4_xritag; + inserted = 0; + prev_iobufp = NULL; + list_for_each_entry(iobufp, cbuf, list) { + if (xri < iobufp->cur_iocbq.sli4_xritag) { + if (prev_iobufp) + list_add(&lpfc_cmd->list, + &prev_iobufp->list); + else + list_add(&lpfc_cmd->list, cbuf); + inserted = 1; + break; + } + prev_iobufp = iobufp; + } + if (!inserted) + list_add_tail(&lpfc_cmd->list, cbuf); + } + return cnt; +} + +int +lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_scsi_buf *lpfc_cmd; + int idx, cnt; + + qp = phba->sli4_hba.hdwq; + cnt = 0; + while (!list_empty(cbuf)) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + list_remove_head(cbuf, lpfc_cmd, + struct lpfc_scsi_buf, list); + if (!lpfc_cmd) + return cnt; + cnt++; + qp = &phba->sli4_hba.hdwq[idx]; + lpfc_cmd->hdwq = idx; + lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; + lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; + spin_lock(&qp->io_buf_list_put_lock); + list_add_tail(&lpfc_cmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + qp->total_io_bufs++; + spin_unlock(&qp->io_buf_list_put_lock); + } + } + return cnt; +} + /** - * lpfc_sli4_common_sgl_update - update xri-sgl sizing and mapping + * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. 
* * This routine first calculates the sizes of the current els and allocated @@ -3667,52 +3764,38 @@ out_free_mem: * 0 - successful (for now, it always returns 0) **/ int -lpfc_sli4_common_sgl_update(struct lpfc_hba *phba) +lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) { struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; uint16_t i, lxri, els_xri_cnt; - uint16_t common_xri_cnt, common_xri_max; - LIST_HEAD(common_sgl_list); + uint16_t io_xri_cnt, io_xri_max; + LIST_HEAD(io_sgl_list); int rc, cnt; - phba->total_common_bufs = 0; - phba->get_common_bufs = 0; - phba->put_common_bufs = 0; - /* * update on pci function's allocated nvme xri-sgl list */ /* maximum number of xris available for nvme buffers */ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - common_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - phba->sli4_hba.common_xri_max = common_xri_max; + io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.io_xri_max = io_xri_max; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "6074 Current allocated XRI sgl count:%d, " "maximum XRI count:%d\n", - phba->sli4_hba.common_xri_cnt, - phba->sli4_hba.common_xri_max); - - spin_lock_irq(&phba->common_buf_list_get_lock); - spin_lock(&phba->common_buf_list_put_lock); - list_splice_init(&phba->lpfc_common_buf_list_get, &common_sgl_list); - list_splice(&phba->lpfc_common_buf_list_put, &common_sgl_list); - cnt = phba->get_common_bufs + phba->put_common_bufs; - phba->get_common_bufs = 0; - phba->put_common_bufs = 0; - spin_unlock(&phba->common_buf_list_put_lock); - spin_unlock_irq(&phba->common_buf_list_get_lock); - - if (phba->sli4_hba.common_xri_cnt > phba->sli4_hba.common_xri_max) { + phba->sli4_hba.io_xri_cnt, + phba->sli4_hba.io_xri_max); + + cnt = lpfc_io_buf_flush(phba, &io_sgl_list); + + if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { /* max nvme xri shrunk below the allocated nvme buffers */ - spin_lock_irq(&phba->common_buf_list_get_lock); - common_xri_cnt = phba->sli4_hba.common_xri_cnt - - phba->sli4_hba.common_xri_max; - spin_unlock_irq(&phba->common_buf_list_get_lock); + io_xri_cnt = phba->sli4_hba.io_xri_cnt - + phba->sli4_hba.io_xri_max; /* release the extra allocated nvme buffers */ - for (i = 0; i < common_xri_cnt; i++) { - list_remove_head(&common_sgl_list, lpfc_ncmd, + for (i = 0; i < io_xri_cnt; i++) { + list_remove_head(&io_sgl_list, lpfc_ncmd, struct lpfc_nvme_buf, list); if (lpfc_ncmd) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, @@ -3721,16 +3804,15 @@ lpfc_sli4_common_sgl_update(struct lpfc_hba *phba) kfree(lpfc_ncmd); } } - spin_lock_irq(&phba->common_buf_list_get_lock); - phba->sli4_hba.common_xri_cnt -= common_xri_cnt; - spin_unlock_irq(&phba->common_buf_list_get_lock); + phba->sli4_hba.io_xri_cnt -= io_xri_cnt; } /* update xris associated to remaining allocated nvme buffers */ lpfc_ncmd = NULL; lpfc_ncmd_next = NULL; + phba->sli4_hba.io_xri_cnt = cnt; list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &common_sgl_list, list) { + &io_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -3742,22 +3824,16 @@ lpfc_sli4_common_sgl_update(struct lpfc_hba *phba) lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock_irq(&phba->common_buf_list_get_lock); - spin_lock(&phba->common_buf_list_put_lock); - list_splice_init(&common_sgl_list, &phba->lpfc_common_buf_list_get); - phba->get_common_bufs = cnt; - 
INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put); - spin_unlock(&phba->common_buf_list_put_lock); - spin_unlock_irq(&phba->common_buf_list_get_lock); + cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); return 0; out_free_mem: - lpfc_common_free(phba); + lpfc_io_free(phba); return rc; } /** - * lpfc_new_common_buf - IO buffer allocator for HBA with SLI4 IF spec + * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec * @vport: The virtual port for which this call being executed. * @num_to_allocate: The requested number of buffers to allocate. * @@ -3767,11 +3843,11 @@ out_free_mem: * them on a list, it post them to the port by using SGL block post. * * Return codes: - * int - number of nvme buffers that were allocated and posted. + * int - number of IO buffers that were allocated and posted. * 0 = failure, less than num_to_alloc is a partial failure. **/ int -lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc) +lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) { struct lpfc_nvme_buf *lpfc_ncmd; struct lpfc_iocbq *pwqeq; @@ -3791,6 +3867,7 @@ lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc) return 0; } + phba->sli4_hba.io_xri_cnt = 0; for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); if (!lpfc_ncmd) @@ -3857,9 +3934,7 @@ lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc) /* add the nvme buffer to a post list */ list_add_tail(&lpfc_ncmd->list, &post_nblist); - spin_lock_irq(&phba->common_buf_list_get_lock); - phba->sli4_hba.common_xri_cnt++; - spin_unlock_irq(&phba->common_buf_list_get_lock); + phba->sli4_hba.io_xri_cnt++; } lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6114 Allocate %d out of %d requested new NVME " @@ -3867,7 +3942,7 @@ lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc) /* post the list of nvme buffer sgls to port if available */ if (!list_empty(&post_nblist)) - num_posted = lpfc_sli4_post_common_sgl_list( + num_posted = lpfc_sli4_post_io_sgl_list( phba, &post_nblist, bcnt); else num_posted = 0; @@ -5855,14 +5930,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) spin_lock_init(&phba->scsi_buf_list_put_lock); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - /* Initialize the IO buffer list used by driver for SLI4 SCSI/NVME */ - spin_lock_init(&phba->common_buf_list_get_lock); - INIT_LIST_HEAD(&phba->lpfc_common_buf_list_get); - phba->get_common_bufs = 0; - spin_lock_init(&phba->common_buf_list_put_lock); - INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put); - phba->put_common_bufs = 0; - /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -6229,8 +6296,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Initialize the Abort nvme buffer list used by driver */ - spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); + spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); } @@ -6903,7 +6969,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba) phba->sli4_hba.els_xri_cnt = 0; /* nvme xri-buffer book keeping */ - phba->sli4_hba.common_xri_cnt = 0; + phba->sli4_hba.io_xri_cnt = 0; } /** @@ -7117,6 +7183,9 @@ lpfc_hba_alloc(struct pci_dev *pdev) static void lpfc_hba_free(struct lpfc_hba *phba) { + if (phba->sli_rev == LPFC_SLI_REV4) + kfree(phba->sli4_hba.hdwq); + /* Release the driver assigned board number 
*/ idr_remove(&lpfc_hba_index, phba->brd_no); @@ -8330,6 +8399,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) return 1; } qdesc->qe_valid = 1; + qdesc->hdwq = wqidx; phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc; qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, @@ -8340,6 +8410,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) wqidx); return 1; } + qdesc->hdwq = wqidx; phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; @@ -8368,6 +8439,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) return 1; } qdesc->qe_valid = 1; + qdesc->hdwq = wqidx; phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; /* Create Fast Path FCP WQs */ @@ -8389,6 +8461,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) wqidx); return 1; } + qdesc->hdwq = wqidx; phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; @@ -8413,6 +8486,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; int idx; + struct lpfc_sli4_hdw_queue *qp; /* * Create HBA Record arrays. @@ -8429,14 +8503,33 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; - phba->sli4_hba.hdwq = kcalloc(phba->cfg_hdw_queue, - sizeof(struct lpfc_sli4_hdw_queue), - GFP_KERNEL); if (!phba->sli4_hba.hdwq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6427 Failed allocate memory for " - "fast-path Hardware Queue array\n"); - goto out_error; + phba->sli4_hba.hdwq = kcalloc( + phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), + GFP_KERNEL); + if (!phba->sli4_hba.hdwq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6427 Failed allocate memory for " + "fast-path Hardware Queue array\n"); + goto out_error; + } + /* Prepare hardware queues to take IO buffers */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_init(&qp->io_buf_list_get_lock); + spin_lock_init(&qp->io_buf_list_put_lock); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + qp->get_io_bufs = 0; + qp->put_io_bufs = 0; + qp->total_io_bufs = 0; + spin_lock_init(&qp->abts_scsi_buf_list_lock); + INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list); + qp->abts_scsi_io_bufs = 0; + spin_lock_init(&qp->abts_nvme_buf_list_lock); + INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list); + qp->abts_nvme_io_bufs = 0; + } } if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { @@ -8488,6 +8581,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } qdesc->qe_valid = 1; + qdesc->hdwq = idx; phba->sli4_hba.hdwq[idx].hba_eq = qdesc; } @@ -8518,6 +8612,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } qdesc->qe_valid = 1; + qdesc->hdwq = idx; phba->sli4_hba.nvmet_cqset[idx] = qdesc; } } @@ -8650,6 +8745,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "receive HRQ\n"); goto out_error; } + qdesc->hdwq = idx; phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; /* Only needed for header of RQ pair */ @@ -8676,6 +8772,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "receive DRQ\n"); goto out_error; } + qdesc->hdwq = idx; phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; } } @@ -8727,7 +8824,6 @@ lpfc_sli4_release_hdwq(struct lpfc_sli4_hdw_queue *hdwq, int max) hdwq[idx].fcp_wq = NULL; hdwq[idx].nvme_wq = NULL; } - kfree(hdwq); } /** @@ -8749,7 +8845,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) if (phba->sli4_hba.hdwq) 
lpfc_sli4_release_hdwq(phba->sli4_hba.hdwq, phba->cfg_hdw_queue); - phba->sli4_hba.hdwq = NULL; if (phba->nvmet_support) { lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, @@ -10400,8 +10495,10 @@ lpfc_unset_hba(struct lpfc_hba *phba) static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { + struct lpfc_sli4_hdw_queue *qp; + int idx, ccnt, fcnt; int wait_time = 0; - int common_xri_cmpl = 1; + int io_xri_cmpl = 1; int nvmet_xri_cmpl = 1; int fcp_xri_cmpl = 1; int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); @@ -10416,17 +10513,32 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) lpfc_nvme_wait_for_io_drain(phba); - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) - fcp_xri_cmpl = - list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + ccnt = 0; + fcnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + fcp_xri_cmpl = list_empty( + &qp->lpfc_abts_scsi_buf_list); + if (!fcp_xri_cmpl) /* if list is NOT empty */ + fcnt++; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + io_xri_cmpl = list_empty( + &qp->lpfc_abts_nvme_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; + } + } + if (ccnt) + io_xri_cmpl = 0; + if (fcnt) + fcp_xri_cmpl = 0; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - common_xri_cmpl = - list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); nvmet_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } - while (!fcp_xri_cmpl || !els_xri_cmpl || !common_xri_cmpl || + while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { if (!nvmet_xri_cmpl) @@ -10434,7 +10546,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) "6424 NVMET XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); - if (!common_xri_cmpl) + if (!io_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6100 NVME XRI exchange busy " "wait time: %d seconds.\n", @@ -10455,17 +10567,31 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; } + + ccnt = 0; + fcnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + fcp_xri_cmpl = list_empty( + &qp->lpfc_abts_scsi_buf_list); + if (!fcp_xri_cmpl) /* if list is NOT empty */ + fcnt++; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + io_xri_cmpl = list_empty( + &qp->lpfc_abts_nvme_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; + } + } + if (ccnt) + io_xri_cmpl = 0; + if (fcnt) + fcp_xri_cmpl = 0; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - common_xri_cmpl = list_empty( - &phba->sli4_hba.lpfc_abts_nvme_buf_list); nvmet_xri_cmpl = list_empty( &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) - fcp_xri_cmpl = list_empty( - &phba->sli4_hba.lpfc_abts_scsi_buf_list); - els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); @@ -11739,14 +11865,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Don't post more new bufs if repost already recovered * the nvme sgls. 
*/ - if (phba->sli4_hba.common_xri_cnt == 0) { - len = lpfc_new_common_buf( - phba, phba->sli4_hba.common_xri_max); + if (phba->sli4_hba.io_xri_cnt == 0) { + len = lpfc_new_io_buf( + phba, phba->sli4_hba.io_xri_max); if (len == 0) { error = -ENOMEM; goto out_free_sysfs_attr; } - phba->total_common_bufs += len; } } @@ -11836,7 +11961,6 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) * the HBA FCoE function. */ lpfc_debugfs_terminate(vport); - lpfc_sli4_hba_unset(phba); lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->port_list_lock); @@ -11846,8 +11970,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) /* Perform scsi free before driver resource_unset since scsi * buffers are released to their corresponding pools here. */ - lpfc_common_free(phba); + lpfc_io_free(phba); lpfc_free_iocb_list(phba); + lpfc_sli4_hba_unset(phba); lpfc_unset_driver_resource_phase2(phba); lpfc_sli4_driver_resource_unset(phba); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index c0f59fbaa173..0ecc73a6634f 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -58,7 +58,7 @@ static struct lpfc_nvme_buf * lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - int expedite); + int idx, int expedite); static void lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *); @@ -1545,7 +1545,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, } } - lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite); + lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, + lpfc_queue_info->index, expedite); if (lpfc_ncmd == NULL) { atomic_inc(&lport->xmt_fcp_noxri); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, @@ -1913,24 +1914,26 @@ static struct nvme_fc_port_template lpfc_nvme_template = { }; static inline struct lpfc_nvme_buf * -lpfc_nvme_buf(struct lpfc_hba *phba) +lpfc_nvme_buf(struct lpfc_hba *phba, int idx) { + struct lpfc_sli4_hdw_queue *qp; struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; + qp = &phba->sli4_hba.hdwq[idx]; list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_common_buf_list_get, list) { + &qp->lpfc_io_buf_list_get, list) { list_del_init(&lpfc_ncmd->list); - phba->get_common_bufs--; + qp->get_io_bufs--; return lpfc_ncmd; } return NULL; } /** - * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_common_buf_list of the HBA + * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA * @phba: The HBA for which this call is being executed. * - * This routine removes a nvme buffer from head of @phba lpfc_common_buf_list + * This routine removes a nvme buffer from head of @hdwq io_buf_list * and returns to caller. 
* * Return codes: @@ -1939,30 +1942,32 @@ lpfc_nvme_buf(struct lpfc_hba *phba) **/ static struct lpfc_nvme_buf * lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - int expedite) + int idx, int expedite) { struct lpfc_nvme_buf *lpfc_ncmd = NULL; + struct lpfc_sli4_hdw_queue *qp; struct sli4_sge *sgl; struct lpfc_iocbq *pwqeq; union lpfc_wqe128 *wqe; unsigned long iflag = 0; - spin_lock_irqsave(&phba->common_buf_list_get_lock, iflag); - if (phba->get_common_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) - lpfc_ncmd = lpfc_nvme_buf(phba); + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag); + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) + lpfc_ncmd = lpfc_nvme_buf(phba, idx); if (!lpfc_ncmd) { - spin_lock(&phba->common_buf_list_put_lock); - list_splice(&phba->lpfc_common_buf_list_put, - &phba->lpfc_common_buf_list_get); - phba->get_common_bufs += phba->put_common_bufs; - INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put); - phba->put_common_bufs = 0; - spin_unlock(&phba->common_buf_list_put_lock); - if (phba->get_common_bufs > LPFC_NVME_EXPEDITE_XRICNT || + spin_lock(&qp->io_buf_list_put_lock); + list_splice(&qp->lpfc_io_buf_list_put, + &qp->lpfc_io_buf_list_get); + qp->get_io_bufs += qp->put_io_bufs; + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + qp->put_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) - lpfc_ncmd = lpfc_nvme_buf(phba); + lpfc_ncmd = lpfc_nvme_buf(phba, idx); } - spin_unlock_irqrestore(&phba->common_buf_list_get_lock, iflag); + spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag); if (lpfc_ncmd) { pwqeq = &(lpfc_ncmd->cur_iocbq); @@ -1975,6 +1980,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; lpfc_ncmd->start_time = jiffies; lpfc_ncmd->flags = 0; + lpfc_ncmd->hdwq = idx; /* Rsp SGE will be filled in when we rcv an IO * from the NVME Layer to be sent. @@ -1993,7 +1999,10 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, atomic_inc(&ndlp->cmd_pending); lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH; } - } + + } else + qp->empty_io_bufs++; + return lpfc_ncmd; } @@ -2003,13 +2012,14 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, * @lpfc_ncmd: The nvme buffer which is being released. * * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba - * lpfc_common_buf_list list. For SLI4 XRI's are tied to the nvme buffer + * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted. 
**/ static void lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) { + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp) @@ -2018,6 +2028,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) lpfc_ncmd->ndlp = NULL; lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH; + qp = &phba->sli4_hba.hdwq[lpfc_ncmd->hdwq]; if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6310 XB release deferred for " @@ -2025,21 +2036,21 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->cur_iocbq.iotag); - spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, - iflag); + spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag); list_add_tail(&lpfc_ncmd->list, - &phba->sli4_hba.lpfc_abts_nvme_buf_list); - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, - iflag); + &qp->lpfc_abts_nvme_buf_list); + qp->abts_nvme_io_bufs++; + spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag); } else { /* MUST zero fields if buffer is reused by another protocol */ lpfc_ncmd->nvmeCmd = NULL; lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; - spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag); + + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); list_add_tail(&lpfc_ncmd->list, - &phba->lpfc_common_buf_list_put); - phba->put_common_bufs++; - spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag); + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); } } @@ -2517,27 +2528,28 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) **/ void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri) + struct sli4_wcqe_xri_aborted *axri, int idx) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd; struct nvmefc_fcp_req *nvme_cmd = NULL; struct lpfc_nodelist *ndlp; + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return; + qp = &phba->sli4_hba.hdwq[idx]; spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&qp->abts_nvme_buf_list_lock); list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd, - &phba->sli4_hba.lpfc_abts_nvme_buf_list, - list) { + &qp->lpfc_abts_nvme_buf_list, list) { if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) { list_del_init(&lpfc_ncmd->list); + qp->abts_nvme_io_bufs--; lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; lpfc_ncmd->status = IOSTAT_SUCCESS; - spin_unlock( - &phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); ndlp = lpfc_ncmd->ndlp; @@ -2563,7 +2575,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, return; } } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index e3a554417e98..4a020b9c8fbf 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -84,6 +84,8 @@ struct lpfc_nvme_buf { dma_addr_t dma_phys_sgl; struct sli4_sge *dma_sgl; struct lpfc_iocbq cur_iocbq; + uint16_t hdwq; + uint16_t cpu; /* NVME specific fields */ struct nvmefc_fcp_req *nvmeCmd; @@ -95,7 +97,6 @@ 
struct lpfc_nvme_buf { #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ #define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ - uint16_t cpu; uint16_t status; /* From IOCB Word 7- ulpStatus */ uint32_t result; /* From IOCB Word 4. */ diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index c64a8234d5bd..f2a30ee9702b 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -226,15 +226,15 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) "6313 NVMET Defer ctx release xri x%x flg x%x\n", ctxp->oxid, ctxp->flag); - spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); + spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); if (ctxp->flag & LPFC_NVMET_CTX_RLS) { - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); return; } ctxp->flag |= LPFC_NVMET_CTX_RLS; list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); } /** @@ -1162,9 +1162,9 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags); list_for_each_entry_safe(ctx_buf, next_ctx_buf, &infop->nvmet_ctx_list, list) { - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctx_buf->list); - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); ctx_buf->sglq->state = SGL_FREED; @@ -1502,7 +1502,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, } spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { @@ -1518,7 +1518,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, released = true; } ctxp->flag &= ~LPFC_NVMET_XBUSY; - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); rrq_empty = list_empty(&phba->active_rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -1542,7 +1542,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, lpfc_worker_wake_up(phba); return; } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); } @@ -1561,14 +1561,14 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, xri = be16_to_cpu(fc_hdr->fh_ox_id); spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); spin_lock_irqsave(&ctxp->ctxlock, iflag); @@ -1589,7 +1589,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; } - 
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 63841f49affb..261d0ebf0093 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -525,19 +525,26 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *psb, *next_psb; + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; + int idx; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; + spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); - list_for_each_entry_safe(psb, next_psb, - &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { - if (psb->rdata && psb->rdata->pnode - && psb->rdata->pnode->vport == vport) - psb->rdata = NULL; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + + spin_lock(&qp->abts_scsi_buf_list_lock); + list_for_each_entry_safe(psb, next_psb, + &qp->lpfc_abts_scsi_buf_list, list) { + if (psb->rdata && psb->rdata->pnode && + psb->rdata->pnode->vport == vport) + psb->rdata = NULL; + } + spin_unlock(&qp->abts_scsi_buf_list_lock); } - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); } @@ -551,11 +558,12 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) **/ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri) + struct sli4_wcqe_xri_aborted *axri, int idx) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); struct lpfc_scsi_buf *psb, *next_psb; + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; struct lpfc_iocbq *iocbq; int i; @@ -565,16 +573,19 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; + + qp = &phba->sli4_hba.hdwq[idx]; spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_lock(&qp->abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, - &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + &qp->lpfc_abts_scsi_buf_list, list) { if (psb->cur_iocbq.sli4_xritag == xri) { list_del(&psb->list); + qp->abts_scsi_io_bufs--; psb->exch_busy = 0; psb->status = IOSTAT_SUCCESS; spin_unlock( - &phba->sli4_hba.abts_scsi_buf_list_lock); + &qp->abts_scsi_buf_list_lock); if (psb->rdata && psb->rdata->pnode) ndlp = psb->rdata->pnode; else @@ -593,7 +604,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, return; } } - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock(&qp->abts_scsi_buf_list_lock); for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; @@ -652,10 +663,10 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) return lpfc_cmd; } /** - * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_common_buf_list of the HBA + * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA * @phba: The HBA for which this call is being executed. * - * This routine removes a scsi buffer from head of @phba lpfc_common_buf_list + * This routine removes a scsi buffer from head of @hdwq io_buf_list * and returns to caller. 
* * Return codes: @@ -666,48 +677,58 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; struct sli4_sge *sgl; IOCB_t *iocb; dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_fcp_cmd; - uint32_t sgl_size; + uint32_t sgl_size, cpu, idx; int found = 0; - spin_lock_irqsave(&phba->common_buf_list_get_lock, iflag); + cpu = smp_processor_id(); + if (cpu < phba->cfg_hdw_queue) + idx = cpu; + else + idx = cpu % phba->cfg_hdw_queue; + + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag); list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, - &phba->lpfc_common_buf_list_get, list) { + &qp->lpfc_io_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; list_del_init(&lpfc_cmd->list); - phba->get_common_bufs--; + qp->get_io_bufs--; found = 1; break; } if (!found) { - spin_lock(&phba->common_buf_list_put_lock); - list_splice(&phba->lpfc_common_buf_list_put, - &phba->lpfc_common_buf_list_get); - phba->get_common_bufs += phba->put_common_bufs; - INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put); - phba->put_common_bufs = 0; - spin_unlock(&phba->common_buf_list_put_lock); + spin_lock(&qp->io_buf_list_put_lock); + list_splice(&qp->lpfc_io_buf_list_put, + &qp->lpfc_io_buf_list_get); + qp->get_io_bufs += qp->put_io_bufs; + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + qp->put_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, - &phba->lpfc_common_buf_list_get, + &qp->lpfc_io_buf_list_get, list) { if (lpfc_test_rrq_active( phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; list_del_init(&lpfc_cmd->list); - phba->get_common_bufs--; + qp->get_io_bufs--; found = 1; break; } } - spin_unlock_irqrestore(&phba->common_buf_list_get_lock, iflag); - if (!found) + spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag); + if (!found) { + qp->empty_io_bufs++; return NULL; + } sgl_size = phba->cfg_sg_dma_buf_size - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); @@ -723,10 +744,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_cmd->flags = 0; lpfc_cmd->start_time = jiffies; lpfc_cmd->waitq = NULL; - lpfc_cmd->cpu = smp_processor_id(); + lpfc_cmd->cpu = cpu; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS lpfc_cmd->prot_data_type = 0; #endif + lpfc_cmd->hdwq = idx; lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size); lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd + @@ -825,35 +847,36 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * - * This routine releases @psb scsi buffer by adding it to tail of @phba - * lpfc_common_buf_list list. For SLI4 XRI's are tied to the scsi buffer + * This routine releases @psb scsi buffer by adding it to tail of @hdwq + * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted. 
**/ static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; psb->seg_cnt = 0; psb->prot_seg_cnt = 0; + qp = &phba->sli4_hba.hdwq[psb->hdwq]; if (psb->exch_busy) { - spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, - &phba->sli4_hba.lpfc_abts_scsi_buf_list); - spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list); + qp->abts_scsi_io_bufs++; + spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); } else { /* MUST zero fields if buffer is reused by another protocol */ psb->pCmd = NULL; psb->cur_iocbq.iocb_cmpl = NULL; - spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag); - list_add_tail(&psb->list, &phba->lpfc_common_buf_list_put); - phba->put_common_bufs++; - spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag); + + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + list_add_tail(&psb->list, &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); } } diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index d784de3da0c0..ebb5d5860cdb 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -138,6 +138,8 @@ struct lpfc_scsi_buf { dma_addr_t dma_phys_sgl; struct ulp_bde64 *dma_sgl; struct lpfc_iocbq cur_iocbq; + uint16_t hdwq; + uint16_t cpu; /* SCSI specific fields */ struct scsi_cmnd *pCmd; @@ -150,7 +152,6 @@ struct lpfc_scsi_buf { #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ #define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ - uint16_t cpu; uint16_t status; /* From IOCB Word 7- ulpStatus */ uint32_t result; /* From IOCB Word 4. */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d41bfa4569a4..ab1b9d9123b6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -6023,7 +6023,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) list_add_tail(&rsrc_blks->list, ext_blk_list); rsrc_start = rsrc_id; if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { - phba->sli4_hba.common_xri_start = rsrc_start + + phba->sli4_hba.io_xri_start = rsrc_start + lpfc_sli4_get_iocb_cnt(phba); } @@ -7051,37 +7051,30 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, } /** - * lpfc_sli4_repost_common_sgl_list - Repost all the allocated nvme buffer sgls + * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls * @phba: pointer to lpfc hba data structure. * * This routine walks the list of nvme buffers that have been allocated and * repost them to the port by using SGL block post. This is needed after a * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list - * to the lpfc_common_buf_list. If the repost fails, reject all nvme buffers. + * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. * * Returns: 0 = success, non-zero failure. 
**/ int -lpfc_sli4_repost_common_sgl_list(struct lpfc_hba *phba) +lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) { LIST_HEAD(post_nblist); int num_posted, rc = 0; /* get all NVME buffers need to repost to a local list */ - spin_lock_irq(&phba->common_buf_list_get_lock); - spin_lock(&phba->common_buf_list_put_lock); - list_splice_init(&phba->lpfc_common_buf_list_get, &post_nblist); - list_splice(&phba->lpfc_common_buf_list_put, &post_nblist); - phba->get_common_bufs = 0; - phba->put_common_bufs = 0; - spin_unlock(&phba->common_buf_list_put_lock); - spin_unlock_irq(&phba->common_buf_list_get_lock); + lpfc_io_buf_flush(phba, &post_nblist); /* post the list of nvme buffer sgls to port if available */ if (!list_empty(&post_nblist)) { - num_posted = lpfc_sli4_post_common_sgl_list( - phba, &post_nblist, phba->sli4_hba.common_xri_cnt); + num_posted = lpfc_sli4_post_io_sgl_list( + phba, &post_nblist, phba->sli4_hba.io_xri_cnt); /* failed to post any nvme buffer, return error */ if (num_posted == 0) rc = -EIO; @@ -7551,7 +7544,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) cnt += phba->sli4_hba.nvmet_xri_cnt; } else { /* update host common xri-sgl sizes and mappings */ - rc = lpfc_sli4_common_sgl_update(phba); + rc = lpfc_sli4_io_sgl_update(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "6082 Failed to update nvme-sgl size " @@ -7560,7 +7553,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) } /* register the allocated common sgl pool to the port */ - rc = lpfc_sli4_repost_common_sgl_list(phba); + rc = lpfc_sli4_repost_io_sgl_list(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "6116 Error %d during nvme sgl post " @@ -8562,7 +8555,6 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); if (rc) goto exit; - /* * Initialize the bootstrap memory region to avoid stale data areas * in the mailbox post. Then copy the caller's mailbox contents to @@ -10002,6 +9994,8 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) struct lpfc_sli_ring * lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { + struct lpfc_scsi_buf *lpfc_cmd; + if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { if (unlikely(!phba->sli4_hba.hdwq)) return NULL; @@ -10010,11 +10004,8 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) * be setup based on what work queue we used. 
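Stepping back to the repost path above: lpfc_io_buf_flush drains every hardware queue's get and put lists into one local list so the SGLs can be block-posted, and lpfc_io_buf_replenish (seen later in the patch) hands the buffers back one queue at a time so each queue ends up with an equitable share. A rough model of that pair on the toy types from the earlier sketches; the real flush splices whole lists under both locks rather than popping buffers one at a time.

/* Drain every queue into a single local list. (Toy version: going
 * through hdwq_get_buf also trips each queue's empty counter once,
 * which the real list splice does not.) */
static struct io_buf *hdwq_flush_all(struct hdw_queue *qps, int nq)
{
	struct io_buf *all = NULL, *buf;
	int i;

	for (i = 0; i < nq; i++) {
		while ((buf = hdwq_get_buf(&qps[i])) != NULL) {
			buf->next = all;
			all = buf;
		}
	}
	return all;
}

/* Hand buffers back round-robin across the queues. */
static void hdwq_replenish(struct hdw_queue *qps, int nq, struct io_buf *all)
{
	int i = 0;

	while (all) {
		struct io_buf *buf = all;

		all = buf->next;
		hdwq_put_buf(&qps[i], buf, 0);	/* not exchange-busy */
		i = (i + 1) % nq;
	}
}

Because each buffer is stamped with its queue index when assigned, lpfc_sli4_calc_ring, continued below, can read the work-queue index straight out of the command instead of recomputing lpfc_sli4_scmd_to_wqidx_distr().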
*/ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - piocb->hba_wqidx = - lpfc_sli4_scmd_to_wqidx_distr( - phba, piocb->context1); - piocb->hba_wqidx = piocb->hba_wqidx % - phba->cfg_hdw_queue; + lpfc_cmd = (struct lpfc_scsi_buf *)piocb->context1; + piocb->hba_wqidx = lpfc_cmd->hdwq; } return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring; } else { @@ -12924,7 +12915,8 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) cq_event, struct lpfc_cq_event, list); spin_unlock_irq(&phba->hbalock); /* Notify aborted XRI for FCP work queue */ - lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri, + cq_event->hdwq); /* Free the event processed back to the free pool */ lpfc_sli4_cq_event_release(phba, cq_event); } @@ -13426,17 +13418,8 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, switch (cq->subtype) { case LPFC_FCP: - cq_event = lpfc_cq_event_setup( - phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); - if (!cq_event) - return false; - spin_lock_irqsave(&phba->hbalock, iflags); - list_add_tail(&cq_event->list, - &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); - /* Set the fcp xri abort event flag */ - phba->hba_flag |= FCP_XRI_ABORT_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; + lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); + workposted = false; break; case LPFC_NVME_LS: /* NVME LS uses ELS resources */ case LPFC_ELS: @@ -13444,6 +13427,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); if (!cq_event) return false; + cq_event->hdwq = cq->hdwq; spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&cq_event->list, &phba->sli4_hba.sp_els_xri_aborted_work_queue); @@ -13457,7 +13441,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, if (phba->nvmet_support) lpfc_sli4_nvmet_xri_aborted(phba, wcqe); else - lpfc_sli4_nvme_xri_aborted(phba, wcqe); + lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq); workposted = false; break; @@ -14073,7 +14057,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); /* First check for NVME/SCSI completion */ - if (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map) { + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map)) { /* Process NVME / NVMET command completion */ cq = phba->sli4_hba.hdwq[qidx].nvme_cq; goto process_cq; @@ -16656,7 +16641,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, } /** - * lpfc_sli4_post_common_sgl_block - post a block of nvme sgl list to firmware + * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware * @phba: pointer to lpfc hba data structure. * @nblist: pointer to nvme buffer list. * @count: number of scsi buffers on the list. @@ -16667,9 +16652,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, * **/ static int -lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba, - struct list_head *nblist, - int count) +lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, + int count) { struct lpfc_nvme_buf *lpfc_ncmd; struct lpfc_mbx_post_uembed_sgl_page1 *sgl; @@ -16770,7 +16754,7 @@ lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba, } /** - * lpfc_sli4_post_common_sgl_list - Post blocks of nvme buffer sgls from a list + * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list * @phba: pointer to lpfc hba data structure. 
* @post_nblist: pointer to the nvme buffer list. * @@ -16784,8 +16768,8 @@ lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba, * Returns: 0 = failure, non-zero number of successfully posted buffers. **/ int -lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba, - struct list_head *post_nblist, int sb_count) +lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, + struct list_head *post_nblist, int sb_count) { struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; int status, sgl_size; @@ -16793,7 +16777,6 @@ lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba, dma_addr_t pdma_phys_sgl1; int last_xritag = NO_XRI; int cur_xritag; - unsigned long iflag; LIST_HEAD(prep_nblist); LIST_HEAD(blck_nblist); LIST_HEAD(nvme_nblist); @@ -16864,8 +16847,8 @@ lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba, continue; /* post block of NVME buffer list sgls */ - status = lpfc_sli4_post_common_sgl_block(phba, &blck_nblist, - post_cnt); + status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, + post_cnt); /* don't reset xirtag due to hole in xri block */ if (block_cnt == 0) @@ -16891,17 +16874,8 @@ lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba, } } /* Push NVME buffers with sgl posted to the available list */ - while (!list_empty(&nvme_nblist)) { - list_remove_head(&nvme_nblist, lpfc_ncmd, - struct lpfc_nvme_buf, list); - lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; - lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; - spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag); - list_add_tail(&lpfc_ncmd->list, - &phba->lpfc_common_buf_list_put); - phba->put_common_bufs++; - spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag); - } + lpfc_io_buf_replenish(phba, &nvme_nblist); + return num_posted; } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7abb395bb64a..3922e762ed31 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -33,6 +33,7 @@ typedef enum _lpfc_ctx_cmd { struct lpfc_cq_event { struct list_head list; + uint16_t hdwq; union { struct lpfc_mcqe mcqe_cmpl; struct lpfc_acqe_link acqe_link; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 6e1e958df10a..4862249732dd 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -214,6 +214,7 @@ struct lpfc_queue { struct work_struct spwork; uint64_t isr_timestamp; + uint16_t hdwq; uint8_t qe_valid; struct lpfc_queue *assoc_qp; union sli4_qe qe[1]; /* array to index entries (must be last) */ @@ -538,6 +539,22 @@ struct lpfc_sli4_hdw_queue { struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */ uint16_t fcp_cq_map; uint16_t nvme_cq_map; + + /* Keep track of IO buffers for this hardware queue */ + spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */ + struct list_head lpfc_io_buf_list_get; + spinlock_t io_buf_list_put_lock; /* Common buf free list lock */ + struct list_head lpfc_io_buf_list_put; + spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ + struct list_head lpfc_abts_scsi_buf_list; + spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */ + struct list_head lpfc_abts_nvme_buf_list; + uint32_t total_io_bufs; + uint32_t get_io_bufs; + uint32_t put_io_bufs; + uint32_t empty_io_bufs; + uint32_t abts_scsi_io_bufs; + uint32_t abts_nvme_io_bufs; }; struct lpfc_sli4_hba { @@ -662,19 +679,20 @@ struct lpfc_sli4_hba { uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. 
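The counters added to lpfc_sli4_hdw_queue above give each queue its own health snapshot: total/get/put track the buffer population, empty_io_bufs counts allocations that found the queue dry, and the two abts counters track buffers parked for exchange-busy SCSI and NVME IOs. These are the statistics the new hdwqinfo debugfs file exists to display; its actual formatting lives in lpfc_debugfs.c and is not reproduced in this excerpt, so the dump below is only an illustrative guess in the same spirit, built on the toy model of the earlier sketches (including the assumed abts_cnt field).

#include <stdio.h>

static void hdwq_dump(const struct hdw_queue *qps, int nq)
{
	int i;

	for (i = 0; i < nq; i++)
		printf("HdwQ %d: get %u put %u empty %u abts %u\n",
		       i, qps[i].get_cnt, qps[i].put_cnt,
		       qps[i].empty_cnt, qps[i].abts_cnt);
}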
*/ uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ uint16_t next_rpi; - uint16_t common_xri_max; - uint16_t common_xri_cnt; - uint16_t common_xri_start; + uint16_t io_xri_max; + uint16_t io_xri_cnt; + uint16_t io_xri_start; uint16_t els_xri_cnt; uint16_t nvmet_xri_cnt; uint16_t nvmet_io_wait_cnt; uint16_t nvmet_io_wait_total; struct list_head lpfc_els_sgl_list; struct list_head lpfc_abts_els_sgl_list; + spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ + struct list_head lpfc_abts_scsi_buf_list; struct list_head lpfc_nvmet_sgl_list; + spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */ struct list_head lpfc_abts_nvmet_ctx_list; - struct list_head lpfc_abts_scsi_buf_list; - struct list_head lpfc_abts_nvme_buf_list; struct list_head lpfc_nvmet_io_wait_list; struct lpfc_nvmet_ctx_info *nvmet_ctx_info; struct lpfc_sglq **lpfc_sglq_active_list; @@ -703,8 +721,6 @@ struct lpfc_sli4_hba { #define LPFC_SLI4_PPNAME_NON 0 #define LPFC_SLI4_PPNAME_GET 1 struct lpfc_iov iov; - spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */ - spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t sgl_list_lock; /* list of aborted els IOs */ spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */ uint32_t physical_port; @@ -839,7 +855,7 @@ int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, int lpfc_sli4_queue_setup(struct lpfc_hba *); void lpfc_sli4_queue_unset(struct lpfc_hba *); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); -int lpfc_repost_common_sgl_list(struct lpfc_hba *phba); +int lpfc_repost_io_sgl_list(struct lpfc_hba *phba); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); void lpfc_sli4_free_xri(struct lpfc_hba *, int); int lpfc_sli4_post_async_mbox(struct lpfc_hba *); @@ -862,9 +878,9 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, - struct sli4_wcqe_xri_aborted *); + struct sli4_wcqe_xri_aborted *, int); void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri); + struct sli4_wcqe_xri_aborted *axri, int idx); void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri); void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, -- cgit v1.2.3-55-g7522 From 63df6d637e3358e64b43e7a774939f8f963926cb Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:24 -0800 Subject: scsi: lpfc: Adapt cpucheck debugfs logic to Hardware Queues Similar to the io execution path that reports cpu context information, the debugfs routines for cpu information need to be aligned with the new hardware queue implementation. Convert debugfs and nvme cpucheck statistics to report information per Hardware Queue. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K.
Petersen --- drivers/scsi/lpfc/lpfc.h | 5 -- drivers/scsi/lpfc/lpfc_debugfs.c | 129 +++++++++++++++++++++------------------ drivers/scsi/lpfc/lpfc_nvme.c | 37 +++++------ drivers/scsi/lpfc/lpfc_nvmet.c | 58 ++++++++---------- drivers/scsi/lpfc/lpfc_sli4.h | 11 ++++ 5 files changed, 124 insertions(+), 116 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index feae8fb57623..310437b6b51a 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1152,11 +1152,6 @@ struct lpfc_hba { uint16_t sfp_warning; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS -#define LPFC_CHECK_CPU_CNT 32 - uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT]; uint16_t cpucheck_on; #define LPFC_CHECK_OFF 0 #define LPFC_CHECK_NVME_IO 1 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a1b7263bfe2a..d84fb8c088b1 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1366,62 +1366,67 @@ static int lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) { struct lpfc_hba *phba = vport->phba; - int i; + struct lpfc_sli4_hdw_queue *qp; + int i, j; int len = 0; - uint32_t tot_xmt = 0; - uint32_t tot_rcv = 0; - uint32_t tot_cmpl = 0; - uint32_t tot_ccmpl = 0; + uint32_t tot_xmt; + uint32_t tot_rcv; + uint32_t tot_cmpl; - if (phba->nvmet_support == 0) { - /* NVME Initiator */ - len += snprintf(buf + len, PAGE_SIZE - len, - "CPUcheck %s\n", - (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? - "Enabled" : "Disabled")); - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - if (i >= LPFC_CHECK_CPU_CNT) - break; - len += snprintf(buf + len, PAGE_SIZE - len, - "%02d: xmit x%08x cmpl x%08x\n", - i, phba->cpucheck_xmt_io[i], - phba->cpucheck_cmpl_io[i]); - tot_xmt += phba->cpucheck_xmt_io[i]; - tot_cmpl += phba->cpucheck_cmpl_io[i]; - } + len += snprintf(buf + len, PAGE_SIZE - len, + "CPUcheck %s ", + (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? + "Enabled" : "Disabled")); + if (phba->nvmet_support) { len += snprintf(buf + len, PAGE_SIZE - len, - "tot:xmit x%08x cmpl x%08x\n", - tot_xmt, tot_cmpl); - return len; + "%s\n", + (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? + "Rcv Enabled\n" : "Rcv Disabled\n")); + } else { + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); } - /* NVME Target */ - len += snprintf(buf + len, PAGE_SIZE - len, - "CPUcheck %s ", - (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ? - "IO Enabled - " : "IO Disabled - ")); - len += snprintf(buf + len, PAGE_SIZE - len, - "%s\n", - (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? 
- "Rcv Enabled\n" : "Rcv Disabled\n")); - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - if (i >= LPFC_CHECK_CPU_CNT) - break; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + qp = &phba->sli4_hba.hdwq[i]; + + tot_rcv = 0; + tot_xmt = 0; + tot_cmpl = 0; + for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { + tot_xmt += qp->cpucheck_xmt_io[j]; + tot_cmpl += qp->cpucheck_cmpl_io[j]; + if (phba->nvmet_support) + tot_rcv += qp->cpucheck_rcv_io[j]; + } + + /* Only display Hardware Qs with something */ + if (!tot_xmt && !tot_cmpl && !tot_rcv) + continue; + + len += snprintf(buf + len, PAGE_SIZE - len, + "HDWQ %03d: ", i); + for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { + /* Only display non-zero counters */ + if (!qp->cpucheck_xmt_io[j] && + !qp->cpucheck_cmpl_io[j] && + !qp->cpucheck_rcv_io[j]) + continue; + if (phba->nvmet_support) { + len += snprintf(buf + len, PAGE_SIZE - len, + "CPU %03d: %x/%x/%x ", j, + qp->cpucheck_rcv_io[j], + qp->cpucheck_xmt_io[j], + qp->cpucheck_cmpl_io[j]); + } else { + len += snprintf(buf + len, PAGE_SIZE - len, + "CPU %03d: %x/%x ", j, + qp->cpucheck_xmt_io[j], + qp->cpucheck_cmpl_io[j]); + } + } len += snprintf(buf + len, PAGE_SIZE - len, - "%02d: xmit x%08x ccmpl x%08x " - "cmpl x%08x rcv x%08x\n", - i, phba->cpucheck_xmt_io[i], - phba->cpucheck_ccmpl_io[i], - phba->cpucheck_cmpl_io[i], - phba->cpucheck_rcv_io[i]); - tot_xmt += phba->cpucheck_xmt_io[i]; - tot_rcv += phba->cpucheck_rcv_io[i]; - tot_cmpl += phba->cpucheck_cmpl_io[i]; - tot_ccmpl += phba->cpucheck_ccmpl_io[i]; + "Total: %x\n", tot_xmt); } - len += snprintf(buf + len, PAGE_SIZE - len, - "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n", - tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv); return len; } @@ -2474,9 +2479,10 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; + struct lpfc_sli4_hdw_queue *qp; char mybuf[64]; char *pbuf; - int i; + int i, j; if (nbytes > 64) nbytes = 64; @@ -2506,13 +2512,14 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, return strlen(pbuf); } else if ((strncmp(pbuf, "zero", sizeof("zero") - 1) == 0)) { - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - if (i >= LPFC_CHECK_CPU_CNT) - break; - phba->cpucheck_rcv_io[i] = 0; - phba->cpucheck_xmt_io[i] = 0; - phba->cpucheck_cmpl_io[i] = 0; - phba->cpucheck_ccmpl_io[i] = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + qp = &phba->sli4_hba.hdwq[i]; + + for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { + qp->cpucheck_rcv_io[j] = 0; + qp->cpucheck_xmt_io[j] = 0; + qp->cpucheck_cmpl_io[j] = 0; + } } return strlen(pbuf); } @@ -5358,9 +5365,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) /* Setup hbqinfo */ snprintf(name, sizeof(name), "hbqinfo"); phba->debug_hbqinfo = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_hbqinfo); + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_hbqinfo); /* Setup hdwqinfo */ snprintf(name, sizeof(name), "hdwqinfo"); @@ -5370,7 +5377,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) phba, &lpfc_debugfs_op_hdwqinfo); if (!phba->debug_hdwqinfo) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0411 Cant create debugfs hdwqinfo\n"); + "0511 Cant create debugfs hdwqinfo\n"); goto debug_failed; } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 0ecc73a6634f..fe0190b48abd 100644 
--- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -965,7 +965,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, struct lpfc_nvme_fcpreq_priv *freqpriv; struct lpfc_nvme_lport *lport; struct lpfc_nvme_ctrl_stat *cstat; - uint32_t code, status, idx; + uint32_t code, status, idx, cpu; uint16_t cid, sqhd, data; uint32_t *ptr; @@ -1136,13 +1136,17 @@ out_err: lpfc_nvme_ktime(phba, lpfc_ncmd); } if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { - if (lpfc_ncmd->cpu != smp_processor_id()) - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, - "6701 CPU Check cmpl: " - "cpu %d expect %d\n", - smp_processor_id(), lpfc_ncmd->cpu); - if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++; + idx = lpfc_ncmd->cur_iocbq.hba_wqidx; + cpu = smp_processor_id(); + if (cpu < LPFC_CHECK_CPU_CNT) { + if (lpfc_ncmd->cpu != cpu) + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_IOERR, + "6701 CPU Check cmpl: " + "cpu %d expect %d\n", + cpu, lpfc_ncmd->cpu); + phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; + } } #endif @@ -1421,7 +1425,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, { int ret = 0; int expedite = 0; - int idx; + int idx, cpu; struct lpfc_nvme_lport *lport; struct lpfc_nvme_ctrl_stat *cstat; struct lpfc_vport *vport; @@ -1620,21 +1624,18 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { - lpfc_ncmd->cpu = smp_processor_id(); - if (lpfc_ncmd->cpu != lpfc_queue_info->index) { - /* Check for admin queue */ - if (lpfc_queue_info->qidx) { + cpu = smp_processor_id(); + if (cpu < LPFC_CHECK_CPU_CNT) { + lpfc_ncmd->cpu = cpu; + if (idx != cpu) lpfc_printf_vlog(vport, - KERN_ERR, LOG_NVME_IOERR, + KERN_INFO, LOG_NVME_IOERR, "6702 CPU Check cmd: " "cpu %d wq %d\n", lpfc_ncmd->cpu, lpfc_queue_info->index); - } - lpfc_ncmd->cpu = lpfc_queue_info->index; + phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++; } - if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++; } #endif return 0; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index f2a30ee9702b..b5e287cacc2a 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -744,16 +744,6 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, ktime_get_ns(); } } - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { - id = smp_processor_id(); - if (ctxp->cpu != id) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6703 CPU Check cmpl: " - "cpu %d expect %d\n", - id, ctxp->cpu); - if (ctxp->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_cmpl_io[id]++; - } #endif rsp->done(rsp); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -771,19 +761,22 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, ctxp->ts_isr_data = cmdwqe->isr_timestamp; ctxp->ts_data_nvme = ktime_get_ns(); } - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { - id = smp_processor_id(); +#endif + rsp->done(rsp); + } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + id = smp_processor_id(); + if (id < LPFC_CHECK_CPU_CNT) { if (ctxp->cpu != id) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6704 CPU Check cmdcmpl: " "cpu %d expect %d\n", id, ctxp->cpu); - if (ctxp->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_ccmpl_io[id]++; + phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++; } -#endif - 
rsp->done(rsp); } +#endif } static int @@ -910,16 +903,15 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, } if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { int id = smp_processor_id(); - ctxp->cpu = id; - if (id < LPFC_CHECK_CPU_CNT) - phba->cpucheck_xmt_io[id]++; - if (rsp->hwqid != id) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6705 CPU Check OP: " - "cpu %d expect %d\n", - id, rsp->hwqid); - ctxp->cpu = rsp->hwqid; + if (id < LPFC_CHECK_CPU_CNT) { + if (rsp->hwqid != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6705 CPU Check OP: " + "cpu %d expect %d\n", + id, rsp->hwqid); + phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++; } + ctxp->cpu = id; /* Setup cpu for cmpl check */ } #endif @@ -1897,9 +1889,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, uint32_t size, oxid, sid, rc, qno; unsigned long iflag; int current_cpu; -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - uint32_t id; -#endif if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) return; @@ -1940,9 +1929,14 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { - id = smp_processor_id(); - if (id < LPFC_CHECK_CPU_CNT) - phba->cpucheck_rcv_io[id]++; + if (current_cpu < LPFC_CHECK_CPU_CNT) { + if (idx != current_cpu) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6703 CPU Check rcv: " + "cpu %d expect %d\n", + current_cpu, idx); + phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++; + } } #endif diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 4862249732dd..8e3e99d52f75 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -20,6 +20,10 @@ * included with this package. * *******************************************************************/ +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) +#define CONFIG_SCSI_LPFC_DEBUG_FS +#endif + #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 @@ -555,6 +559,13 @@ struct lpfc_sli4_hdw_queue { uint32_t empty_io_bufs; uint32_t abts_scsi_io_bufs; uint32_t abts_nvme_io_bufs; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +#define LPFC_CHECK_CPU_CNT 128 + uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; +#endif }; struct lpfc_sli4_hba { -- cgit v1.2.3-55-g7522 From 1fbf97425002016a8d819a9c7b78c65ed449e64f Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:26 -0800 Subject: scsi: lpfc: Convert ring number to hardware queue for nvme wqe posting. SLI4 nvme functions are passing the SLI3 ring number when posting wqe to hardware. This should be indicating the hardware queue to use, not the ring number. Replace ring number with the hardware queue that should be used. Note: SCSI avoided this issue as it utilized an older lpfc_issue_iocb routine that properly adapts. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K.
Petersen --- drivers/scsi/lpfc/lpfc_crtn.h | 4 ++-- drivers/scsi/lpfc/lpfc_init.c | 3 ++- drivers/scsi/lpfc/lpfc_nvme.c | 11 ++++++----- drivers/scsi/lpfc/lpfc_nvme.h | 3 ++- drivers/scsi/lpfc/lpfc_nvmet.c | 34 ++++++++++++++++++++++++++-------- drivers/scsi/lpfc/lpfc_nvmet.h | 1 + drivers/scsi/lpfc/lpfc_scsi.c | 9 +++++---- drivers/scsi/lpfc/lpfc_scsi.h | 3 ++- drivers/scsi/lpfc/lpfc_sli.c | 26 ++++++++++++++------------ 9 files changed, 60 insertions(+), 34 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index a623f6f619cc..1bd1362f39a0 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -315,8 +315,8 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); -int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum, - struct lpfc_iocbq *iocbq); +int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, + struct lpfc_iocbq *pwqe); struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri); struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a15c3aa569b5..36d9c32c9c87 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3734,7 +3734,8 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) return cnt; cnt++; qp = &phba->sli4_hba.hdwq[idx]; - lpfc_cmd->hdwq = idx; + lpfc_cmd->hdwq_no = idx; + lpfc_cmd->hdwq = qp; lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; spin_lock(&qp->io_buf_list_put_lock); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index c13638a3c0e7..f1f697cd7e97 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -528,7 +528,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe); + rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); if (rc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "6045 Issue GEN REQ WQE to NPORT x%x " @@ -1605,7 +1605,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_queue_info->index, ndlp->nlp_DID); - ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); + ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); if (ret) { atomic_inc(&lport->xmt_fcp_wqerr); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, @@ -1867,7 +1867,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx; abts_buf->vport = vport; abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; - ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); + ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf); spin_unlock_irqrestore(&phba->hbalock, flags); if (ret_val) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, @@ -1978,7 +1978,8 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; lpfc_ncmd->start_time = jiffies; lpfc_ncmd->flags = 0; - lpfc_ncmd->hdwq = idx; + lpfc_ncmd->hdwq = qp; + 
lpfc_ncmd->hdwq_no = idx; /* Rsp SGE will be filled in when we rcv an IO * from the NVME Layer to be sent. @@ -2026,7 +2027,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) lpfc_ncmd->ndlp = NULL; lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH; - qp = &phba->sli4_hba.hdwq[lpfc_ncmd->hdwq]; + qp = lpfc_ncmd->hdwq; if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6310 XB release deferred for " diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index 974fbec7dd04..13f900dd4eeb 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -79,7 +79,8 @@ struct lpfc_nvme_buf { dma_addr_t dma_phys_sgl; struct sli4_sge *dma_sgl; struct lpfc_iocbq cur_iocbq; - uint16_t hdwq; + struct lpfc_sli4_hdw_queue *hdwq; + uint16_t hdwq_no; uint16_t cpu; /* NVME specific fields */ diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index b5e287cacc2a..0d296aee2d82 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -845,7 +845,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); if (rc == WQE_SUCCESS) { /* * Okay to repost buffer here, but wait till cmpl @@ -901,6 +901,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, else ctxp->ts_nvme_data = ktime_get_ns(); } + + /* Setup the hdw queue if not already set */ + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { int id = smp_processor_id(); if (id < LPFC_CHECK_CPU_CNT) { @@ -946,7 +951,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, ctxp->oxid, rsp->op, rsp->rsplen); ctxp->flag |= LPFC_NVMET_IO_INP; - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); if (rc == WQE_SUCCESS) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (!ctxp->ts_cmd_nvme) @@ -965,7 +970,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, * WQE release CQE */ ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; - wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq; + wq = ctxp->hdwq->nvme_wq; pring = wq->pring; spin_lock_irqsave(&pring->ring_lock, iflags); list_add_tail(&nvmewqeq->list, &wq->wqfull_list); @@ -1015,6 +1020,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, if (phba->pport->load_flag & FC_UNLOADING) return; + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[0]; + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n", ctxp->oxid, ctxp->flag, ctxp->state); @@ -1039,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) { lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); - wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq; + wq = ctxp->hdwq->nvme_wq; spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_wqfull_flush(phba, wq, ctxp); return; @@ -1649,6 +1657,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_sli_ring *pring; struct lpfc_iocbq *nvmewqeq; + struct lpfc_nvmet_rcv_ctx *ctxp; unsigned long iflags; int rc; @@ -1662,7 +1671,8 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, list_remove_head(&wq->wqfull_list, nvmewqeq, 
struct lpfc_iocbq, list); spin_unlock_irqrestore(&pring->ring_lock, iflags); - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); + ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2; + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); if (rc == -EBUSY) { /* WQ was full again, so put it back on the list */ @@ -1765,6 +1775,7 @@ dropit: ctxp->state = LPFC_NVMET_STE_LS_RCV; ctxp->entry_cnt = 1; ctxp->rqb_buffer = (void *)nvmebuf; + ctxp->hdwq = &phba->sli4_hba.hdwq[0]; lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", oxid, size, sid); @@ -1987,6 +1998,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctxp->flag = 0; ctxp->ctxbuf = ctx_buf; ctxp->rqb_buffer = (void *)nvmebuf; + ctxp->hdwq = NULL; spin_lock_init(&ctxp->ctxlock); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -3044,7 +3056,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, abts_wqeq->iocb_flag |= LPFC_IO_NVME; abts_wqeq->context2 = ctxp; abts_wqeq->vport = phba->pport; - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; + + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { atomic_inc(&tgtp->xmt_abort_sol); @@ -3096,7 +3111,10 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; abts_wqeq->iocb_cmpl = NULL; abts_wqeq->iocb_flag |= LPFC_IO_NVMET; - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; + + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { return 0; @@ -3165,7 +3183,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; abts_wqeq->iocb_cmpl = 0; abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { atomic_inc(&tgtp->xmt_abort_unsol); diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 1aaff63f1f41..9b767c59de3d 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -140,6 +140,7 @@ struct lpfc_nvmet_rcv_ctx { #define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */ struct rqb_dmabuf *rqb_buffer; struct lpfc_nvmet_ctxbuf *ctxbuf; + struct lpfc_sli4_hdw_queue *hdwq; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint64_t ts_isr_cmd; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 08f284ca5f1e..55c58bbfee08 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -748,7 +748,8 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS lpfc_cmd->prot_data_type = 0; #endif - lpfc_cmd->hdwq = idx; + lpfc_cmd->hdwq = qp; + lpfc_cmd->hdwq_no = idx; lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size); lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd + @@ -861,7 +862,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) psb->seg_cnt = 0; psb->prot_seg_cnt = 0; - qp = &phba->sli4_hba.hdwq[psb->hdwq]; + qp = psb->hdwq; if (psb->exch_busy) { spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); psb->pCmd = NULL; @@ -4018,7 +4019,7 @@ 
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, sli4 = (phba->sli_rev == LPFC_SLI_REV4); piocbq->iocb.un.fcpi.fcpi_XRdy = 0; - idx = lpfc_cmd->hdwq; + idx = lpfc_cmd->hdwq_no; if (phba->sli4_hba.hdwq) hdwq = &phba->sli4_hba.hdwq[idx]; @@ -4557,7 +4558,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) return 0; out_host_busy_free_buf: - idx = lpfc_cmd->hdwq; + idx = lpfc_cmd->hdwq_no; lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); if (phba->sli4_hba.hdwq) { switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 68e6c4ad3936..58cb6fd575c3 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -138,7 +138,8 @@ struct lpfc_scsi_buf { dma_addr_t dma_phys_sgl; struct ulp_bde64 *dma_sgl; struct lpfc_iocbq cur_iocbq; - uint16_t hdwq; + struct lpfc_sli4_hdw_queue *hdwq; + uint16_t hdwq_no; uint16_t cpu; /* SCSI specific fields */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 7847ce2a9409..90f039a41480 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10005,7 +10005,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) */ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { lpfc_cmd = (struct lpfc_scsi_buf *)piocb->context1; - piocb->hba_wqidx = lpfc_cmd->hdwq; + piocb->hba_wqidx = lpfc_cmd->hdwq_no; } return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring; } else { @@ -11301,6 +11301,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *abtsiocbp; union lpfc_wqe128 *abts_wqe; int retval; + int idx = cmdiocb->hba_wqidx; /* * There are certain command types we don't want to abort. And we @@ -11356,7 +11357,8 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->iocb_flag |= LPFC_IO_NVME; abtsiocbp->vport = vport; abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; - retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); + retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx], + abtsiocbp); if (retval) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, "6147 Failed abts issue_wqe with status x%x " @@ -19617,7 +19619,7 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, * @pwqe: Pointer to command WQE. **/ int -lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, +lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, struct lpfc_iocbq *pwqe) { union lpfc_wqe128 *wqe = &pwqe->wqe; @@ -19659,12 +19661,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, /* NVME_FCREQ and NVME_ABTS requests */ if (pwqe->iocb_flag & LPFC_IO_NVME) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ - pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring; + wq = qp->nvme_wq; + pring = wq->pring; + + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); spin_lock_irqsave(&pring->ring_lock, iflags); - wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq; - bf_set(wqe_cqid, &wqe->generic.wqe_com, - phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id); ret = lpfc_sli4_wq_put(wq, wqe); if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -19678,9 +19680,9 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, /* NVMET requests */ if (pwqe->iocb_flag & LPFC_IO_NVMET) { /* Get the IO distribution (hba_wqidx) for WQ assignment. 
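This rework of lpfc_sli4_issue_wqe is the payoff of handing the hardware queue in directly: in the NVME branch just shown, and in the NVMET branch this comment opens, the work queue, its ring lock and the companion completion-queue id all come off the single qp argument, with the precomputed nvme_cq_map replacing a pointer chase through nvme_cq->queue_id. A condensed stand-alone C sketch of that shape; the types are toys and wq_put is a stub standing in for lpfc_sli4_wq_put.

#include <stdint.h>
#include <pthread.h>

struct wqe { uint16_t cqid; /* ...rest of the 128-byte WQE... */ };
struct wq { pthread_mutex_t ring_lock; /* ... */ };
struct hdwq { struct wq *nvme_wq; uint16_t nvme_cq_map; };

static int wq_put(struct wq *wq, struct wqe *w)
{
	(void)wq; (void)w;	/* stand-in for the doorbell post */
	return 0;
}

static int issue_wqe(struct hdwq *qp, struct wqe *wqe)
{
	struct wq *wq = qp->nvme_wq;
	int ret;

	wqe->cqid = qp->nvme_cq_map;	/* bind completion to the paired CQ */
	pthread_mutex_lock(&wq->ring_lock);
	ret = wq_put(wq, wqe);
	pthread_mutex_unlock(&wq->ring_lock);
	return ret;
}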
*/ - pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring; + wq = qp->nvme_wq; + pring = wq->pring; - spin_lock_irqsave(&pring->ring_lock, iflags); ctxp = pwqe->context2; sglq = ctxp->ctxbuf->sglq; if (pwqe->sli4_xritag == NO_XRI) { @@ -19689,9 +19691,9 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, } bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, pwqe->sli4_xritag); - wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq; - bf_set(wqe_cqid, &wqe->generic.wqe_com, - phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id); + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); + + spin_lock_irqsave(&pring->ring_lock, iflags); ret = lpfc_sli4_wq_put(wq, wqe); if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); -- cgit v1.2.3-55-g7522 From 222e9239c60888b7c9331f4b3d0a99d2f27dca6b Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:35 -0800 Subject: scsi: lpfc: Resize cpu maps structures based on possible cpus The work done to date utilized the number of present cpus when sizing per-cpu structures. Structures should have been sized based on the max possible cpu count. Convert the driver over to possible cpu count for sizing allocation. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 23 +++++++++++++++-------- drivers/scsi/lpfc/lpfc_init.c | 32 +++++++++++++------------------- drivers/scsi/lpfc/lpfc_nvmet.c | 35 ++++++++++++++++++++++------------- drivers/scsi/lpfc/lpfc_sli4.h | 2 +- 4 files changed, 51 insertions(+), 41 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 2864cb53b1e8..a114965a376c 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5176,16 +5176,22 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, case 1: len += snprintf(buf + len, PAGE_SIZE-len, "fcp_cpu_map: HBA centric mapping (%d): " - "%d online CPUs\n", - phba->cfg_fcp_cpu_map, - phba->sli4_hba.num_online_cpu); + "%d of %d CPUs online from %d possible CPUs\n", + phba->cfg_fcp_cpu_map, num_online_cpus(), + num_present_cpus(), + phba->sli4_hba.num_possible_cpu); break; } - while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) { + while (phba->sli4_hba.curr_disp_cpu < + phba->sli4_hba.num_possible_cpu) { cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; - if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) { + if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) + len += snprintf(buf + len, PAGE_SIZE - len, + "CPU %02d not present\n", + phba->sli4_hba.curr_disp_cpu); + else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) { if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) len += snprintf( buf + len, PAGE_SIZE - len, @@ -5225,14 +5231,15 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, /* display max number of CPUs keeping some margin */ if (phba->sli4_hba.curr_disp_cpu < - phba->sli4_hba.num_present_cpu && + phba->sli4_hba.num_possible_cpu && (len >= (PAGE_SIZE - 64))) { - len += snprintf(buf + len, PAGE_SIZE-len, "more...\n"); + len += snprintf(buf + len, + PAGE_SIZE - len, "more...\n"); break; } } - if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu) + if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu) phba->sli4_hba.curr_disp_cpu = 0; return len; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 05919480e430..8ba2861db7b6 
100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6373,8 +6373,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) u32 if_type; u32 if_fam; - phba->sli4_hba.num_online_cpu = num_online_cpus(); phba->sli4_hba.num_present_cpu = lpfc_present_cpu; + phba->sli4_hba.num_possible_cpu = num_possible_cpus(); phba->sli4_hba.curr_disp_cpu = 0; /* Get all the module params for configuring this host */ @@ -6796,7 +6796,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcf_rr_bmask; } - phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, + phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(struct lpfc_vector_map_info), GFP_KERNEL); if (!phba->sli4_hba.cpu_map) { @@ -6868,8 +6868,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) /* Free memory allocated for msi-x interrupt vector to CPU mapping */ kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_possible_cpu = 0; phba->sli4_hba.num_present_cpu = 0; - phba->sli4_hba.num_online_cpu = 0; phba->sli4_hba.curr_disp_cpu = 0; /* Free memory allocated for fast-path work queue handles */ @@ -10519,15 +10519,14 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) int cpu; /* Find the desired phys_id for the specified EQ */ - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; if ((match == LPFC_FIND_BY_EQ) && (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && (cpup->eq == id)) return cpu; if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) return cpu; - cpup++; } return 0; } @@ -10545,11 +10544,10 @@ lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq) int cpu; /* Find the desired phys_id for the specified EQ */ - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; if (cpup->hdwq == hdwq) return cpup->eq; - cpup++; } return 0; } @@ -10569,15 +10567,13 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu, struct lpfc_vector_map_info *cpup; int idx; - cpup = phba->sli4_hba.cpu_map; - for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + for_each_present_cpu(idx) { + cpup = &phba->sli4_hba.cpu_map[idx]; /* Does the cpup match the one we are looking for */ if ((cpup->phys_id == phys_id) && (cpup->core_id == core_id) && - (cpu != idx)) { + (cpu != idx)) return 1; - } - cpup++; } return 0; } @@ -10608,7 +10604,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) /* Init cpu_map array */ memset(phba->sli4_hba.cpu_map, 0xff, (sizeof(struct lpfc_vector_map_info) * - phba->sli4_hba.num_present_cpu)); + phba->sli4_hba.num_possible_cpu)); max_phys_id = 0; min_phys_id = 0xffff; @@ -10617,8 +10613,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) phys_id = 0; /* Update CPU map with physical id and core id of each CPU */ - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; #ifdef CONFIG_X86 cpuinfo = &cpu_data(cpu); cpup->phys_id = cpuinfo->phys_proc_id; @@ -10645,8 +10641,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) max_core_id = cpup->core_id; if (cpup->core_id < min_core_id) min_core_id = cpup->core_id; - - cpup++; } for_each_possible_cpu(i) { diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0d296aee2d82..0b27e8c5ae32 100644 --- 
a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1194,9 +1194,9 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) /* Cycle the the entire CPU context list for every MRQ */ for (i = 0; i < phba->cfg_nvmet_mrq; i++) { - for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) { + for_each_present_cpu(j) { + infop = lpfc_get_ctx_list(phba, j, i); __lpfc_nvmet_clean_io_for_cpu(phba, infop); - infop++; /* next */ } } kfree(phba->sli4_hba.nvmet_ctx_info); @@ -1211,14 +1211,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) union lpfc_wqe128 *wqe; struct lpfc_nvmet_ctx_info *last_infop; struct lpfc_nvmet_ctx_info *infop; - int i, j, idx; + int i, j, idx, cpu; lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6403 Allocate NVMET resources for %d XRIs\n", phba->sli4_hba.nvmet_xri_cnt); phba->sli4_hba.nvmet_ctx_info = kcalloc( - phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq, + phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); if (!phba->sli4_hba.nvmet_ctx_info) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -1246,13 +1246,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) * of the IO completion. Thus a context that was allocated for MRQ A * whose IO completed on CPU B will be freed to cpuB/mrqA. */ - infop = phba->sli4_hba.nvmet_ctx_info; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for_each_possible_cpu(i) { for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + infop = lpfc_get_ctx_list(phba, i, j); INIT_LIST_HEAD(&infop->nvmet_ctx_list); spin_lock_init(&infop->nvmet_ctx_list_lock); infop->nvmet_ctx_list_cnt = 0; - infop++; } } @@ -1262,8 +1261,10 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) * MRQ 1 cycling thru CPUs 0 - X, and so on. */ for (j = 0; j < phba->cfg_nvmet_mrq; j++) { - last_infop = lpfc_get_ctx_list(phba, 0, j); - for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) { + last_infop = lpfc_get_ctx_list(phba, + cpumask_first(cpu_present_mask), + j); + for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) { infop = lpfc_get_ctx_list(phba, i, j); infop->nvmet_ctx_next_cpu = last_infop; last_infop = infop; @@ -1274,6 +1275,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) * received command on a per xri basis. */ idx = 0; + cpu = cpumask_first(cpu_present_mask); for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); if (!ctx_buf) { @@ -1327,7 +1329,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) * is MRQidx will be associated with CPUidx. This association * can change on the fly. 
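The loop being patched here must walk CPU ids that can be sparse: now that the tables are sized for possible CPUs, a bare cpu++ could land on an id that is not present, so the code steps with cpumask_next() over cpu_present_mask and wraps to cpumask_first() when it reaches nr_cpu_ids. A small self-contained C program modeling that wraparound walk; present[], the helpers and the counts are invented for illustration.

#include <stdio.h>

#define NR_CPUS 8	/* toy stand-in for nr_cpu_ids */

/* Next present cpu after 'cpu', or NR_CPUS when we fall off the end. */
static int next_present(const int *present, int cpu)
{
	for (cpu++; cpu < NR_CPUS; cpu++)
		if (present[cpu])
			return cpu;
	return NR_CPUS;
}

static int first_present(const int *present)
{
	return next_present(present, -1);
}

int main(void)
{
	int present[NR_CPUS] = { 1, 1, 0, 0, 1, 1, 0, 0 };
	int cpu = first_present(present);
	int i;

	for (i = 0; i < 10; i++) {	/* place ten contexts */
		printf("ctx %d -> cpu %d\n", i, cpu);
		cpu = next_present(present, cpu);
		if (cpu == NR_CPUS)	/* wrapped: restart at the first */
			cpu = first_present(present);
	}
	return 0;
}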
*/ - infop = lpfc_get_ctx_list(phba, idx, idx); + infop = lpfc_get_ctx_list(phba, cpu, idx); spin_lock(&infop->nvmet_ctx_list_lock); list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); infop->nvmet_ctx_list_cnt++; @@ -1335,11 +1337,18 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) /* Spread ctx structures evenly across all MRQs */ idx++; - if (idx >= phba->cfg_nvmet_mrq) + if (idx >= phba->cfg_nvmet_mrq) { idx = 0; + cpu = cpumask_first(cpu_present_mask); + continue; + } + cpu = cpumask_next(cpu, cpu_present_mask); + if (cpu == nr_cpu_ids) + cpu = cpumask_first(cpu_present_mask); + } - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for_each_present_cpu(i) { for (j = 0; j < phba->cfg_nvmet_mrq; j++) { infop = lpfc_get_ctx_list(phba, i, j); lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, @@ -1839,7 +1848,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba, else get_infop = current_infop->nvmet_ctx_next_cpu; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) { if (get_infop == current_infop) { get_infop = get_infop->nvmet_ctx_next_cpu; continue; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 20566c506e5f..1e3d7f534eaa 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -890,7 +890,7 @@ struct lpfc_sli4_hba { /* CPU to vector mapping information */ struct lpfc_vector_map_info *cpu_map; - uint16_t num_online_cpu; + uint16_t num_possible_cpu; uint16_t num_present_cpu; uint16_t curr_disp_cpu; struct lpfc_eq_intr_info __percpu *eq_info; -- cgit v1.2.3-55-g7522 From 472e146d1cf3410a898b49834500fa9e33ac41a2 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:39 -0800 Subject: scsi: lpfc: Correct upcalling nvmet_fc transport during io done downcall When the transport calls into the lpfc target to release an IO job structure, which corresponds to an exchange, and if the driver was waiting for an exchange in order to post a previously received command to the transport, the driver immediately takes the IO job and reuses the context for the prior command and calls nvmet_fc_rcv_fcp_req() to tell the transport about a newly received command. Problem is, the execution of the IO job release may be in the context of the back end driver and its bio completion handlers, thus it may be in a irq context and protection code kicks in in the bio and request layers that are subsequently called. Rework lpfc so that instead of immediately upcalling, queue it to a deferred work thread and have the thread make the upcall. Took advantage of this change to remove duplicated code with the normal command receive path that preps the IO job and upcalls nvmet_fc. Created a common routine both paths use. Also corrected some errors that were found during review of the context freeing and reuse - basically unlocked operations and a somewhat disjoint set of calls to release associated job elements. Cleaned up this path and added locks for coherency. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc.h | 1 + drivers/scsi/lpfc/lpfc_nvmet.c | 247 ++++++++++++++++++++++------------------- drivers/scsi/lpfc/lpfc_nvmet.h | 1 + 3 files changed, 137 insertions(+), 112 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b710994a352e..ea97d82f99f9 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -144,6 +144,7 @@ struct lpfc_nvmet_ctxbuf { struct lpfc_nvmet_rcv_ctx *context; struct lpfc_iocbq *iocbq; struct lpfc_sglq *sglq; + struct work_struct defer_work; }; struct lpfc_dma_pool { diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0b27e8c5ae32..0d10dfc74018 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -73,6 +73,9 @@ static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *, uint32_t, uint16_t); static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_nvmet_rcv_ctx *); +static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *); + +static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf); static union lpfc_wqe128 lpfc_tsend_cmd_template; static union lpfc_wqe128 lpfc_treceive_cmd_template; @@ -220,21 +223,19 @@ lpfc_nvmet_cmd_template(void) void lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) { - unsigned long iflag; + lockdep_assert_held(&ctxp->ctxlock); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6313 NVMET Defer ctx release xri x%x flg x%x\n", ctxp->oxid, ctxp->flag); - spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); - if (ctxp->flag & LPFC_NVMET_CTX_RLS) { - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, - iflag); + if (ctxp->flag & LPFC_NVMET_CTX_RLS) return; - } + ctxp->flag |= LPFC_NVMET_CTX_RLS; + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); } /** @@ -325,7 +326,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) struct rqb_dmabuf *nvmebuf; struct lpfc_nvmet_ctx_info *infop; uint32_t *payload; - uint32_t size, oxid, sid, rc; + uint32_t size, oxid, sid; int cpu; unsigned long iflag; @@ -341,6 +342,20 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) "6411 NVMET free, already free IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } + + if (ctxp->rqb_buffer) { + nvmebuf = ctxp->rqb_buffer; + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->rqb_buffer = NULL; + if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) { + ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + } else { + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ + } + } ctxp->state = LPFC_NVMET_STE_FREE; spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); @@ -388,46 +403,30 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) } #endif atomic_inc(&tgtp->rcv_fcp_cmd_in); - /* - * The calling sequence should be: - * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done - * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. 
- * When we return from nvmet_fc_rcv_fcp_req, all relevant info - * the NVME command / FC header is stored. - * A buffer has already been reposted for this IO, so just free - * the nvmebuf. - */ - rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, - payload, size); - /* Process FCP command */ - if (rc == 0) { - ctxp->rqb_buffer = NULL; - atomic_inc(&tgtp->rcv_fcp_cmd_out); - nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); - return; - } + /* flag new work queued, replacement buffer has already + * been reposted + */ + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); - /* Processing of FCP command is deferred */ - if (rc == -EOVERFLOW) { - lpfc_nvmeio_data(phba, - "NVMET RCV BUSY: xri x%x sz %d " - "from %06x\n", - oxid, size, sid); - atomic_inc(&tgtp->rcv_fcp_cmd_out); - return; + if (!queue_work(phba->wq, &ctx_buf->defer_work)) { + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6181 Unable to queue deferred work " + "for oxid x%x. " + "FCP Drop IO [x%x x%x x%x]\n", + ctxp->oxid, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); } - atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", - ctxp->oxid, rc, - atomic_read(&tgtp->rcv_fcp_cmd_in), - atomic_read(&tgtp->rcv_fcp_cmd_out), - atomic_read(&tgtp->xmt_fcp_release)); - - lpfc_nvmet_defer_release(phba, ctxp); - lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); - nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); return; } spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); @@ -1113,6 +1112,8 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; struct lpfc_hba *phba = ctxp->phba; + unsigned long iflag; + lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", ctxp->oxid, ctxp->size, smp_processor_id()); @@ -1131,6 +1132,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, /* Free the nvmebuf since a new buffer already replaced it */ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->rqb_buffer = NULL; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } static struct nvmet_fc_target_template lpfc_tgttemplate = { @@ -1323,6 +1327,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } + INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work); /* * Add ctx to MRQidx context list. 
Our initial assumption @@ -1824,6 +1829,86 @@ dropit: #endif } +static void +lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; + struct lpfc_hba *phba = ctxp->phba; + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t *payload; + uint32_t rc; + unsigned long iflags; + + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6159 process_rcv_fcp_req, nvmebuf is NULL, " + "oxid: x%x flg: x%x state: x%x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + return; + } + + payload = (uint32_t *)(nvmebuf->dbuf.virt); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + /* + * The calling sequence should be: + * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + * When we return from nvmet_fc_rcv_fcp_req, all relevant info + * the NVME command / FC header is stored. + * A buffer has already been reposted for this IO, so just free + * the nvmebuf. + */ + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, + payload, ctxp->size); + /* Process FCP command */ + if (rc == 0) { + atomic_inc(&tgtp->rcv_fcp_cmd_out); + return; + } + + /* Processing of FCP command is deferred */ + if (rc == -EOVERFLOW) { + lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d " + "from %06x\n", + ctxp->oxid, ctxp->size, ctxp->sid); + atomic_inc(&tgtp->rcv_fcp_cmd_out); + atomic_inc(&tgtp->defer_fod); + return; + } + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", + ctxp->oxid, rc, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", + ctxp->oxid, ctxp->size, ctxp->sid); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); +#endif +} + +static void +lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_ctxbuf *ctx_buf = + container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); + + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); +#endif +} + static struct lpfc_nvmet_ctxbuf * lpfc_nvmet_replenish_context(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *current_infop) @@ -1906,7 +1991,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf; struct lpfc_nvmet_ctx_info *current_infop; uint32_t *payload; - uint32_t size, oxid, sid, rc, qno; + uint32_t size, oxid, sid, qno; unsigned long iflag; int current_cpu; @@ -1917,11 +2002,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, if (!nvmebuf || !phba->targetport) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, "6157 NVMET FCP Drop IO\n"); - oxid = 0; - size = 0; - sid = 0; - ctxp = NULL; - goto dropit; + if (nvmebuf) + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); + return; } /* @@ -2028,67 +2111,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, #endif atomic_inc(&tgtp->rcv_fcp_cmd_in); - /* - * The calling sequence should be: - * 
nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done - * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. - * When we return from nvmet_fc_rcv_fcp_req, all relevant info in - * the NVME command / FC header is stored, so we are free to repost - * the buffer. - */ - rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, - payload, size); - - /* Process FCP command */ - if (rc == 0) { - ctxp->rqb_buffer = NULL; - atomic_inc(&tgtp->rcv_fcp_cmd_out); - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ - return; - } - - /* Processing of FCP command is deferred */ - if (rc == -EOVERFLOW) { - /* - * Post a brand new DMA buffer to RQ and defer - * freeing rcv buffer till .defer_rcv callback - */ - qno = nvmebuf->idx; - lpfc_post_rq_buffer( - phba, phba->sli4_hba.nvmet_mrq_hdr[qno], - phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); - - lpfc_nvmeio_data(phba, - "NVMET RCV BUSY: xri x%x sz %d from %06x\n", - oxid, size, sid); - atomic_inc(&tgtp->rcv_fcp_cmd_out); - atomic_inc(&tgtp->defer_fod); - return; - } - ctxp->rqb_buffer = nvmebuf; - - atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", - ctxp->oxid, rc, - atomic_read(&tgtp->rcv_fcp_cmd_in), - atomic_read(&tgtp->rcv_fcp_cmd_out), - atomic_read(&tgtp->xmt_fcp_release)); -dropit: - lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", - oxid, size, sid); - if (oxid) { - lpfc_nvmet_defer_release(phba, ctxp); - lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ - return; - } - - if (ctx_buf) - lpfc_nvmet_ctxbuf_post(phba, ctx_buf); - - if (nvmebuf) - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); } /** diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 9b767c59de3d..b8c342a41d98 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -137,6 +137,7 @@ struct lpfc_nvmet_rcv_ctx { #define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */ #define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ #define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ +#define LPFC_NVMET_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */ #define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */ struct rqb_dmabuf *rqb_buffer; struct lpfc_nvmet_ctxbuf *ctxbuf; -- cgit v1.2.3-55-g7522 From c160c0f8063e4225c069ac904a3f2cf89dcb3a81 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:40 -0800 Subject: scsi: lpfc: Fix nvmet issues when link bounce under IO load Various null pointer dereference and general protection fault panics occur when there is a link bounce under load. There are a large number of "error" 6413 messages indicating "bad release". The issues resolve to list corruptions due to missing or inconsistent lock protection. Lockups are due to nested locks in the unsolicited abort path. The unsolicited abort path calls the wrong abort processing routine. There was also duplicate context release while aborts were still active in the hardware. Removed duplicate locks and added lock protection around list item removal. Commonized lock handling around the abort processing routines. Prevented context release while still on the ABTS list. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K.
Petersen --- drivers/scsi/lpfc/lpfc_nvmet.c | 50 +++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 13 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0d10dfc74018..4aadb3d5e718 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1032,7 +1032,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, atomic_inc(&lpfc_nvmep->xmt_fcp_abort); spin_lock_irqsave(&ctxp->ctxlock, flags); - ctxp->state = LPFC_NVMET_STE_ABORT; /* Since iaab/iaar are NOT set, we need to check * if the firmware is in process of aborting IO @@ -1044,13 +1043,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, ctxp->flag |= LPFC_NVMET_ABORT_OP; if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) { + spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); wq = ctxp->hdwq->nvme_wq; - spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_wqfull_flush(phba, wq, ctxp); return; } + spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* An state of LPFC_NVMET_STE_RCV means we have just received * the NVME command and have not started processing it. @@ -1062,7 +1062,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, else lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); - spin_unlock_irqrestore(&ctxp->ctxlock, flags); } static void @@ -1076,14 +1075,18 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, unsigned long flags; bool aborting = false; - if (ctxp->state != LPFC_NVMET_STE_DONE && - ctxp->state != LPFC_NVMET_STE_ABORT) { + spin_lock_irqsave(&ctxp->ctxlock, flags); + if (ctxp->flag & LPFC_NVMET_XBUSY) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6027 NVMET release with XBUSY flag x%x" + " oxid x%x\n", + ctxp->flag, ctxp->oxid); + else if (ctxp->state != LPFC_NVMET_STE_DONE && + ctxp->state != LPFC_NVMET_STE_ABORT) lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, "6413 NVMET release bad state %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); - } - spin_lock_irqsave(&ctxp->ctxlock, flags); if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || (ctxp->flag & LPFC_NVMET_XBUSY)) { aborting = true; @@ -1523,6 +1526,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; + spin_lock(&ctxp->ctxlock); /* Check if we already received a free context call * and we have completed processing an abort situation. */ @@ -1532,6 +1536,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, released = true; } ctxp->flag &= ~LPFC_NVMET_XBUSY; + spin_unlock(&ctxp->ctxlock); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); rrq_empty = list_empty(&phba->active_rrq_list); @@ -1563,7 +1568,6 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, struct fc_frame_header *fc_hdr) - { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_hba *phba = vport->phba; @@ -2696,15 +2700,17 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, if (ctxp->flag & LPFC_NVMET_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->state = LPFC_NVMET_STE_DONE; /* Check if we already received a free context call * and we have completed processing an abort situation. 
*/ - spin_lock_irqsave(&ctxp->ctxlock, flags); if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && !(ctxp->flag & LPFC_NVMET_XBUSY)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVMET_ABORT_OP; @@ -2770,6 +2776,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + spin_lock_irqsave(&ctxp->ctxlock, flags); if (ctxp->flag & LPFC_NVMET_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); @@ -2784,10 +2791,11 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, * and we have completed processing an abort situation. */ ctxp->state = LPFC_NVMET_STE_DONE; - spin_lock_irqsave(&ctxp->ctxlock, flags); if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && !(ctxp->flag & LPFC_NVMET_XBUSY)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVMET_ABORT_OP; @@ -2993,12 +3001,15 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); /* No failure to an ABTS request. */ + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } /* Issue ABTS for this WQE based on iotag */ ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); + spin_lock_irqsave(&ctxp->ctxlock, flags); if (!ctxp->abort_wqeq) { atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, @@ -3006,11 +3017,13 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, "xri: x%x\n", ctxp->oxid); /* No failure to an ABTS request. */ ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } abts_wqeq = ctxp->abort_wqeq; abts_wqe = &abts_wqeq->wqe; ctxp->state = LPFC_NVMET_STE_ABORT; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* Announce entry to new IO submit field. */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, @@ -3031,7 +3044,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, "NVME Req now. 
hba_flag x%x oxid x%x\n", phba->hba_flag, ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } @@ -3044,7 +3059,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, "still pending on oxid x%x\n", ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } @@ -3099,7 +3116,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, } atomic_inc(&tgtp->xmt_abort_rsp_error); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_sli_release_iocbq(phba, abts_wqeq); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6166 Failed ABORT issue_wqe with status x%x " @@ -3108,7 +3127,6 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, return 1; } - static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, @@ -3117,6 +3135,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, struct lpfc_nvmet_tgtport *tgtp; struct lpfc_iocbq *abts_wqeq; unsigned long flags; + bool released = false; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -3154,8 +3173,12 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, aerr: spin_lock_irqsave(&ctxp->ctxlock, flags); - if (ctxp->flag & LPFC_NVMET_CTX_RLS) + if (ctxp->flag & LPFC_NVMET_CTX_RLS) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + released = true; + } ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS); spin_unlock_irqrestore(&ctxp->ctxlock, flags); @@ -3163,7 +3186,8 @@ aerr: lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", ctxp->oxid, rc); - lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + if (released) + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); return 1; } -- cgit v1.2.3-55-g7522 From 0d041215f0b4420bf193f3b6e13a1887ffc8320c Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:41 -0800 Subject: scsi: lpfc: Update 12.2.0.0 file copyrights to 2019 For files modified as part of 12.2.0.0 patches, update copyright to 2019 Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc.h | 2 +- drivers/scsi/lpfc/lpfc_attr.c | 2 +- drivers/scsi/lpfc/lpfc_crtn.h | 2 +- drivers/scsi/lpfc/lpfc_ct.c | 2 +- drivers/scsi/lpfc/lpfc_debugfs.c | 2 +- drivers/scsi/lpfc/lpfc_debugfs.h | 2 +- drivers/scsi/lpfc/lpfc_els.c | 2 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 2 +- drivers/scsi/lpfc/lpfc_hw4.h | 2 +- drivers/scsi/lpfc/lpfc_init.c | 2 +- drivers/scsi/lpfc/lpfc_nportdisc.c | 2 +- drivers/scsi/lpfc/lpfc_nvme.c | 2 +- drivers/scsi/lpfc/lpfc_nvme.h | 2 +- drivers/scsi/lpfc/lpfc_nvmet.c | 2 +- drivers/scsi/lpfc/lpfc_nvmet.h | 2 +- drivers/scsi/lpfc/lpfc_scsi.c | 2 +- drivers/scsi/lpfc/lpfc_scsi.h | 2 +- drivers/scsi/lpfc/lpfc_sli.c | 2 +- drivers/scsi/lpfc/lpfc_sli.h | 2 +- drivers/scsi/lpfc/lpfc_sli4.h | 2 +- drivers/scsi/lpfc/lpfc_version.h | 2 +- drivers/scsi/lpfc/lpfc_vport.c | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c') diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index ea97d82f99f9..41d849f283f6 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 212bfae1966a..ce3e541434dc 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 982401c31c12..e0b14d791b8c 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 98faa3aae35c..7290573110fe 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. 
All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 19d13a8789b9..ee98ea1c68f9 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 1fbee6496f85..93ab7dfb8ee0 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 3df2cfd02b41..fc077cb87900 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 47608d4b8e8c..aa4961a2caf8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index c9a056ef321a..ff875b833192 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 563b6009ffce..d24af076afa8 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index acfce67dacde..6172682a24ba 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 271ad42be7f4..e73895674f39 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index 07d5fa16dd39..6b6d88cefdb8 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 4aadb3d5e718..caf641ee6210 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channsel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index b8c342a41d98..c3ca93d55856 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index af263b0e8bad..c98f264f1d83 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 9ee0ea4a1ef2..f76667b7da7b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d548066a7e91..c4c319245a66 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index d203a666497f..7a1a761efdd6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 1e3d7f534eaa..40c85091c805 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 3f4398ffb567..a248a895c7d0 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 0d712eb04e2b..343bc71d4615 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * -- cgit v1.2.3-55-g7522
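The defer_work change in the first patch above follows the standard Linux workqueue pattern: embed a work_struct in the receive context, initialize it with INIT_WORK(), and queue it from the fast path so that the nvmet_fc_rcv_fcp_req() upcall runs in process context rather than in the buffer-repost path. A minimal, self-contained sketch of that pattern follows; the names (rcv_ctx, process_rcv, defer_wq, rcv_fast_path) are simplified stand-ins for the lpfc structures, not driver code, and the failure handling is reduced to the drop case the driver logs as message 6181.

	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>

	/* Stand-in for lpfc_nvmet_ctxbuf: the receive context carries its
	 * own work_struct so the expensive upcall can be deferred.
	 */
	struct rcv_ctx {
		struct work_struct defer_work;
		u32 oxid;
	};

	static struct workqueue_struct *defer_wq;	/* stand-in for phba->wq */

	/* Runs in workqueue (process) context; stand-in for
	 * lpfc_nvmet_process_rcv_fcp_req().
	 */
	static void process_rcv(struct work_struct *work)
	{
		struct rcv_ctx *ctx = container_of(work, struct rcv_ctx,
						   defer_work);

		pr_info("processing deferred request, oxid x%x\n", ctx->oxid);
		kfree(ctx);
	}

	/* Called from the fast (interrupt) path: queue the work and return
	 * immediately. queue_work() returning false means the work item was
	 * already pending; the driver treats that as a drop-and-abort case.
	 */
	static int rcv_fast_path(u32 oxid)
	{
		struct rcv_ctx *ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);

		if (!ctx)
			return -ENOMEM;
		ctx->oxid = oxid;
		INIT_WORK(&ctx->defer_work, process_rcv);
		if (!queue_work(defer_wq, &ctx->defer_work)) {
			kfree(ctx);
			return -EBUSY;
		}
		return 0;
	}

	static int __init defer_demo_init(void)
	{
		int ret;

		defer_wq = alloc_workqueue("defer_demo", WQ_MEM_RECLAIM, 0);
		if (!defer_wq)
			return -ENOMEM;
		ret = rcv_fast_path(0x1234);
		if (ret)
			destroy_workqueue(defer_wq);
		return ret;
	}

	static void __exit defer_demo_exit(void)
	{
		destroy_workqueue(defer_wq);	/* drains any pending work */
	}

	module_init(defer_demo_init);
	module_exit(defer_demo_exit);
	MODULE_LICENSE("GPL");

One difference from the sketch: lpfc initializes each context buffer's work item once, at setup time in lpfc_nvmet_setup_io_context(), and reuses it across IOs, whereas the sketch allocates and initializes per request for brevity.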