author    James Smart <james.smart@broadcom.com>  2017-02-12 22:52:35 +0100
committer Martin K. Petersen <martin.petersen@oracle.com>  2017-02-23 00:41:43 +0100
commit    2d7dbc4c2775eb30df97be00090adbfcc7fc5086 (patch)
tree      28eba1892b2d6122142a27dccb17d2291f471aa5 /drivers/scsi/lpfc/lpfc_init.c
parent    scsi: lpfc: NVME Target: Base modifications (diff)
scsi: lpfc: NVME Target: Receive buffer updates
Allocates buffer pools and configures adapter interfaces to handle receive buffers (asynchronous FCP CMD IUs, first-burst data) from the adapter. Splits buffer handling by protocol.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
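A note on the sgl accounting change in the first hunk below: the requested NVMET receive-buffer XRI count (cfg_nvmet_mrq receive queues, times cfg_nvmet_mrq_post buffers posted per queue) is clamped to the XRIs left over after the ELS allocation. A minimal standalone sketch of that arithmetic, with invented numbers standing in for the adapter-derived cfg_* values:

/* Hypothetical, self-contained sketch of the XRI budgeting done in
 * lpfc_sli4_nvmet_sgl_update(); variable names mirror the patch, but
 * the numbers are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	int max_xri = 2048;           /* assumed adapter XRI limit */
	int els_xri_cnt = 256;        /* assumed ELS reservation */
	int cfg_nvmet_mrq = 4;        /* number of MRQ pairs */
	int cfg_nvmet_mrq_post = 512; /* buffers posted per MRQ */

	int tot_cnt = max_xri - els_xri_cnt;
	int nvmet_xri_cnt = cfg_nvmet_mrq * cfg_nvmet_mrq_post;

	if (nvmet_xri_cnt > tot_cnt) {
		/* shrink the per-MRQ post count so the total fits */
		cfg_nvmet_mrq_post = tot_cnt / cfg_nvmet_mrq;
		nvmet_xri_cnt = cfg_nvmet_mrq * cfg_nvmet_mrq_post;
		printf("NVMET post-sgl count changed to %d\n",
		       cfg_nvmet_mrq_post);
	}
	printf("nvmet_xri_cnt = %d of budget %d\n", nvmet_xri_cnt, tot_cnt);
	return 0;
}

With these numbers, 4 * 512 = 2048 exceeds the 1792-XRI budget, so the per-MRQ post count drops to 448 and the total to 1792, which is what the "6301" log message in the hunk reports.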
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 233
1 file changed, 226 insertions(+), 7 deletions(-)
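One pattern recurs in the diff for each new NVMET resource (nvmet_cqset, nvmet_mrq_hdr, nvmet_mrq_data): kcalloc() a pointer array sized by cfg_nvmet_mrq, then fill each slot from a per-queue allocator, jumping to out_error on any failure. A hedged userspace approximation of that shape, where struct queue and queue_alloc() are invented stand-ins for struct lpfc_queue and lpfc_sli4_queue_alloc():

/* Userspace approximation of the kcalloc-then-fill pattern used for
 * the NVMET queue sets; not the driver's real API.
 */
#include <stdlib.h>

struct queue { int id; };

static struct queue *queue_alloc(int id)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (q)
		q->id = id;
	return q;
}

/* Returns a fully populated array of cfg_nvmet_mrq queues, or NULL. */
static struct queue **alloc_queue_set(int cfg_nvmet_mrq)
{
	struct queue **set = calloc(cfg_nvmet_mrq, sizeof(*set));
	int idx;

	if (!set)
		return NULL;
	for (idx = 0; idx < cfg_nvmet_mrq; idx++) {
		set[idx] = queue_alloc(idx);
		if (!set[idx])
			goto out_error;
	}
	return set;

out_error:
	while (idx--)
		free(set[idx]);
	free(set);
	return NULL;
}

int main(void)
{
	struct queue **set = alloc_queue_set(4);

	return set ? 0 : 1;
}

In the patch itself the unwind is centralized rather than local: out_error falls into lpfc_sli4_queue_destroy(), which is why the destroy-side hunk pairs each new set with an lpfc_sli4_release_queues() call.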
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 03fa3f02e33e..e6cbc0bac029 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3354,8 +3354,15 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
* update on pci function's nvmet xri-sgl list
*/
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
- nvmet_xri_cnt = 0;
+ nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ if (nvmet_xri_cnt > tot_cnt) {
+ phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
+ nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6301 NVMET post-sgl count changed to %d\n",
+ phba->cfg_nvmet_mrq_post);
+ }
if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
/* els xri-sgl expanded */
@@ -7674,11 +7681,13 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2574 IO channels: irqs %d fcp %d nvme %d\n",
+ "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
phba->io_channel_irqs, phba->cfg_fcp_io_channel,
- phba->cfg_nvme_io_channel);
+ phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
/* Get EQ depth from module parameter, fake the default for now */
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -7768,7 +7777,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int idx, io_channel;
+ int idx, io_channel, max;
/*
* Create HBA Record arrays.
@@ -7845,7 +7854,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
-
phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
sizeof(struct lpfc_queue *),
GFP_KERNEL);
@@ -7870,6 +7878,39 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"fast-path CQ map\n");
goto out_error;
}
+
+ if (phba->nvmet_support) {
+ phba->sli4_hba.nvmet_cqset = kcalloc(
+ phba->cfg_nvmet_mrq,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvmet_cqset) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3121 Fail allocate memory for "
+ "fast-path CQ set array\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
+ phba->cfg_nvmet_mrq,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvmet_mrq_hdr) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3122 Fail allocate memory for "
+ "fast-path RQ set hdr array\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_data = kcalloc(
+ phba->cfg_nvmet_mrq,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvmet_mrq_data) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3124 Fail allocate memory for "
+ "fast-path RQ set data array\n");
+ goto out_error;
+ }
+ }
}
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
@@ -7897,6 +7938,30 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (lpfc_alloc_nvme_wq_cq(phba, idx))
goto out_error;
+ /* allocate MRQ CQs */
+ max = phba->cfg_nvme_io_channel;
+ if (max < phba->cfg_nvmet_mrq)
+ max = phba->cfg_nvmet_mrq;
+
+ for (idx = 0; idx < max; idx++)
+ if (lpfc_alloc_nvme_wq_cq(phba, idx))
+ goto out_error;
+
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3142 Failed allocate NVME "
+ "CQ Set (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_cqset[idx] = qdesc;
+ }
+ }
+
/*
* Create Slow Path Completion Queues (CQs)
*/
@@ -7999,6 +8064,44 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
phba->sli4_hba.dat_rq = qdesc;
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ /* Create NVMET Receive Queue for header */
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3146 Failed allocate "
+ "receive HRQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
+
+ /* Only needed for header of RQ pair */
+ qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
+ GFP_KERNEL);
+ if (qdesc->rqbp == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6131 Failed allocate "
+ "Header RQBP\n");
+ goto out_error;
+ }
+
+ /* Create NVMET Receive Queue for data */
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3156 Failed allocate "
+ "receive DRQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
+ }
+ }
+
/* Create the Queues needed for Flash Optimized Fabric operations */
if (phba->cfg_fof)
lpfc_fof_queue_create(phba);
@@ -8085,6 +8188,14 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
+
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8422,6 +8533,44 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
(uint32_t)rc);
goto out_destroy;
}
+ if (phba->nvmet_support) {
+ if (!phba->sli4_hba.nvmet_cqset) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3165 Fast-path NVME CQ Set "
+ "array not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy;
+ }
+ if (phba->cfg_nvmet_mrq > 1) {
+ rc = lpfc_cq_create_set(phba,
+ phba->sli4_hba.nvmet_cqset,
+ phba->sli4_hba.hba_eq,
+ LPFC_WCQ, LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3164 Failed setup of NVME CQ "
+ "Set, rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
+ }
+ } else {
+ /* Set up NVMET Receive Complete Queue */
+ rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
+ phba->sli4_hba.hba_eq[0],
+ LPFC_WCQ, LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6089 Failed setup NVMET CQ: "
+ "rc = 0x%x\n", (uint32_t)rc);
+ goto out_destroy;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6090 NVMET CQ setup: cq-id=%d, "
+ "parent eq-id=%d\n",
+ phba->sli4_hba.nvmet_cqset[0]->queue_id,
+ phba->sli4_hba.hba_eq[0]->queue_id);
+ }
+ }
/* Set up slow-path ELS WQ/CQ */
if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
@@ -8473,6 +8622,58 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.nvmels_cq->queue_id);
}
+ /*
+ * Create NVMET Receive Queue (RQ)
+ */
+ if (phba->nvmet_support) {
+ if ((!phba->sli4_hba.nvmet_cqset) ||
+ (!phba->sli4_hba.nvmet_mrq_hdr) ||
+ (!phba->sli4_hba.nvmet_mrq_data)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6130 MRQ CQ Queues not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy;
+ }
+ if (phba->cfg_nvmet_mrq > 1) {
+ rc = lpfc_mrq_create(phba,
+ phba->sli4_hba.nvmet_mrq_hdr,
+ phba->sli4_hba.nvmet_mrq_data,
+ phba->sli4_hba.nvmet_cqset,
+ LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6098 Failed setup of NVMET "
+ "MRQ: rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
+ }
+
+ } else {
+ rc = lpfc_rq_create(phba,
+ phba->sli4_hba.nvmet_mrq_hdr[0],
+ phba->sli4_hba.nvmet_mrq_data[0],
+ phba->sli4_hba.nvmet_cqset[0],
+ LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6057 Failed setup of NVMET "
+ "Receive Queue: rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
+ }
+
+ lpfc_printf_log(
+ phba, KERN_INFO, LOG_INIT,
+ "6099 NVMET RQ setup: hdr-rq-id=%d, "
+ "dat-rq-id=%d parent cq-id=%d\n",
+ phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
+ phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
+ phba->sli4_hba.nvmet_cqset[0]->queue_id);
+
+ }
+ }
+
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0540 Receive Queue not allocated\n");
@@ -8589,6 +8790,21 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(phba,
+ phba->sli4_hba.nvmet_mrq_hdr[qidx],
+ phba->sli4_hba.nvmet_mrq_data[qidx]);
+ }
+
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(phba,
+ phba->sli4_hba.nvmet_cqset[qidx]);
+ }
+
/* Unset FCP response complete queue */
if (phba->sli4_hba.fcp_cq)
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
@@ -9935,6 +10151,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
+ phba->cfg_nvmet_mrq = 0;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -10875,12 +11092,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
if (phba->intr_type != MSIX) {
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
phba->cfg_fcp_io_channel = 1;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
phba->cfg_nvme_io_channel = 1;
+ if (phba->nvmet_support)
+ phba->cfg_nvmet_mrq = 1;
+ }
phba->io_channel_irqs = 1;
}
-
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,