Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            |  19
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       |  52
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.h       |  10
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c        |   9
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h       |   5
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c         |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c    |  11
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h    |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h       |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        |  93
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    |  12
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h         |  18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        |  23
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       |  73
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c       |  35
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c        |  90
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  |  25
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c       |  31
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c      | 279
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h      |  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       |  12
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        |  62
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h       |  23
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h    |   2
24 files changed, 668 insertions, 241 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 562dc0139735..8eb3f96fe068 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -733,7 +733,6 @@ struct lpfc_hba {
uint32_t fc_rttov; /* R_T_TOV timer value */
uint32_t fc_altov; /* AL_TOV timer value */
uint32_t fc_crtov; /* C_R_TOV timer value */
- uint32_t fc_citov; /* C_I_TOV timer value */
struct serv_parm fc_fabparam; /* fabric service parameters buffer */
uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
@@ -757,6 +756,7 @@ struct lpfc_hba {
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
uint32_t initial_imax;
+ uint8_t bbcredit_support;
/* HBA Config Parameters */
uint32_t cfg_ack0;
@@ -836,6 +836,7 @@ struct lpfc_hba {
uint32_t cfg_enable_SmartSAN;
uint32_t cfg_enable_mds_diags;
uint32_t cfg_enable_fc4_type;
+ uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_xri_split;
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
@@ -946,14 +947,14 @@ struct lpfc_hba {
struct list_head active_rrq_list;
spinlock_t hbalock;
- /* pci_mem_pools */
- struct pci_pool *lpfc_sg_dma_buf_pool;
- struct pci_pool *lpfc_mbuf_pool;
- struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
- struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
- struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
- struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
- struct pci_pool *txrdy_payload_pool;
+ /* dma_mem_pools */
+ struct dma_pool *lpfc_sg_dma_buf_pool;
+ struct dma_pool *lpfc_mbuf_pool;
+ struct dma_pool *lpfc_hrb_pool; /* header receive buffer pool */
+ struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
+ struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
+ struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
+ struct dma_pool *txrdy_payload_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
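The struct change above is the heart of this series' pci_pool-to-dma_pool conversion. pci_pool_* was only ever a macro shim over dma_pool_*; the visible difference is that the dma_pool API takes a generic struct device rather than a struct pci_dev. A minimal sketch of the new calling pattern (the pool name and sizes below are illustrative, not taken from the driver):

	#include <linux/dmapool.h>

	/* old: pci_pool_create(name, pdev, size, align, boundary)
	 * new: dma_pool_create(name, &pdev->dev, size, align, boundary)
	 */
	static struct dma_pool *example_pool_setup(struct pci_dev *pdev)
	{
		struct dma_pool *pool;
		dma_addr_t handle;
		void *buf;

		pool = dma_pool_create("example_pool", &pdev->dev, 1024, 8, 0);
		if (!pool)
			return NULL;

		/* allocate one coherent buffer, then release it */
		buf = dma_pool_alloc(pool, GFP_KERNEL, &handle);
		if (buf)
			dma_pool_free(pool, buf, handle);
		return pool;
	}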
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 7ee1a94c0b33..c17677f494af 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -247,13 +247,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
- spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
- spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
- tot = phba->sli4_hba.nvmet_xri_cnt -
- (phba->sli4_hba.nvmet_ctx_get_cnt +
- phba->sli4_hba.nvmet_ctx_put_cnt);
- spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
- spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ /* Calculate outstanding IOs */
+ tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+ tot += atomic_read(&tgtp->xmt_fcp_release);
+ tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
len += snprintf(buf + len, PAGE_SIZE - len,
"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
@@ -1894,6 +1891,36 @@ static inline bool lpfc_rangecheck(uint val, uint min, uint max)
}
/**
+ * lpfc_enable_bbcr_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values then sets the
+ * adapter config field if in the valid range. Prints an error message
+ * and does not set the parameter if invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ */
+static ssize_t
+lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
+{
+ if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3068 %s_enable_bbcr changed from %d to %d\n",
+ LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
+ phba->cfg_enable_bbcr = val;
+ return 0;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
+ LPFC_DRIVER_NAME, val);
+ return -EINVAL;
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -5116,6 +5143,14 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
*/
LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+/*
+ * lpfc_enable_bbcr: Enable BB Credit Recovery
+ * 0 = BB Credit Recovery disabled
+ * 1 = BB Credit Recovery enabled (default)
+ * Value range is [0,1]. Default value is 1.
+ */
+LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
+
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_nvme_info,
&dev_attr_bg_info,
@@ -5223,6 +5258,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
&dev_attr_lpfc_enable_mds_diags,
+ &dev_attr_lpfc_enable_bbcr,
NULL,
};
@@ -6234,11 +6270,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+ lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
if (phba->sli_rev != LPFC_SLI_REV4) {
/* NVME only supported on SLI4 */
phba->nvmet_support = 0;
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+ phba->cfg_enable_bbcr = 0;
} else {
/* We MUST have FCP support */
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
index d56dafcdd563..931db52692f5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.h
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -46,6 +46,16 @@ lpfc_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
+#define LPFC_BBCR_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0444);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, 0444 | 0644,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
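For context, this is roughly what the new macro expands to for the one attribute that uses it (the _show/_init/_store helpers are generated by the lpfc_param_show/init/store macros and, following the driver's naming convention, should call the hand-written lpfc_enable_bbcr_set() for validation; the expansion below is a sketch, not verbatim preprocessor output). Note that 0444 | 0644 simply evaluates to 0644:

	static uint lpfc_enable_bbcr = 1;
	module_param(lpfc_enable_bbcr, uint, 0444);
	MODULE_PARM_DESC(lpfc_enable_bbcr, "Enable BBC Recovery");
	/* lpfc_param_show(enable_bbcr)  -> lpfc_enable_bbcr_show()  */
	/* lpfc_param_init(enable_bbcr)  -> lpfc_enable_bbcr_init()  */
	/* lpfc_param_store(enable_bbcr) -> lpfc_enable_bbcr_store() */
	static DEVICE_ATTR(lpfc_enable_bbcr, 0444 | 0644,
			   lpfc_enable_bbcr_show, lpfc_enable_bbcr_store);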
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a1686c2d863c..fe9e1c079c20 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2384,20 +2384,17 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
goto job_error;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!pmboxq) {
- rc = -ENOMEM;
+ if (!pmboxq)
goto link_diag_test_exit;
- }
req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
sizeof(struct lpfc_sli4_cfg_mhdr));
alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
req_len, LPFC_SLI4_MBX_EMBED);
- if (alloc_len != req_len) {
- rc = -ENOMEM;
+ if (alloc_len != req_len)
goto link_diag_test_exit;
- }
+
run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
phba->sli4_hba.lnk_info.lnk_no);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index da669dce12fe..7e300734b345 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -556,9 +556,8 @@ int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
-void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
- struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
+void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
+ struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9c0c1463057d..33417681f5d4 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -955,7 +955,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
did, fc4_data_0, fc4_data_1);
@@ -969,7 +969,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"3064 Setting ndlp %p, DID x%06x with "
"FC4 x%08x, Data: x%08x x%08x\n",
ndlp, did, ndlp->nlp_fc4_type,
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 744f3f395b64..d50c481ec41c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -851,13 +851,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
}
- spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
- spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
- tot = phba->sli4_hba.nvmet_xri_cnt -
- (phba->sli4_hba.nvmet_ctx_get_cnt +
- phba->sli4_hba.nvmet_ctx_put_cnt);
- spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
- spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ /* Calculate outstanding IOs */
+ tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+ tot += atomic_read(&tgtp->xmt_fcp_release);
+ tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
len += snprintf(buf + len, size - len,
"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 7b7d314af0e0..c4edd87bfc65 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -478,16 +478,16 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
return;
for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
- eq = phba->sli4_hba.hba_eq[eqidx];
- if (cq->assoc_qid == eq->queue_id)
+ if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
break;
}
if (eqidx == phba->io_channel_irqs) {
pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
eqidx = 0;
- eq = phba->sli4_hba.hba_eq[0];
}
+ eq = phba->sli4_hba.hba_eq[eqidx];
+
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 094c97b9e5f7..f9a566eaef04 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -159,6 +159,7 @@ struct lpfc_node_rrq {
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
+#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6d1d6f691df4..468a66371de9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1527,6 +1527,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
uint16_t keep_nlp_state;
+ struct lpfc_nvme_rport *keep_nrport = NULL;
int put_node;
int put_rport;
unsigned long *active_rrqs_xri_bitmap = NULL;
@@ -1624,6 +1625,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
keep_nlp_state = new_ndlp->nlp_state;
lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
+ /* interchange the nvme remoteport structs */
+ keep_nrport = new_ndlp->nrport;
+ new_ndlp->nrport = ndlp->nrport;
+
/* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
/* The new_ndlp is replacing ndlp totally, so we need
@@ -1646,6 +1651,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
}
new_ndlp->nlp_type = ndlp->nlp_type;
}
+
+ /* Fix up the nvme rport */
+ if (ndlp->nrport) {
+ ndlp->nrport = NULL;
+ lpfc_nlp_put(ndlp);
+ }
+
/* We shall actually free the ndlp with both nlp_DID and
* nlp_portname fields equals 0 to avoid any ndlp on the
* nodelist never to be used.
@@ -1690,6 +1702,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
keep_nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
+ /* Previous ndlp no longer active with nvme host transport.
+ * Remove reference from earlier registration unless the
+ * nvme host took care of it.
+ */
+ if (ndlp->nrport)
+ lpfc_nlp_put(ndlp);
+ ndlp->nrport = keep_nrport;
+
/* Fix up the rport accordingly */
rport = ndlp->rport;
if (rport) {
@@ -1966,6 +1986,7 @@ int
lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost;
struct serv_parm *sp;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
@@ -1984,6 +2005,11 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
+ spin_unlock_irq(shost->host_lock);
+
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PLOGI request, remainder of payload is service parameters */
@@ -2007,6 +2033,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
+ sp->cmn.bbRcvSizeMsb &= 0xF;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
@@ -2151,6 +2178,16 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint16_t cmdsize;
u32 local_nlp_type, elscmd;
+ /*
+ * If we are in RSCN mode, the FC4 types supported from a
+ * previous GFT_ID command may not be accurate. So, if we
+ * are an NVME Initiator, always look for the possibility of
+ * the remote NPort being an NVME Target.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->fc_flag & FC_RSCN_MODE &&
+ vport->nvmei_support)
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
send_next_prli:
@@ -3420,8 +3457,18 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
maxretry = 3;
delay = 1000;
retry = 1;
- break;
+ } else if (cmd == ELS_CMD_FLOGI &&
+ stat.un.b.lsRjtRsnCodeExp ==
+ LSEXP_NOTHING_MORE) {
+ vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+ retry = 1;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0820 FLOGI Failed (x%x). "
+ "BBCredit Not Supported\n",
+ stat.un.lsRjtError);
}
+ break;
+
case LSRJT_PROTOCOL_ERR:
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
@@ -3442,6 +3489,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out_retry;
}
break;
+ case LSRJT_CMD_UNSUPPORTED:
+ /* lpfc nvmet returns this type of LS_RJT when it
+ * receives an FCP PRLI because lpfc nvmet only
+ * supports NVME. The ELS request is terminated for FCP4
+ * on this rport.
+ */
+ if (stat.un.b.lsRjtRsnCodeExp ==
+ LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
+ spin_unlock_irq(shost->host_lock);
+ retry = 0;
+ goto out_retry;
+ }
+ break;
}
break;
@@ -3930,7 +3992,25 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (mbox) {
if ((rspiocb->iocb.ulpStatus == 0)
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
- lpfc_unreg_rpi(vport, ndlp);
+ if (!lpfc_unreg_rpi(vport, ndlp) &&
+ (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "0314 PLOGI recov DID x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_rpi, ndlp->nlp_flag);
+ mp = mbox->context1;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt,
+ mp->phys);
+ kfree(mp);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
/* Increment reference count to ndlp to hold the
* reference to ndlp for the callback function.
*/
@@ -4132,6 +4212,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0,
sizeof(sp->un.vendorVersion));
+ sp->cmn.bbRcvSizeMsb &= 0xF;
/* If our firmware supports this feature, convey that
* info to the target using the vendor specific field.
@@ -7989,6 +8070,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
+
+ /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
+ if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ rjt_exp = LSEXP_REQ_UNSUPPORTED;
+ break;
+ }
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
@@ -8784,6 +8872,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd += sizeof(uint32_t); /* Node Name */
pcmd += sizeof(uint32_t); /* Node Name */
memcpy(pcmd, &vport->fc_nodename, 8);
+ sp->cmn.valid_vendor_ver_level = 0;
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
lpfc_set_disctmo(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index aa5e5ff56dfb..20808349a80e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1108,6 +1108,7 @@ void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
+ uint8_t bbscn = 0;
if (pmb->u.mb.mbxStatus)
goto out;
@@ -1134,10 +1135,17 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI)
+ if (vport->port_state != LPFC_FLOGI) {
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+ bbscn = bf_get(lpfc_bbscn_def,
+ &phba->sli4_hba.bbscn_params);
+ vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+ vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
+ }
lpfc_initial_flogi(vport);
- else if (vport->fc_flag & FC_PT2PT)
+ } else if (vport->fc_flag & FC_PT2PT) {
lpfc_disc_start(vport);
+ }
return;
out:
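In the FC login common service parameters, the byte the driver calls bbRcvSizeMsb does double duty: its low nibble holds the most significant 4 bits of the 12-bit BB receive data field size, and its high nibble carries BB_SC_N when BB credit recovery is negotiated, which is why the code above masks with 0xf before ORing in bbscn. A worked sketch with assumed values:

	uint8_t msb;

	/* assume a 2048-byte (0x800) receive data field size, so the
	 * size MSB nibble is 0x8, and assume bbscn_def == 8
	 */
	msb  = 0x08;
	msb &= 0xf;		/* clear any stale BB_SC_N bits */
	msb |= 8 << 4;		/* msb is now 0x88 */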
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 26a5647e057e..bdc1f184f67a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2293,15 +2293,27 @@ typedef struct {
uint32_t rttov;
uint32_t altov;
uint32_t crtov;
- uint32_t citov;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd4:19;
+ uint32_t cscn:1;
+ uint32_t bbscn:4;
+ uint32_t rsvd3:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rsvd3:8;
+ uint32_t bbscn:4;
+ uint32_t cscn:1;
+ uint32_t rsvd4:19;
+#endif
+
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rrq_enable:1;
uint32_t rrq_immed:1;
- uint32_t rsvd4:29;
+ uint32_t rsvd5:29;
uint32_t ack0_enable:1;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t ack0_enable:1;
- uint32_t rsvd4:29;
+ uint32_t rsvd5:29;
uint32_t rrq_immed:1;
uint32_t rrq_enable:1;
#endif
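C bitfield layout inside a storage unit depends on endianness, which is why the new cscn/bbscn fields are declared twice in mirrored order: little-endian machines fill fields from the least significant bit, big-endian from the most significant, and the paired declarations keep the on-wire word identical. The equivalent mask arithmetic, with positions read off the little-endian order (rsvd3 in bits 0-7, bbscn in bits 8-11, cscn in bit 12; the macro names below are illustrative):

	#define CFGLNK_BBSCN_SHIFT	8
	#define CFGLNK_BBSCN_MASK	0xF
	#define CFGLNK_CSCN		(1U << 12)

	word = (word & ~(CFGLNK_BBSCN_MASK << CFGLNK_BBSCN_SHIFT)) |
	       ((bbscn & CFGLNK_BBSCN_MASK) << CFGLNK_BBSCN_SHIFT);
	word |= CFGLNK_CSCN;	/* request credit recovery */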
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bb4715705fa3..1db0a38683f4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2217,9 +2217,15 @@ struct lpfc_mbx_reg_vfi {
uint32_t e_d_tov;
uint32_t r_a_tov;
uint32_t word10;
-#define lpfc_reg_vfi_nport_id_SHIFT 0
-#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
-#define lpfc_reg_vfi_nport_id_WORD word10
+#define lpfc_reg_vfi_nport_id_SHIFT 0
+#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD word10
+#define lpfc_reg_vfi_bbcr_SHIFT 27
+#define lpfc_reg_vfi_bbcr_MASK 0x00000001
+#define lpfc_reg_vfi_bbcr_WORD word10
+#define lpfc_reg_vfi_bbscn_SHIFT 28
+#define lpfc_reg_vfi_bbscn_MASK 0x0000000F
+#define lpfc_reg_vfi_bbscn_WORD word10
};
struct lpfc_mbx_init_vpi {
@@ -2646,7 +2652,16 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_link_speed_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_link_speed_WORD word6
uint32_t rsvd_7;
- uint32_t rsvd_8;
+ uint32_t word8;
+#define lpfc_mbx_rd_conf_bbscn_min_SHIFT 0
+#define lpfc_mbx_rd_conf_bbscn_min_MASK 0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_min_WORD word8
+#define lpfc_mbx_rd_conf_bbscn_max_SHIFT 4
+#define lpfc_mbx_rd_conf_bbscn_max_MASK 0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_max_WORD word8
+#define lpfc_mbx_rd_conf_bbscn_def_SHIFT 8
+#define lpfc_mbx_rd_conf_bbscn_def_MASK 0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_def_WORD word8
uint32_t word9;
#define lpfc_mbx_rd_conf_lmt_SHIFT 0
#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
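The _SHIFT/_MASK/_WORD triplets added above follow the driver-wide convention consumed by the bf_get()/bf_set() accessors defined earlier in lpfc_hw4.h, which are essentially:

	#define bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = \
			((((value) & name##_MASK) << name##_SHIFT) | \
			 ((ptr)->name##_WORD & \
			  ~(name##_MASK << name##_SHIFT))))

	/* e.g. reading the default BB_SC_N out of READ_CONFIG word8: */
	bbscn_def = bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config);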
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 491aa95eb0f6..7e7ae786121b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -37,6 +37,7 @@
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
+#include <linux/bitops.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1253,6 +1254,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
unsigned long time_elapsed;
uint32_t tick_cqe, max_cqe, val;
uint64_t tot, data1, data2, data3;
+ struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_register reg_data;
void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
@@ -1281,13 +1283,11 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* Check outstanding IO count */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
if (phba->nvmet_support) {
- spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
- spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
- tot = phba->sli4_hba.nvmet_xri_cnt -
- (phba->sli4_hba.nvmet_ctx_get_cnt +
- phba->sli4_hba.nvmet_ctx_put_cnt);
- spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
- spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ tgtp = phba->targetport->private;
+ /* Calculate outstanding IOs */
+ tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+ tot += atomic_read(&tgtp->xmt_fcp_release);
+ tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
} else {
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(
@@ -3048,7 +3048,7 @@ lpfc_online(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
struct lpfc_vport **vports;
- int i;
+ int i, error = 0;
bool vpis_cleared = false;
if (!phba)
@@ -3072,6 +3072,18 @@ lpfc_online(struct lpfc_hba *phba)
if (!phba->sli4_hba.max_cfg_param.vpi_used)
vpis_cleared = true;
spin_unlock_irq(&phba->hbalock);
+
+ /* Reestablish the local initiator port.
+ * The offline process destroyed the previous lport.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
+ !phba->nvmet_support) {
+ error = lpfc_nvme_create_localport(phba->pport);
+ if (error)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6132 NVME restore reg failed "
+ "on nvmei error x%x\n", error);
+ }
} else {
lpfc_sli_queue_init(phba);
if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
@@ -3226,6 +3238,13 @@ lpfc_offline(struct lpfc_hba *phba)
/* stop port and all timers associated with this hba */
lpfc_stop_port(phba);
+
+ /* Tear down the local and target port registrations. The
+ * nvme transports need to clean up.
+ */
+ lpfc_nvmet_destroy_targetport(phba);
+ lpfc_nvme_destroy_localport(phba->pport);
+
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
@@ -3275,7 +3294,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
list) {
list_del(&sb->list);
- pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
@@ -3286,7 +3305,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
list) {
list_del(&sb->list);
- pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
@@ -3317,7 +3336,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
- pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
phba->total_nvme_bufs--;
@@ -3328,7 +3347,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
- pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
phba->total_nvme_bufs--;
@@ -3640,7 +3659,7 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
list_remove_head(&scsi_sgl_list, psb,
struct lpfc_scsi_buf, list);
if (psb) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
}
@@ -3710,9 +3729,7 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
if (phba->sli_rev == LPFC_SLI_REV4)
return be64_to_cpu(wwn);
else
- return (((wwn & 0xffffffff00000000) >> 32) |
- ((wwn & 0x00000000ffffffff) << 32));
-
+ return rol64(wwn, 32);
}
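rol64() from <linux/bitops.h> is a plain left-rotate, so with a count of 32 it is exactly the word swap the removed open-coded expression performed:

	/* rol64(x, 32) == ((x << 32) | (x >> 32)) for 64-bit x, e.g.
	 * rol64(0x1122334455667788ULL, 32) == 0x5566778811223344ULL
	 */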
/**
@@ -3774,7 +3791,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
list_remove_head(&nvme_sgl_list, lpfc_ncmd,
struct lpfc_nvme_buf, list);
if (lpfc_ncmd) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -5930,8 +5947,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
/* Fast-path XRI aborted CQ Event work queue list */
@@ -5940,8 +5955,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
- spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
- spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
/*
@@ -6516,6 +6529,12 @@ lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
kfree(sglq_entry);
}
+
+ /* Update the nvmet_xri_cnt to reflect no current sgls.
+ * The next initialization cycle sets the count and allocates
+ * the sgls over again.
+ */
+ phba->sli4_hba.nvmet_xri_cnt = 0;
}
/**
@@ -6846,8 +6865,8 @@ lpfc_create_shost(struct lpfc_hba *phba)
if (phba->nvmet_support) {
/* Only 1 vport (pport) will support NVME target */
if (phba->txrdy_payload_pool == NULL) {
- phba->txrdy_payload_pool = pci_pool_create(
- "txrdy_pool", phba->pcidev,
+ phba->txrdy_payload_pool = dma_pool_create(
+ "txrdy_pool", &phba->pcidev->dev,
TXRDY_PAYLOAD_LEN, 16, 0);
if (phba->txrdy_payload_pool) {
phba->targetport = NULL;
@@ -7605,6 +7624,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3082 Mailbox (x%x) returned ldv:x0\n",
bf_get(lpfc_mqe_command, &pmb->u.mqe));
+ if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
+ phba->bbcredit_support = 1;
+ phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
+ }
+
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
@@ -8301,6 +8325,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
+ /* Put list in known state in case driver load fails. */
+ INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
+
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
phba->sli4_hba.rq_esize,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index ce25a18367b5..81fb92967b11 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -376,7 +376,12 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varCfgLnk.rttov = phba->fc_rttov;
mb->un.varCfgLnk.altov = phba->fc_altov;
mb->un.varCfgLnk.crtov = phba->fc_crtov;
- mb->un.varCfgLnk.citov = phba->fc_citov;
+ mb->un.varCfgLnk.cscn = 0;
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+ mb->un.varCfgLnk.cscn = 1;
+ mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def,
+ &phba->sli4_hba.bbscn_params);
+ }
if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
mb->un.varCfgLnk.ack0_enable = 1;
@@ -2139,6 +2144,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
struct lpfc_mbx_reg_vfi *reg_vfi;
struct lpfc_hba *phba = vport->phba;
+ uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0;
memset(mbox, 0, sizeof(*mbox));
reg_vfi = &mbox->u.mqe.un.reg_vfi;
@@ -2168,16 +2174,39 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
}
+
+ bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0);
+ bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0);
+ bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF;
+
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+ bbscn_fabric != 0) {
+ bbscn_max = bf_get(lpfc_bbscn_max,
+ &phba->sli4_hba.bbscn_params);
+ if (bbscn_fabric <= bbscn_max) {
+ bbscn_def = bf_get(lpfc_bbscn_def,
+ &phba->sli4_hba.bbscn_params);
+
+ if (bbscn_fabric > bbscn_def)
+ bf_set(lpfc_reg_vfi_bbscn, reg_vfi,
+ bbscn_fabric);
+ else
+ bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def);
+
+ bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1);
+ }
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
"3134 Register VFI, mydid:x%x, fcfi:%d, "
" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
- " port_state:x%x topology chg:%d\n",
+ " port_state:x%x topology chg:%d bbscn_fabric :%d\n",
vport->fc_myDID,
phba->fcf.fcfi,
phba->sli4_hba.vfi_ids[vport->vfi],
phba->vpi_ids[vport->vpi],
reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
- vport->port_state, phba->fc_topology_changed);
+ vport->port_state, phba->fc_topology_changed,
+ bbscn_fabric);
}
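To make the negotiation above concrete, a worked example with assumed values:

	/* Assume READ_CONFIG reported bbscn_min=1, bbscn_max=15,
	 * bbscn_def=8, and the fabric's FLOGI ACC carried
	 * bbRcvSizeMsb = 0x68, i.e. bbscn_fabric = 6.
	 *
	 * bbscn_fabric is nonzero and 6 <= bbscn_max(15), so BBCR is
	 * enabled; 6 <= bbscn_def(8), so REG_VFI is built with
	 * bbscn = 8 and the bbcr bit set.
	 */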
/**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index fcc05a1517c2..56faeb049b4a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -97,8 +97,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
i = SLI4_PAGE_SIZE;
phba->lpfc_sg_dma_buf_pool =
- pci_pool_create("lpfc_sg_dma_buf_pool",
- phba->pcidev,
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev,
phba->cfg_sg_dma_buf_size,
i, 0);
if (!phba->lpfc_sg_dma_buf_pool)
@@ -106,15 +106,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
} else {
phba->lpfc_sg_dma_buf_pool =
- pci_pool_create("lpfc_sg_dma_buf_pool",
- phba->pcidev, phba->cfg_sg_dma_buf_size,
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
align, 0);
if (!phba->lpfc_sg_dma_buf_pool)
goto fail;
}
- phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+ phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
LPFC_BPL_SIZE,
align, 0);
if (!phba->lpfc_mbuf_pool)
@@ -128,7 +128,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
pool->max_count = 0;
pool->current_count = 0;
for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
- pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+ pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
GFP_KERNEL, &pool->elements[i].phys);
if (!pool->elements[i].virt)
goto fail_free_mbuf_pool;
@@ -152,21 +152,21 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
sizeof(struct lpfc_node_rrq));
if (!phba->rrq_pool)
goto fail_free_nlp_mem_pool;
- phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
- phba->pcidev,
+ phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
+ &phba->pcidev->dev,
LPFC_HDR_BUF_SIZE, align, 0);
if (!phba->lpfc_hrb_pool)
goto fail_free_rrq_mem_pool;
- phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
- phba->pcidev,
+ phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
+ &phba->pcidev->dev,
LPFC_DATA_BUF_SIZE, align, 0);
if (!phba->lpfc_drb_pool)
goto fail_free_hrb_pool;
phba->lpfc_hbq_pool = NULL;
} else {
- phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
- phba->pcidev, LPFC_BPL_SIZE, align, 0);
+ phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
+ &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
if (!phba->lpfc_hbq_pool)
goto fail_free_nlp_mem_pool;
phba->lpfc_hrb_pool = NULL;
@@ -185,10 +185,10 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
return 0;
fail_free_drb_pool:
- pci_pool_destroy(phba->lpfc_drb_pool);
+ dma_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
- pci_pool_destroy(phba->lpfc_hrb_pool);
+ dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
fail_free_rrq_mem_pool:
mempool_destroy(phba->rrq_pool);
@@ -201,14 +201,14 @@ fail_free_drb_pool:
phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
while (i--)
- pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool->elements[i].phys);
kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
- pci_pool_destroy(phba->lpfc_mbuf_pool);
+ dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
fail_free_dma_buf_pool:
- pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
phba->lpfc_sg_dma_buf_pool = NULL;
fail:
return -ENOMEM;
@@ -218,8 +218,8 @@ int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
phba->lpfc_nvmet_drb_pool =
- pci_pool_create("lpfc_nvmet_drb_pool",
- phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+ dma_pool_create("lpfc_nvmet_drb_pool",
+ &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
SGL_ALIGN_SZ, 0);
if (!phba->lpfc_nvmet_drb_pool) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -248,20 +248,20 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_nvmet_drb_pool)
- pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+ dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
phba->lpfc_nvmet_drb_pool = NULL;
if (phba->lpfc_drb_pool)
- pci_pool_destroy(phba->lpfc_drb_pool);
+ dma_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
if (phba->lpfc_hrb_pool)
- pci_pool_destroy(phba->lpfc_hrb_pool);
+ dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
if (phba->txrdy_payload_pool)
- pci_pool_destroy(phba->txrdy_payload_pool);
+ dma_pool_destroy(phba->txrdy_payload_pool);
phba->txrdy_payload_pool = NULL;
if (phba->lpfc_hbq_pool)
- pci_pool_destroy(phba->lpfc_hbq_pool);
+ dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
if (phba->rrq_pool)
@@ -282,15 +282,15 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free MBUF memory pool */
for (i = 0; i < pool->current_count; i++)
- pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool->elements[i].phys);
kfree(pool->elements);
- pci_pool_destroy(phba->lpfc_mbuf_pool);
+ dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
/* Free DMA buffer memory pool */
- pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
phba->lpfc_sg_dma_buf_pool = NULL;
/* Free Device Data memory pool */
@@ -379,7 +379,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
* @handle: used to return the DMA-mapped address of the mbuf
*
* Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
- * Allocates from generic pci_pool_alloc function first and if that fails and
+ * Allocates from generic dma_pool_alloc function first and if that fails and
* mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
* HBA's pool.
*
@@ -397,7 +397,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
unsigned long iflags;
void *ret;
- ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+ ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
spin_lock_irqsave(&phba->hbalock, iflags);
if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
@@ -433,7 +433,7 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
pool->elements[pool->current_count].phys = dma;
pool->current_count++;
} else {
- pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+ dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
}
return;
}
@@ -470,7 +470,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
* @handle: used to return the DMA-mapped address of the nvmet_buf
*
* Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
- * PCI pool. Allocates from generic pci_pool_alloc function.
+ * PCI pool. Allocates from generic dma_pool_alloc function.
*
* Returns:
* pointer to the allocated nvmet_buf on success
@@ -481,7 +481,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
void *ret;
- ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+ ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
return ret;
}
@@ -497,7 +497,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
- pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
/**
@@ -522,7 +522,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
if (!hbqbp)
return NULL;
- hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+ hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
&hbqbp->dbuf.phys);
if (!hbqbp->dbuf.virt) {
kfree(hbqbp);
@@ -547,7 +547,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
- pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+ dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
kfree(hbqbp);
return;
}
@@ -574,16 +574,16 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
if (!dma_buf)
return NULL;
- dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+ dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
&dma_buf->hbuf.phys);
if (!dma_buf->hbuf.virt) {
kfree(dma_buf);
return NULL;
}
- dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+ dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
&dma_buf->dbuf.phys);
if (!dma_buf->dbuf.virt) {
- pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys);
kfree(dma_buf);
return NULL;
@@ -607,8 +607,8 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
- pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
- pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+ dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+ dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
kfree(dmab);
}
@@ -634,16 +634,16 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
if (!dma_buf)
return NULL;
- dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+ dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
&dma_buf->hbuf.phys);
if (!dma_buf->hbuf.virt) {
kfree(dma_buf);
return NULL;
}
- dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+ dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
GFP_KERNEL, &dma_buf->dbuf.phys);
if (!dma_buf->dbuf.virt) {
- pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys);
kfree(dma_buf);
return NULL;
@@ -667,8 +667,8 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
- pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
- pci_pool_free(phba->lpfc_nvmet_drb_pool,
+ dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+ dma_pool_free(phba->lpfc_nvmet_drb_pool,
dmab->dbuf.virt, dmab->dbuf.phys);
kfree(dmab);
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f74cb0142fd4..f3ad7cac355d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1724,6 +1724,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
lpfc_nvme_update_localport(vport);
}
+ } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
} else if (ndlp->nlp_fc4_type == 0) {
rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
0, ndlp->nlp_DID);
@@ -1892,6 +1895,15 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out;
}
+ /* The rport rejected the FCP PRLI as unsupported.
+ * This should only happen in Pt2Pt, so an NVME PRLI
+ * should still be outstanding.
+ */
+ if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
+ ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
+ goto out_err;
+ }
+
/* The LS Req had some error. Don't let this be a
* target.
*/
@@ -2189,12 +2201,15 @@ lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
/*
- * Take no action. If a LOGO is outstanding, then possibly DevLoss has
- * timed out and is calling for Device Remove. In this case, the LOGO
- * must be allowed to complete in state LOGO_ISSUE so that the rpi
- * and other NLP flags are correctly cleaned up.
+ * DevLoss has timed out and is calling for Device Remove.
+ * In this case, abort the LOGO and clean up the ndlp.
*/
- return ndlp->nlp_state;
+
+ lpfc_unreg_rpi(vport, ndlp);
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(vport->phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
}
static uint32_t
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 0a0a1b92d01d..79ba3ce063a4 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -110,7 +110,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
qhandle->index = qidx;
}
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to "
"io_channel %d qhandle %p\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle);
@@ -364,7 +364,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
- if (rc == WQE_ERROR) {
+ if (rc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"6045 Issue GEN REQ WQE to NPORT x%x "
"Data: x%x x%x\n",
@@ -1270,7 +1270,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* not exceed the programmed depth.
*/
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
- ret = -EAGAIN;
+ ret = -EBUSY;
goto out_fail;
}
@@ -1279,7 +1279,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6065 driver's buffer pool is empty, "
"IO failed\n");
- ret = -ENOMEM;
+ ret = -EBUSY;
goto out_fail;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1332,7 +1332,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
lpfc_ncmd->cur_iocbq.sli4_xritag);
- ret = -EBUSY;
goto out_free_nvme_buf;
}
@@ -1576,7 +1575,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
spin_unlock_irqrestore(&phba->hbalock, flags);
- if (ret_val == IOCB_ERROR) {
+ if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq %p.\n",
@@ -1939,7 +1938,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* pci bus space for an I/O. The DMA buffer includes the
* number of SGE's necessary to support the sg_tablesize.
*/
- lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL,
&lpfc_ncmd->dma_handle);
if (!lpfc_ncmd->data) {
@@ -1950,7 +1949,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
break;
@@ -1961,7 +1960,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, pwqeq);
if (iotag == 0) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -2182,8 +2181,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
vport->localport = localport;
lport->vport = vport;
vport->nvmei_support = 1;
- len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
- vport->phba->total_nvme_bufs += len;
+
+ /* Don't post more new bufs if repost already recovered
+ * the nvme sgls.
+ */
+ if (phba->sli4_hba.nvme_xri_cnt == 0) {
+ len = lpfc_new_nvme_buf(vport,
+ phba->sli4_hba.nvme_xri_max);
+ vport->phba->total_nvme_bufs += len;
+ }
}
return ret;
@@ -2296,6 +2302,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_type);
localport = vport->localport;
+ if (!localport)
+ return 0;
+
lport = (struct lpfc_nvme_lport *)localport->private;
/* NVME rports are not preserved across devloss.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index bbbd0f84160d..0b7c1a49e203 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -170,12 +170,14 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct rqb_dmabuf *nvmebuf;
+ struct lpfc_nvmet_ctx_info *infop;
uint32_t *payload;
uint32_t size, oxid, sid, rc;
+ int cpu;
unsigned long iflag;
if (ctxp->txrdy) {
- pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+ dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
ctxp->txrdy_phys);
ctxp->txrdy = NULL;
ctxp->txrdy_phys = 0;
@@ -267,11 +269,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
- spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
- list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
- phba->sli4_hba.nvmet_ctx_put_cnt++;
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
+ /*
+ * Use the CPU context list, from the MRQ the IO was received on
+ * (ctxp->idx), to save context structure.
+ */
+ cpu = smp_processor_id();
+ infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
+ spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
+ list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+ infop->nvmet_ctx_list_cnt++;
+ spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
@@ -552,7 +559,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, wqe);
+ start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
memset(((char *)cmdwqe) + start_clean, 0,
(sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -879,51 +886,54 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
};
static void
-lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
+ struct lpfc_nvmet_ctx_info *infop)
{
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
unsigned long flags;
- spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
- spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
list_for_each_entry_safe(ctx_buf, next_ctx_buf,
- &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+ &infop->nvmet_ctx_list, list) {
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
- __lpfc_clear_active_sglq(phba,
- ctx_buf->sglq->sli4_lxritag);
+
+ __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
- &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
- list_for_each_entry_safe(ctx_buf, next_ctx_buf,
- &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
- list_del_init(&ctx_buf->list);
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
- __lpfc_clear_active_sglq(phba,
- ctx_buf->sglq->sli4_lxritag);
- ctx_buf->sglq->state = SGL_FREED;
- ctx_buf->sglq->ndlp = NULL;
+ spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
+}
- spin_lock(&phba->sli4_hba.sgl_list_lock);
- list_add_tail(&ctx_buf->sglq->list,
- &phba->sli4_hba.lpfc_nvmet_sgl_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
+static void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+ struct lpfc_nvmet_ctx_info *infop;
+ int i, j;
- lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
- kfree(ctx_buf->context);
+ /* The first context list, MRQ 0 CPU 0 */
+ infop = phba->sli4_hba.nvmet_ctx_info;
+ if (!infop)
+ return;
+
+ /* Cycle through the entire CPU context list for every MRQ */
+ for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
+ for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+ __lpfc_nvmet_clean_io_for_cpu(phba, infop);
+ infop++; /* next */
+ }
}
- spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+ kfree(phba->sli4_hba.nvmet_ctx_info);
+ phba->sli4_hba.nvmet_ctx_info = NULL;
}
static int
@@ -932,15 +942,71 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
struct lpfc_nvmet_ctxbuf *ctx_buf;
struct lpfc_iocbq *nvmewqe;
union lpfc_wqe128 *wqe;
- int i;
+ struct lpfc_nvmet_ctx_info *last_infop;
+ struct lpfc_nvmet_ctx_info *infop;
+ int i, j, idx;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6403 Allocate NVMET resources for %d XRIs\n",
phba->sli4_hba.nvmet_xri_cnt);
+ phba->sli4_hba.nvmet_ctx_info = kcalloc(
+ phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+ sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
+ if (!phba->sli4_hba.nvmet_ctx_info) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6419 Failed allocate memory for "
+ "nvmet context lists\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Assuming X CPUs in the system, and Y MRQs, allocate some
+ * lpfc_nvmet_ctx_info structures as follows:
+ *
+ * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
+ * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
+ * ...
+ * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
+ *
+ * Each line represents an MRQ "silo" containing an entry for
+ * every CPU.
+ *
+ * MRQ X is initially assumed to be associated with CPU X, thus
+ * contexts are initially distributed across all MRQs using
+ * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
+ * freed, they are freed to the MRQ silo based on the CPU number
+ * of the IO completion. Thus a context that was allocated for MRQ A
+ * whose IO completed on CPU B will be freed to cpuB/mrqA.
+ */
+ infop = phba->sli4_hba.nvmet_ctx_info;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+ INIT_LIST_HEAD(&infop->nvmet_ctx_list);
+ spin_lock_init(&infop->nvmet_ctx_list_lock);
+ infop->nvmet_ctx_list_cnt = 0;
+ infop++;
+ }
+ }
+
+ /*
+ * Setup the next CPU context info ptr for each MRQ.
+ * MRQ 0 will cycle thru CPUs 0 - X separately from
+ * MRQ 1 cycling thru CPUs 0 - X, and so on.
+ */
+ for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+ last_infop = lpfc_get_ctx_list(phba, 0, j);
+ for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
+ infop = lpfc_get_ctx_list(phba, i, j);
+ infop->nvmet_ctx_next_cpu = last_infop;
+ last_infop = infop;
+ }
+ }
+
/* For all nvmet xris, allocate resources needed to process a
* received command on a per xri basis.
*/
+ idx = 0;
for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
if (!ctx_buf) {
@@ -977,7 +1043,6 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
/* Word 7 */
bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
- bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
/* Word 10 */
bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
@@ -995,12 +1060,35 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
- spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
- list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
- spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
+ /*
+ * Add ctx to MRQidx context list. Our initial assumption
+ * is MRQidx will be associated with CPUidx. This association
+ * can change on the fly.
+ */
+ infop = lpfc_get_ctx_list(phba, idx, idx);
+ spin_lock(&infop->nvmet_ctx_list_lock);
+ list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+ infop->nvmet_ctx_list_cnt++;
+ spin_unlock(&infop->nvmet_ctx_list_lock);
+
+ /* Spread ctx structures evenly across all MRQs */
+ idx++;
+ if (idx >= phba->cfg_nvmet_mrq)
+ idx = 0;
+ }
+
+ infop = phba->sli4_hba.nvmet_ctx_info;
+ for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6408 TOTAL NVMET ctx for CPU %d "
+ "MRQ %d: cnt %d nextcpu %p\n",
+ i, j, infop->nvmet_ctx_list_cnt,
+ infop->nvmet_ctx_next_cpu);
+ infop++;
+ }
}
- phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
return 0;
}
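The lpfc_get_ctx_list() helper used throughout is presumably added by the lpfc_sli4.h hunk (listed in the diffstat but not shown here); given the cpu-major order in which the flat array is populated above, it would index along these lines:

	#define lpfc_get_ctx_list(phba, cpu, mrq) \
		((phba)->sli4_hba.nvmet_ctx_info + \
		 ((cpu) * (phba)->cfg_nvmet_mrq + (mrq)))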
@@ -1365,10 +1453,65 @@ dropit:
#endif
}
+static struct lpfc_nvmet_ctxbuf *
+lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
+ struct lpfc_nvmet_ctx_info *current_infop)
+{
+ struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
+ struct lpfc_nvmet_ctx_info *get_infop;
+ int i;
+
+ /*
+ * The current_infop for the MRQ an NVME command IU was received
+ * on is empty. Our goal is to replenish this MRQ's context
+ * list from another CPU's list.
+ *
+ * First we need to pick a context list to start looking on.
+ * nvmet_ctx_start_cpu points to the CPU that had contexts
+ * available the last time we needed to replenish this list;
+ * nvmet_ctx_next_cpu is just the next sequential CPU for this MRQ.
+ */
+ if (current_infop->nvmet_ctx_start_cpu)
+ get_infop = current_infop->nvmet_ctx_start_cpu;
+ else
+ get_infop = current_infop->nvmet_ctx_next_cpu;
+
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ if (get_infop == current_infop) {
+ get_infop = get_infop->nvmet_ctx_next_cpu;
+ continue;
+ }
+ spin_lock(&get_infop->nvmet_ctx_list_lock);
+
+ /* Just take the entire context list, if there are any */
+ if (get_infop->nvmet_ctx_list_cnt) {
+ list_splice_init(&get_infop->nvmet_ctx_list,
+ &current_infop->nvmet_ctx_list);
+ current_infop->nvmet_ctx_list_cnt =
+ get_infop->nvmet_ctx_list_cnt - 1;
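+ /* -1: one ctx_buf is removed from the spliced list and returned below */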
+ get_infop->nvmet_ctx_list_cnt = 0;
+ spin_unlock(&get_infop->nvmet_ctx_list_lock);
+
+ current_infop->nvmet_ctx_start_cpu = get_infop;
+ list_remove_head(&current_infop->nvmet_ctx_list,
+ ctx_buf, struct lpfc_nvmet_ctxbuf,
+ list);
+ return ctx_buf;
+ }
+
+ /* Otherwise, move on to the next CPU for this MRQ */
+ spin_unlock(&get_infop->nvmet_ctx_list_lock);
+ get_infop = get_infop->nvmet_ctx_next_cpu;
+ }
+
+ /* Nothing found, all contexts for the MRQ are in-flight */
+ return NULL;
+}
+
/**
* lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
* @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
+ * @idx: relative index of MRQ vector
* @nvmebuf: pointer to lpfc nvme command HBQ data structure.
*
* This routine is used for processing the WQE associated with an unsolicited
@@ -1380,22 +1523,26 @@ dropit:
**/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
+ uint32_t idx,
struct rqb_dmabuf *nvmebuf,
uint64_t isr_timestamp)
{
-#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_ctxbuf *ctx_buf;
+ struct lpfc_nvmet_ctx_info *current_infop;
uint32_t *payload;
uint32_t size, oxid, sid, rc, qno;
unsigned long iflag;
+ int current_cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t id;
#endif
+ if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+ return;
+
ctx_buf = NULL;
if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1407,31 +1554,24 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
goto dropit;
}
- spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
- if (phba->sli4_hba.nvmet_ctx_get_cnt) {
- list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+ /*
+ * Get a pointer to the context list for this MRQ based on
+ * the CPU this MRQ IRQ is associated with. If the CPU association
+ * changes from our initial assumption, the context list could
+ * be empty and would then need to be replenished from another
+ * CPU's context list for this MRQ.
+ */
+ current_cpu = smp_processor_id();
+ current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
+ spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
+ if (current_infop->nvmet_ctx_list_cnt) {
+ list_remove_head(&current_infop->nvmet_ctx_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
- phba->sli4_hba.nvmet_ctx_get_cnt--;
+ current_infop->nvmet_ctx_list_cnt--;
} else {
- spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
- if (phba->sli4_hba.nvmet_ctx_put_cnt) {
- list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
- &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
- phba->sli4_hba.nvmet_ctx_get_cnt =
- phba->sli4_hba.nvmet_ctx_put_cnt;
- phba->sli4_hba.nvmet_ctx_put_cnt = 0;
- spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-
- list_remove_head(
- &phba->sli4_hba.lpfc_nvmet_ctx_get_list,
- ctx_buf, struct lpfc_nvmet_ctxbuf, list);
- phba->sli4_hba.nvmet_ctx_get_cnt--;
- } else {
- spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
- }
+ ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
}
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+ spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -1483,6 +1623,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->size = size;
ctxp->oxid = oxid;
ctxp->sid = sid;
+ ctxp->idx = idx;
ctxp->state = LPFC_NVMET_STE_RCV;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
@@ -1556,7 +1697,6 @@ dropit:
if (nvmebuf)
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-#endif
}
/**
@@ -1591,7 +1731,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/**
* lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
* @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
+ * @idx: relative index of MRQ vector
* @nvmebuf: pointer to received nvme data structure.
*
* This routine is used to process an unsolicited event received from an SLI
@@ -1602,7 +1742,7 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
**/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
+ uint32_t idx,
struct rqb_dmabuf *nvmebuf,
uint64_t isr_timestamp)
{
@@ -1610,7 +1750,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
}
- lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
+ lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
isr_timestamp);
}
@@ -1863,6 +2003,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->sli4_xritag);
/* Word 7 */
+ bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
/* Word 8 */
@@ -1939,7 +2080,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
case NVMET_FCOP_WRITEDATA:
/* Words 0 - 2 : The first sg segment */
- txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
+ txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
GFP_KERNEL, &physaddr);
if (!txrdy) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1971,6 +2112,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->sli4_xritag);
/* Word 7 */
+ bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
CMD_FCP_TRECEIVE64_WQE);
@@ -2054,6 +2196,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->sli4_xritag);
/* Word 7 */
+ bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 48a76788b003..25a65b0bb7f3 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -74,6 +74,19 @@ struct lpfc_nvmet_tgtport {
atomic_t xmt_abort_rsp_error;
};
+struct lpfc_nvmet_ctx_info {
+ struct list_head nvmet_ctx_list;
+ spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+ uint16_t nvmet_ctx_list_cnt;
+ char pad[16]; /* pad to a cache-line */
+};
+
+/* This retrieves the context info associated with the specified cpu / mrq */
+#define lpfc_get_ctx_list(phba, cpu, mrq) \
+ (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
+
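The macro above flattens a conceptual [cpu][mrq] table into one allocation, indexed as cpu * cfg_nvmet_mrq + mrq. A hedged sketch of the same addressing as an inline function; get_info, info_table and n_mrq are stand-ins for the lpfc names:

struct info {
	int cnt;
};

static inline struct info *get_info(struct info *info_table,
				    unsigned int n_mrq,
				    unsigned int cpu, unsigned int mrq)
{
	/* row = cpu, column = mrq, row length = n_mrq */
	return info_table + (cpu * n_mrq) + mrq;
}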
struct lpfc_nvmet_rcv_ctx {
union {
struct nvmefc_tgt_ls_req ls_req;
@@ -92,6 +105,7 @@ struct lpfc_nvmet_rcv_ctx {
uint16_t size;
uint16_t entry_cnt;
uint16_t cpu;
+ uint16_t idx;
uint16_t state;
/* States */
#define LPFC_NVMET_STE_LS_RCV 1
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index adc784539061..1a6f122bb25d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -416,7 +416,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
* struct fcp_cmnd, struct fcp_rsp and the number of bde's
* necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+ psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
@@ -427,7 +427,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
@@ -826,7 +826,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+ psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
@@ -839,7 +839,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
@@ -848,7 +848,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
@@ -857,7 +857,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
- pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
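The pci_pool_* to dma_pool_* changes in this and the surrounding files are mechanical: pci_pool was a thin compatibility wrapper over the generic DMA pool API, and the wrapper was removed upstream. A minimal sketch of the dma_pool lifecycle these call sites rely on; the "example" name and the 512/16 size/align values are illustrative, not lpfc's:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_pool_use(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t handle;
	void *buf;

	pool = dma_pool_create("example", dev, 512, 16, 0);
	if (!pool)
		return -ENOMEM;

	/* dma_pool_zalloc() is dma_pool_alloc() plus zeroing */
	buf = dma_pool_zalloc(pool, GFP_KERNEL, &handle);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, buf, handle);
	dma_pool_destroy(pool);
	return 0;
}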
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e948ea05fd33..8b119f87b51d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -80,8 +80,8 @@ static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
-static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
- uint32_t);
+static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
+ struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
@@ -106,7 +106,7 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
* -ENOMEM.
* The caller is expected to hold the hbalock when calling this routine.
**/
-static uint32_t
+static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
union lpfc_wqe *temp_wqe;
@@ -123,7 +123,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
idx = ((q->host_index + 1) % q->entry_count);
if (idx == q->hba_index) {
q->WQ_overflow++;
- return -ENOMEM;
+ return -EBUSY;
}
q->WQ_posted++;
/* set consumption flag every once in a while */
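The hunk above also changes the full-queue return from -ENOMEM to -EBUSY, which callers now propagate (see lpfc_sli4_issue_wqe below) so upper layers can treat a full work queue as a retryable condition. The full test itself is the classic one-empty-slot circular ring check; a standalone sketch with illustrative names:

#include <linux/errno.h>

struct ring {
	unsigned int host_index;	/* next slot the host writes */
	unsigned int hba_index;		/* next slot the hardware consumes */
	unsigned int entry_count;
};

static int ring_put(struct ring *q)
{
	unsigned int idx = (q->host_index + 1) % q->entry_count;

	if (idx == q->hba_index)
		return -EBUSY;		/* full: caller may retry later */

	/* ... copy the WQE into slot q->host_index here ... */
	q->host_index = idx;
	return 0;
}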
@@ -10741,7 +10741,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->vport = vport;
abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
- if (retval == IOCB_ERROR) {
+ if (retval) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
"6147 Failed abts issue_wqe with status x%x "
"for oxid x%x\n",
@@ -13010,7 +13010,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* completion queue, and then return.
*
**/
-static void
+static int
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
struct lpfc_queue *speq)
{
@@ -13034,7 +13034,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0365 Slow-path CQ identifier "
"(%d) does not exist\n", cqid);
- return;
+ return 0;
}
/* Save EQ associated with this CQ */
@@ -13071,7 +13071,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0370 Invalid completion queue type (%d)\n",
cq->type);
- return;
+ return 0;
}
/* Catch the no cq entry condition, log an error */
@@ -13086,6 +13086,8 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
+
+ return ecount;
}
/**
@@ -13289,7 +13291,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (fc_hdr->fh_type == FC_TYPE_FCP) {
dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
lpfc_nvmet_unsol_fcp_event(
- phba, phba->sli4_hba.els_wq->pring, dma_buf,
+ phba, idx, dma_buf,
cq->assoc_qp->isr_timestamp);
return false;
}
@@ -13393,7 +13395,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* queue and process all the entries on the completion queue, rearm the
* completion queue, and then return.
**/
-static void
+static int
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint32_t qidx)
{
@@ -13409,7 +13411,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
"event: majorcode=x%x, minorcode=x%x\n",
bf_get_le32(lpfc_eqe_major_code, eqe),
bf_get_le32(lpfc_eqe_minor_code, eqe));
- return;
+ return 0;
}
/* Get the reference to the corresponding CQ */
@@ -13446,8 +13448,9 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Otherwise this is a Slow path event */
if (cq == NULL) {
- lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
- return;
+ ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
+ phba->sli4_hba.hba_eq[qidx]);
+ return ecount;
}
process_cq:
@@ -13456,7 +13459,7 @@ process_cq:
"0368 Miss-matched fast-path completion "
"queue identifier: eqcqid=%d, fcpcqid=%d\n",
cqid, cq->queue_id);
- return;
+ return 0;
}
/* Save EQ associated with this CQ */
@@ -13486,6 +13489,8 @@ process_cq:
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
+
+ return ecount;
}
static void
@@ -13706,6 +13711,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
+ int ccount = 0;
int hba_eqidx;
/* Get the driver's phba structure from the dev_id */
@@ -13757,8 +13763,9 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
if (eqe == NULL)
break;
- lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
- if (!(++ecount % fpeq->entry_repost))
+ ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
+ if (!(++ecount % fpeq->entry_repost) ||
+ ccount > LPFC_MAX_ISR_CQE)
break;
fpeq->EQ_processed++;
}
@@ -17051,7 +17058,7 @@ lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd = cmdiocb->context2;
if (pcmd && pcmd->virt)
- pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+ dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
lpfc_sli_release_iocbq(phba, cmdiocb);
}
@@ -17079,7 +17086,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
/* Allocate buffer for command payload */
pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (pcmd)
- pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+ pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
&pcmd->phys);
if (!pcmd || !pcmd->virt)
goto exit;
@@ -17128,7 +17135,7 @@ exit:
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2023 Unable to process MDS loopback frame\n");
if (pcmd && pcmd->virt)
- pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+ dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
lpfc_sli_release_iocbq(phba, iocbq);
lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -18888,6 +18895,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_sglq *sglq;
struct lpfc_sli_ring *pring;
unsigned long iflags;
+ uint32_t ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
@@ -18906,10 +18914,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
}
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
pwqe->sli4_xritag);
- if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
+ ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
+ if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- return WQE_ERROR;
+ return ret;
}
+
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
return 0;
@@ -18924,9 +18934,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
bf_set(wqe_cqid, &wqe->generic.wqe_com,
phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
- if (lpfc_sli4_wq_put(wq, wqe)) {
+ ret = lpfc_sli4_wq_put(wq, wqe);
+ if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- return WQE_ERROR;
+ return ret;
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -18950,9 +18961,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
bf_set(wqe_cqid, &wqe->generic.wqe_com,
phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
- if (lpfc_sli4_wq_put(wq, wqe)) {
+ ret = lpfc_sli4_wq_put(wq, wqe);
+ if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- return WQE_ERROR;
+ return ret;
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 7a1d74e9e877..60200385fe00 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -158,6 +158,7 @@ struct lpfc_queue {
#define LPFC_MQ_REPOST 8
#define LPFC_CQ_REPOST 64
#define LPFC_RQ_REPOST 64
+#define LPFC_MAX_ISR_CQE 64
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
@@ -419,6 +420,20 @@ struct lpfc_hba_eq_hdl {
#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
};
+/* BB Credit Recovery values */
+struct lpfc_bbscn_params {
+ uint32_t word0;
+#define lpfc_bbscn_min_SHIFT 0
+#define lpfc_bbscn_min_MASK 0x0000000F
+#define lpfc_bbscn_min_WORD word0
+#define lpfc_bbscn_max_SHIFT 4
+#define lpfc_bbscn_max_MASK 0x0000000F
+#define lpfc_bbscn_max_WORD word0
+#define lpfc_bbscn_def_SHIFT 8
+#define lpfc_bbscn_def_MASK 0x0000000F
+#define lpfc_bbscn_def_WORD word0
+};
+
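The _SHIFT/_MASK/_WORD triples above follow the driver's bf_get/bf_set bitfield convention, packing three 4-bit BB credit recovery values (min, max, default) into word0. Generic macros in that style might look like the following sketch; field_get/field_set mirror, but are not, lpfc's actual helpers:

#define field_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define field_set(name, ptr, val) \
	((ptr)->name##_WORD = ((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((val) & name##_MASK) << name##_SHIFT))

/* e.g. field_get(lpfc_bbscn_def, &phba->sli4_hba.bbscn_params)
 * would extract bits 8..11 of word0.
 */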
/* Port Capabilities for SLI4 Parameters */
struct lpfc_pc_sli4_params {
uint32_t supported;
@@ -550,6 +565,7 @@ struct lpfc_sli4_hba {
uint32_t ue_to_rp;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
+ struct lpfc_bbscn_params bbscn_params;
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
/* Pointers to the constructed SLI4 queues */
@@ -621,8 +637,6 @@ struct lpfc_sli4_hba {
uint16_t scsi_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
- uint16_t nvmet_ctx_get_cnt;
- uint16_t nvmet_ctx_put_cnt;
uint16_t nvmet_io_wait_cnt;
uint16_t nvmet_io_wait_total;
struct list_head lpfc_els_sgl_list;
@@ -631,9 +645,8 @@ struct lpfc_sli4_hba {
struct list_head lpfc_abts_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
- struct list_head lpfc_nvmet_ctx_get_list;
- struct list_head lpfc_nvmet_ctx_put_list;
struct list_head lpfc_nvmet_io_wait_list;
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
@@ -664,8 +677,6 @@ struct lpfc_sli4_hba {
spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
- spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
- spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
uint32_t physical_port;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c6a24c3e2d5e..6aa192b3e4bf 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.1"
+#define LPFC_DRIVER_VERSION "11.4.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */