author     James Smart           2017-11-21 01:00:43 +0100
committer  Martin K. Petersen    2017-12-05 02:32:55 +0100
commit     81e6a63728a409ae0e0061c1dc5adb4a85cc4869 (patch)
tree       2beb8f49dc331f34782dc58aac88300d4366b90f /drivers/scsi/lpfc/lpfc_init.c
parent     scsi: lpfc: Fix driver handling of nvme resources during unload (diff)
scsi: lpfc: small sg cnt cleanup
The logic for sg_seg_cnt is a bit convoluted. This patch tries to clean
up a couple of areas, especially around the +2 and +1 logic.

This patch:

- Cleans up the lpfc_sg_seg_cnt attribute to specify a real minimum
  rather than making the minimum be whatever the default is.

- Removes the hardcoding of +2 (for the number of elements we use in a
  sgl for cmd iu and rsp iu) and +1 (an additional entry to compensate
  for nvme's reduction of io size based on a possible partial page)
  logic in sg list initialization. In the case where the +1 logic is
  referenced in host and target io checks, use the values set in the
  transport template as that value was properly set.

There can certainly be more done in this area and it will be addressed
in combined host/target driver effort.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
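As a rough illustration of the accounting this patch centralizes, the sketch below mirrors the new `extra` logic from the diff: two reserved SGL entries for the cmd and rsp IUs, plus one more when NVME is enabled, and a clamp so the configured count plus the reserved entries still fits the SGL. The constant values and the standalone `reserved_sges()`/`clamp_sg_seg_cnt()` helpers are hypothetical stand-ins for illustration, not lpfc code.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's configuration values. */
#define LPFC_MAX_SGL_SEG_CNT  512   /* 2 pages of 16-byte SGEs (assumed) */
#define LPFC_ENABLE_NVME      0x2   /* illustrative flag value only */

/*
 * Mirror of the patch's accounting: 1 SGE for the cmd IU, 1 for the
 * rsp IU, and one more entry when NVME is enabled to cover its
 * partial-page boundary case.
 */
static int reserved_sges(unsigned int cfg_enable_fc4_type)
{
	int extra = 2;

	if (cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;
	return extra;
}

/* Clamp the requested scatter-gather count so cnt + extra fits the SGL. */
static int clamp_sg_seg_cnt(int requested, int extra)
{
	if (requested > LPFC_MAX_SGL_SEG_CNT - extra)
		requested = LPFC_MAX_SGL_SEG_CNT - extra;
	return requested;
}

int main(void)
{
	int extra = reserved_sges(LPFC_ENABLE_NVME);
	int cnt = clamp_sg_seg_cnt(600, extra);

	printf("extra=%d, clamped sg_seg_cnt=%d, total SGEs=%d\n",
	       extra, cnt, cnt + extra);
	return 0;
}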
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 19
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a6ac72087f4c..fa211550a32a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5806,6 +5806,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;

phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5861,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/

/*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;

/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5908,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));

/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;

/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
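For a concrete feel of what the sizing formula in the last hunk produces, here is a small worked sketch of the cfg_sg_dma_buf_size and cfg_total_seg_cnt computation. The 16-byte SGE size follows from the diff's own "2 Pages, 512 SGEs" comment; the cmd/rsp IU sizes and the example segment count are placeholder guesses, not the real sizeof(struct fcp_cmnd)/sizeof(struct fcp_rsp).

#include <stdio.h>

/* Assumed for illustration: 2 pages / 512 SGEs => 16 bytes per SGE. */
#define SLI4_SGE_SIZE        16
/* Placeholder IU sizes; the driver uses sizeof(struct fcp_cmnd/rsp). */
#define FCP_CMND_SIZE_GUESS  32
#define FCP_RSP_SIZE_GUESS   96

int main(void)
{
	int extra = 3;              /* cmd + rsp + NVME boundary entry */
	int cfg_sg_seg_cnt = 128;   /* example module-parameter value  */

	/* Same shape as the hunk: cmd IU + rsp IU + (cnt + extra) SGEs. */
	size_t sg_dma_buf_size = FCP_CMND_SIZE_GUESS + FCP_RSP_SIZE_GUESS +
		(size_t)(cfg_sg_seg_cnt + extra) * SLI4_SGE_SIZE;

	int total_seg_cnt = cfg_sg_seg_cnt + extra;

	printf("sg_dma_buf_size=%zu bytes, total_seg_cnt=%d\n",
	       sg_dma_buf_size, total_seg_cnt);
	/* With cnt + extra <= 256, the SGL fits in a single 4 KB page,
	 * which is the condition the NOTE in the hunk above describes. */
	return 0;
}

With these example values, 131 SGEs at 16 bytes each stay well under one page, so only a single SGL page would need to be posted.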