scsi: lpfc: Mitigate high memory pre-allocation by SCSI-MQ
author     James Smart <jsmart2021@gmail.com>
Fri, 16 Aug 2019 02:36:49 +0000 (19:36 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 10 Sep 2019 09:35:20 +0000 (10:35 +0100)
[ Upstream commit 77ffd3465ba837e9dc714e17b014e77b2eae765a ]

When SCSI-MQ is enabled, the SCSI-MQ layers pre-allocate MQ resources based
on shost values set by the driver. In newer versions of the driver, which
attempt to set nr_hw_queues to the CPU count, the multipliers become
excessive, with a single shost's SCSI-MQ pre-allocation reaching into the
multiple-GByte range.  NPIV, which creates additional shosts, only
multiplies this overhead. On lower-memory systems this can exhaust system
memory very quickly, resulting in a system crash or in failures in the
driver or elsewhere due to low-memory conditions.
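
As a rough illustration of the scaling (the queue depth, per-command size,
CPU count and shost count below are assumed example figures, not values
taken from this commit): blk-mq pre-allocates its request pool per hardware
queue, so the static footprint grows linearly with shost->nr_hw_queues and
again with every NPIV shost.

/*
 * Back-of-the-envelope sketch only; all figures are assumptions chosen to
 * show the order of magnitude, not measurements from the lpfc driver.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long nr_hw_queues = 128;  /* nr_hw_queues == CPU count   */
        unsigned long long queue_depth  = 4096; /* assumed shost->can_queue    */
        unsigned long long per_cmd      = 4096; /* assumed request + cmd_size  */
        unsigned long long shosts       = 4;    /* physical port + NPIV vports */

        unsigned long long per_shost = nr_hw_queues * queue_depth * per_cmd;

        printf("per shost : %llu GiB\n", per_shost >> 30);        /* 2 GiB */
        printf("all shosts: %llu GiB\n", (per_shost * shosts) >> 30);
        return 0;
}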

After testing several scenarios, the situation can be mitigated by limiting
the value set in shost->nr_hw_queues to 4. Although the shost value is
reduced, the driver still has per-cpu hardware queues of its own that allow
per-cpu parallelization.  Testing revealed that even with this small
nr_hw_queues value for SCSI-MQ, performance levels remained near maximum
due to the within-driver affinitization.
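
The reason the small advertised value does not serialize I/O is that the
queue count seen by the block layer and the driver's own hardware queues
are decoupled. A simplified sketch of that decoupling, with hypothetical
structure and helper names (this is not lpfc code), might look like:

/*
 * Simplified, hypothetical sketch -- not lpfc code.  Even when the SCSI
 * midlayer is only told about a few hardware queues, the driver can still
 * spread commands over its own per-cpu queues by indexing on the issuing
 * CPU.
 */
struct example_hdwq {
        int dummy;                      /* per-queue WQ/CQ resources in reality */
};

struct example_hba {
        unsigned int cfg_hdw_queue;     /* driver-internal HW queue count */
        struct example_hdwq *hdwq;      /* array of cfg_hdw_queue entries */
};

static inline struct example_hdwq *
example_select_hdwq(struct example_hba *hba, unsigned int cpu)
{
        /* Pick one of the driver's own queues for this CPU, independent of
         * the (much smaller) shost->nr_hw_queues value advertised to SCSI-MQ.
         */
        return &hba->hdwq[cpu % hba->cfg_hdw_queue];
}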

A module parameter was created to make the value used for nr_hw_queues
tunable.
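
With the tunable in place, the value the SCSI layer ends up seeing is the
threshold clamped against the driver's hardware queue count and the NUMA
topology, as the lpfc_init.c hunk below implements. Restated as a
standalone sketch (the helper name here is illustrative, not part of the
driver):

/*
 * Standalone restatement of the clamp applied in lpfc_create_port() in the
 * hunk below; the function itself is illustrative, not driver code.
 */
static unsigned int example_fcp_nr_hw_queues(unsigned int fcp_mq_threshold,
                                             unsigned int hdw_queue,
                                             unsigned int possible_numa_nodes)
{
        /* 0, or a value above the HW queue count, falls back to cfg_hdw_queue */
        if (!fcp_mq_threshold || fcp_mq_threshold > hdw_queue)
                fcp_mq_threshold = hdw_queue;

        /* never advertise more than two queues per possible NUMA node */
        if (2 * possible_numa_nodes < fcp_mq_threshold)
                return 2 * possible_numa_nodes;

        return fcp_mq_threshold;
}

On a typical one- or two-socket host this works out to 2 or 4 queues unless
the threshold is raised; the parameter can be set at load time (for example
"modprobe lpfc lpfc_fcp_mq_threshold=16") and, being declared with
LPFC_ATTR_R, is read-only through sysfs afterwards.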

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Ewan D. Milne <emilne@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli4.h

index aafcffaa25f716ac33cfab2a3ce884ad5d7b8d83..4604e1bc334c0b98a14a2ccac91509b4ce43bf76 100644 (file)
@@ -822,6 +822,7 @@ struct lpfc_hba {
        uint32_t cfg_cq_poll_threshold;
        uint32_t cfg_cq_max_proc_limit;
        uint32_t cfg_fcp_cpu_map;
+       uint32_t cfg_fcp_mq_threshold;
        uint32_t cfg_hdw_queue;
        uint32_t cfg_irq_chann;
        uint32_t cfg_suppress_rsp;
index d4c65e2109e2ff1af6f04c0b6b75dec90d452655..353da12d797bacda3bfab1caaa4413ae14fdcc3a 100644 (file)
@@ -5640,6 +5640,19 @@ LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
             "Embed NVME Command in WQE");
 
+/*
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
+ *
+ *      0    = Set nr_hw_queues by the number of CPUs or HW queues.
+ *      1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ *
+ * Value range is [0,128]. Default value is 8.
+ */
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+           LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+           "Set the number of SCSI Queues advertised");
+
 /*
  * lpfc_hdw_queue: Set the number of Hardware Queues the driver
  * will advertise it supports to the NVME and  SCSI layers. This also
@@ -5961,6 +5974,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_cq_poll_threshold,
        &dev_attr_lpfc_cq_max_proc_limit,
        &dev_attr_lpfc_fcp_cpu_map,
+       &dev_attr_lpfc_fcp_mq_threshold,
        &dev_attr_lpfc_hdw_queue,
        &dev_attr_lpfc_irq_chann,
        &dev_attr_lpfc_suppress_rsp,
@@ -7042,6 +7056,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        /* Initialize first burst. Target vs Initiator are different. */
        lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
        lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+       lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
        lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
        lpfc_irq_chann_init(phba, lpfc_irq_chann);
        lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
index eaaef682de251b893b1877ca94ebf90fb404325a..2fd8f15f99975aab87d388850ed1d71da431d2ea 100644 (file)
@@ -4308,10 +4308,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        shost->max_cmd_len = 16;
 
        if (phba->sli_rev == LPFC_SLI_REV4) {
-               if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
-                       shost->nr_hw_queues = phba->cfg_hdw_queue;
-               else
-                       shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+               if (!phba->cfg_fcp_mq_threshold ||
+                   phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+                       phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+               shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+                                           phba->cfg_fcp_mq_threshold);
 
                shost->dma_boundary =
                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
index 8e4fd1a98023ceec0ce482385cba96b013128c17..986594ec40e2ae1148c5cde39f304ad33ce5d309 100644 (file)
 #define LPFC_HBA_HDWQ_MAX      128
 #define LPFC_HBA_HDWQ_DEF      0
 
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN      0
+#define LPFC_FCP_MQ_THRESHOLD_MAX      128
+#define LPFC_FCP_MQ_THRESHOLD_DEF      8
+
 /* Common buffer size to accomidate SCSI and NVME IO buffers */
 #define LPFC_COMMON_IO_BUF_SZ  768