uint32_t cfg_auto_imax;
        uint32_t cfg_fcp_imax;
        uint32_t cfg_fcp_cpu_map;
-       uint32_t cfg_fcp_io_channel;
+       uint32_t cfg_hdw_queue;
        uint32_t cfg_suppress_rsp;
        uint32_t cfg_nvme_oas;
        uint32_t cfg_nvme_embed_cmd;
-       uint32_t cfg_nvme_io_channel;
        uint32_t cfg_nvmet_mrq_post;
        uint32_t cfg_nvmet_mrq;
        uint32_t cfg_enable_nvmet;
 #define LPFC_ENABLE_NVME 2
 #define LPFC_ENABLE_BOTH 3
        uint32_t cfg_enable_pbde;
-       uint32_t io_channel_irqs;       /* number of irqs for io channels */
        struct nvmet_fc_target_port *targetport;
        lpfc_vpd_t vpd;         /* vital product data */
 
 
 
        totin = 0;
        totout = 0;
-       for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+       for (i = 0; i < phba->cfg_hdw_queue; i++) {
                cstat = &lport->cstat[i];
                tot = atomic_read(&cstat->fc4NvmeIoCmpls);
                totin += tot;
        phba->cfg_fcp_imax = (uint32_t)val;
        phba->initial_imax = phba->cfg_fcp_imax;
 
-       for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
+       for (i = 0; i < phba->cfg_hdw_queue; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
                lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
                                         val);
 
             "Embed NVME Command in WQE");
 
 /*
- * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
- * will advertise it supports to the SCSI layer. This also will map to
- * the number of WQs the driver will create.
- *
- *      0    = Configure the number of io channels to the number of active CPUs.
- *      1,32 = Manually specify how many io channels to use.
- *
- * Value range is [0,32]. Default value is 4.
- */
-LPFC_ATTR_R(fcp_io_channel,
-           LPFC_FCP_IO_CHAN_DEF,
-           LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
-           "Set the number of FCP I/O channels");
-
-/*
- * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
- * will advertise it supports to the NVME layer. This also will map to
- * the number of WQs the driver will create.
- *
- * This module parameter is valid when lpfc_enable_fc4_type is set
- * to support NVME.
+ * lpfc_hdw_queue: Set the number of I/O Hardware Queues the driver
+ * will advertise it supports to the NVME and SCSI layers. This also
+ * will map to the number of EQ/CQ/WQs the driver will create.
  *
  * The NVME Layer will try to create this many, plus 1 administrative
  * hardware queue. The administrative queue will always map to WQ 0.
  * A hardware IO queue maps (qidx) to a specific driver WQ.
  *
- *      0    = Configure the number of io channels to the number of active CPUs.
- *      1,32 = Manually specify how many io channels to use.
+ *      0    = Configure the number of hdw queues to the number of active CPUs.
+ *      1,64 = Manually specify how many hdw queues to use.
  *
- * Value range is [0,32]. Default value is 0.
+ * Value range is [0,64]. Default value is 0.
  */
-LPFC_ATTR_R(nvme_io_channel,
-           LPFC_NVME_IO_CHAN_DEF,
-           LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
-           "Set the number of NVME I/O channels");
+LPFC_ATTR_R(hdw_queue,
+           LPFC_HBA_HDWQ_DEF,
+           LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
+           "Set the number of I/O Hardware Queues");
 
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
        &dev_attr_lpfc_auto_imax,
        &dev_attr_lpfc_fcp_imax,
        &dev_attr_lpfc_fcp_cpu_map,
-       &dev_attr_lpfc_fcp_io_channel,
+       &dev_attr_lpfc_hdw_queue,
        &dev_attr_lpfc_suppress_rsp,
-       &dev_attr_lpfc_nvme_io_channel,
        &dev_attr_lpfc_nvmet_mrq,
        &dev_attr_lpfc_nvmet_mrq_post,
        &dev_attr_lpfc_nvme_enable_fb,
        /* Initialize first burst. Target vs Initiator are different. */
        lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
        lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
-       lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
-       lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+       lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
        lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
        lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
 
        phba->cfg_enable_pbde = 0;
 
        /* A value of 0 means use the number of CPUs found in the system */
-       if (phba->cfg_fcp_io_channel == 0)
-               phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
-       if (phba->cfg_nvme_io_channel == 0)
-               phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
-
-       if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
-               phba->cfg_fcp_io_channel = 0;
-
-       if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
-               phba->cfg_nvme_io_channel = 0;
-
-       if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
-               phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-       else
-               phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+       if (phba->cfg_hdw_queue == 0)
+               phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
 
        phba->cfg_soft_wwnn = 0L;
        phba->cfg_soft_wwpn = 0L;
 void
 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 {
-       if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
-               phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
-
-       if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
-               phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
+       if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
+               phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
            phba->nvmet_support) {
                phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
-               phba->cfg_fcp_io_channel = 0;
 
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                                "6013 %s x%x fb_size x%x, fb_max x%x\n",
                }
 
                if (!phba->cfg_nvmet_mrq)
-                       phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+                       phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
 
                /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
-               if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
-                       phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+               if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
+                       phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                        "6018 Adjust lpfc_nvmet_mrq to %d\n",
                                        phba->cfg_nvmet_mrq);
                phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
                phba->cfg_nvmet_fb_size = 0;
        }
-
-       if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
-               phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-       else
-               phba->io_channel_irqs = phba->cfg_nvme_io_channel;
 }
 
 /**
 
                                atomic_read(&lport->fc4NvmeLsRequests),
                                atomic_read(&lport->fc4NvmeLsCmpls));
 
-               if (phba->cfg_nvme_io_channel < 32)
-                       maxch = phba->cfg_nvme_io_channel;
+               if (phba->cfg_hdw_queue < LPFC_HBA_HDWQ_MAX)
+                       maxch = phba->cfg_hdw_queue;
                else
-                       maxch = 32;
+                       maxch = LPFC_HBA_HDWQ_MAX;
                totin = 0;
                totout = 0;
-               for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+               for (i = 0; i < phba->cfg_hdw_queue; i++) {
                        cstat = &lport->cstat[i];
                        tot = atomic_read(&cstat->fc4NvmeIoCmpls);
                        totin += tot;
        struct lpfc_queue *qp;
        int qidx;
 
-       for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
-               qp = phba->sli4_hba.fcp_wq[qidx];
+       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+               qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
                if (qp->assoc_qid != cq_id)
                        continue;
                *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
                if (*len >= max_cnt)
                        return 1;
        }
-       for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
-               qp = phba->sli4_hba.nvme_wq[qidx];
-               if (qp->assoc_qid != cq_id)
-                       continue;
-               *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
-               if (*len >= max_cnt)
-                       return 1;
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                       qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+                       if (qp->assoc_qid != cq_id)
+                               continue;
+                       *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
+                       if (*len >= max_cnt)
+                               return 1;
+               }
        }
        return 0;
 }
        struct lpfc_queue *qp;
        int qidx, rc;
 
-       for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
-               qp = phba->sli4_hba.fcp_cq[qidx];
+       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+               qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
                if (qp->assoc_qid != eq_id)
                        continue;
 
                        return 1;
        }
 
-       for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
-               qp = phba->sli4_hba.nvme_cq[qidx];
-               if (qp->assoc_qid != eq_id)
-                       continue;
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                       qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
+                       if (qp->assoc_qid != eq_id)
+                               continue;
 
-               *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
+                       *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
 
-               /* Reset max counter */
-               qp->CQ_max_cqe = 0;
+                       /* Reset max counter */
+                       qp->CQ_max_cqe = 0;
 
-               if (*len >= max_cnt)
-                       return 1;
+                       if (*len >= max_cnt)
+                               return 1;
 
-               rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
-                               max_cnt, qp->queue_id);
-               if (rc)
-                       return 1;
+                       rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
+                                                  max_cnt, qp->queue_id);
+                       if (rc)
+                               return 1;
+               }
        }
 
        if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
        spin_lock_irq(&phba->hbalock);
 
        /* Fast-path event queue */
-       if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
+       if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) {
 
                x = phba->lpfc_idiag_last_eq;
                phba->lpfc_idiag_last_eq++;
-               if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
+               if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue)
                        phba->lpfc_idiag_last_eq = 0;
 
                len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                                        "EQ %d out of %d HBA EQs\n",
-                                       x, phba->io_channel_irqs);
+                                       x, phba->cfg_hdw_queue);
 
                /* Fast-path EQ */
-               qp = phba->sli4_hba.hba_eq[x];
+               qp = phba->sli4_hba.hdwq[x].hba_eq;
                if (!qp)
                        goto out;
 
        switch (quetp) {
        case LPFC_IDIAG_EQ:
                /* HBA event queue */
-               if (phba->sli4_hba.hba_eq) {
-                       for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
-                               qp = phba->sli4_hba.hba_eq[qidx];
+               if (phba->sli4_hba.hdwq) {
+                       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                               qp = phba->sli4_hba.hdwq[qidx].hba_eq;
                                if (qp && qp->queue_id == queid) {
                                        /* Sanity check */
                                        rc = lpfc_idiag_que_param_check(qp,
                        goto pass_check;
                }
                /* FCP complete queue */
-               if (phba->sli4_hba.fcp_cq) {
-                       for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+               if (phba->sli4_hba.hdwq) {
+                       for (qidx = 0; qidx < phba->cfg_hdw_queue;
                                                                qidx++) {
-                               qp = phba->sli4_hba.fcp_cq[qidx];
+                               qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
                                if (qp && qp->queue_id == queid) {
                                        /* Sanity check */
                                        rc = lpfc_idiag_que_param_check(
                        }
                }
                /* NVME complete queue */
-               if (phba->sli4_hba.nvme_cq) {
+               if (phba->sli4_hba.hdwq) {
                        qidx = 0;
                        do {
-                               if (phba->sli4_hba.nvme_cq[qidx] &&
-                                   phba->sli4_hba.nvme_cq[qidx]->queue_id ==
-                                   queid) {
+                               qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
+                               if (qp && qp->queue_id == queid) {
                                        /* Sanity check */
                                        rc = lpfc_idiag_que_param_check(
-                                               phba->sli4_hba.nvme_cq[qidx],
-                                               index, count);
+                                               qp, index, count);
                                        if (rc)
                                                goto error_out;
-                                       idiag.ptr_private =
-                                               phba->sli4_hba.nvme_cq[qidx];
+                                       idiag.ptr_private = qp;
                                        goto pass_check;
                                }
-                       } while (++qidx < phba->cfg_nvme_io_channel);
+                       } while (++qidx < phba->cfg_hdw_queue);
                }
                goto error_out;
                break;
                        idiag.ptr_private = phba->sli4_hba.nvmels_wq;
                        goto pass_check;
                }
-               /* FCP work queue */
-               if (phba->sli4_hba.fcp_wq) {
-                       for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
-                                                               qidx++) {
-                               qp = phba->sli4_hba.fcp_wq[qidx];
+
+               if (phba->sli4_hba.hdwq) {
+                       /* FCP/SCSI work queue */
+                       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                               qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
                                if (qp && qp->queue_id == queid) {
                                        /* Sanity check */
                                        rc = lpfc_idiag_que_param_check(
                                        goto pass_check;
                                }
                        }
-               }
-               /* NVME work queue */
-               if (phba->sli4_hba.nvme_wq) {
-                       for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
-                                                               qidx++) {
-                               qp = phba->sli4_hba.nvme_wq[qidx];
+                       /* NVME work queue */
+                       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                               qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
                                if (qp && qp->queue_id == queid) {
                                        /* Sanity check */
                                        rc = lpfc_idiag_que_param_check(
                        }
                }
 
-               /* NVME work queues */
-               if (phba->sli4_hba.nvme_wq) {
-                       for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
-                               qidx++) {
-                               if (!phba->sli4_hba.nvme_wq[qidx])
-                                       continue;
-                               if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
-                                   queid) {
-                                       /* Sanity check */
-                                       rc = lpfc_idiag_que_param_check(
-                                               phba->sli4_hba.nvme_wq[qidx],
-                                               index, count);
-                                       if (rc)
-                                               goto error_out;
-                                       idiag.ptr_private =
-                                               phba->sli4_hba.nvme_wq[qidx];
-                                       goto pass_check;
-                               }
-                       }
-               }
                goto error_out;
                break;
        case LPFC_IDIAG_RQ:
        lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
        lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
 
-       for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
                lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
 
-       for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
-               lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+                       lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
+       }
 
        lpfc_debug_dump_hdr_rq(phba);
        lpfc_debug_dump_dat_rq(phba);
        lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
        lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
 
-       for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
                lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
 
-       for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
-               lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+                       lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
+       }
 
        /*
         * Dump Event Queues (EQs)
         */
-       for (idx = 0; idx < phba->io_channel_irqs; idx++)
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
                lpfc_debug_dump_hba_eq(phba, idx);
 }
 
        char *qtypestr;
 
        if (qtype == DUMP_FCP) {
-               wq = phba->sli4_hba.fcp_wq[wqidx];
+               wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
                qtypestr = "FCP";
        } else if (qtype == DUMP_NVME) {
-               wq = phba->sli4_hba.nvme_wq[wqidx];
+               wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
                qtypestr = "NVME";
        } else if (qtype == DUMP_MBX) {
                wq = phba->sli4_hba.mbx_wq;
        int eqidx;
 
        /* fcp/nvme wq and cq are 1:1, thus same indexes */
+       eq = NULL;
 
        if (qtype == DUMP_FCP) {
-               wq = phba->sli4_hba.fcp_wq[wqidx];
-               cq = phba->sli4_hba.fcp_cq[wqidx];
+               wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
+               cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
                qtypestr = "FCP";
        } else if (qtype == DUMP_NVME) {
-               wq = phba->sli4_hba.nvme_wq[wqidx];
-               cq = phba->sli4_hba.nvme_cq[wqidx];
+               wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
+               cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
                qtypestr = "NVME";
        } else if (qtype == DUMP_MBX) {
                wq = phba->sli4_hba.mbx_wq;
        } else
                return;
 
-       for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
-               if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
+       for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) {
+               eq = phba->sli4_hba.hdwq[eqidx].hba_eq;
+               if (cq->assoc_qid == eq->queue_id)
                        break;
        }
-       if (eqidx == phba->io_channel_irqs) {
+       if (eqidx == phba->cfg_hdw_queue) {
                pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
                eqidx = 0;
+               eq = phba->sli4_hba.hdwq[0].hba_eq;
        }
 
-       eq = phba->sli4_hba.hba_eq[eqidx];
-
        if (qtype == DUMP_FCP || qtype == DUMP_NVME)
                pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
                        "->EQ[Idx:%d|Qid:%d]:\n",
 {
        struct lpfc_queue *qp;
 
-       qp = phba->sli4_hba.hba_eq[qidx];
+       qp = phba->sli4_hba.hdwq[qidx].hba_eq;
 
        pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
 
 {
        int wq_idx;
 
-       for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
-               if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+       for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
+               if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
                        break;
-       if (wq_idx < phba->cfg_fcp_io_channel) {
+       if (wq_idx < phba->cfg_hdw_queue) {
                pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-               lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
+               lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
                return;
        }
 
-       for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
-               if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
+       for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
+               if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
                        break;
-       if (wq_idx < phba->cfg_nvme_io_channel) {
+       if (wq_idx < phba->cfg_hdw_queue) {
                pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-               lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+               lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
                return;
        }
 
 {
        int cq_idx;
 
-       for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
-               if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+       for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
+               if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
                        break;
 
-       if (cq_idx < phba->cfg_fcp_io_channel) {
+       if (cq_idx < phba->cfg_hdw_queue) {
                pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-               lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
+               lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
                return;
        }
 
-       for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
-               if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
+       for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
+               if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
                        break;
 
-       if (cq_idx < phba->cfg_nvme_io_channel) {
+       if (cq_idx < phba->cfg_hdw_queue) {
                pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-               lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+               lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
                return;
        }
 
 {
        int eq_idx;
 
-       for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
-               if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
+       for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++)
+               if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid)
                        break;
 
-       if (eq_idx < phba->io_channel_irqs) {
+       if (eq_idx < phba->cfg_hdw_queue) {
                printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
-               lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
+               lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq);
                return;
        }
 }
 
                                        localport->private;
                                tot = 0;
                                for (i = 0;
-                                       i < phba->cfg_nvme_io_channel; i++) {
+                                       i < phba->cfg_hdw_queue; i++) {
                                        cstat = &lport->cstat[i];
                                        data1 = atomic_read(
                                                &cstat->fc4NvmeInputRequests);
                }
 
                /* Interrupts per sec per EQ */
-               val = phba->cfg_fcp_imax / phba->io_channel_irqs;
+               val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;
                tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
 
                /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
                max_cqe = time_elapsed * tick_cqe;
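                /*
                 * Illustrative arithmetic (hypothetical values): with
                 * cfg_fcp_imax = 150000 and cfg_hdw_queue = 4, val works out
                 * to 37500 interrupts/sec per EQ; at CONFIG_HZ = 250 that is
                 * tick_cqe = 150 CQEs per tick, so max_cqe = time_elapsed * 150.
                 */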
 
-               for (i = 0; i < phba->io_channel_irqs; i++) {
+               for (i = 0; i < phba->cfg_hdw_queue; i++) {
                        /* Fast-path EQ */
-                       qp = phba->sli4_hba.hba_eq[i];
+                       qp = phba->sli4_hba.hdwq[i].hba_eq;
                        if (!qp)
                                continue;
 
                                if (val) {
                                        /* First, interrupts per sec per EQ */
                                        val = phba->cfg_fcp_imax /
-                                               phba->io_channel_irqs;
+                                               phba->cfg_hdw_queue;
 
                                        /* us delay between each interrupt */
                                        val = LPFC_SEC_TO_USEC / val;
 void
 lpfc_stop_hba_timers(struct lpfc_hba *phba)
 {
-       lpfc_stop_vport_timers(phba->pport);
+       if (phba->pport)
+               lpfc_stop_vport_timers(phba->pport);
        del_timer_sync(&phba->sli.mbox_tmo);
        del_timer_sync(&phba->fabric_block_timer);
        del_timer_sync(&phba->eratt_poll);
        shost->max_lun = vport->cfg_max_luns;
        shost->this_id = -1;
        shost->max_cmd_len = 16;
-       shost->nr_hw_queues = phba->cfg_fcp_io_channel;
+       shost->nr_hw_queues = phba->cfg_hdw_queue;
        if (phba->sli_rev == LPFC_SLI_REV4) {
                shost->dma_boundary =
                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
 {
        /* Reset some HBA SLI4 setup states */
        lpfc_stop_hba_timers(phba);
-       phba->pport->work_port_events = 0;
+       if (phba->pport)
+               phba->pport->work_port_events = 0;
        phba->sli4_hba.intr_enable = 0;
 }
 
                goto out_remove_rpi_hdrs;
        }
 
-       phba->sli4_hba.hba_eq_hdl = kcalloc(phba->io_channel_irqs,
-                                               sizeof(struct lpfc_hba_eq_hdl),
-                                               GFP_KERNEL);
+       phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_hdw_queue,
+                                           sizeof(struct lpfc_hba_eq_hdl),
+                                           GFP_KERNEL);
        if (!phba->sli4_hba.hba_eq_hdl) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2572 Failed allocate memory for "
                         * NVMET, FCP io channel WQs are not created.
                         */
                        length -= 6;
+
+                       /* Take off FCP queues */
                        if (!phba->nvmet_support)
-                               length -= phba->cfg_fcp_io_channel;
+                               length -= phba->cfg_hdw_queue;
 
-                       if (phba->cfg_nvme_io_channel > length) {
+                       /* Check to see if there is enough for NVME */
+                       if (phba->cfg_hdw_queue > length) {
                                lpfc_printf_log(
                                        phba, KERN_ERR, LOG_SLI,
                                        "2005 Reducing NVME IO channel to %d: "
-                                       "WQ %d CQ %d NVMEIO %d FCPIO %d\n",
+                                       "WQ %d CQ %d CommonIO %d\n",
                                        length,
                                        phba->sli4_hba.max_cfg_param.max_wq,
                                        phba->sli4_hba.max_cfg_param.max_cq,
-                                       phba->cfg_nvme_io_channel,
-                                       phba->cfg_fcp_io_channel);
+                                       phba->cfg_hdw_queue);
 
-                               phba->cfg_nvme_io_channel = length;
+                               phba->cfg_hdw_queue = length;
                        }
                }
        }
 static int
 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
-       int io_channel;
-
        /*
         * Sanity check for configured queue parameters against the run-time
         * device parameters
         */
 
-       /* Sanity check on HBA EQ parameters */
-       io_channel = phba->io_channel_irqs;
-
-       if (phba->sli4_hba.num_online_cpu < io_channel) {
-               lpfc_printf_log(phba,
-                               KERN_ERR, LOG_INIT,
-                               "3188 Reducing IO channels to match number of "
-                               "online CPUs: from %d to %d\n",
-                               io_channel, phba->sli4_hba.num_online_cpu);
-               io_channel = phba->sli4_hba.num_online_cpu;
-       }
-
-       if (io_channel > phba->sli4_hba.max_cfg_param.max_eq) {
+       if (phba->cfg_hdw_queue > phba->sli4_hba.max_cfg_param.max_eq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2575 Reducing IO channels to match number of "
                                "available EQs: from %d to %d\n",
-                               io_channel,
+                               phba->cfg_hdw_queue,
                                phba->sli4_hba.max_cfg_param.max_eq);
-               io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+               phba->cfg_hdw_queue = phba->sli4_hba.max_cfg_param.max_eq;
        }
 
-       /* The actual number of FCP / NVME event queues adopted */
-       if (io_channel != phba->io_channel_irqs)
-               phba->io_channel_irqs = io_channel;
-       if (phba->cfg_fcp_io_channel > io_channel)
-               phba->cfg_fcp_io_channel = io_channel;
-       if (phba->cfg_nvme_io_channel > io_channel)
-               phba->cfg_nvme_io_channel = io_channel;
        if (phba->nvmet_support) {
-               if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
-                       phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+               if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+                       phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
        }
        if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
                phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
-                       phba->io_channel_irqs, phba->cfg_fcp_io_channel,
-                       phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
+                       "2574 IO channels: hdwQ %d MRQ: %d\n",
+                       phba->cfg_hdw_queue, phba->cfg_nvmet_mrq);
 
        /* Get EQ depth from module parameter, fake the default for now */
        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
                return 1;
        }
        qdesc->qe_valid = 1;
-       phba->sli4_hba.nvme_cq[wqidx] = qdesc;
+       phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
 
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                      LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
                                wqidx);
                return 1;
        }
-       phba->sli4_hba.nvme_wq[wqidx] = qdesc;
+       phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
        return 0;
 }
                return 1;
        }
        qdesc->qe_valid = 1;
-       phba->sli4_hba.fcp_cq[wqidx] = qdesc;
+       phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
 
        /* Create Fast Path FCP WQs */
        if (phba->enab_exp_wqcq_pages) {
                                wqidx);
                return 1;
        }
-       phba->sli4_hba.fcp_wq[wqidx] = qdesc;
+       phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
        return 0;
 }
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
        struct lpfc_queue *qdesc;
-       int idx, io_channel;
+       int idx;
 
        /*
         * Create HBA Record arrays.
         * Both NVME and FCP will share the same vectors / EQs
         */
-       io_channel = phba->io_channel_irqs;
-       if (!io_channel)
-               return -ERANGE;
-
        phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
        phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
        phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
 
-       phba->sli4_hba.hba_eq =  kcalloc(io_channel,
-                                       sizeof(struct lpfc_queue *),
-                                       GFP_KERNEL);
-       if (!phba->sli4_hba.hba_eq) {
+       phba->sli4_hba.hdwq = kcalloc(phba->cfg_hdw_queue,
+                                     sizeof(struct lpfc_sli4_hdw_queue),
+                                     GFP_KERNEL);
+       if (!phba->sli4_hba.hdwq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "2576 Failed allocate memory for "
-                       "fast-path EQ record array\n");
+                       "6427 Failed allocate memory for "
+                       "fast-path Hardware Queue array\n");
                goto out_error;
        }
 
-       if (phba->cfg_fcp_io_channel) {
-               phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.fcp_cq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2577 Failed allocate memory for "
-                                       "fast-path CQ record array\n");
-                       goto out_error;
-               }
-               phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.fcp_wq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2578 Failed allocate memory for "
-                                       "fast-path FCP WQ record array\n");
-                       goto out_error;
-               }
-               /*
-                * Since the first EQ can have multiple CQs associated with it,
-                * this array is used to quickly see if we have a FCP fast-path
-                * CQ match.
-                */
-               phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
-                                                       sizeof(uint16_t),
-                                                       GFP_KERNEL);
-               if (!phba->sli4_hba.fcp_cq_map) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2545 Failed allocate memory for "
-                                       "fast-path CQ map\n");
-                       goto out_error;
-               }
-       }
-
-       if (phba->cfg_nvme_io_channel) {
-               phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.nvme_cq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "6077 Failed allocate memory for "
-                                       "fast-path CQ record array\n");
-                       goto out_error;
-               }
-
-               phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.nvme_wq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2581 Failed allocate memory for "
-                                       "fast-path NVME WQ record array\n");
-                       goto out_error;
-               }
-
-               /*
-                * Since the first EQ can have multiple CQs associated with it,
-                * this array is used to quickly see if we have a NVME fast-path
-                * CQ match.
-                */
-               phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
-                                                       sizeof(uint16_t),
-                                                       GFP_KERNEL);
-               if (!phba->sli4_hba.nvme_cq_map) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "6078 Failed allocate memory for "
-                                       "fast-path CQ map\n");
-                       goto out_error;
-               }
-
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                if (phba->nvmet_support) {
                        phba->sli4_hba.nvmet_cqset = kcalloc(
                                        phba->cfg_nvmet_mrq,
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 
        /* Create HBA Event Queues (EQs) */
-       for (idx = 0; idx < io_channel; idx++) {
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                /* Create EQs */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.eq_esize,
                        goto out_error;
                }
                qdesc->qe_valid = 1;
-               phba->sli4_hba.hba_eq[idx] = qdesc;
+               phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
        }
 
-       /* FCP and NVME io channels are not required to be balanced */
 
-       for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+       /* Allocate SCSI SLI4 CQ/WQs */
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
                if (lpfc_alloc_fcp_wq_cq(phba, idx))
                        goto out_error;
 
-       for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
-               if (lpfc_alloc_nvme_wq_cq(phba, idx))
-                       goto out_error;
+       /* Allocate NVME SLI4 CQ/WQs */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+                       if (lpfc_alloc_nvme_wq_cq(phba, idx))
+                               goto out_error;
 
-       if (phba->nvmet_support) {
-               for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
-                       qdesc = lpfc_sli4_queue_alloc(phba,
+               if (phba->nvmet_support) {
+                       for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+                               qdesc = lpfc_sli4_queue_alloc(
+                                                     phba,
                                                      LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.cq_esize,
                                                      phba->sli4_hba.cq_ecount);
-                       if (!qdesc) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "3142 Failed allocate NVME "
-                                       "CQ Set (%d)\n", idx);
-                               goto out_error;
+                               if (!qdesc) {
+                                       lpfc_printf_log(
+                                               phba, KERN_ERR, LOG_INIT,
+                                               "3142 Failed allocate NVME "
+                                               "CQ Set (%d)\n", idx);
+                                       goto out_error;
+                               }
+                               qdesc->qe_valid = 1;
+                               phba->sli4_hba.nvmet_cqset[idx] = qdesc;
                        }
-                       qdesc->qe_valid = 1;
-                       phba->sli4_hba.nvmet_cqset[idx] = qdesc;
                }
        }
 
        }
        phba->sli4_hba.dat_rq = qdesc;
 
-       if (phba->nvmet_support) {
+       if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+           phba->nvmet_support) {
                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
                        /* Create NVMET Receive Queue for header */
                        qdesc = lpfc_sli4_queue_alloc(phba,
 }
 
 static inline void
-lpfc_sli4_release_queue_map(uint16_t **qmap)
+lpfc_sli4_release_hdwq(struct lpfc_sli4_hdw_queue *hdwq, int max)
 {
-       if (*qmap != NULL) {
-               kfree(*qmap);
-               *qmap = NULL;
+       uint32_t idx;
+
+       for (idx = 0; idx < max; idx++) {
+               lpfc_sli4_queue_free(hdwq[idx].hba_eq);
+               lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
+               lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
+               lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
+               lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
+               hdwq[idx].hba_eq = NULL;
+               hdwq[idx].fcp_cq = NULL;
+               hdwq[idx].nvme_cq = NULL;
+               hdwq[idx].fcp_wq = NULL;
+               hdwq[idx].nvme_wq = NULL;
        }
+       kfree(hdwq);
 }
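
The release helper above walks one entry per configured hardware queue; a minimal sketch of the per-index structure it assumes, inferred from the members referenced throughout this patch (the authoritative definition belongs in the driver's SLI4 header):

struct lpfc_sli4_hdw_queue {
	struct lpfc_queue *hba_eq;	/* fast-path EQ shared by FCP and NVME */
	struct lpfc_queue *fcp_cq;	/* FCP fast-path completion queue */
	struct lpfc_queue *fcp_wq;	/* FCP fast-path work queue */
	struct lpfc_queue *nvme_cq;	/* NVME fast-path completion queue */
	struct lpfc_queue *nvme_wq;	/* NVME fast-path work queue */
	uint16_t fcp_cq_map;		/* CQ id cached for EQ-to-CQ lookup */
	uint16_t nvme_cq_map;		/* CQ id cached for EQ-to-CQ lookup */
};

phba->sli4_hba.hdwq is a kcalloc'd array of cfg_hdw_queue such entries, replacing the separate hba_eq/fcp_cq/fcp_wq/nvme_cq/nvme_wq pointer arrays and their cq_map arrays.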
 
 /**
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
        /* Release HBA eqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
-
-       /* Release FCP cqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
-                                phba->cfg_fcp_io_channel);
-
-       /* Release FCP wqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
-                                phba->cfg_fcp_io_channel);
-
-       /* Release FCP CQ mapping array */
-       lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
-
-       /* Release NVME cqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
-                                       phba->cfg_nvme_io_channel);
-
-       /* Release NVME wqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
-                                       phba->cfg_nvme_io_channel);
-
-       /* Release NVME CQ mapping array */
-       lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
+       if (phba->sli4_hba.hdwq)
+               lpfc_sli4_release_hdwq(phba->sli4_hba.hdwq,
+                                      phba->cfg_hdw_queue);
+       phba->sli4_hba.hdwq = NULL;
 
        if (phba->nvmet_support) {
                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
        cq->chann = qidx;
 
        if (qtype != LPFC_MBOX) {
-               /* Setup nvme_cq_map for fast lookup */
+               /* Setup cq_map for fast lookup */
                if (cq_map)
                        *cq_map = cq->queue_id;
 
 {
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
+       struct lpfc_sli4_hdw_queue *qp;
        LPFC_MBOXQ_t *mboxq;
        int qidx;
-       uint32_t length, io_channel;
+       uint32_t length;
        int rc = -ENOMEM;
 
        /* Check for dual-ULP support */
        /*
         * Set up HBA Event Queues (EQs)
         */
-       io_channel = phba->io_channel_irqs;
+       qp = phba->sli4_hba.hdwq;
 
        /* Set up HBA event queue */
-       if (io_channel && !phba->sli4_hba.hba_eq) {
+       if (!qp) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3147 Fast-path EQs not allocated\n");
                rc = -ENOMEM;
                goto out_error;
        }
-       for (qidx = 0; qidx < io_channel; qidx++) {
-               if (!phba->sli4_hba.hba_eq[qidx]) {
+       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+               if (!qp[qidx].hba_eq) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0522 Fast-path EQ (%d) not "
                                        "allocated\n", qidx);
                        rc = -ENOMEM;
                        goto out_destroy;
                }
-               rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
-                                               phba->cfg_fcp_imax);
+               rc = lpfc_eq_create(phba, qp[qidx].hba_eq,
+                                   phba->cfg_fcp_imax);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0523 Failed setup of fast-path EQ "
                        goto out_destroy;
                }
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "2584 HBA EQ setup: queue[%d]-id=%d\n",
-                               qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
+                               "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx,
+                               qp[qidx].hba_eq->queue_id);
        }
 
-       if (phba->cfg_nvme_io_channel) {
-               if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "6084 Fast-path NVME %s array not allocated\n",
-                               (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
-                       rc = -ENOMEM;
-                       goto out_destroy;
-               }
-
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
                        rc = lpfc_create_wq_cq(phba,
-                                       phba->sli4_hba.hba_eq[
-                                               qidx % io_channel],
-                                       phba->sli4_hba.nvme_cq[qidx],
-                                       phba->sli4_hba.nvme_wq[qidx],
-                                       &phba->sli4_hba.nvme_cq_map[qidx],
+                                       qp[qidx].hba_eq,
+                                       qp[qidx].nvme_cq,
+                                       qp[qidx].nvme_wq,
+                                       &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
                                        qidx, LPFC_NVME);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                }
        }
 
-       if (phba->cfg_fcp_io_channel) {
-               /* Set up fast-path FCP Response Complete Queue */
-               if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
+       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+               rc = lpfc_create_wq_cq(phba,
+                                      qp[qidx].hba_eq,
+                                      qp[qidx].fcp_cq,
+                                      qp[qidx].fcp_wq,
+                                      &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
+                                      qidx, LPFC_FCP);
+               if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "3148 Fast-path FCP %s array not allocated\n",
-                               phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
-                       rc = -ENOMEM;
-                       goto out_destroy;
-               }
-
-               for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
-                       rc = lpfc_create_wq_cq(phba,
-                                       phba->sli4_hba.hba_eq[
-                                               qidx % io_channel],
-                                       phba->sli4_hba.fcp_cq[qidx],
-                                       phba->sli4_hba.fcp_wq[qidx],
-                                       &phba->sli4_hba.fcp_cq_map[qidx],
-                                       qidx, LPFC_FCP);
-                       if (rc) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0535 Failed to setup fastpath "
                                        "FCP WQ/CQ (%d), rc = 0x%x\n",
                                        qidx, (uint32_t)rc);
-                               goto out_destroy;
-                       }
+                       goto out_destroy;
                }
        }
 
                goto out_destroy;
        }
 
-       rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+       rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
                               phba->sli4_hba.mbx_cq,
                               phba->sli4_hba.mbx_wq,
                               NULL, 0, LPFC_MBOX);
                if (phba->cfg_nvmet_mrq > 1) {
                        rc = lpfc_cq_create_set(phba,
                                        phba->sli4_hba.nvmet_cqset,
-                                       phba->sli4_hba.hba_eq,
+                                       qp,
                                        LPFC_WCQ, LPFC_NVMET);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                } else {
                        /* Set up NVMET Receive Complete Queue */
                        rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
-                                           phba->sli4_hba.hba_eq[0],
+                                           qp[0].hba_eq,
                                            LPFC_WCQ, LPFC_NVMET);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "6090 NVMET CQ setup: cq-id=%d, "
                                        "parent eq-id=%d\n",
                                        phba->sli4_hba.nvmet_cqset[0]->queue_id,
-                                       phba->sli4_hba.hba_eq[0]->queue_id);
+                                       qp[0].hba_eq->queue_id);
                }
        }
 
                rc = -ENOMEM;
                goto out_destroy;
        }
-       rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
-                                       phba->sli4_hba.els_cq,
-                                       phba->sli4_hba.els_wq,
-                                       NULL, 0, LPFC_ELS);
+       rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+                              phba->sli4_hba.els_cq,
+                              phba->sli4_hba.els_wq,
+                              NULL, 0, LPFC_ELS);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
-                       (uint32_t)rc);
+                               "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
+                               (uint32_t)rc);
                goto out_destroy;
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        phba->sli4_hba.els_wq->queue_id,
                        phba->sli4_hba.els_cq->queue_id);
 
-       if (phba->cfg_nvme_io_channel) {
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                /* Set up NVME LS Complete Queue */
                if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        rc = -ENOMEM;
                        goto out_destroy;
                }
-               rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
-                                       phba->sli4_hba.nvmels_cq,
-                                       phba->sli4_hba.nvmels_wq,
-                                       NULL, 0, LPFC_NVME_LS);
+               rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+                                      phba->sli4_hba.nvmels_cq,
+                                      phba->sli4_hba.nvmels_wq,
+                                      NULL, 0, LPFC_NVME_LS);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0529 Failed setup of NVVME LS WQ/CQ: "
-                               "rc = 0x%x\n", (uint32_t)rc);
+                                       "0526 Failed setup of NVVME LS WQ/CQ: "
+                                       "rc = 0x%x\n", (uint32_t)rc);
                        goto out_destroy;
                }
 
                        phba->sli4_hba.dat_rq->queue_id,
                        phba->sli4_hba.els_cq->queue_id);
 
-       for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
+       for (qidx = 0; qidx < phba->cfg_hdw_queue;
+            qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
                lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
                                         phba->cfg_fcp_imax);
 
 void
 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 {
+       struct lpfc_sli4_hdw_queue *qp;
        int qidx;
 
        /* Unset mailbox command work queue */
                lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
                                phba->sli4_hba.dat_rq);
 
-       /* Unset FCP work queue */
-       if (phba->sli4_hba.fcp_wq)
-               for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-                       lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
-
-       /* Unset NVME work queue */
-       if (phba->sli4_hba.nvme_wq) {
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-                       lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
-       }
-
        /* Unset mailbox command complete queue */
        if (phba->sli4_hba.mbx_cq)
                lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
        if (phba->sli4_hba.nvmels_cq)
                lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
 
-       /* Unset NVME response complete queue */
-       if (phba->sli4_hba.nvme_cq)
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-                       lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
-
        if (phba->nvmet_support) {
                /* Unset NVMET MRQ queue */
                if (phba->sli4_hba.nvmet_mrq_hdr) {
                }
        }
 
-       /* Unset FCP response complete queue */
-       if (phba->sli4_hba.fcp_cq)
-               for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-                       lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
-
-       /* Unset fast-path event queue */
-       if (phba->sli4_hba.hba_eq)
-               for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
-                       lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
+       /* Unset fast-path SLI4 queues */
+       if (phba->sli4_hba.hdwq) {
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                       qp = &phba->sli4_hba.hdwq[qidx];
+                       lpfc_wq_destroy(phba, qp->fcp_wq);
+                       lpfc_wq_destroy(phba, qp->nvme_wq);
+                       lpfc_cq_destroy(phba, qp->fcp_cq);
+                       lpfc_cq_destroy(phba, qp->nvme_cq);
+                       lpfc_eq_destroy(phba, qp->hba_eq);
+               }
+       }
 }
 
 /**
                if (vec >= vectors)
                        vec = 0;
                index++;
-               if (index >= phba->cfg_fcp_io_channel)
+               if (index >= phba->cfg_hdw_queue)
                        index = 0;
                cpup++;
        }
        char *name;
 
        /* Set up MSI-X multi-message vectors */
-       vectors = phba->io_channel_irqs;
+       vectors = phba->cfg_hdw_queue;
 
        rc = pci_alloc_irq_vectors(phba->pcidev,
                                (phba->nvmet_support) ? 1 : 2,
                }
        }
 
-       if (vectors != phba->io_channel_irqs) {
+       if (vectors != phba->cfg_hdw_queue) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3238 Reducing IO channels to match number of "
                                "MSI-X vectors, requested %d got %d\n",
-                               phba->io_channel_irqs, vectors);
-               if (phba->cfg_fcp_io_channel > vectors)
-                       phba->cfg_fcp_io_channel = vectors;
-               if (phba->cfg_nvme_io_channel > vectors)
-                       phba->cfg_nvme_io_channel = vectors;
-               if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
-                       phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-               else
-                       phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+                               phba->cfg_hdw_queue, vectors);
+               if (phba->cfg_hdw_queue > vectors)
+                       phba->cfg_hdw_queue = vectors;
+               if (phba->cfg_nvmet_mrq > vectors)
+                       phba->cfg_nvmet_mrq = vectors;
        }
        lpfc_cpu_affinity_check(phba, vectors);
 
                return rc;
        }
 
-       for (index = 0; index < phba->io_channel_irqs; index++) {
+       for (index = 0; index < phba->cfg_hdw_queue; index++) {
                phba->sli4_hba.hba_eq_hdl[index].idx = index;
                phba->sli4_hba.hba_eq_hdl[index].phba = phba;
        }
                        phba->intr_type = INTx;
                        intr_mode = 0;
 
-                       for (idx = 0; idx < phba->io_channel_irqs; idx++) {
+                       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                                eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
                                eqhdl->idx = idx;
                                eqhdl->phba = phba;
                int index;
 
                /* Free up MSI-X multi-message vectors */
-               for (index = 0; index < phba->io_channel_irqs; index++)
+               for (index = 0; index < phba->cfg_hdw_queue; index++)
                        free_irq(pci_irq_vector(phba->pcidev, index),
                                        &phba->sli4_hba.hba_eq_hdl[index]);
        } else {
        struct pci_dev *pdev = phba->pcidev;
 
        lpfc_stop_hba_timers(phba);
-       phba->sli4_hba.intr_enable = 0;
+       if (phba->pport)
+               phba->sli4_hba.intr_enable = 0;
 
        /*
         * Gracefully wait out the potential current outstanding asynchronous
                phba->nvme_support = 0;
                phba->nvmet_support = 0;
                phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
-               phba->cfg_nvme_io_channel = 0;
-               phba->io_channel_irqs = phba->cfg_fcp_io_channel;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
                                "6101 Disabling NVME support: "
                                "Not supported by firmware: %d %d\n",
        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-       /* Create SCSI host to the physical port */
-       error = lpfc_create_shost(phba);
-       if (error) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1415 Failed to create scsi host.\n");
-               goto out_unset_driver_resource;
-       }
-
-       /* Configure sysfs attributes */
-       vport = phba->pport;
-       error = lpfc_alloc_sysfs_attr(vport);
-       if (error) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1416 Failed to allocate sysfs attr\n");
-               goto out_destroy_shost;
-       }
-
-       shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;
 
        /* Put device to a known state before enabling interrupt */
+       phba->pport = NULL;
        lpfc_stop_port(phba);
 
        /* Configure and enable interrupt */
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0426 Failed to enable interrupt.\n");
                error = -ENODEV;
-               goto out_free_sysfs_attr;
+               goto out_unset_driver_resource;
        }
        /* Default to single EQ for non-MSI-X */
        if (phba->intr_type != MSIX) {
-               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
-                       phba->cfg_fcp_io_channel = 1;
+               phba->cfg_hdw_queue = 1;
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-                       phba->cfg_nvme_io_channel = 1;
                        if (phba->nvmet_support)
                                phba->cfg_nvmet_mrq = 1;
                }
-               phba->io_channel_irqs = 1;
+       }
+
+       /* Create SCSI host to the physical port */
+       error = lpfc_create_shost(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1415 Failed to create scsi host.\n");
+               goto out_disable_intr;
+       }
+       vport = phba->pport;
+       shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+
+       /* Configure sysfs attributes */
+       error = lpfc_alloc_sysfs_attr(vport);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1416 Failed to allocate sysfs attr\n");
+               goto out_destroy_shost;
        }
 
        /* Set up SLI-4 HBA */
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1421 Failed to set up hba\n");
                error = -ENODEV;
-               goto out_disable_intr;
+               goto out_free_sysfs_attr;
        }
 
        /* Log the current active interrupt mode */
                                phba, phba->sli4_hba.common_xri_max);
                        if (len == 0) {
                                error = -ENOMEM;
-                               goto out_disable_intr;
+                               goto out_free_sysfs_attr;
                        }
                        phba->total_common_bufs += len;
                }
 
        return 0;
 
-out_disable_intr:
-       lpfc_sli4_disable_intr(phba);
 out_free_sysfs_attr:
        lpfc_free_sysfs_attr(vport);
 out_destroy_shost:
        lpfc_destroy_shost(phba);
+out_disable_intr:
+       lpfc_sli4_disable_intr(phba);
 out_unset_driver_resource:
        lpfc_unset_driver_resource_phase2(phba);
 out_unset_driver_resource_s4:
 
        if (qidx) {
                str = "IO ";  /* IO queue */
                qhandle->index = ((qidx - 1) %
-                       vport->phba->cfg_nvme_io_channel);
+                       vport->phba->cfg_hdw_queue);
        } else {
                str = "ADM";  /* Admin queue */
                qhandle->index = qidx;
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6073 Binding %s HdwQueue %d  (cpu %d) to "
-                        "io_channel %d qhandle %p\n", str,
+                        "hdw_queue %d qhandle %p\n", str,
                         qidx, qhandle->cpu_id, qhandle->index, qhandle);
        *handle = (void *)qhandle;
        return 0;
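
A minimal sketch of the qidx-to-handle mapping established above (hypothetical standalone helper, not part of the patch; cfg_hdw_queue is the advertised number of IO hardware queues):

static unsigned int example_qhandle_index(unsigned int qidx,
					  unsigned int cfg_hdw_queue)
{
	if (qidx == 0)			/* admin queue keeps index 0 */
		return 0;
	/* IO queues fold onto the available hardware queues */
	return (qidx - 1) % cfg_hdw_queue;
}

For example, with cfg_hdw_queue = 4, IO qidx 1..4 map to hdw_queue 0..3 and qidx 5 wraps back to 0.
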
         * allocate + 3, one for cmd, one for rsp and one for this alignment
         */
        lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
-       lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
+       lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
 
        cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
-                       phba->cfg_nvme_io_channel), GFP_KERNEL);
+                       phba->cfg_hdw_queue), GFP_KERNEL);
        if (!cstat)
                return -ENOMEM;
 
                atomic_set(&lport->fc4NvmeLsRequests, 0);
                atomic_set(&lport->fc4NvmeLsCmpls, 0);
 
-               for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+               for (i = 0; i < phba->cfg_hdw_queue; i++) {
                        cstat = &lport->cstat[i];
                        atomic_set(&cstat->fc4NvmeInputRequests, 0);
                        atomic_set(&cstat->fc4NvmeOutputRequests, 0);
        struct lpfc_sli_ring  *pring;
        u32 i, wait_cnt = 0;
 
-       if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
+       if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
                return;
 
        /* Cycle through all NVME rings and make sure all outstanding
         * WQEs have been removed from the txcmplqs.
         */
-       for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-               pring = phba->sli4_hba.nvme_wq[i]->pring;
+       for (i = 0; i < phba->cfg_hdw_queue; i++) {
+               pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
 
                if (!pring)
                        continue;
 
                 * WQE release CQE
                 */
                ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
-               wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
+               wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
                pring = wq->pring;
                spin_lock_irqsave(&pring->ring_lock, iflags);
                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
        if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
-               wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
+               wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
                return;
         * allocate + 3, one for cmd, one for rsp and one for this alignment
         */
        lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
-       lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
+       lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
 
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
                return;
        if (phba->targetport) {
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
-                       wq = phba->sli4_hba.nvme_wq[qidx];
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                       wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
                        lpfc_nvmet_wqfull_flush(phba, wq, NULL);
                }
                init_completion(&tgtp->tport_unreg_done);
 
                return hwq;
        }
 
-       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
-           && phba->cfg_fcp_io_channel > 1) {
+       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU &&
+           phba->cfg_hdw_queue > 1) {
                cpu = lpfc_cmd->cpu;
                if (cpu < phba->sli4_hba.num_present_cpu) {
                        cpup = phba->sli4_hba.cpu_map;
                }
        }
        chann = atomic_add_return(1, &phba->fcp_qidx);
-       chann = chann % phba->cfg_fcp_io_channel;
+       chann = chann % phba->cfg_hdw_queue;
        return chann;
 }
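
The non-CPU-affine fallback above is a plain round-robin over the hardware queues; a minimal standalone sketch (hypothetical helper, with fcp_qidx standing in for the per-HBA atomic counter):

#include <linux/atomic.h>
#include <linux/types.h>

static u32 example_next_hdwq(atomic_t *fcp_qidx, u32 cfg_hdw_queue)
{
	/* atomically bump the counter, then fold onto the hdw queues */
	return atomic_add_return(1, fcp_qidx) % cfg_hdw_queue;
}

With cfg_hdw_queue = 4 and the counter starting at 0, successive callers get channels 1, 2, 3, 0, 1, ... independent of the issuing CPU.
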
 
 
        iocb = &lpfc_cmd->cur_iocbq;
        if (phba->sli_rev == LPFC_SLI_REV4) {
-               pring_s4 = phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
+               pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
                if (!pring_s4) {
                        ret = FAILED;
                        goto out_unlock;
 
 
        /* Look on all the FCP Rings for the iotag */
        if (phba->sli_rev >= LPFC_SLI_REV4) {
-               for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
-                       pring = phba->sli4_hba.fcp_wq[i]->pring;
+               for (i = 0; i < phba->cfg_hdw_queue; i++) {
+                       pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
                        lpfc_sli_abort_iocb_ring(phba, pring);
                }
        } else {
        struct lpfc_sli_ring  *pring;
        uint32_t i;
 
-       if (phba->sli_rev < LPFC_SLI_REV4)
+       if ((phba->sli_rev < LPFC_SLI_REV4) ||
+           !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
                return;
 
        /* Abort all IO on each NVME ring. */
-       for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-               pring = phba->sli4_hba.nvme_wq[i]->pring;
+       for (i = 0; i < phba->cfg_hdw_queue; i++) {
+               pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
                lpfc_sli_abort_wqe_ring(phba, pring);
        }
 }
 
        /* Look on all the FCP Rings for the iotag */
        if (phba->sli_rev >= LPFC_SLI_REV4) {
-               for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
-                       pring = phba->sli4_hba.fcp_wq[i]->pring;
+               for (i = 0; i < phba->cfg_hdw_queue; i++) {
+                       pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
 
                        spin_lock_irq(&pring->ring_lock);
                        /* Retrieve everything on txq */
        uint32_t i;
        struct lpfc_iocbq *piocb, *next_iocb;
 
-       if (phba->sli_rev < LPFC_SLI_REV4)
+       if ((phba->sli_rev < LPFC_SLI_REV4) ||
+           !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
                return;
 
        /* Hint to other driver operations that a flush is in progress. */
         * a local driver reason code.  This is a flush so no
         * abort exchange to FW.
         */
-       for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-               pring = phba->sli4_hba.nvme_wq[i]->pring;
+       for (i = 0; i < phba->cfg_hdw_queue; i++) {
+               pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
 
                spin_lock_irq(&pring->ring_lock);
                list_for_each_entry_safe(piocb, next_iocb,
 {
        int qidx;
        struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
+       struct lpfc_sli4_hdw_queue *qp;
 
        sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
        sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
                sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
                                                LPFC_QUEUE_REARM);
 
-       if (sli4_hba->fcp_cq)
-               for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-                       sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
+       qp = sli4_hba->hdwq;
+       if (sli4_hba->hdwq) {
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                       sli4_hba->sli4_cq_release(qp[qidx].fcp_cq,
                                                LPFC_QUEUE_REARM);
-
-       if (sli4_hba->nvme_cq)
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-                       sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
+                       sli4_hba->sli4_cq_release(qp[qidx].nvme_cq,
                                                LPFC_QUEUE_REARM);
+               }
 
-       if (sli4_hba->hba_eq)
-               for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
-                       sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
-                                                       LPFC_QUEUE_REARM);
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++)
+                       sli4_hba->sli4_eq_release(qp[qidx].hba_eq,
+                                               LPFC_QUEUE_REARM);
+       }
 
        if (phba->nvmet_support) {
                for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
 
        /* Find the eq associated with the mcq */
 
-       if (sli4_hba->hba_eq)
-               for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
-                       if (sli4_hba->hba_eq[eqidx]->queue_id ==
+       if (sli4_hba->hdwq)
+               for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++)
+                       if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
                            sli4_hba->mbx_cq->assoc_qid) {
-                               fpeq = sli4_hba->hba_eq[eqidx];
+                               fpeq = sli4_hba->hdwq[eqidx].hba_eq;
                                break;
                        }
        if (!fpeq)
        /* Get the WQ */
        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
            (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-               wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
+               wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
        } else {
                wq = phba->sli4_hba.els_wq;
        }
 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 {
        if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
-               if (unlikely(!phba->sli4_hba.fcp_wq))
+               if (unlikely(!phba->sli4_hba.hdwq))
                        return NULL;
                /*
                 * for abort iocb hba_wqidx should already
                                lpfc_sli4_scmd_to_wqidx_distr(
                                        phba, piocb->context1);
                        piocb->hba_wqidx = piocb->hba_wqidx %
-                               phba->cfg_fcp_io_channel;
+                               phba->cfg_hdw_queue;
                }
-               return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
+               return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
        } else {
                if (unlikely(!phba->sli4_hba.els_wq))
                        return NULL;
                        if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
 
                                /* Get associated EQ with this index */
-                               fpeq = phba->sli4_hba.hba_eq[idx];
+                               fpeq = phba->sli4_hba.hdwq[idx].hba_eq;
 
                                /* Turn off interrupts from this EQ */
                                phba->sli4_hba.sli4_eq_clr_intr(fpeq);
        INIT_LIST_HEAD(&psli->mboxq);
        INIT_LIST_HEAD(&psli->mboxq_cmpl);
        /* Initialize list headers for txq and txcmplq as double linked lists */
-       for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
-               pring = phba->sli4_hba.fcp_wq[i]->pring;
-               pring->flag = 0;
-               pring->ringno = LPFC_FCP_RING;
-               INIT_LIST_HEAD(&pring->txq);
-               INIT_LIST_HEAD(&pring->txcmplq);
-               INIT_LIST_HEAD(&pring->iocb_continueq);
-               spin_lock_init(&pring->ring_lock);
-       }
-       for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-               pring = phba->sli4_hba.nvme_wq[i]->pring;
+       for (i = 0; i < phba->cfg_hdw_queue; i++) {
+               pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
                pring->flag = 0;
                pring->ringno = LPFC_FCP_RING;
                INIT_LIST_HEAD(&pring->txq);
        INIT_LIST_HEAD(&pring->iocb_continueq);
        spin_lock_init(&pring->ring_lock);
 
-       if (phba->cfg_nvme_io_channel) {
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (i = 0; i < phba->cfg_hdw_queue; i++) {
+                       pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+                       pring->flag = 0;
+                       pring->ringno = LPFC_FCP_RING;
+                       INIT_LIST_HEAD(&pring->txq);
+                       INIT_LIST_HEAD(&pring->txcmplq);
+                       INIT_LIST_HEAD(&pring->iocb_continueq);
+                       spin_lock_init(&pring->ring_lock);
+               }
                pring = phba->sli4_hba.nvmels_wq->pring;
                pring->flag = 0;
                pring->ringno = LPFC_ELS_RING;
        /* Get the reference to the corresponding CQ */
        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
+       /* First check for NVME/SCSI completion */
+       if (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map) {
+               /* Process NVME / NVMET command completion */
+               cq = phba->sli4_hba.hdwq[qidx].nvme_cq;
+               goto  process_cq;
+       }
+
+       if (cqid == phba->sli4_hba.hdwq[qidx].fcp_cq_map) {
+               /* Process FCP command completion */
+               cq = phba->sli4_hba.hdwq[qidx].fcp_cq;
+               goto  process_cq;
+       }
+
+       /* Next check for NVMET completion */
        if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
                id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
                if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
                }
        }
 
-       if (phba->sli4_hba.nvme_cq_map &&
-           (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
-               /* Process NVME / NVMET command completion */
-               cq = phba->sli4_hba.nvme_cq[qidx];
-               goto  process_cq;
-       }
-
-       if (phba->sli4_hba.fcp_cq_map &&
-           (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
-               /* Process FCP command completion */
-               cq = phba->sli4_hba.fcp_cq[qidx];
-               goto  process_cq;
-       }
-
        if (phba->sli4_hba.nvmels_cq &&
            (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
                /* Process NVME unsol rcv */
 
        /* Otherwise this is a Slow path event */
        if (cq == NULL) {
-               lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
+               lpfc_sli4_sp_handle_eqe(phba, eqe,
+                                       phba->sli4_hba.hdwq[qidx].hba_eq);
                return;
        }
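
For clarity, the fast-path cqid dispatch order after this change is summarized below (descriptive comment only, mirroring the checks in this function):

/*
 * cqid lookup order in the fast-path EQE handler:
 *   1. hdwq[qidx].nvme_cq_map        -> NVME / NVMET command completion
 *   2. hdwq[qidx].fcp_cq_map         -> FCP command completion
 *   3. nvmet_cqset[0..cfg_nvmet_mrq) -> NVMET receive completion
 *   4. nvmels_cq                     -> NVME LS unsolicited receive
 *   5. no match                      -> slow-path event handling
 */
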
 
        }
 
        /* Save EQ associated with this CQ */
-       cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
+       cq->assoc_qp = phba->sli4_hba.hdwq[qidx].hba_eq;
 
        if (!queue_work(phba->wq, &cq->irqwork))
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 
        if (unlikely(!phba))
                return IRQ_NONE;
-       if (unlikely(!phba->sli4_hba.hba_eq))
+       if (unlikely(!phba->sli4_hba.hdwq))
                return IRQ_NONE;
 
        /* Get to the EQ struct associated with this vector */
-       fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
+       fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
        if (unlikely(!fpeq))
                return IRQ_NONE;
 
        /*
         * Invoke fast-path host attention interrupt handling as appropriate.
         */
-       for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
+       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
                hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
                                        &phba->sli4_hba.hba_eq_hdl[qidx]);
                if (hba_irq_rc == IRQ_HANDLED)
        union lpfc_sli4_cfg_shdr *shdr;
        uint16_t dmult;
 
-       if (startq >= phba->io_channel_irqs)
+       if (startq >= phba->cfg_hdw_queue)
                return 0;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        eq_delay = &mbox->u.mqe.un.eq_delay;
 
        /* Calculate delay multiplier from maximum interrupts per second */
-       result = imax / phba->io_channel_irqs;
+       result = imax / phba->cfg_hdw_queue;
        if (result > LPFC_DMULT_CONST || result == 0)
                dmult = 0;
        else
                dmult = LPFC_DMULT_MAX;
 
        cnt = 0;
-       for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
-               eq = phba->sli4_hba.hba_eq[qidx];
+       for (qidx = startq; qidx < phba->cfg_hdw_queue; qidx++) {
+               eq = phba->sli4_hba.hdwq[qidx].hba_eq;
                if (!eq)
                        continue;
                eq->q_mode = imax;
                        val =  phba->cfg_fcp_imax;
                        if (val) {
                                /* First, interrupts per sec per EQ */
-                               val = phba->cfg_fcp_imax /
-                                       phba->io_channel_irqs;
+                               val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;
 
                                /* us delay between each interrupt */
                                val = LPFC_SEC_TO_USEC / val;
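
A worked example of the coalescing arithmetic above (illustrative values only; this assumes LPFC_SEC_TO_USEC is one million microseconds per second):

/*
 * cfg_fcp_imax  = 150000  interrupts/sec budget for the whole HBA
 * cfg_hdw_queue = 4       EQs sharing that budget
 *
 *   per-EQ rate = 150000 / 4      = 37500 interrupts/sec
 *   EQ delay    = 1000000 / 37500 = 26 us between interrupts (integer division)
 */
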
  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
  * @phba: HBA structure that indicates port to create a queue on.
  * @cqp: The queue structure array to use to create the completion queues.
- * @eqp: The event queue array to bind these completion queues to.
+ * @hdwq: The hardware queue array with the EQs to bind completion queues to.
  *
 * This function creates a set of completion queues to support MRQ
  * as detailed in @cqp, on a port,
  **/
 int
 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
-                  struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
+                  struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
+                  uint32_t subtype)
 {
        struct lpfc_queue *cq;
        struct lpfc_queue *eq;
 
        /* sanity check on queue memory */
        numcq = phba->cfg_nvmet_mrq;
-       if (!cqp || !eqp || !numcq)
+       if (!cqp || !hdwq || !numcq)
                return -ENODEV;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 
        for (idx = 0; idx < numcq; idx++) {
                cq = cqp[idx];
-               eq = eqp[idx];
+               eq = hdwq[idx].hba_eq;
                if (!cq || !eq) {
                        status = -ENOMEM;
                        goto out;
 
        if (phba->link_flag & LS_MDS_LOOPBACK) {
                /* MDS WQE are posted only to first WQ*/
-               wq = phba->sli4_hba.fcp_wq[0];
+               wq = phba->sli4_hba.hdwq[0].fcp_wq;
                if (unlikely(!wq))
                        return 0;
                pring = wq->pring;
        /* NVME_FCREQ and NVME_ABTS requests */
        if (pwqe->iocb_flag & LPFC_IO_NVME) {
                /* Get the IO distribution (hba_wqidx) for WQ assignment. */
-               pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+               pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;
 
                spin_lock_irqsave(&pring->ring_lock, iflags);
-               wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
+               wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
                bf_set(wqe_cqid, &wqe->generic.wqe_com,
-                     phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+                     phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
                ret = lpfc_sli4_wq_put(wq, wqe);
                if (ret) {
                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
        /* NVMET requests */
        if (pwqe->iocb_flag & LPFC_IO_NVMET) {
                /* Get the IO distribution (hba_wqidx) for WQ assignment. */
-               pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+               pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;
 
                spin_lock_irqsave(&pring->ring_lock, iflags);
                ctxp = pwqe->context2;
                }
                bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
                       pwqe->sli4_xritag);
-               wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
+               wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
                bf_set(wqe_cqid, &wqe->generic.wqe_com,
-                     phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+                     phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
                ret = lpfc_sli4_wq_put(wq, wqe);
                if (ret) {
                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
 #define LPFC_NEMBED_MBOX_SGL_CNT               254
 
 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
-#define LPFC_HBA_IO_CHAN_MIN   0
-#define LPFC_HBA_IO_CHAN_MAX   32
-#define LPFC_FCP_IO_CHAN_DEF   4
-#define LPFC_NVME_IO_CHAN_DEF  0
+#define LPFC_HBA_HDWQ_MIN      0
+#define LPFC_HBA_HDWQ_MAX      64
+#define LPFC_HBA_HDWQ_DEF      0
 
 /* Common buffer size to accommodate SCSI and NVME IO buffers */
 #define LPFC_COMMON_IO_BUF_SZ  768
 
-/* Number of channels used for Flash Optimized Fabric (FOF) operations */
-
-#define LPFC_FOF_IO_CHAN_NUM       1
-
 /*
  * Provide the default FCF Record attributes used by the driver
  * when nonFIP mode is configured and there is no other default
 #define LPFC_VECTOR_MAP_EMPTY  0xffff
 
 /* SLI4 HBA data structure entries */
+struct lpfc_sli4_hdw_queue {
+       /* Pointers to the constructed SLI4 queues */
+       struct lpfc_queue *hba_eq;  /* Event queues for HBA */
+       struct lpfc_queue *fcp_cq;  /* Fast-path FCP compl queue */
+       struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
+       struct lpfc_queue *fcp_wq;  /* Fast-path FCP work queue */
+       struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
+       uint16_t fcp_cq_map;
+       uint16_t nvme_cq_map;
+};
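
To show what the consolidation means at call sites, a minimal before/after sketch (illustrative only; idx is any index below cfg_hdw_queue):

	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *wq, *cq, *eq;

	/* Before: per-type parallel arrays, each indexed separately */
	wq = phba->sli4_hba.fcp_wq[idx];
	cq = phba->sli4_hba.fcp_cq[idx];
	eq = phba->sli4_hba.hba_eq[idx];

	/* After: one lpfc_sli4_hdw_queue carries the whole EQ/CQ/WQ tuple */
	qp = &phba->sli4_hba.hdwq[idx];
	wq = qp->fcp_wq;
	cq = qp->fcp_cq;
	eq = qp->hba_eq;
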
+
 struct lpfc_sli4_hba {
        void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
                                           * config space registers
        uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm);
 
        /* Pointers to the constructed SLI4 queues */
-       struct lpfc_queue **hba_eq;  /* Event queues for HBA */
-       struct lpfc_queue **fcp_cq;  /* Fast-path FCP compl queue */
-       struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
+       struct lpfc_sli4_hdw_queue *hdwq;
+       struct list_head lpfc_wq_list;
+
+       /* Pointers to the constructed SLI4 queues for NVMET */
        struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
        struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
        struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
-       struct lpfc_queue **fcp_wq;  /* Fast-path FCP work queue */
-       struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
-       uint16_t *fcp_cq_map;
-       uint16_t *nvme_cq_map;
-       struct list_head lpfc_wq_list;
 
        struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
        struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
 int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
                        struct lpfc_queue *, uint32_t, uint32_t);
 int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
-                       struct lpfc_queue **eqp, uint32_t type,
+                       struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
                        uint32_t subtype);
 int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
                       struct lpfc_queue *, uint32_t);