lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
 {
        struct lpfc_queue *qdesc;
-       int cnt;
 
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-                                           phba->sli4_hba.cq_ecount);
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_NVME_PAGE_SIZE,
+                                     phba->sli4_hba.cq_esize,
+                                     LPFC_NVME_CQSIZE);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0508 Failed allocate fast-path NVME CQ (%d)\n",
        }
        phba->sli4_hba.nvme_cq[wqidx] = qdesc;
 
-       cnt = LPFC_NVME_WQSIZE;
-       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_NVME_PAGE_SIZE,
+                                     LPFC_WQE128_SIZE, LPFC_NVME_WQSIZE);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0509 Failed allocate fast-path NVME WQ (%d)\n",
        uint32_t wqesize;
 
        /* Create Fast Path FCP CQs */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-                                       phba->sli4_hba.cq_ecount);
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.cq_esize,
+                                     phba->sli4_hba.cq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
        /* Create Fast Path FCP WQs */
        wqesize = (phba->fcp_embed_io) ?
                LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-       qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     wqesize, phba->sli4_hba.wq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0503 Failed allocate fast-path FCP WQ (%d)\n",
        /* Create HBA Event Queues (EQs) */
        for (idx = 0; idx < io_channel; idx++) {
                /* Create EQs */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.eq_esize,
                                              phba->sli4_hba.eq_ecount);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
        if (phba->nvmet_support) {
                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
                        qdesc = lpfc_sli4_queue_alloc(phba,
-                                       phba->sli4_hba.cq_esize,
-                                       phba->sli4_hba.cq_ecount);
+                                                     LPFC_DEFAULT_PAGE_SIZE,
+                                                     phba->sli4_hba.cq_esize,
+                                                     phba->sli4_hba.cq_ecount);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "3142 Failed allocate NVME "
         */
 
        /* Create slow-path Mailbox Command Complete Queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.cq_esize,
                                      phba->sli4_hba.cq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
        phba->sli4_hba.mbx_cq = qdesc;
 
        /* Create slow-path ELS Complete Queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.cq_esize,
                                      phba->sli4_hba.cq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 
        /* Create Mailbox Command Queue */
 
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.mq_esize,
                                      phba->sli4_hba.mq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
         */
 
        /* Create slow-path ELS Work Queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.wq_esize,
                                      phba->sli4_hba.wq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                /* Create NVME LS Complete Queue */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.cq_esize,
                                              phba->sli4_hba.cq_ecount);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                phba->sli4_hba.nvmels_cq = qdesc;
 
                /* Create NVME LS Work Queue */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.wq_esize,
                                              phba->sli4_hba.wq_ecount);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
         */
 
        /* Create Receive Queue for header */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.rq_esize,
                                      phba->sli4_hba.rq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
        phba->sli4_hba.hdr_rq = qdesc;
 
        /* Create Receive Queue for data */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.rq_esize,
                                      phba->sli4_hba.rq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
                        /* Create NVMET Receive Queue for header */
                        qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.rq_esize,
                                                      LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
 
                        /* Create NVMET Receive Queue for data */
                        qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.rq_esize,
                                                      LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
                        qidx, (uint32_t)rc);
                return rc;
        }
+       cq->chann = qidx;
 
        if (qtype != LPFC_MBOX) {
                /* Setup nvme_cq_map for fast lookup */
                        /* no need to tear down cq - caller will do so */
                        return rc;
                }
+               wq->chann = qidx;
 
                /* Bind this CQ/WQ to the NVME ring */
                pring = wq->pring;
                                                "rc = 0x%x\n", (uint32_t)rc);
                                goto out_destroy;
                        }
+                       phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "6090 NVMET CQ setup: cq-id=%d, "
                                        "parent eq-id=%d\n",
        uint32_t wqesize;
 
        /* Create FOF EQ */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.eq_esize,
                                      phba->sli4_hba.eq_ecount);
        if (!qdesc)
                goto out_error;
        if (phba->cfg_fof) {
 
                /* Create OAS CQ */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-                                                     phba->sli4_hba.cq_ecount);
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.cq_esize,
+                                             phba->sli4_hba.cq_ecount);
                if (!qdesc)
                        goto out_error;
 
                /* Create OAS WQ */
                wqesize = (phba->fcp_embed_io) ?
                                LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-               qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             wqesize,
                                              phba->sli4_hba.wq_ecount);
 
                if (!qdesc)
 
        while (!list_empty(&queue->page_list)) {
                list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
                                 list);
-               dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+               dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
                                  dmabuf->virt, dmabuf->phys);
                kfree(dmabuf);
        }
 /**
  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
  * @phba: The HBA that this queue is being created on.
+ * @page_size: The size, in bytes, of each queue page
  * @entry_size: The size of each queue entry for this queue.
  * @entry_count: The number of entries that this queue will handle.
  *
  * queue on the HBA.
  **/
 struct lpfc_queue *
-lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
-                     uint32_t entry_count)
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+                     uint32_t entry_size, uint32_t entry_count)
 {
        struct lpfc_queue *queue;
        struct lpfc_dmabuf *dmabuf;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
        if (!phba->sli4_hba.pc_sli4_params.supported)
-               hw_page_size = SLI4_PAGE_SIZE;
+               hw_page_size = page_size;
 
        queue = kzalloc(sizeof(struct lpfc_queue) +
                        (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
        INIT_LIST_HEAD(&queue->wq_list);
        INIT_LIST_HEAD(&queue->page_list);
        INIT_LIST_HEAD(&queue->child_list);
+
+       /* Set queue parameters now.  If the system cannot provide memory
+        * resources, the free routine needs to know what was allocated.
+        */
+       queue->entry_size = entry_size;
+       queue->entry_count = entry_count;
+       queue->page_size = hw_page_size;
+       queue->phba = phba;
+
        for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
                dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
                if (!dmabuf)
                        queue->qe[total_qe_count].address = dma_pointer;
                }
        }
-       queue->entry_size = entry_size;
-       queue->entry_count = entry_count;
-       queue->phba = phba;
        INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
        INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
 
        if (!cq || !eq)
                return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
-               hw_page_size = SLI4_PAGE_SIZE;
+               hw_page_size = cq->page_size;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
        bf_set(lpfc_mbox_hdr_version, &shdr->request,
               phba->sli4_hba.pc_sli4_params.cqv);
        if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
-               /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
-               bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+               bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+                      (cq->page_size / SLI4_PAGE_SIZE));
                bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
                       eq->queue_id);
        } else {
                       eq->queue_id);
        }
        switch (cq->entry_count) {
+       case 2048:
+       case 4096:
+               if (phba->sli4_hba.pc_sli4_params.cqv ==
+                   LPFC_Q_CREATE_VERSION_2) {
+                       cq_create->u.request.context.lpfc_cq_context_count =
+                               cq->entry_count;
+                       bf_set(lpfc_cq_context_count,
+                              &cq_create->u.request.context,
+                              LPFC_CQ_CNT_WORD7);
+                       break;
+               }
+               /* Fall Thru */
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0361 Unsupported CQ count: "
                break;
        }
        list_for_each_entry(dmabuf, &cq->page_list, list) {
-               memset(dmabuf->virt, 0, hw_page_size);
+               memset(dmabuf->virt, 0, cq->page_size);
                cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
                                        putPaddrLow(dmabuf->phys);
                cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
        numcq = phba->cfg_nvmet_mrq;
        if (!cqp || !eqp || !numcq)
                return -ENODEV;
-       if (!phba->sli4_hba.pc_sli4_params.supported)
-               hw_page_size = SLI4_PAGE_SIZE;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                        status = -ENOMEM;
                        goto out;
                }
+               if (!phba->sli4_hba.pc_sli4_params.supported)
+                       hw_page_size = cq->page_size;
 
                switch (idx) {
                case 0:
                        bf_set(lpfc_mbx_cq_create_set_num_cq,
                               &cq_set->u.request, numcq);
                        switch (cq->entry_count) {
+                       case 2048:
+                       case 4096:
+                               if (phba->sli4_hba.pc_sli4_params.cqv ==
+                                   LPFC_Q_CREATE_VERSION_2) {
+                                       bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+                                              &cq_set->u.request,
+                                               cq->entry_count);
+                                       bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+                                              &cq_set->u.request,
+                                              LPFC_CQ_CNT_WORD7);
+                                       break;
+                               }
+                               /* Fall Thru */
                        default:
                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                "3118 Bad CQ count. (%d)\n",
                cq->host_index = 0;
                cq->hba_index = 0;
                cq->entry_repost = LPFC_CQ_REPOST;
+               cq->chann = idx;
 
                rc = 0;
                list_for_each_entry(dmabuf, &cq->page_list, list) {
        void __iomem *bar_memmap_p;
        uint32_t db_offset;
        uint16_t pci_barset;
+       uint8_t wq_create_version;
 
        /* sanity check on queue memory */
        if (!wq || !cq)
                return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
-               hw_page_size = SLI4_PAGE_SIZE;
+               hw_page_size = wq->page_size;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
        bf_set(lpfc_mbox_hdr_version, &shdr->request,
               phba->sli4_hba.pc_sli4_params.wqv);
 
-       switch (phba->sli4_hba.pc_sli4_params.wqv) {
+       if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
+               wq_create_version = LPFC_Q_CREATE_VERSION_1;
+       else
+               wq_create_version = LPFC_Q_CREATE_VERSION_0;
+
+       switch (wq_create_version) {
        case LPFC_Q_CREATE_VERSION_0:
                switch (wq->entry_size) {
                default:
                }
                bf_set(lpfc_mbx_wq_create_page_size,
                       &wq_create->u.request_1,
-                      LPFC_WQ_PAGE_SIZE_4096);
+                      (wq->page_size / SLI4_PAGE_SIZE));
                page = wq_create->u.request_1.page;
                break;
        default: