list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
                                 list) {
                list_del(&sb->list);
-               pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+               dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
                                 list) {
                list_del(&sb->list);
-               pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+               dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_put, list) {
                list_del(&lpfc_ncmd->list);
-               pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+               dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
                              lpfc_ncmd->dma_handle);
                kfree(lpfc_ncmd);
                phba->total_nvme_bufs--;
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_get, list) {
                list_del(&lpfc_ncmd->list);
-               pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+               dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
                              lpfc_ncmd->dma_handle);
                kfree(lpfc_ncmd);
                phba->total_nvme_bufs--;
                        list_remove_head(&scsi_sgl_list, psb,
                                         struct lpfc_scsi_buf, list);
                        if (psb) {
-                               pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+                               dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                              psb->data, psb->dma_handle);
                                kfree(psb);
                        }
                        list_remove_head(&nvme_sgl_list, lpfc_ncmd,
                                         struct lpfc_nvme_buf, list);
                        if (lpfc_ncmd) {
-                               pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+                               dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                              lpfc_ncmd->data,
                                              lpfc_ncmd->dma_handle);
                                kfree(lpfc_ncmd);
        if (phba->nvmet_support) {
                /* Only 1 vport (pport) will support NVME target */
                if (phba->txrdy_payload_pool == NULL) {
-                       phba->txrdy_payload_pool = pci_pool_create(
-                               "txrdy_pool", phba->pcidev,
+                       phba->txrdy_payload_pool = dma_pool_create(
+                               "txrdy_pool", &phba->pcidev->dev,
                                TXRDY_PAYLOAD_LEN, 16, 0);
                        if (phba->txrdy_payload_pool) {
                                phba->targetport = NULL;
 
                        i = SLI4_PAGE_SIZE;
 
                phba->lpfc_sg_dma_buf_pool =
-                       pci_pool_create("lpfc_sg_dma_buf_pool",
-                                       phba->pcidev,
+                       dma_pool_create("lpfc_sg_dma_buf_pool",
+                                       &phba->pcidev->dev,
                                        phba->cfg_sg_dma_buf_size,
                                        i, 0);
                if (!phba->lpfc_sg_dma_buf_pool)
 
        } else {
                phba->lpfc_sg_dma_buf_pool =
-                       pci_pool_create("lpfc_sg_dma_buf_pool",
-                                       phba->pcidev, phba->cfg_sg_dma_buf_size,
+                       dma_pool_create("lpfc_sg_dma_buf_pool",
+                                       &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
                                        align, 0);
 
                if (!phba->lpfc_sg_dma_buf_pool)
                        goto fail;
        }
 
-       phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+       phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
                                                        LPFC_BPL_SIZE,
                                                        align, 0);
        if (!phba->lpfc_mbuf_pool)
        pool->max_count = 0;
        pool->current_count = 0;
        for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
-               pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+               pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
                                       GFP_KERNEL, &pool->elements[i].phys);
                if (!pool->elements[i].virt)
                        goto fail_free_mbuf_pool;
                                                sizeof(struct lpfc_node_rrq));
                if (!phba->rrq_pool)
                        goto fail_free_nlp_mem_pool;
-               phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
-                                             phba->pcidev,
+               phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
+                                             &phba->pcidev->dev,
                                              LPFC_HDR_BUF_SIZE, align, 0);
                if (!phba->lpfc_hrb_pool)
                        goto fail_free_rrq_mem_pool;
 
-               phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
-                                             phba->pcidev,
+               phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
+                                             &phba->pcidev->dev,
                                              LPFC_DATA_BUF_SIZE, align, 0);
                if (!phba->lpfc_drb_pool)
                        goto fail_free_hrb_pool;
                phba->lpfc_hbq_pool = NULL;
        } else {
-               phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
-                       phba->pcidev, LPFC_BPL_SIZE, align, 0);
+               phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
+                       &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
                if (!phba->lpfc_hbq_pool)
                        goto fail_free_nlp_mem_pool;
                phba->lpfc_hrb_pool = NULL;
 
        return 0;
 fail_free_drb_pool:
-       pci_pool_destroy(phba->lpfc_drb_pool);
+       dma_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;
  fail_free_hrb_pool:
-       pci_pool_destroy(phba->lpfc_hrb_pool);
+       dma_pool_destroy(phba->lpfc_hrb_pool);
        phba->lpfc_hrb_pool = NULL;
  fail_free_rrq_mem_pool:
        mempool_destroy(phba->rrq_pool);
        phba->mbox_mem_pool = NULL;
  fail_free_mbuf_pool:
        while (i--)
-               pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+               dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                                                 pool->elements[i].phys);
        kfree(pool->elements);
  fail_free_lpfc_mbuf_pool:
-       pci_pool_destroy(phba->lpfc_mbuf_pool);
+       dma_pool_destroy(phba->lpfc_mbuf_pool);
        phba->lpfc_mbuf_pool = NULL;
  fail_free_dma_buf_pool:
-       pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+       dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
        phba->lpfc_sg_dma_buf_pool = NULL;
  fail:
        return -ENOMEM;
 lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
 {
        phba->lpfc_nvmet_drb_pool =
-               pci_pool_create("lpfc_nvmet_drb_pool",
-                               phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+               dma_pool_create("lpfc_nvmet_drb_pool",
+                               &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
                                SGL_ALIGN_SZ, 0);
        if (!phba->lpfc_nvmet_drb_pool) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
        /* Free HBQ pools */
        lpfc_sli_hbqbuf_free_all(phba);
        if (phba->lpfc_nvmet_drb_pool)
-               pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+               dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
        phba->lpfc_nvmet_drb_pool = NULL;
        if (phba->lpfc_drb_pool)
-               pci_pool_destroy(phba->lpfc_drb_pool);
+               dma_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;
        if (phba->lpfc_hrb_pool)
-               pci_pool_destroy(phba->lpfc_hrb_pool);
+               dma_pool_destroy(phba->lpfc_hrb_pool);
        phba->lpfc_hrb_pool = NULL;
        if (phba->txrdy_payload_pool)
-               pci_pool_destroy(phba->txrdy_payload_pool);
+               dma_pool_destroy(phba->txrdy_payload_pool);
        phba->txrdy_payload_pool = NULL;
 
        if (phba->lpfc_hbq_pool)
-               pci_pool_destroy(phba->lpfc_hbq_pool);
+               dma_pool_destroy(phba->lpfc_hbq_pool);
        phba->lpfc_hbq_pool = NULL;
 
        if (phba->rrq_pool)
 
        /* Free MBUF memory pool */
        for (i = 0; i < pool->current_count; i++)
-               pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+               dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                              pool->elements[i].phys);
        kfree(pool->elements);
 
-       pci_pool_destroy(phba->lpfc_mbuf_pool);
+       dma_pool_destroy(phba->lpfc_mbuf_pool);
        phba->lpfc_mbuf_pool = NULL;
 
        /* Free DMA buffer memory pool */
-       pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+       dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
        phba->lpfc_sg_dma_buf_pool = NULL;
 
        /* Free Device Data memory pool */
  * @handle: used to return the DMA-mapped address of the mbuf
  *
- * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
+ * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool DMA pool.
- * Allocates from generic pci_pool_alloc function first and if that fails and
+ * Allocates from generic dma_pool_alloc function first and if that fails and
  * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
  * HBA's pool.
  *
        unsigned long iflags;
        void *ret;
 
-       ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+       ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
 
        spin_lock_irqsave(&phba->hbalock, iflags);
        if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
                pool->elements[pool->current_count].phys = dma;
                pool->current_count++;
        } else {
-               pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+               dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
        }
        return;
 }
  * @handle: used to return the DMA-mapped address of the nvmet_buf
  *
  * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
- * PCI pool.  Allocates from generic pci_pool_alloc function.
+ * DMA pool.  Allocates from generic dma_pool_alloc function.
  *
  * Returns:
  *   pointer to the allocated nvmet_buf on success
 {
        void *ret;
 
-       ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+       ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
        return ret;
 }
 
 void
 lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
 {
-       pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+       dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
 }
 
 /**
        if (!hbqbp)
                return NULL;
 
-       hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+       hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
                                          &hbqbp->dbuf.phys);
        if (!hbqbp->dbuf.virt) {
                kfree(hbqbp);
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-       pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+       dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
        kfree(hbqbp);
        return;
 }
        if (!dma_buf)
                return NULL;
 
-       dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+       dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
                                            &dma_buf->hbuf.phys);
        if (!dma_buf->hbuf.virt) {
                kfree(dma_buf);
                return NULL;
        }
-       dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+       dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
                                            &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+               dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
 void
 lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
 {
-       pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-       pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+       dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+       dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
 }
 
        if (!dma_buf)
                return NULL;
 
-       dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+       dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
                                            &dma_buf->hbuf.phys);
        if (!dma_buf->hbuf.virt) {
                kfree(dma_buf);
                return NULL;
        }
-       dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+       dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
                                            GFP_KERNEL, &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+               dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-       pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-       pci_pool_free(phba->lpfc_nvmet_drb_pool,
+       dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+       dma_pool_free(phba->lpfc_nvmet_drb_pool,
                      dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
 }
 
                 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
                 * necessary to support the sg_tablesize.
                 */
-               psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+               psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
                                        GFP_KERNEL, &psb->dma_handle);
                if (!psb->data) {
                        kfree(psb);
                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
-                       pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                      psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                 * for the struct fcp_cmnd, struct fcp_rsp and the number
                 * of bde's necessary to support the sg_tablesize.
                 */
-               psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+               psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
                                                GFP_KERNEL, &psb->dma_handle);
                if (!psb->data) {
                        kfree(psb);
                 */
                if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
                    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
-                       pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                      psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
 
                lxri = lpfc_sli4_next_xritag(phba);
                if (lxri == NO_XRI) {
-                       pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                      psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
-                       pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                      psb->data, psb->dma_handle);
                        kfree(psb);
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,