nvme: store a struct device pointer in struct nvme_dev
author     Christoph Hellwig <hch@lst.de>
           Fri, 22 May 2015 09:12:39 +0000 (11:12 +0200)
committer  Chuck Anderson <chuck.anderson@oracle.com>
           Wed, 6 Jul 2016 23:31:42 +0000 (16:31 -0700)
Most users want the generic device, so store that in struct nvme_dev
instead of the pci_dev.  This also happens to be a nice step towards
making some code reusable for non-PCI transports.
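A minimal sketch of the resulting pattern (illustrative only, not part of this
patch; example_probe is a made-up name, and the usual <linux/pci.h> and
<linux/slab.h> headers are assumed): transport-neutral code uses the generic
struct device pointer directly, and PCI-only paths convert back on demand with
to_pci_dev():

	struct nvme_dev {
		struct device *dev;	/* generic device; was struct pci_dev * */
		/* ... */
	};

	static int example_probe(struct pci_dev *pdev)
	{
		struct nvme_dev *dev;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;

		/* hold a reference on the generic device, not the pci_dev */
		dev->dev = get_device(&pdev->dev);

		/* transport-neutral code: no &dev->pci_dev->dev detour */
		dev_info(dev->dev, "probed\n");

		/* PCI-specific paths recover the pci_dev when needed */
		pci_set_drvdata(to_pci_dev(dev->dev), dev);

		return 0;
	}

Teardown mirrors this with put_device(dev->dev), as the patch does in
nvme_free_dev().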

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit e75ec752d725b7b612c0b2db1bca50a9e53c0879)

Orabug: 22620486
Signed-off-by: Jason Luo <zhangqing.luo@oracle.com>
Conflicts:
	drivers/block/nvme-core.c

drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
include/linux/nvme.h

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index a8307c9b26b33c1e8d6f7d20954134696c634611..074021ab04dfb40292208efeb8bd9c01189310aa 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -633,17 +633,17 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                req->errors = 0;
 
        if (cmd_rq->aborted)
-               dev_warn(&nvmeq->dev->pci_dev->dev,
+               dev_warn(nvmeq->dev->dev,
                        "completing aborted command with status:%04x\n",
                        status);
 
        if (iod->nents) {
-               dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
+               dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
                        rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (blk_integrity_rq(req)) {
                        if (!rq_data_dir(req))
                                nvme_dif_remap(req, nvme_dif_complete);
-                       dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+                       dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1,
                                rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                }
        }
@@ -884,7 +884,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
                if (blk_rq_bytes(req) !=
                     nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
-                       dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
+                       dma_unmap_sg(nvmeq->dev->dev, iod->sg,
                                        iod->nents, dma_dir);
                        goto retry_cmd;
                }
@@ -1215,8 +1215,7 @@ static void nvme_abort_req(struct request *req)
                if (work_busy(&dev->reset_work))
                        goto out;
                list_del_init(&dev->node);
-               dev_warn(&dev->pci_dev->dev,
-                       "I/O %d QID %d timeout, reset controller\n",
+               dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
                                                        req->tag, nvmeq->qid);
                dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
@@ -1385,22 +1384,21 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                                                        int depth)
 {
-       struct device *dmadev = &dev->pci_dev->dev;
        struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
        if (!nvmeq)
                return NULL;
 
-       nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth),
+       nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
                                          &nvmeq->cq_dma_addr, GFP_KERNEL);
        if (!nvmeq->cqes)
                goto free_nvmeq;
 
-       nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+       nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
                                        &nvmeq->sq_dma_addr, GFP_KERNEL);
        if (!nvmeq->sq_cmds)
                goto free_cqdma;
 
-       nvmeq->q_dmadev = dmadev;
+       nvmeq->q_dmadev = dev->dev;
        nvmeq->dev = dev;
        snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
                        dev->instance, qid);
@@ -1416,7 +1414,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        return nvmeq;
 
  free_cqdma:
-       dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
+       dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
                                                        nvmeq->cq_dma_addr);
  free_nvmeq:
        kfree(nvmeq);
@@ -1488,7 +1486,7 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
-                       dev_err(&dev->pci_dev->dev,
+                       dev_err(dev->dev,
                                "Device not ready; aborting %s\n", enabled ?
                                                "initialisation" : "reset");
                        return -ENODEV;
@@ -1538,7 +1536,7 @@ static int nvme_shutdown_ctrl(struct nvme_dev *dev)
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
-                       dev_err(&dev->pci_dev->dev,
+                       dev_err(dev->dev,
                                "Device shutdown incomplete; abort shutdown\n");
                        return -ENODEV;
                }
@@ -1581,7 +1579,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
                dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
                dev->admin_tagset.reserved_tags = 1;
                dev->admin_tagset.timeout = ADMIN_TIMEOUT;
-               dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+               dev->admin_tagset.numa_node = dev_to_node(dev->dev);
                dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
                dev->admin_tagset.driver_data = dev;
 
@@ -1615,14 +1613,14 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
        if (page_shift < dev_page_min) {
-               dev_err(&dev->pci_dev->dev,
+               dev_err(dev->dev,
                                "Minimum device page size (%u) too large for "
                                "host (%u)\n", 1 << dev_page_min,
                                1 << page_shift);
                return -ENODEV;
        }
        if (page_shift > dev_page_max) {
-               dev_info(&dev->pci_dev->dev,
+               dev_info(dev->dev,
                                "Device maximum page size (%u) smaller than "
                                "host (%u); enabling work-around\n",
                                1 << dev_page_max, 1 << page_shift);
@@ -1713,7 +1711,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
        sg_mark_end(&sg[i - 1]);
        iod->nents = count;
 
-       nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+       nents = dma_map_sg(dev->dev, sg, count,
                                write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!nents)
                goto free_iod;
@@ -1735,7 +1733,7 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 {
        int i;
 
-       dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+       dma_unmap_sg(dev->dev, iod->sg, iod->nents,
                                write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        for (i = 0; i < iod->nents; i++)
@@ -1789,7 +1787,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
                goto unmap;
        }
        if (meta_len) {
-               meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+               meta = dma_alloc_coherent(dev->dev, meta_len,
                                                &meta_dma, GFP_KERNEL);
 
                if (!meta) {
@@ -1827,7 +1825,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
                        if (copy_to_user(metadata, meta, meta_len))
                                status = -EFAULT;
                }
-               dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
+               dma_free_coherent(dev->dev, meta_len, meta, meta_dma);
        }
        return status;
 }
@@ -1987,21 +1985,20 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        u16 old_ms;
        unsigned short bs;
 
-       id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
-                                                               GFP_KERNEL);
+       id = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
        if (!id) {
-               dev_warn(&dev->pci_dev->dev, "%s: Memory alocation failure\n",
+               dev_warn(dev->dev, "%s: Memory alocation failure\n",
                                                                __func__);
                return -ENODEV;
        }
        if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
-               dev_warn(&dev->pci_dev->dev,
+               dev_warn(dev->dev,
                        "identify failed ns:%d, setting capacity to 0\n",
                        ns->ns_id);
                memset(id, 0, sizeof(*id));
        }
        if (id->ncap == 0) {
-               dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
+               dma_free_coherent(dev->dev, 4096, id, dma_addr);
                return -ENODEV;
        }
 
@@ -2044,7 +2041,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        if (dev->oncs & NVME_CTRL_ONCS_DSM)
                nvme_config_discard(ns);
 
-       dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
+       dma_free_coherent(dev->dev, 4096, id, dma_addr);
        return 0;
 }
 
@@ -2071,7 +2068,7 @@ static int nvme_kthread(void *data)
                                if (work_busy(&dev->reset_work))
                                        continue;
                                list_del_init(&dev->node);
-                               dev_warn(&dev->pci_dev->dev,
+                               dev_warn(dev->dev,
                                        "Failed status: %x, reset controller\n",
                                        readl(&dev->bar->csts));
                                dev->reset_workfn = nvme_reset_failed_dev;
@@ -2103,7 +2100,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
        struct nvme_ns *ns;
        struct gendisk *disk;
-       int node = dev_to_node(&dev->pci_dev->dev);
+       int node = dev_to_node(dev->dev);
 
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
@@ -2191,8 +2188,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
        if (status < 0)
                return status;
        if (status > 0) {
-               dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
-                                                                       status);
+               dev_err(dev->dev, "Could not set queue count (%d)\n", status);
                return 0;
        }
        return min(result & 0xffff, result >> 16) + 1;
@@ -2206,7 +2202,7 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
        struct nvme_queue *adminq = dev->queues[0];
-       struct pci_dev *pdev = dev->pci_dev;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
        int result, i, vecs, nr_io_queues, size;
 
        nr_io_queues = num_possible_cpus();
@@ -2367,15 +2363,15 @@ static void nvme_dev_scan(struct work_struct *work)
        if (!dev->tagset.tags)
                return;
 
-       ctrl = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr, GFP_KERNEL);
+       ctrl = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
        if (!ctrl)
                return;
        if (nvme_identify(dev, 0, 1, dma_addr)) {
-               dma_free_coherent(&dev->pci_dev->dev, 4096, ctrl, dma_addr);
+               dma_free_coherent(dev->dev, 4096, ctrl, dma_addr);
                return;
        }
        nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
-       dma_free_coherent(&dev->pci_dev->dev, 4096, ctrl, dma_addr);
+       dma_free_coherent(dev->dev, 4096, ctrl, dma_addr);
 }
 
 /*
@@ -2386,21 +2382,21 @@ static void nvme_dev_scan(struct work_struct *work)
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
-       struct pci_dev *pdev = dev->pci_dev;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
        int res;
        struct nvme_id_ctrl *ctrl;
        void *mem;
        dma_addr_t dma_addr;
        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-       mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
+       mem = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
 
        res = nvme_identify(dev, 0, 1, dma_addr);
        if (res) {
-               dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-               dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+               dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
+               dma_free_coherent(dev->dev, 4096, mem, dma_addr);
                return -EIO;
        }
 
@@ -2428,13 +2424,13 @@ static int nvme_dev_add(struct nvme_dev *dev)
                } else
                        dev->max_hw_sectors = max_hw_sectors;
        }
-       dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+       dma_free_coherent(dev->dev, 4096, mem, dma_addr);
 
        if (!dev->tagset.tags) {
                dev->tagset.ops = &nvme_mq_ops;
                dev->tagset.nr_hw_queues = dev->online_queues - 1;
                dev->tagset.timeout = NVME_IO_TIMEOUT;
-               dev->tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+               dev->tagset.numa_node = dev_to_node(dev->dev);
                dev->tagset.queue_depth =
                                min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
                dev->tagset.cmd_size = nvme_cmd_size(dev);
@@ -2452,7 +2448,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 {
        u64 cap;
        int bars, result = -ENOMEM;
-       struct pci_dev *pdev = dev->pci_dev;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        if (pci_enable_device_mem(pdev))
                return result;
@@ -2466,8 +2462,8 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (pci_request_selected_regions(pdev, bars, "nvme"))
                goto disable_pci;
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+       if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
+           dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
                goto disable;
 
        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2508,19 +2504,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
-       if (dev->pci_dev->msi_enabled)
-               pci_disable_msi(dev->pci_dev);
-       else if (dev->pci_dev->msix_enabled)
-               pci_disable_msix(dev->pci_dev);
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
+       else if (pdev->msix_enabled)
+               pci_disable_msix(pdev);
 
        if (dev->bar) {
                iounmap(dev->bar);
                dev->bar = NULL;
-               pci_release_regions(dev->pci_dev);
+               pci_release_regions(pdev);
        }
 
-       if (pci_is_enabled(dev->pci_dev))
-               pci_disable_device(dev->pci_dev);
+       if (pci_is_enabled(pdev))
+               pci_disable_device(pdev);
 }
 
 struct nvme_delq_ctx {
@@ -2639,7 +2637,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
                                        &worker, "nvme%d", dev->instance);
 
        if (IS_ERR(kworker_task)) {
-               dev_err(&dev->pci_dev->dev,
+               dev_err(dev->dev,
                        "Failed to create queue del task\n");
                for (i = dev->queue_count - 1; i > 0; i--)
                        nvme_disable_queue(dev, i);
@@ -2748,14 +2746,13 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 
 static int nvme_setup_prp_pools(struct nvme_dev *dev)
 {
-       struct device *dmadev = &dev->pci_dev->dev;
-       dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+       dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
                                                PAGE_SIZE, PAGE_SIZE, 0);
        if (!dev->prp_page_pool)
                return -ENOMEM;
 
        /* Optimisation for I/Os between 4k and 128k */
-       dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+       dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
                                                256, 256, 0);
        if (!dev->prp_small_pool) {
                dma_pool_destroy(dev->prp_page_pool);
@@ -2811,7 +2808,7 @@ static void nvme_free_dev(struct kref *kref)
 {
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
-       pci_dev_put(dev->pci_dev);
+       put_device(dev->dev);
        put_device(dev->device);
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
@@ -2870,7 +2867,7 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
                return nvme_user_cmd(dev, ns, (void __user *)arg);
        case NVME_IOCTL_RESET:
-               dev_warn(&dev->pci_dev->dev, "resetting controller\n");
+               dev_warn(dev->dev, "resetting controller\n");
                return nvme_reset(dev);
        default:
                return -ENOTTY;
@@ -2962,7 +2959,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 static int nvme_remove_dead_ctrl(void *arg)
 {
        struct nvme_dev *dev = (struct nvme_dev *)arg;
-       struct pci_dev *pdev = dev->pci_dev;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        if (pci_get_drvdata(pdev))
                pci_stop_and_remove_bus_device_locked(pdev);
@@ -3000,11 +2997,11 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 
 static void nvme_dead_ctrl(struct nvme_dev *dev)
 {
-       dev_warn(&dev->pci_dev->dev, "Device failed to resume\n");
+       dev_warn(dev->dev, "Device failed to resume\n");
        kref_get(&dev->kref);
        if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
                                                dev->instance))) {
-               dev_err(&dev->pci_dev->dev,
+               dev_err(dev->dev,
                        "Failed to start controller remove task\n");
                kref_put(&dev->kref, nvme_free_dev);
        }
@@ -3107,7 +3104,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        INIT_LIST_HEAD(&dev->namespaces);
        dev->reset_workfn = nvme_reset_failed_dev;
        INIT_WORK(&dev->reset_work, nvme_reset_workfn);
-       dev->pci_dev = pci_dev_get(pdev);
+       dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
        result = nvme_set_instance(dev);
        if (result)
@@ -3146,7 +3143,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  release:
        nvme_release_instance(dev);
  put_pci:
-       pci_dev_put(dev->pci_dev);
+       put_device(dev->dev);
  free:
        kfree(dev->queues);
        kfree(dev->entry);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 705b2b784a86690acc1b88e5d037046f2b9b4694..33a3ecd5971fbdafe61f75ccf505b1ea2030d1f5 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -684,7 +684,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
        u8 cmdque = 0x01 << 1;
        u8 fw_offset = sizeof(dev->firmware_rev);
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
                                &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -728,8 +728,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out_dma:
        return res;
 }
@@ -787,7 +786,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        int xfer_len;
        __be32 tmp_id = cpu_to_be32(ns->ns_id);
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
                                        &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -842,7 +841,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                inq_response[6] = 0x00;    /* Rsvd */
                inq_response[7] = 0x44;    /* Designator Length */
 
-               sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+               sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
                memcpy(&inq_response[12], dev->model, sizeof(dev->model));
                sprintf(&inq_response[52], "%04x", tmp_id);
                memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
@@ -851,8 +850,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out_dma:
        return res;
 }
@@ -883,7 +881,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                goto out_mem;
        }
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
                                                        &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -933,8 +931,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out_dma:
        kfree(inq_response);
  out_mem:
@@ -1039,8 +1036,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
                goto out_mem;
        }
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev,
-                                       sizeof(struct nvme_smart_log),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
                                        &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -1078,7 +1074,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
        xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
        res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
 
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+       dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
                          mem, dma_addr);
  out_dma:
        kfree(log_response);
@@ -1107,8 +1103,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                goto out_mem;
        }
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev,
-                                       sizeof(struct nvme_smart_log),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
                                        &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -1159,7 +1154,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
        res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
 
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+       dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
                          mem, dma_addr);
  out_dma:
        kfree(log_response);
@@ -1210,7 +1205,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
                return SNTI_INTERNAL_ERROR;
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
                                                        &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -1247,8 +1242,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        }
 
  out_dma:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out:
        return res;
 }
@@ -1495,8 +1489,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        unsigned ps_desired = 0;
 
        /* NVMe Controller Identify */
-       mem = dma_alloc_coherent(&dev->pci_dev->dev,
-                               sizeof(struct nvme_id_ctrl),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
                                &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -1557,8 +1550,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        if (nvme_sc)
                res = nvme_sc;
  out_dma:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
  out:
        return res;
 }
@@ -1821,7 +1813,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
         */
 
        if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
-               mem = dma_alloc_coherent(&dev->pci_dev->dev,
+               mem = dma_alloc_coherent(dev->dev,
                        sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
                if (mem == NULL) {
                        res = -ENOMEM;
@@ -1846,7 +1838,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
                                                (1 << (id_ns->lbaf[flbas].ds));
                }
  out_dma:
-               dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+               dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns),
                                  mem, dma_addr);
        }
  out:
@@ -1929,7 +1921,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        struct nvme_command c;
 
        /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
                                                        &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -1980,8 +1972,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                res = nvme_sc;
 
  out_dma:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out:
        return res;
 }
@@ -2487,7 +2478,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                resp_size = READ_CAP_16_RESP_SIZE;
        }
 
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+       mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
                                                        &dma_addr, GFP_KERNEL);
        if (mem == NULL) {
                res = -ENOMEM;
@@ -2516,8 +2507,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
        kfree(response);
  out_dma:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
  out:
        return res;
 }
@@ -2550,8 +2540,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                goto out;
        } else {
                /* NVMe Controller Identify */
-               mem = dma_alloc_coherent(&dev->pci_dev->dev,
-                                       sizeof(struct nvme_id_ctrl),
+               mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
                                        &dma_addr, GFP_KERNEL);
                if (mem == NULL) {
                        res = -ENOMEM;
@@ -2602,8 +2591,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
        kfree(response);
  out_dma:
-       dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
-                         dma_addr);
+       dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
  out:
        return res;
 }
@@ -2915,7 +2903,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                goto out;
        }
 
-       range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+       range = dma_alloc_coherent(dev->dev, ndesc * sizeof(*range),
                                                        &dma_addr, GFP_KERNEL);
        if (!range)
                goto out;
@@ -2936,8 +2924,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
        res = nvme_trans_status_code(hdr, nvme_sc);
 
-       dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
-                                                       range, dma_addr);
+       dma_free_coherent(dev->dev, ndesc * sizeof(*range), range, dma_addr);
  out:
        kfree(plist);
        return res;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c9bd2b971220a23ee246606c9093cf234503fa5b..1ee62006736052321fcda4f2e72bcbdc4aea4b3d 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -74,7 +74,7 @@ struct nvme_dev {
        struct blk_mq_tag_set tagset;
        struct blk_mq_tag_set admin_tagset;
        u32 __iomem *dbs;
-       struct pci_dev *pci_dev;
+       struct device *dev;
        struct dma_pool *prp_page_pool;
        struct dma_pool *prp_small_pool;
        int instance;
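
For reference (not part of the patch): to_pci_dev() is just the container_of()
projection from the embedded generic device, and pci_dev_get()/pci_dev_put()
are thin wrappers around the same generic refcounting, so switching to
get_device()/put_device() leaves the lifetime rules unchanged. Roughly, as the
PCI core defines them (simplified):

	/* include/linux/pci.h */
	#define to_pci_dev(n) container_of(n, struct pci_dev, dev)

	/* PCI core (simplified) */
	struct pci_dev *pci_dev_get(struct pci_dev *dev)
	{
		if (dev)
			get_device(&dev->dev);
		return dev;
	}

	void pci_dev_put(struct pci_dev *dev)
	{
		if (dev)
			put_device(&dev->dev);
	}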