]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
nvme-pci: Remove nvme_setup_prps BUG_ON
authorKeith Busch <keith.busch@intel.com>
Mon, 23 Oct 2017 18:20:19 +0000 (11:20 -0700)
committerAshok Vairavan <ashok.vairavan@oracle.com>
Mon, 23 Oct 2017 18:55:17 +0000 (11:55 -0700)
This patch replaces the invalid nvme SGL kernel panic with a warning,
and returns an appropriate error. The warning will occur only on the
first occurrence, and SGL details will be printed to help debug how the
request was allowed to form.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 86eea2895d11dde9bf43fa2046331e84154e00f4)

Orabug: 26871819

Conflicts:
    Added the macro BLK_STS to get status from nvme_setup_prps.

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Kyle Fortin <kyle.fortin@oracle.com>
Reviewed-by: Ashish Samant <ashish.samant@oracle.com>
drivers/nvme/host/pci.c
include/linux/blk_types.h

index 75956a0a5d0a883cfb3c7835c20436215541534d..e132dca300faeb76d89635491e65daf3e9d1b29b 100644 (file)
@@ -462,7 +462,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
+static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req,
                int total_len)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -480,7 +480,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
 
        length -= (page_size - offset);
        if (length <= 0)
-               return true;
+               return BLK_STS_OK;
 
        dma_len -= (page_size - offset);
        if (dma_len) {
@@ -493,7 +493,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
 
        if (length <= page_size) {
                iod->first_dma = dma_addr;
-               return true;
+               return BLK_STS_OK;
        }
 
        nprps = DIV_ROUND_UP(length, page_size);
@@ -509,7 +509,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
        if (!prp_list) {
                iod->first_dma = dma_addr;
                iod->npages = -1;
-               return false;
+               return BLK_MQ_RQ_QUEUE_BUSY;
        }
        list[0] = prp_list;
        iod->first_dma = prp_dma;
@@ -519,7 +519,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
                        __le64 *old_prp_list = prp_list;
                        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
                        if (!prp_list)
-                               return false;
+                               return BLK_MQ_RQ_QUEUE_BUSY;
                        list[iod->npages++] = prp_list;
                        prp_list[0] = old_prp_list[i - 1];
                        old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -533,13 +533,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
                        break;
                if (dma_len > 0)
                        continue;
-               BUG_ON(dma_len < 0);
+               if (unlikely(dma_len < 0))
+                       goto bad_sgl;
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }
 
-       return true;
+       return BLK_STS_OK;
+
+ bad_sgl:
+       if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
+                               blk_rq_bytes(req), iod->nents)) {
+               for_each_sg(iod->sg, sg, iod->nents, i) {
+                       dma_addr_t phys = sg_phys(sg);
+                       pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+                              "dma_address:%pad dma_length:%d\n", i, &phys,
+                                       sg->offset, sg->length,
+                                       &sg_dma_address(sg),
+                                       sg_dma_len(sg));
+               }
+       }
+       return BLK_MQ_RQ_QUEUE_ERROR;
+
 }
 
 static int nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -560,7 +576,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
        if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
                goto out;
 
-       if (!nvme_setup_prps(dev, req, size))
+       ret = nvme_setup_prps(dev, req, size);
+       if (ret != BLK_STS_OK)
                goto out_unmap;
 
        ret = BLK_MQ_RQ_QUEUE_ERROR;
index 72846bf3491a135289db7da82cd5fcfde777a406..7d4e8a1deefa72ce858e58beb4aa101cb0dc694f 100644 (file)
@@ -17,6 +17,19 @@ struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *, int);
 typedef void (bio_destructor_t) (struct bio *);
 
+typedef u8 __bitwise blk_status_t;
+#define BLK_STS_OK 0
+#define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
+#define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
+#define BLK_STS_NOSPC           ((__force blk_status_t)3)
+#define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
+#define BLK_STS_TARGET          ((__force blk_status_t)5)
+#define BLK_STS_NEXUS           ((__force blk_status_t)6)
+#define BLK_STS_MEDIUM          ((__force blk_status_t)7)
+#define BLK_STS_PROTECTION      ((__force blk_status_t)8)
+#define BLK_STS_RESOURCE        ((__force blk_status_t)9)
+#define BLK_STS_IOERR           ((__force blk_status_t)10)
+
 /*
  * was unsigned short, but we might as well be ready for > 64kB I/O pages
  */