www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
nvme: fix max_segments integer truncation
author: Christoph Hellwig <hch@lst.de>
Fri, 30 Dec 2016 20:51:50 +0000 (12:51 -0800)
committer: Chuck Anderson <chuck.anderson@oracle.com>
Thu, 1 Jun 2017 20:41:09 +0000 (13:41 -0700)
The block layer uses an unsigned short for max_segments.  The way we
calculate the value for NVMe tends to generate very large 32-bit values,
which after integer truncation may lead to a zero value instead of
the desired outcome.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Jeff Lien <Jeff.Lien@hgst.com>
Tested-by: Jeff Lien <Jeff.Lien@hgst.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 45686b6198bd824f083ff5293f191d78db9d708a)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/nvme/host/core.c

index f022efff03d690bef982afbaa9d65e4108c8d4f0..c8c921e83a531364dda164b5c9d8c9af8863b251 100644 (file)
@@ -843,15 +843,16 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                struct request_queue *q)
 {
        if (ctrl->max_hw_sectors) {
+               u32 max_segments =
+                       (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
-               blk_queue_max_segments(q,
-                       (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
+               blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
        if (ctrl->stripe_size)
                blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
-       blk_queue_virt_boundary(q, ctrl->page_size - 1);
 }
 
 /*