www.infradead.org Git - users/hch/block.git/commitdiff
nvme: set max_hw_sectors unconditionally
author: Christoph Hellwig <hch@lst.de>
Mon, 26 Feb 2024 17:52:33 +0000 (12:52 -0500)
committer: Christoph Hellwig <hch@lst.de>
Thu, 29 Feb 2024 15:00:27 +0000 (07:00 -0800)
All transports set a max_hw_sectors value in the nvme_ctrl, so make
the code using it unconditional and clean it up using a little helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
drivers/nvme/host/core.c

index eed3e22e24d913ee91d9b23550504d1dbb931c0a..74cd384ca5fc73b2a7f3d262e5996ca79bfe397c 100644 (file)
@@ -1944,19 +1944,19 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
        return 0;
 }
 
+static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
+{
+       return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
+}
+
 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                struct request_queue *q)
 {
        bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
 
-       if (ctrl->max_hw_sectors) {
-               u32 max_segments =
-                       (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
-
-               max_segments = min_not_zero(max_segments, ctrl->max_segments);
-               blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
-               blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
-       }
+       blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+       blk_queue_max_segments(q, min_t(u32, USHRT_MAX,
+               min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)));
        blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
        blk_queue_dma_alignment(q, 3);
        blk_queue_write_cache(q, vwc, vwc);