From 5ba566b455bd5302aff276dca38059202b7be08c Mon Sep 17 00:00:00 2001
From: Ashok Vairavan
Date: Fri, 6 Oct 2017 06:51:15 -0700
Subject: [PATCH] blk: [Partial] Replace SG_GAPS with new queue limits mask

Several fixes went in upstream on top of queue_virt_boundary() to
address the SG gaps issue. However, back-porting the full
queue_virt_boundary() API disrupts iSER, storvsc and mpt3sas. Hence, a
hybrid approach is implemented in UEK4 for the GAPS functionality: the
NVMe driver supports both queue_virt_boundary() and QUEUE_FLAG_SG_GAPS
to facilitate a smooth transition.

Orabug: 26871819

Signed-off-by: Ashok Vairavan
Reviewed-by: Martin K. Petersen
Reviewed-by: Kyle Fortin
---
 block/blk-settings.c     | 11 +++++++++++
 drivers/nvme/host/core.c |  6 ++++++
 include/linux/blkdev.h   |  8 +++++++-
 3 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5891a1cba471..4f16f7fe5f97 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -814,6 +814,17 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 }
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
+/**
+ * blk_queue_virt_boundary - set boundary rules for bio merging
+ * @q:  the request queue for the device
+ * @mask:  the memory boundary mask
+ **/
+void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
+{
+	q->limits.virt_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_virt_boundary);
+
 /**
  * blk_queue_dma_alignment - set dma length and memory alignment
  * @q:     the request queue for the device
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 92bfaa3f34ed..b4d340b075f3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1057,6 +1057,12 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 	}
 	if (ctrl->stripe_size)
 		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+	/*
+	 * Hybrid implementation of blk_queue_virt_boundary and
+	 * QUEUE_FLAG_SG_GAPS in UEK4 to pick the upstream patches based on
+	 * blk_queue_virt_boundary().
+	 */
+	blk_queue_virt_boundary(q, ctrl->page_size - 1);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
 	blk_queue_write_cache(q, vwc, vwc);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 55db2417916a..305a4aa8940e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -319,7 +319,7 @@ struct queue_limits {
 	 * allow extending the structure while preserving ABI.
 	 */
 	UEK_KABI_USE2(1, unsigned int max_dev_sectors, unsigned int unuse)
-	UEK_KABI_RESERVED(2)
+	UEK_KABI_USE(2, unsigned long virt_boundary_mask)
 };
 
 struct request_queue {
@@ -1185,6 +1185,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 
 #define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */
 
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
@@ -1230,6 +1231,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
 	return q->limits.seg_boundary_mask;
 }
 
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+	return q->limits.virt_boundary_mask;
+}
+
 static inline unsigned int queue_max_sectors(struct request_queue *q)
 {
 	return q->limits.max_sectors;
-- 
2.50.1
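
For reference, the later upstream fixes that this partial backport
prepares for consume the new mask through a bio/bvec gap check rather
than the binary QUEUE_FLAG_SG_GAPS flag. The sketch below is modeled on
the upstream bvec_gap_to_prev() helper and is not part of this patch;
the example_ prefix marks it as illustrative.

#include <linux/blkdev.h>

/*
 * Illustrative sketch (mirrors the upstream bvec_gap_to_prev() logic):
 * return true if placing a segment starting at @offset immediately
 * after @bprv would leave a virtual-address gap that the device's DMA
 * engine cannot cross.
 */
static inline bool example_bvec_gap_to_prev(struct request_queue *q,
					    struct bio_vec *bprv,
					    unsigned int offset)
{
	/* No boundary mask set: the device tolerates arbitrary SG gaps. */
	if (!queue_virt_boundary(q))
		return false;

	/*
	 * A gap exists unless the new segment starts at offset zero and
	 * the previous segment ends exactly on the boundary mask, i.e.
	 * the two are contiguous with respect to the virtual boundary.
	 */
	return offset ||
	       ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

Compared with the all-or-nothing QUEUE_FLAG_SG_GAPS flag, the mask lets
each driver state exactly which boundary its hardware cares about, e.g.
ctrl->page_size - 1 in nvme_set_queue_limits() above so that NVMe PRP
lists never see a data segment straddling a device page.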