blk: [Partial] Replace SG_GAPS with new queue limits mask
author Ashok Vairavan <ashok.vairavan@oracle.com>
Fri, 6 Oct 2017 13:51:15 +0000 (06:51 -0700)
committer Ashok Vairavan <ashok.vairavan@oracle.com>
Mon, 23 Oct 2017 18:24:58 +0000 (11:24 -0700)
Several fixes went in upstream on top of queue_virt_boundary() to
address the gaps issue. However, back-porting the queue_virt_boundary()
API disrupts iSER, storvsc and mpt3sas. Hence, a hybrid approach is
implemented in QU4 for the GAPS functionality: the NVMe driver supports
both queue_virt_boundary() and QUEUE_FLAG_SG_GAPS to facilitate a
smooth transition.
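
A minimal sketch of the transition pattern on the driver side, assuming a
hypothetical setup helper (nothing below is taken from this commit): the
queue keeps advertising the legacy QUEUE_FLAG_SG_GAPS flag for code paths
that still test it, while also publishing the new mask for back-ported
patches that key off queue_virt_boundary().

#include <linux/blkdev.h>

/*
 * Hypothetical transition helper: advertise the gap limitation through
 * both mechanisms so that flag-based and mask-based merge checks reach
 * the same conclusion. The page-sized boundary is only an example value.
 */
static void example_advertise_gap_limit(struct request_queue *q)
{
        queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
        blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}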

Orabug: 26871819

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Kyle Fortin <kyle.fortin@oracle.com>
block/blk-settings.c
drivers/nvme/host/core.c
include/linux/blkdev.h

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5891a1cba4711b1ac6303a94268e61c8e63f8af4..4f16f7fe5f971dead469f8305db2ebdaf5796896 100644
@@ -814,6 +814,17 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 }
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
+/**
+ * blk_queue_virt_boundary - set boundary rules for bio merging
+ * @q:  the request queue for the device
+ * @mask:  the memory boundary mask
+ **/
+void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
+{
+       q->limits.virt_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_virt_boundary);
+
 /**
  * blk_queue_dma_alignment - set dma length and memory alignment
  * @q:     the request queue for the device
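
The kernel-doc above describes the new mask as a boundary rule for bio
merging. As a hedged illustration of the kind of merge-time check the
upstream series layers on top of it (the helper name below is made up, and
whether the merge-path changes themselves are included in this partial
backport is not visible in this diff):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative gap check in the style of the upstream work: two
 * consecutive bio_vecs may share a hardware segment only if the second
 * starts at offset 0 and the first ends on the advertised boundary.
 * A mask of 0 means the queue imposes no boundary restriction.
 */
static bool example_gap_to_prev(struct request_queue *q,
                                struct bio_vec *bprv, unsigned int offset)
{
        if (!queue_virt_boundary(q))
                return false;

        return offset ||
               ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}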
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 92bfaa3f34ed36a49a457aa4c364d7ce849c4b14..b4d340b075f3157bcab4c5697dc5669817b30aec 100644
@@ -1057,6 +1057,12 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
        }
        if (ctrl->stripe_size)
                blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+       /*
+        * Hybrid implementation of blk_queue_virt_boundary and
+        * QUEUE_FLAG_SG_GAPS in UEK4 to pick the upstream patches based on
+        * blk_queue_virt_boundary().
+        */
+       blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
        blk_queue_write_cache(q, vwc, vwc);
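
The mask ctrl->page_size - 1 mirrors the NVMe PRP rule that every PRP entry
after the first must begin at a controller-page-aligned address with zero
offset, so a transfer whose segments leave a gap inside a controller page
cannot be described by a single PRP list. The flag-based half of the hybrid
is not visible in this hunk; per the commit message it presumably remains
elsewhere in the driver so that code still testing QUEUE_FLAG_SG_GAPS keeps
working.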
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 55db2417916a4e7c3bd708dbed7792acee3d05fb..305a4aa8940e90c0918d9c47bb1bddb14c8044cb 100644
@@ -319,7 +319,7 @@ struct queue_limits {
    * allow extending the structure while preserving ABI.
    */
        UEK_KABI_USE2(1, unsigned int max_dev_sectors, unsigned int unuse)
-        UEK_KABI_RESERVED(2)
+        UEK_KABI_USE(2, unsigned long virt_boundary_mask)
 };
 
 struct request_queue {
@@ -1185,6 +1185,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 
 #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
 
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
@@ -1230,6 +1231,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
        return q->limits.seg_boundary_mask;
 }
 
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+       return q->limits.virt_boundary_mask;
+}
+
 static inline unsigned int queue_max_sectors(struct request_queue *q)
 {
        return q->limits.max_sectors;