www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Revert "blk: [Partial] Replace SG_GAPGS with new queue limits mask"
author    Ashok Vairavan <ashok.vairavan@oracle.com>
Sun, 4 Feb 2018 00:39:19 +0000 (16:39 -0800)
committer Jack Vogel <jack.vogel@oracle.com>
Wed, 7 Feb 2018 18:06:03 +0000 (10:06 -0800)
This reverts commit 5ba566b455bd5302aff276dca38059202b7be08c.

In UEK4, the decision was made to switch completely to
queue_virt_boundary and to discard the use of the SG_GAPS flag (both
interfaces are sketched below). Hence, this partial port of the new
queue limits mask is reverted. The partial patch is also broken for
stacked devices.

Orabug: 27484719

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Kyle Fortin <kyle.fortin@oracle.com>
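
For context, a minimal sketch of the two interfaces being chosen
between. blk_queue_virt_boundary() is copied from the hunk removed from
block/blk-settings.c below; queue_flag_set_unlocked(),
QUEUE_FLAG_SG_GAPS and the example_driver_set_gap_limit() wrapper are
the pre-4.3 flag-based idiom plus an illustrative helper, not part of
this revert, and the sketch assumes a tree that still carries the
virt_boundary_mask field.

#include <linux/blkdev.h>

/* Mask-based limit from the partial port being reverted: the "no gaps
 * between SG elements" rule becomes an address boundary mask stored in
 * queue_limits. */
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;
}

/* Hypothetical driver setup showing both idioms side by side; a real
 * driver would use one or the other. */
static void example_driver_set_gap_limit(struct request_queue *q,
					 unsigned int device_page_size)
{
	/* Flag-based idiom kept by this revert: says only "no gaps
	 * allowed", with no boundary information. */
	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);

	/* Mask-based idiom, as NVMe does with ctrl->page_size - 1. */
	blk_queue_virt_boundary(q, device_page_size - 1);
}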
block/blk-settings.c
drivers/nvme/host/core.c
include/linux/blkdev.h

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4f16f7fe5f971dead469f8305db2ebdaf5796896..5891a1cba4711b1ac6303a94268e61c8e63f8af4 100644
@@ -814,17 +814,6 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 }
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
-/**
- * blk_queue_virt_boundary - set boundary rules for bio merging
- * @q:  the request queue for the device
- * @mask:  the memory boundary mask
- **/
-void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-{
-       q->limits.virt_boundary_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_virt_boundary);
-
 /**
  * blk_queue_dma_alignment - set dma length and memory alignment
  * @q:     the request queue for the device
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b4d340b075f3157bcab4c5697dc5669817b30aec..92bfaa3f34ed36a49a457aa4c364d7ce849c4b14 100644
@@ -1057,12 +1057,6 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
        }
        if (ctrl->stripe_size)
                blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
-       /*
-        * Hybrid implementation of blk_queue_virt_boundary and
-        * QUEUE_FLAG_SG_GAPS in UEK4 to pick the upstream patches based on
-        * blk_queue_virt_boundary().
-        */
-       blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
        blk_queue_write_cache(q, vwc, vwc);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fc6abbdefe549842939cdd5383699cf908879163..68c78da09c5ee2e942b6ddbf29724f8f7010ff74 100644
@@ -319,7 +319,7 @@ struct queue_limits {
    * allow extending the structure while preserving ABI.
    */
        UEK_KABI_USE2(1, unsigned int max_dev_sectors, unsigned int unuse)
-        UEK_KABI_USE(2, unsigned long virt_boundary_mask)
+        UEK_KABI_RESERVED(2)
 };
 
 struct request_queue {
@@ -1185,7 +1185,6 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 
 #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
 
-extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
@@ -1231,11 +1230,6 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
        return q->limits.seg_boundary_mask;
 }
 
-static inline unsigned long queue_virt_boundary(struct request_queue *q)
-{
-       return q->limits.virt_boundary_mask;
-}
-
 static inline unsigned int queue_max_sectors(struct request_queue *q)
 {
        return q->limits.max_sectors;
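
For completeness, roughly how the accessor removed above is consumed
upstream when bios are merged; this is a sketch of the interface the
complete UEK4 switch adopts (upstream keeps a bvec_gap_to_prev() helper
next to queue_virt_boundary() in include/linux/blkdev.h), not code
touched by this revert.

/* Upstream-style gap check: two bio_vecs may share a segment only if
 * the previous one ends on the boundary and the next one starts at
 * offset 0; otherwise the device would see a gap it cannot cross. */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				    struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;	/* no boundary restriction configured */
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}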