Revert "block: Check for gaps on front and back merges"
author    Ashok Vairavan <ashok.vairavan@oracle.com>
          Sun, 4 Feb 2018 01:34:38 +0000 (17:34 -0800)
committer Jack Vogel <jack.vogel@oracle.com>
          Wed, 7 Feb 2018 18:06:20 +0000 (10:06 -0800)
This reverts commit 50f113a81b852e45ead0d4b0fd5d79e96530f643.

In UEK4, the decision was made to switch completely to
queue_virt_boundary and drop the use of the SG_GAPS flag. Hence,
this partial port of the new queue-limit mask is reverted. The
partial patch is also broken for stacked devices.
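
For context, the check gated by QUEUE_FLAG_SG_GAPS is a fixed
page-alignment test: two bio_vecs may only merge if the first ends on
the boundary and the second starts at offset zero, since a hole in
between would cost an extra scatter/gather segment the hardware cannot
express. queue_virt_boundary generalizes the same arithmetic by letting
the driver supply the mask. A minimal userspace sketch of that test
(the struct is trimmed to the two fields the check reads, and the mask
parameter is added here for illustration; it is hard-coded to
PAGE_SIZE - 1 in the SG_GAPS-era bvec_gap_to_prev()):

    #include <stdbool.h>
    #include <stdio.h>

    struct bio_vec {                /* trimmed: offset/len only */
            unsigned int bv_offset;
            unsigned int bv_len;
    };

    /* No gap iff prev ends on the boundary and next starts at 0. */
    static bool gap_to_prev(const struct bio_vec *prev,
                            unsigned int next_offset,
                            unsigned long boundary_mask)
    {
            if (!boundary_mask)     /* no boundary set: never a gap */
                    return false;
            return next_offset ||
                   ((prev->bv_offset + prev->bv_len) & boundary_mask);
    }

    int main(void)
    {
            struct bio_vec prev = { .bv_offset = 0, .bv_len = 4096 };

            /* prev ends exactly on a 4 KiB boundary: no gap */
            printf("%d\n", gap_to_prev(&prev, 0, 4095));   /* 0 */
            /* next starts mid-window: gap, reject the merge */
            printf("%d\n", gap_to_prev(&prev, 512, 4095)); /* 1 */
            return 0;
    }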

Orabug: 27484719

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Kyle Fortin <kyle.fortin@oracle.com>
block/blk-merge.c
include/linux/blkdev.h

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 9df4e020fe47e7e15b92f30ab8bf6490371cff12..fd3fee81c23ce2f1cdc73d2bf4e76188c90358cb 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -312,8 +312,6 @@ no_merge:
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
 {
-       if (req_gap_back_merge(req, bio))
-               return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
@@ -332,9 +330,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
-
-       if (req_gap_front_merge(req, bio))
-               return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
@@ -361,6 +356,14 @@ static bool req_no_special_merge(struct request *req)
        return !q->mq_ops && req->special;
 }
 
+static int req_gap_to_prev(struct request *req, struct request *next)
+{
+       struct bio *prev = req->biotail;
+
+       return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
+                               next->bio->bi_io_vec[0].bv_offset);
+}
+
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
 {
@@ -375,7 +378,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;
 
-       if (req_gap_back_merge(req, next->bio))
+       if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
+           req_gap_to_prev(req, next))
                return 0;
 
        /*
@@ -585,6 +589,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
            !blk_write_same_mergeable(rq->bio, bio))
                return false;
 
+       if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+               struct bio_vec *bprev;
+
+               bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
+               if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+                       return false;
+       }
+
        return true;
 }
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 68c78da09c5ee2e942b6ddbf29724f8f7010ff74..55db2417916a4e7c3bd708dbed7792acee3d05fb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1430,29 +1430,6 @@ static inline void put_dev_sector(Sector p)
        page_cache_release(p.v);
 }
 
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
-                        struct bio *next)
-{
-       if (!bio_has_data(prev))
-               return false;
-
-       if (!test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags))
-               return false;
-
-       return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
-                               next->bi_io_vec[0].bv_offset);
-}
-
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
-{
-       return bio_will_gap(req->q, req->biotail, bio);
-}
-
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
-{
-       return bio_will_gap(req->q, bio, req->bio);
-}
-
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
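
For reference, the driver-side opt-in differs between the two schemes.
A sketch of the two call styles (q and dev->page_size stand in for a
driver's own queue and granularity; NVMe-style drivers, for example,
pass their device page size minus one):

    /* Old scheme: set the flag; the gap boundary is implicitly
     * PAGE_SIZE, as hard-coded in bvec_gap_to_prev(). */
    queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);

    /* New scheme (the UEK4 direction): the driver passes the
     * boundary mask explicitly. */
    blk_queue_virt_boundary(q, dev->page_size - 1);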