int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
- if (req_gap_back_merge(req, bio))
- return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;

int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
-
- if (req_gap_front_merge(req, bio))
- return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;
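
The two hunks above drop the req_gap_back_merge()/req_gap_front_merge() calls from the bio back- and front-merge paths, leaving only the size gate shown in their trailing context; the context lines that follow (ending at "return !q->mq_ops && req->special;") are the tail of req_no_special_merge(), leading into the next hunk. A minimal stand-alone sketch of that remaining size gate, under assumed names (sketch_merge_within_limit and plain sector counts stand in for the kernel's blk_rq_sectors()/bio_sectors()/blk_rq_get_max_sectors() on struct request and struct bio):

#include <stdbool.h>

/*
 * Hedged sketch, not kernel code: a bio may only be merged into an
 * existing request while the combined size stays within the queue's
 * max-sectors limit; past that the request is marked REQ_NOMERGE so
 * later bios do not retry the merge.
 */
static bool sketch_merge_within_limit(unsigned int req_sectors,
                                      unsigned int bio_sectors,
                                      unsigned int max_sectors)
{
        return req_sectors + bio_sectors <= max_sectors;
}
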
return !q->mq_ops && req->special;
}
+static int req_gap_to_prev(struct request *req, struct request *next)
+{
+ struct bio *prev = req->biotail;
+
+ return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
+ next->bio->bi_io_vec[0].bv_offset);
+}
+
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
if (req_no_special_merge(req) || req_no_special_merge(next))
return 0;
- if (req_gap_back_merge(req, next->bio))
+ if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
+ req_gap_to_prev(req, next))
return 0;
/*
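
The hunk above restores req_gap_to_prev() and makes the request-to-request merge in ll_merge_requests_fn() honour QUEUE_FLAG_SG_GAPS again. The rule bvec_gap_to_prev() enforces in this SG_GAPS-era code is that two segments may share a scatter-gather list without a hole only if the earlier segment ends on a page boundary and the later one starts at offset zero. A minimal stand-alone sketch of that rule, with a hypothetical sketch_bvec type and an assumed 4 KiB page in place of the kernel's struct bio_vec and PAGE_SIZE:

#include <stdbool.h>

#define SKETCH_PAGE_SIZE 4096u  /* assumed page size, for illustration only */

struct sketch_bvec {
        unsigned int bv_offset; /* start of the segment within its page */
        unsigned int bv_len;    /* length of the segment in bytes */
};

/*
 * Hedged sketch of the gap test: report a gap (merge must be refused)
 * unless the previous segment ends exactly on a page boundary and the
 * next segment begins at offset zero within its own page.
 */
static bool sketch_gap_to_prev(const struct sketch_bvec *prev,
                               unsigned int next_offset)
{
        return next_offset ||
               ((prev->bv_offset + prev->bv_len) & (SKETCH_PAGE_SIZE - 1));
}

The leading context of the next hunk ("!blk_write_same_mergeable(rq->bio, bio))" and the return false under it) is the tail of the WRITE_SAME check in blk_rq_merge_ok().
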
!blk_write_same_mergeable(rq->bio, bio))
return false;
+ if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+ struct bio_vec *bprev;
+
+ bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
+ if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+ return false;
+ }
+
return true;
}
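
The blk_rq_merge_ok() hunk above applies the same test when a bio is a candidate for merging into an existing request: the last bio_vec of the request's tail bio is checked against the offset of the candidate bio's first bio_vec. A small worked example of the sketch above (hypothetical names, assumed 4 KiB page), self-contained so it can be compiled on its own:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u  /* assumed page size, for illustration only */

struct sketch_bvec {
        unsigned int bv_offset;
        unsigned int bv_len;
};

static bool sketch_gap_to_prev(const struct sketch_bvec *prev,
                               unsigned int next_offset)
{
        return next_offset ||
               ((prev->bv_offset + prev->bv_len) & (SKETCH_PAGE_SIZE - 1));
}

int main(void)
{
        /* Tail segment runs right up to a page boundary and the next
         * segment starts at offset 0: no gap, the merge could stand. */
        struct sketch_bvec full_tail = { .bv_offset = 0, .bv_len = 4096 };
        printf("full tail, next at 0  -> gap: %d\n",
               sketch_gap_to_prev(&full_tail, 0));

        /* Tail segment stops mid-page: merging would leave a hole in
         * the scatter-gather list, so the merge must be rejected. */
        struct sketch_bvec short_tail = { .bv_offset = 0, .bv_len = 512 };
        printf("short tail, next at 0 -> gap: %d\n",
               sketch_gap_to_prev(&short_tail, 0));

        return 0;
}

The remaining hunk below is against the header that declares these inline helpers (include/linux/blkdev.h in this era); its leading context lines around "page_cache_release(p.v);" are the body of put_dev_sector(), after which bio_will_gap() and the req_gap_*() wrappers are removed.
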
page_cache_release(p.v);
}
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
- struct bio *next)
-{
- if (!bio_has_data(prev))
- return false;
-
- if (!test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags))
- return false;
-
- return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
- next->bi_io_vec[0].bv_offset);
-}
-
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, req->biotail, bio);
-}
-
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, bio, req->bio);
-}
-
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);