From: Keith Busch
Date: Thu, 1 Feb 2018 19:37:06 +0000 (-0800)
Subject: block: Replace SG_GAPS with new queue limits mask
X-Git-Tag: v4.1.12-124.31.3~1201
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=000195bde805ffad8b8c9a2c39e3efee721a2ba8;p=users%2Fjedix%2Flinux-maple.git

block: Replace SG_GAPS with new queue limits mask

The SG_GAPS queue flag caused checks for bio vector alignment against
PAGE_SIZE, but the device may have different constraints. This patch
adds a queue limit so a driver with such constraints can set it to
allow requests that would otherwise have been unnecessarily split. The
new gaps check takes the request_queue as a parameter to simplify the
logic around invoking this function.

This new limit makes the queue flag redundant, so it is removed along
with all of its usage. Device-mappers will inherit the correct settings
through blk_stack_limits().
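
As a usage sketch (mirroring the nvme and megaraid_sas changes below),
a driver for PRP-style hardware such as NVMe, which cannot DMA a
scatter/gather list with gaps relative to its page size, now expresses
that constraint as a queue limit instead of a flag:

	blk_queue_virt_boundary(q, ctrl->page_size - 1);

The block layer then refuses to add or merge a bio vector only when
doing so would actually create a gap under that mask.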

Signed-off-by: Keith Busch
Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe
(cherry picked from commit 03100aada96f0645bbcb89aea24c01f02d0ef1fa)

Orabug: 27484719

Conflicts:

	Changes in blk_bio_segment_split() are not picked up from
	03100aada96f0645bbcb89aea24c01f02d0ef1fa as we are not porting
	arbitrary bio size support.

Signed-off-by: Ashok Vairavan
Reviewed-by: Kyle Fortin
---

diff --git a/block/bio.c b/block/bio.c
index f4cbd84b0abf..3b88bd38c973 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -770,8 +770,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
-		    bvec_gap_to_prev(prev, offset))
+		if (bvec_gap_to_prev(q, prev, offset))
 			return 0;
 	}
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fd3fee81c23c..2030ccb6e5df 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -356,12 +356,12 @@ static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
-static int req_gap_to_prev(struct request *req, struct request *next)
+static int req_gap_to_prev(struct request *req, struct bio *next)
 {
 	struct bio *prev = req->biotail;
 
-	return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
-			next->bio->bi_io_vec[0].bv_offset);
+	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+			next->bi_io_vec[0].bv_offset);
 }
 
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
@@ -378,8 +378,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
-	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
-	    req_gap_to_prev(req, next))
+	if (req_gap_to_prev(req, next->bio))
 		return 0;
 
 	/*
@@ -564,8 +563,6 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = rq->q;
-
 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
 
@@ -589,13 +586,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
-	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
-		struct bio_vec *bprev;
-
-		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
-		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
-			return false;
-	}
+	/* Only check gaps if the bio carries data */
+	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
+		return false;
 
 	return true;
 }
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5891a1cba471..5c3b05c3cbe5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -111,6 +111,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 	lim->max_dev_sectors = 0;
@@ -577,6 +578,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
+	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
+					     b->virt_boundary_mask);
 
 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
@@ -814,6 +817,17 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 }
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
+/**
+ * blk_queue_virt_boundary - set boundary rules for bio merging
+ * @q:  the request queue for the device
+ * @mask:  the memory boundary mask
+ **/
+void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
+{
+	q->limits.virt_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_virt_boundary);
+
 /**
  * blk_queue_dma_alignment - set dma length and memory alignment
  * @q:     the request queue for the device
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e411ccba0af6..e7837422929e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1400,14 +1400,6 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
 	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }
 
-static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
-{
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
-}
-
 static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
@@ -1528,11 +1520,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
-	if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
-		queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
-
 	dm_table_set_integrity(t);
 
 	/*
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 92bfaa3f34ed..9ed9f7e9cba4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1059,6 +1059,9 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
+
+	blk_queue_virt_boundary(q, ctrl->page_size - 1);
+
 	blk_queue_write_cache(q, vwc, vwc);
 }
 
@@ -1430,7 +1433,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	if (IS_ERR(ns->queue))
 		goto out_release_instance;
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
 	ns->queue->queuedata = ns;
 
 	ns->ctrl = ctrl;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 25953016116d..27988b311366 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1826,9 +1826,7 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
 
 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
 
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
-#ifdef QUEUE_FLAG_SG_GAPS
-	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, sdev->request_queue);
-#endif
+	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
 }
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0e7d930620a6..7c6e20093757 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -186,15 +186,6 @@ static inline void *bio_data(struct bio *bio)
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
 	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 
-/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
-{
-	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
-}
-
 #define bio_io_error(bio)	bio_endio((bio), -EIO)
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 55db2417916a..0cd7d34b2307 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -319,7 +319,7 @@ struct queue_limits {
 	 * allow extending the structure while preserving ABI.
 	 */
 	UEK_KABI_USE2(1, unsigned int max_dev_sectors, unsigned int unuse)
-	UEK_KABI_RESERVED(2)
+	UEK_KABI_USE(2, unsigned long virt_boundary_mask)
 };
 
 struct request_queue {
@@ -534,7 +534,6 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
 #define QUEUE_FLAG_POLL        23	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC          24	/* Write back caching */
 #define QUEUE_FLAG_FUA         25	/* device supports FUA writes */
@@ -1060,6 +1059,7 @@ extern int blk_queue_dma_drain(struct request_queue *q,
 			       void *buf, unsigned int size);
 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
@@ -1230,6 +1230,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
 	return q->limits.seg_boundary_mask;
 }
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+	return q->limits.virt_boundary_mask;
+}
+
 static inline unsigned int queue_max_sectors(struct request_queue *q)
 {
 	return q->limits.max_sectors;
 }
@@ -1430,6 +1435,19 @@ static inline void put_dev_sector(Sector p)
 	page_cache_release(p.v);
 }
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+				struct bio_vec *bprv, unsigned int offset)
+{
+	if (!queue_virt_boundary(q))
+		return false;
+	return offset ||
+		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
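
For reference, a minimal user-space sketch of the arithmetic behind the
new bvec_gap_to_prev() check; the struct and helper names below are
stand-ins for illustration, not kernel code. With an NVMe-style mask of
page_size - 1 (0xfff for 4K pages), two vectors are gap-free only if the
previous one ends on the boundary and the next one starts at offset 0:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for struct bio_vec; only the fields the check reads. */
	struct vec { unsigned int bv_offset, bv_len; };

	/* Same arithmetic as bvec_gap_to_prev(), with the queue's mask passed in. */
	static bool gap_to_prev(unsigned long mask, const struct vec *prev,
				unsigned int next_offset)
	{
		if (!mask)	/* queue declares no virt boundary constraint */
			return false;
		return next_offset || ((prev->bv_offset + prev->bv_len) & mask);
	}

	int main(void)
	{
		struct vec full_page = { .bv_offset = 0, .bv_len = 4096 };
		struct vec half_page = { .bv_offset = 0, .bv_len = 2048 };

		/* prev ends on the 4K boundary, next starts at 0: mergeable */
		printf("%d\n", gap_to_prev(0xfff, &full_page, 0));	/* 0 */
		/* prev ends mid-page: the device would see a gap, no merge */
		printf("%d\n", gap_to_prev(0xfff, &half_page, 0));	/* 1 */
		/* next vector starts at a nonzero offset: also a gap */
		printf("%d\n", gap_to_prev(0xfff, &full_page, 512));	/* 1 */
		/* mask of 0 (the default limit) disables the check entirely */
		printf("%d\n", gap_to_prev(0, &half_page, 0));		/* 0 */
		return 0;
	}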