www.infradead.org Git - nvme.git/commitdiff
block: Generalize chunk_sectors support as boundary support
authorJohn Garry <john.g.garry@oracle.com>
Thu, 20 Jun 2024 12:53:51 +0000 (12:53 +0000)
committerJens Axboe <axboe@kernel.dk>
Thu, 20 Jun 2024 21:19:17 +0000 (15:19 -0600)
The purpose of the chunk_sectors limit is to ensure that a mergeable request
fits within the boundary of the chunk_sectors value.

Such a feature will be useful for other request_queue boundary limits, so
generalize the chunk_sectors merge code.

This idea was proposed by Hannes Reinecke.

Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20240620125359.2684798-3-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-merge.c
drivers/md/dm.c
include/linux/blkdev.h

index 8957e08e020c2d04bda71a16a2ff0e3388524910..68969e27c8319351ae99dd4902222426f65bc651 100644 (file)
@@ -154,6 +154,11 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
        return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
 }
 
+static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim)
+{
+       return lim->chunk_sectors;
+}
+
 /*
  * Return the maximum number of sectors from the start of a bio that may be
  * submitted as a single request to a block device. If enough sectors remain,
@@ -167,12 +172,13 @@ static inline unsigned get_max_io_size(struct bio *bio,
 {
        unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
        unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
+       unsigned boundary_sectors = blk_boundary_sectors(lim);
        unsigned max_sectors = lim->max_sectors, start, end;
 
-       if (lim->chunk_sectors) {
+       if (boundary_sectors) {
                max_sectors = min(max_sectors,
-                       blk_chunk_sectors_left(bio->bi_iter.bi_sector,
-                                              lim->chunk_sectors));
+                       blk_boundary_sectors_left(bio->bi_iter.bi_sector,
+                                             boundary_sectors));
        }
 
        start = bio->bi_iter.bi_sector & (pbs - 1);
@@ -588,19 +594,21 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
                                                  sector_t offset)
 {
        struct request_queue *q = rq->q;
-       unsigned int max_sectors;
+       struct queue_limits *lim = &q->limits;
+       unsigned int max_sectors, boundary_sectors;
 
        if (blk_rq_is_passthrough(rq))
                return q->limits.max_hw_sectors;
 
+       boundary_sectors = blk_boundary_sectors(lim);
        max_sectors = blk_queue_get_max_sectors(rq);
 
-       if (!q->limits.chunk_sectors ||
+       if (!boundary_sectors ||
            req_op(rq) == REQ_OP_DISCARD ||
            req_op(rq) == REQ_OP_SECURE_ERASE)
                return max_sectors;
        return min(max_sectors,
-                  blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
+                  blk_boundary_sectors_left(offset, boundary_sectors));
 }
 
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
index 8a976cee448bed4c454eb5f6fb94aa9463de4ecd..7d107ae06e1ae17e53898d1633b8020432c90c82 100644 (file)
@@ -1188,7 +1188,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
                return len;
        return min_t(sector_t, len,
                min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
-                   blk_chunk_sectors_left(target_offset, max_granularity)));
+                   blk_boundary_sectors_left(target_offset, max_granularity)));
 }
 
 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
index 0e8253c1507a51c4fc388ef20fec756c099c607a..fb7d4c21bba87fe8b1647a109279aee3963e2750 100644 (file)
@@ -907,14 +907,15 @@ static inline bool bio_straddles_zones(struct bio *bio)
 }
 
 /*
- * Return how much of the chunk is left to be used for I/O at a given offset.
+ * Return how much within the boundary is left to be used for I/O at a given
+ * offset.
  */
-static inline unsigned int blk_chunk_sectors_left(sector_t offset,
-               unsigned int chunk_sectors)
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+               unsigned int boundary_sectors)
 {
-       if (unlikely(!is_power_of_2(chunk_sectors)))
-               return chunk_sectors - sector_div(offset, chunk_sectors);
-       return chunk_sectors - (offset & (chunk_sectors - 1));
+       if (unlikely(!is_power_of_2(boundary_sectors)))
+               return boundary_sectors - sector_div(offset, boundary_sectors);
+       return boundary_sectors - (offset & (boundary_sectors - 1));
 }
 
 /**