From 2f5a65ef30a636d5030917eebd283ac447a212af Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 29 Oct 2024 15:19:37 +0100 Subject: [PATCH 01/16] block: add a bdev_limits helper Add a helper to get the queue_limits from the bdev without having to poke into the request_queue. Signed-off-by: Christoph Hellwig Reviewed-by: John Garry Link: https://lore.kernel.org/r/20241029141937.249920-1-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-merge.c | 3 +-- block/blk-settings.c | 2 +- drivers/md/dm-cache-target.c | 4 ++-- drivers/md/dm-clone-target.c | 4 ++-- drivers/md/dm-thin.c | 2 +- fs/btrfs/zoned.c | 7 ++----- include/linux/blkdev.h | 15 ++++++++++----- 7 files changed, 19 insertions(+), 18 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index 8b9a9646aed8..d813d799cee7 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -411,10 +411,9 @@ struct bio *bio_split_zone_append(struct bio *bio, */ struct bio *bio_split_to_limits(struct bio *bio) { - const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits; unsigned int nr_segs; - return __bio_split_to_limits(bio, lim, &nr_segs); + return __bio_split_to_limits(bio, bdev_limits(bio->bi_bdev), &nr_segs); } EXPORT_SYMBOL(bio_split_to_limits); diff --git a/block/blk-settings.c b/block/blk-settings.c index a446654ddee5..95fc39d09872 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -661,7 +661,7 @@ EXPORT_SYMBOL(blk_stack_limits); void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev, sector_t offset, const char *pfx) { - if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits, + if (blk_stack_limits(t, bdev_limits(bdev), get_start_sect(bdev) + offset)) pr_notice("%s: Warning: Device %pg is misaligned\n", pfx, bdev); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index aaeeabfab09b..c4520ff7fe1a 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -3360,7 +3360,7 @@ static int cache_iterate_devices(struct dm_target *ti, static void disable_passdown_if_not_supported(struct cache *cache) { struct block_device *origin_bdev = cache->origin_dev->bdev; - struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; + struct queue_limits *origin_limits = bdev_limits(origin_bdev); const char *reason = NULL; if (!cache->features.discard_passdown) @@ -3382,7 +3382,7 @@ static void disable_passdown_if_not_supported(struct cache *cache) static void set_discard_limits(struct cache *cache, struct queue_limits *limits) { struct block_device *origin_bdev = cache->origin_dev->bdev; - struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; + struct queue_limits *origin_limits = bdev_limits(origin_bdev); if (!cache->features.discard_passdown) { /* No passdown is done so setting own virtual limits */ diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 12bbe487a4c8..e956d980672c 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2020,7 +2020,7 @@ static void clone_resume(struct dm_target *ti) static void disable_passdown_if_not_supported(struct clone *clone) { struct block_device *dest_dev = clone->dest_dev->bdev; - struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits; + struct queue_limits *dest_limits = bdev_limits(dest_dev); const char *reason = NULL; if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) @@ -2041,7 +2041,7 @@ static void disable_passdown_if_not_supported(struct clone *clone) static void set_discard_limits(struct 
clone *clone, struct queue_limits *limits) { struct block_device *dest_bdev = clone->dest_dev->bdev; - struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits; + struct queue_limits *dest_limits = bdev_limits(dest_bdev); if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) { /* No passdown is done so we set our own virtual limits */ diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 89632ce97760..9095f19a84f3 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2842,7 +2842,7 @@ static void disable_discard_passdown_if_not_supported(struct pool_c *pt) { struct pool *pool = pt->pool; struct block_device *data_bdev = pt->data_dev->bdev; - struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; + struct queue_limits *data_limits = bdev_limits(data_bdev); const char *reason = NULL; if (!pt->adjusted_pf.discard_passdown) diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 69d03feea4e0..46b9386957e6 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -707,11 +707,8 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info) * zoned mode. In this case, we don't have a valid max zone * append size. */ - if (bdev_is_zoned(device->bdev)) { - blk_stack_limits(lim, - &bdev_get_queue(device->bdev)->limits, - 0); - } + if (bdev_is_zoned(device->bdev)) + blk_stack_limits(lim, bdev_limits(device->bdev), 0); } /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d0a52ed05e60..7bfc877e159e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1159,6 +1159,11 @@ enum blk_default_limits { */ #define BLK_DEF_MAX_SECTORS_CAP 2560u +static inline struct queue_limits *bdev_limits(struct block_device *bdev) +{ + return &bdev_get_queue(bdev)->limits; +} + static inline unsigned long queue_segment_boundary(const struct request_queue *q) { return q->limits.seg_boundary_mask; @@ -1293,23 +1298,23 @@ unsigned int bdev_discard_alignment(struct block_device *bdev); static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_discard_sectors; + return bdev_limits(bdev)->max_discard_sectors; } static inline unsigned int bdev_discard_granularity(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.discard_granularity; + return bdev_limits(bdev)->discard_granularity; } static inline unsigned int bdev_max_secure_erase_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; + return bdev_limits(bdev)->max_secure_erase_sectors; } static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors; + return bdev_limits(bdev)->max_write_zeroes_sectors; } static inline bool bdev_nonrot(struct block_device *bdev) @@ -1345,7 +1350,7 @@ static inline bool bdev_write_cache(struct block_device *bdev) static inline bool bdev_fua(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA; + return bdev_limits(bdev)->features & BLK_FEAT_FUA; } static inline bool bdev_nowait(struct block_device *bdev) -- 2.51.0 From 8d3fd059dd289e6c322e5741ad56794bcce699a2 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 30 Oct 2024 11:19:00 +0000 Subject: [PATCH 02/16] loop: Use bdev limit helpers for configuring discard Instead of directly looking at the request_queue limits, use the bdev limits helpers, which is preferable. 
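For illustration, here is a minimal sketch (not part of the patch; example_backing_granularity() is a hypothetical function) of the style this change moves to, reading limits through the bdev helpers instead of dereferencing the request_queue:

/*
 * Hypothetical helper, shown only to illustrate the preferred style:
 * query limits via the bdev_* accessors rather than
 * bdev_get_queue(bdev)->limits.
 */
static unsigned int example_backing_granularity(struct block_device *bdev)
{
	/* old style: bdev_get_queue(bdev)->limits.discard_granularity */
	return bdev_discard_granularity(bdev) ?:
		bdev_physical_block_size(bdev);
}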
Signed-off-by: John Garry Link: https://lore.kernel.org/r/20241030111900.3981223-1-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- drivers/block/loop.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 78a7bb28defe..7719858c49bb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -786,11 +786,11 @@ static void loop_config_discard(struct loop_device *lo, * file-backed loop devices: discarded regions read back as zero. */ if (S_ISBLK(inode->i_mode)) { - struct request_queue *backingq = bdev_get_queue(I_BDEV(inode)); + struct block_device *bdev = I_BDEV(inode); - max_discard_sectors = backingq->limits.max_write_zeroes_sectors; - granularity = bdev_discard_granularity(I_BDEV(inode)) ?: - queue_physical_block_size(backingq); + max_discard_sectors = bdev_write_zeroes_sectors(bdev); + granularity = bdev_discard_granularity(bdev) ?: + bdev_physical_block_size(bdev); /* * We use punch hole to reclaim the free space used by the -- 2.51.0 From 826cc42adf44930a633d11a5993676d85ddb0842 Mon Sep 17 00:00:00 2001 From: Yang Erkun Date: Wed, 30 Oct 2024 11:49:14 +0800 Subject: [PATCH 03/16] brd: defer automatic disk creation until module initialization succeeds My colleague Wupeng found the following problems during fault injection: BUG: unable to handle page fault for address: fffffbfff809d073 PGD 6e648067 P4D 123ec8067 PUD 123ec4067 PMD 100e38067 PTE 0 Oops: Oops: 0000 [#1] PREEMPT SMP KASAN NOPTI CPU: 5 UID: 0 PID: 755 Comm: modprobe Not tainted 6.12.0-rc3+ #17 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.1-2.fc37 04/01/2014 RIP: 0010:__asan_load8+0x4c/0xa0 ... Call Trace: blkdev_put_whole+0x41/0x70 bdev_release+0x1a3/0x250 blkdev_release+0x11/0x20 __fput+0x1d7/0x4a0 task_work_run+0xfc/0x180 syscall_exit_to_user_mode+0x1de/0x1f0 do_syscall_64+0x6b/0x170 entry_SYSCALL_64_after_hwframe+0x76/0x7e loop_init() calls loop_add() after __register_blkdev() succeeds and ignores add_disk() failure from loop_add(), since loop_add() failure is not fatal and successfully created disks are already visible to bdev_open(). brd_init() currently calls brd_alloc() before __register_blkdev() succeeds and releases successfully created disks when brd_init() returns an error. This can cause a UAF in the latter two cases: case 1: T1: modprobe brd brd_init brd_alloc(0) // success add_disk disk_scan_partitions bdev_file_open_by_dev // alloc file fput // won't free until back to userspace brd_alloc(1) // failed since mem alloc error inject // error path for modprobe will release code segment // back to userspace __fput blkdev_release bdev_release blkdev_put_whole bdev->bd_disk->fops->release // fops is freed now, UAF! case 2: T1: T2: modprobe brd brd_init brd_alloc(0) // success open(/dev/ram0) brd_alloc(1) // fail // error path for modprobe close(/dev/ram0) ... /* UAF! */ bdev->bd_disk->fops->release Fix this problem by following what loop_init() does. Besides, reintroduce brd_devices_mutex to help serialize modifications to brd_list.
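As a rough sketch of the ordering this patch adopts (illustrative only; it elides brd_check_and_reset_par(), debugfs setup and the real error path), registration now comes first and per-disk failures are tolerated:

/*
 * Simplified sketch of the loop_init()-style ordering: the module only
 * becomes reachable once __register_blkdev() succeeds, and brd_alloc()
 * failures are non-fatal because created disks are already usable.
 */
static int __init example_brd_init(void)
{
	int i;

	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
		return -EIO;

	for (i = 0; i < rd_nr; i++)
		brd_alloc(i);	/* failure here is not fatal */

	return 0;
}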
Fixes: 7f9b348cb5e9 ("brd: convert to blk_alloc_disk/blk_cleanup_disk") Reported-by: Wupeng Ma Signed-off-by: Yang Erkun Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20241030034914.907829-1-yangerkun@huaweicloud.com Signed-off-by: Jens Axboe --- drivers/block/brd.c | 66 ++++++++++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 22 deletions(-) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 2fd1ed101748..5a95671d8151 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -316,8 +316,40 @@ __setup("ramdisk_size=", ramdisk_size); * (should share code eventually). */ static LIST_HEAD(brd_devices); +static DEFINE_MUTEX(brd_devices_mutex); static struct dentry *brd_debugfs_dir; +static struct brd_device *brd_find_or_alloc_device(int i) +{ + struct brd_device *brd; + + mutex_lock(&brd_devices_mutex); + list_for_each_entry(brd, &brd_devices, brd_list) { + if (brd->brd_number == i) { + mutex_unlock(&brd_devices_mutex); + return ERR_PTR(-EEXIST); + } + } + + brd = kzalloc(sizeof(*brd), GFP_KERNEL); + if (!brd) { + mutex_unlock(&brd_devices_mutex); + return ERR_PTR(-ENOMEM); + } + brd->brd_number = i; + list_add_tail(&brd->brd_list, &brd_devices); + mutex_unlock(&brd_devices_mutex); + return brd; +} + +static void brd_free_device(struct brd_device *brd) +{ + mutex_lock(&brd_devices_mutex); + list_del(&brd->brd_list); + mutex_unlock(&brd_devices_mutex); + kfree(brd); +} + static int brd_alloc(int i) { struct brd_device *brd; @@ -340,14 +372,9 @@ static int brd_alloc(int i) BLK_FEAT_NOWAIT, }; - list_for_each_entry(brd, &brd_devices, brd_list) - if (brd->brd_number == i) - return -EEXIST; - brd = kzalloc(sizeof(*brd), GFP_KERNEL); - if (!brd) - return -ENOMEM; - brd->brd_number = i; - list_add_tail(&brd->brd_list, &brd_devices); + brd = brd_find_or_alloc_device(i); + if (IS_ERR(brd)) + return PTR_ERR(brd); xa_init(&brd->brd_pages); @@ -378,8 +405,7 @@ static int brd_alloc(int i) out_cleanup_disk: put_disk(disk); out_free_dev: - list_del(&brd->brd_list); - kfree(brd); + brd_free_device(brd); return err; } @@ -398,8 +424,7 @@ static void brd_cleanup(void) del_gendisk(brd->brd_disk); put_disk(brd->brd_disk); brd_free_pages(brd); - list_del(&brd->brd_list); - kfree(brd); + brd_free_device(brd); } } @@ -426,16 +451,6 @@ static int __init brd_init(void) { int err, i; - brd_check_and_reset_par(); - - brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); - - for (i = 0; i < rd_nr; i++) { - err = brd_alloc(i); - if (err) - goto out_free; - } - /* * brd module now has a feature to instantiate underlying device * structure on-demand, provided that there is an access dev node. @@ -451,11 +466,18 @@ static int __init brd_init(void) * dynamically. */ + brd_check_and_reset_par(); + + brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); + if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) { err = -EIO; goto out_free; } + for (i = 0; i < rd_nr; i++) + brd_alloc(i); + pr_info("brd: module loaded\n"); return 0; -- 2.51.0 From 133008e84b99e4f5f8cf3d8b768c995732df9406 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 16 Oct 2024 13:13:09 -0700 Subject: [PATCH 04/16] blk-integrity: remove seed for user mapped buffers The seed is only used for kernel generation and verification. That doesn't happen for user buffers, so passing the seed around doesn't accomplish anything. 
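For reference, a sketch of a caller after this change (example_map_user_meta() is hypothetical, shown only to make the simplified signature concrete):

/*
 * Hypothetical caller: with the seed gone, mapping user metadata needs
 * only the buffer and its length.
 */
static int example_map_user_meta(struct request *rq, void __user *meta,
				 ssize_t meta_len)
{
	return blk_rq_integrity_map_user(rq, meta, meta_len);
}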
Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Reviewed-by: Anuj Gupta Reviewed-by: Kanchan Joshi Link: https://lore.kernel.org/r/20241016201309.1090320-1-kbusch@meta.com Signed-off-by: Jens Axboe --- block/bio-integrity.c | 13 +++++-------- block/blk-integrity.c | 4 ++-- drivers/nvme/host/ioctl.c | 17 ++++++++--------- include/linux/bio-integrity.h | 4 ++-- include/linux/blk-integrity.h | 5 ++--- 5 files changed, 19 insertions(+), 24 deletions(-) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 88e3ad73c385..2a4bd6611692 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -199,7 +199,7 @@ EXPORT_SYMBOL(bio_integrity_add_page); static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec, int nr_vecs, unsigned int len, - unsigned int direction, u32 seed) + unsigned int direction) { bool write = direction == ITER_SOURCE; struct bio_integrity_payload *bip; @@ -247,7 +247,6 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec, } bip->bip_flags |= BIP_COPY_USER; - bip->bip_iter.bi_sector = seed; bip->bip_vcnt = nr_vecs; return 0; free_bip: @@ -258,7 +257,7 @@ free_buf: } static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec, - int nr_vecs, unsigned int len, u32 seed) + int nr_vecs, unsigned int len) { struct bio_integrity_payload *bip; @@ -267,7 +266,6 @@ static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec, return PTR_ERR(bip); memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec)); - bip->bip_iter.bi_sector = seed; bip->bip_iter.bi_size = len; bip->bip_vcnt = nr_vecs; return 0; @@ -303,8 +301,7 @@ static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages, return nr_bvecs; } -int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes, - u32 seed) +int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits); @@ -350,9 +347,9 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes, if (copy) ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes, - direction, seed); + direction); else - ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes, seed); + ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes); if (ret) goto release_pages; if (bvec != stack_vec) diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 83b696ba0cac..b180cac61a9d 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -113,9 +113,9 @@ new_segment: EXPORT_SYMBOL(blk_rq_map_integrity_sg); int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, - ssize_t bytes, u32 seed) + ssize_t bytes) { - int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed); + int ret = bio_integrity_map_user(rq->bio, ubuf, bytes); if (ret) return ret; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index b9b79ccfabf8..f697d2d1d7e4 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -114,7 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q, static int nvme_map_user_request(struct request *req, u64 ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, - u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags) + struct io_uring_cmd *ioucmd, unsigned int flags) { struct request_queue *q = req->q; struct nvme_ns *ns = q->queuedata; @@ -152,8 +152,7 @@ static int nvme_map_user_request(struct request *req, u64 
ubuffer, bio_set_dev(bio, bdev); if (has_metadata) { - ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len, - meta_seed); + ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len); if (ret) goto out_unmap; } @@ -170,7 +169,7 @@ out: static int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, u64 ubuffer, unsigned bufflen, - void __user *meta_buffer, unsigned meta_len, u32 meta_seed, + void __user *meta_buffer, unsigned meta_len, u64 *result, unsigned timeout, unsigned int flags) { struct nvme_ns *ns = q->queuedata; @@ -187,7 +186,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, req->timeout = timeout; if (ubuffer && bufflen) { ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer, - meta_len, meta_seed, NULL, flags); + meta_len, NULL, flags); if (ret) return ret; } @@ -268,7 +267,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.lbatm = cpu_to_le16(io.appmask); return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, - meta_len, lower_32_bits(io.slba), NULL, 0, 0); + meta_len, NULL, 0, 0); } static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, @@ -323,7 +322,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), - cmd.metadata_len, 0, &result, timeout, 0); + cmd.metadata_len, &result, timeout, 0); if (status >= 0) { if (put_user(result, &ucmd->result)) @@ -370,7 +369,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), - cmd.metadata_len, 0, &cmd.result, timeout, flags); + cmd.metadata_len, &cmd.result, timeout, flags); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) @@ -504,7 +503,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, if (d.addr && d.data_len) { ret = nvme_map_user_request(req, d.addr, d.data_len, nvme_to_user_ptr(d.metadata), - d.metadata_len, 0, ioucmd, vec); + d.metadata_len, ioucmd, vec); if (ret) return ret; } diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h index dd831c269e99..dbf0f74c1529 100644 --- a/include/linux/bio-integrity.h +++ b/include/linux/bio-integrity.h @@ -72,7 +72,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr); int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset); -int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); +int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len); void bio_integrity_unmap_user(struct bio *bio); bool bio_integrity_prep(struct bio *bio); void bio_integrity_advance(struct bio *bio, unsigned int bytes_done); @@ -99,7 +99,7 @@ static inline void bioset_integrity_free(struct bio_set *bs) } static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, - ssize_t len, u32 seed) + ssize_t len) { return -EINVAL; } diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 676f8f860c47..c7eae0bfb013 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -28,7 +28,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t, int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); int blk_rq_count_integrity_sg(struct request_queue 
*, struct bio *); int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, - ssize_t bytes, u32 seed); + ssize_t bytes); static inline bool blk_integrity_queue_supports_integrity(struct request_queue *q) @@ -104,8 +104,7 @@ static inline int blk_rq_map_integrity_sg(struct request *q, } static inline int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, - ssize_t bytes, - u32 seed) + ssize_t bytes) { return -EINVAL; } -- 2.51.0 From 496a51b37143c690a06612a6bd58827ef2341761 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 31 Oct 2024 19:02:24 +0800 Subject: [PATCH 05/16] lib/iov_iter.c: initialize bi.bi_idx before iterating over bvec Initialize bi.bi_idx as 0 before iterating over bvec, otherwise garbage data can be used as ->bi_idx. Cc: Christoph Hellwig Reported-and-tested-by: Klara Modin Fixes: e4e535bff2bc ("iov_iter: don't require contiguous pages in iov_iter_extract_bvec_pages") Reviewed-by: Christoph Hellwig Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- lib/iov_iter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 9fc06f5fb748..c761f6db3cb4 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1699,6 +1699,7 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i, i->bvec++; skip = 0; } + bi.bi_idx = 0; bi.bi_size = maxsize + skip; bi.bi_bvec_done = skip; -- 2.51.0 From cafd00d0e90956c1c570a0a96cd86298897d247b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 30 Oct 2024 06:18:51 +0100 Subject: [PATCH 06/16] block: remove zone append special casing from the direct I/O path This code is unused, and all future zoned file systems should follow the btrfs lead of splitting the bios themselves to the zoned limits in the I/O submission handler, because if they didn't they would be hit by commit ed9832bc08db ("block: introduce folio awareness and add a bigger size from folio") breaking this code when the zone append limit (that is usually the max_hw_sectors limit) is smaller than the largest possible folio size. 
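A hedged sketch of the check such a submission handler ends up enforcing (illustrative only; a real file system such as btrfs splits the bio to the limit rather than failing it):

/*
 * Illustrative only: reject a zone append bio that exceeds the device
 * limit, mirroring the check the block layer performs in
 * blk_check_zone_append().
 */
static blk_status_t example_check_zone_append(struct bio *bio)
{
	unsigned int max_sectors = bdev_max_zone_append_sectors(bio->bi_bdev);

	if (!max_sectors || bio_sectors(bio) > max_sectors)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}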
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20241030051859.280923-2-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 34 ++-------------------------------- 1 file changed, 2 insertions(+), 32 deletions(-) diff --git a/block/bio.c b/block/bio.c index ac4d77c88932..6a60d62a529d 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1206,21 +1206,12 @@ EXPORT_SYMBOL_GPL(__bio_release_pages); void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) { - size_t size = iov_iter_count(iter); - WARN_ON_ONCE(bio->bi_max_vecs); - if (bio_op(bio) == REQ_OP_ZONE_APPEND) { - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - size_t max_sectors = queue_max_zone_append_sectors(q); - - size = min(size, max_sectors << SECTOR_SHIFT); - } - bio->bi_vcnt = iter->nr_segs; bio->bi_io_vec = (struct bio_vec *)iter->bvec; bio->bi_iter.bi_bvec_done = iter->iov_offset; - bio->bi_iter.bi_size = size; + bio->bi_iter.bi_size = iov_iter_count(iter); bio_set_flag(bio, BIO_CLONED); } @@ -1245,20 +1236,6 @@ static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len, return 0; } -static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio, - size_t len, size_t offset) -{ - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - bool same_page = false; - - if (bio_add_hw_folio(q, bio, folio, len, offset, - queue_max_zone_append_sectors(q), &same_page) != len) - return -EINVAL; - if (same_page && bio_flagged(bio, BIO_PAGE_PINNED)) - unpin_user_folio(folio, 1); - return 0; -} - static unsigned int get_contig_folio_len(unsigned int *num_pages, struct page **pages, unsigned int i, struct folio *folio, size_t left, @@ -1365,14 +1342,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) len = get_contig_folio_len(&num_pages, pages, i, folio, left, offset); - if (bio_op(bio) == REQ_OP_ZONE_APPEND) { - ret = bio_iov_add_zone_append_folio(bio, folio, len, - folio_offset); - if (ret) - break; - } else - bio_iov_add_folio(bio, folio, len, folio_offset); - + bio_iov_add_folio(bio, folio, len, folio_offset); offset = 0; } -- 2.51.0 From f187b9bf1a639090893c31030ddb60f9beae23f0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 30 Oct 2024 06:18:52 +0100 Subject: [PATCH 07/16] block: remove bio_add_zone_append_page This is only used by the nvmet zns passthrough code, which can trivially just use bio_add_pc_page and do the sanity check for the max zone append limit itself. All future zoned file systems should follow the btrfs lead and let the upper layers fill up bios unlimited by hardware constraints and split them to the limits in the I/O submission handler. 
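As a standalone sketch of the replacement pattern (simplified from the nvmet hunk below; example_add_zone_append_data() is hypothetical):

/*
 * Validate the total transfer against the zone append limit once, then
 * fill the bio with bio_add_pc_page().
 */
static bool example_add_zone_append_data(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset, u32 data_len)
{
	struct block_device *bdev = bio->bi_bdev;

	if (data_len > bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT)
		return false;

	return bio_add_pc_page(bdev_get_queue(bdev), bio, page, len,
			       offset) == len;
}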
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20241030051859.280923-3-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 33 --------------------------------- drivers/nvme/target/zns.c | 21 +++++++++++++-------- include/linux/bio.h | 2 -- 3 files changed, 13 insertions(+), 43 deletions(-) diff --git a/block/bio.c b/block/bio.c index 6a60d62a529d..daceb0a5c1d7 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1064,39 +1064,6 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, } EXPORT_SYMBOL(bio_add_pc_page); -/** - * bio_add_zone_append_page - attempt to add page to zone-append bio - * @bio: destination bio - * @page: page to add - * @len: vec entry length - * @offset: vec entry offset - * - * Attempt to add a page to the bio_vec maplist of a bio that will be submitted - * for a zone-append request. This can fail for a number of reasons, such as the - * bio being full or the target block device is not a zoned block device or - * other limitations of the target block device. The target block device must - * allow bio's up to PAGE_SIZE, so it is always possible to add a single page - * to an empty bio. - * - * Returns: number of bytes added to the bio, or 0 in case of a failure. - */ -int bio_add_zone_append_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) -{ - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - bool same_page = false; - - if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) - return 0; - - if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) - return 0; - - return bio_add_hw_page(q, bio, page, len, offset, - queue_max_zone_append_sectors(q), &same_page); -} -EXPORT_SYMBOL_GPL(bio_add_zone_append_page); - /** * __bio_add_page - add page(s) to a bio in a new segment * @bio: destination bio diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index af9e13be7678..3aef35b05111 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -537,6 +537,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) u16 status = NVME_SC_SUCCESS; unsigned int total_len = 0; struct scatterlist *sg; + u32 data_len = nvmet_rw_data_len(req); struct bio *bio; int sg_cnt; @@ -544,6 +545,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) return; + if (data_len > + bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) { + req->error_loc = offsetof(struct nvme_rw_command, length); + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + if (!req->sg_cnt) { nvmet_req_complete(req, 0); return; @@ -576,20 +584,17 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) bio->bi_opf |= REQ_FUA; for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) { - struct page *p = sg_page(sg); - unsigned int l = sg->length; - unsigned int o = sg->offset; - unsigned int ret; + unsigned int len = sg->length; - ret = bio_add_zone_append_page(bio, p, l, o); - if (ret != sg->length) { + if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio, + sg_page(sg), len, sg->offset) != len) { status = NVME_SC_INTERNAL; goto out_put_bio; } - total_len += sg->length; + total_len += len; } - if (total_len != nvmet_rw_data_len(req)) { + if (total_len != data_len) { status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto out_put_bio; } diff --git a/include/linux/bio.h b/include/linux/bio.h index faceadb040f9..4a1bf43ca53d 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -418,8 +418,6 @@ bool __must_check 
bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); -int bio_add_zone_append_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, -- 2.51.0 From d47de6ac8842327ae1c782670283450159c55d5b Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 1 Nov 2024 09:22:15 +0000 Subject: [PATCH 08/16] loop: Simplify discard granularity calc A bdev discard granularity is always at least SECTOR_SIZE, so don't check for a zero value. Suggested-by: Christoph Hellwig Signed-off-by: John Garry Link: https://lore.kernel.org/r/20241101092215.422428-1-john.g.garry@oracle.com Signed-off-by: Jens Axboe --- drivers/block/loop.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 7719858c49bb..f21f4254b038 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -789,8 +789,7 @@ static void loop_config_discard(struct loop_device *lo, struct block_device *bdev = I_BDEV(inode); max_discard_sectors = bdev_write_zeroes_sectors(bdev); - granularity = bdev_discard_granularity(bdev) ?: - bdev_physical_block_size(bdev); + granularity = bdev_discard_granularity(bdev); /* * We use punch hole to reclaim the free space used by the -- 2.51.0 From 341468e0ab4bb1b26ad0b0cee7c6db4bf283043a Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 2 Nov 2024 09:42:11 +0800 Subject: [PATCH 09/16] lib/iov_iter: fix bvec iterator setup The .bi_size of the bvec iterator should be initialized to the real maximum size for walking, and .bi_bvec_done just counts how many bytes need to be skipped in the first bvec, so .bi_size isn't related to .bi_bvec_done. This patch fixes the bvec iterator initialization; the inner `size` check isn't needed any more, so revert Eric Dumazet's commit 7bc802acf193 ("iov-iter: do not return more bytes than requested in iov_iter_extract_bvec_pages()"). Cc: Eric Dumazet Fixes: e4e535bff2bc ("iov_iter: don't require contiguous pages in iov_iter_extract_bvec_pages") Reported-by: syzbot+71abe7ab2b70bca770fd@syzkaller.appspotmail.com Tested-by: syzbot+71abe7ab2b70bca770fd@syzkaller.appspotmail.com Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- lib/iov_iter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/iov_iter.c b/lib/iov_iter.c index c761f6db3cb4..4a54c7af62c0 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1700,7 +1700,7 @@ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i, skip = 0; } bi.bi_idx = 0; - bi.bi_size = maxsize + skip; + bi.bi_size = maxsize; bi.bi_bvec_done = skip; maxpages = want_pages_array(pages, maxsize, skip, maxpages); -- 2.51.0 From 05df01668490c670aeafe002cfa63334687b012f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 4 Nov 2024 06:42:18 +0100 Subject: [PATCH 10/16] block: update blk_stack_limits documentation Listing every single feature that needs to be pre-set by stacking drivers does not scale.
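For context, a sketch of the convention the updated comment describes (hypothetical stacking driver; return values ignored for brevity):

/*
 * Pre-set the features that every component must support before
 * stacking; blk_stack_limits() clears any flag an underlying device
 * lacks.
 */
static void example_stack_all(struct queue_limits *t,
			      struct block_device **bdevs, int nr)
{
	int i;

	t->features |= BLK_FEAT_NOWAIT | BLK_FEAT_POLL;

	for (i = 0; i < nr; i++)
		blk_stack_limits(t, bdev_limits(bdevs[i]),
				 get_start_sect(bdevs[i]));
}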
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20241104054218.45596-1-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-settings.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/block/blk-settings.c b/block/blk-settings.c index 95fc39d09872..5ee3d6d1448d 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -508,10 +508,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->features |= (b->features & BLK_FEAT_INHERIT_MASK); /* - * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the - * stacking driver and all underlying devices. The stacking driver sets - * the flags before stacking the limits, and this will clear the flags - * if any of the underlying devices does not support it. + * Some features need to be supported both by the stacking driver and all + * underlying devices. The stacking driver sets these flags before + * stacking the limits, and this will clear the flags if any of the + * underlying devices does not support it. */ if (!(b->features & BLK_FEAT_NOWAIT)) t->features &= ~BLK_FEAT_NOWAIT; -- 2.51.0 From e494c3dce6981c0b19fee79928b293a1cf0af867 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 4 Nov 2024 08:39:31 +0100 Subject: [PATCH 11/16] block: remove the max_zone_append_sectors check in blk_revalidate_disk_zones With the block layer zone append emulation, we are now always setting a max_zone_append_sectors value for zoned devices and this check can't ever trigger. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20241104073955.112324-2-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-zoned.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index af19296fa50d..a287577d1ad6 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -1774,12 +1774,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk) return -ENODEV; } - if (!queue_max_zone_append_sectors(q)) { - pr_warn("%s: Invalid 0 maximum zone append limit\n", - disk->disk_name); - return -ENODEV; - } - /* * Ensure that all memory allocations in this context are done as if * GFP_NOIO was specified. -- 2.51.0 From 2a8f6153e1c2db06a537a5c9d61102eb591776f1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 4 Nov 2024 08:39:32 +0100 Subject: [PATCH 12/16] block: pre-calculate max_zone_append_sectors max_zone_append_sectors differs from all other queue limits in that the final value used is not stored in the queue_limits but needs to be obtained using the queue_limits_max_zone_append_sectors helper. This not only adds (tiny) extra overhead to the I/O path, but also can be easily forgotten in file system code. Add a new max_hw_zone_append_sectors value to queue_limits which is set by the driver, and calculate max_zone_append_sectors from that and the other inputs in blk_validate_zoned_limits, similar to how max_sectors is calculated, to fix this.
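A sketch of the resulting split of responsibilities (both functions are hypothetical):

/*
 * The driver reports only the hardware limit; 0 asks the block layer to
 * emulate zone append. Consumers read the pre-calculated effective
 * value straight from the queue_limits.
 */
static void example_driver_setup(struct queue_limits *lim,
				 unsigned int hw_append_sectors)
{
	lim->max_hw_zone_append_sectors = hw_append_sectors;
}

static unsigned int example_effective_limit(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_zone_append_sectors;
}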
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20241104073955.112324-3-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-merge.c | 3 +-- block/blk-settings.c | 25 ++++++++++++------------- block/blk-sysfs.c | 17 +++-------------- drivers/block/null_blk/zoned.c | 2 +- drivers/block/ublk_drv.c | 2 +- drivers/block/virtio_blk.c | 2 +- drivers/md/dm-zone.c | 4 ++-- drivers/nvme/host/multipath.c | 2 +- drivers/nvme/host/zns.c | 2 +- drivers/scsi/sd_zbc.c | 2 -- include/linux/blkdev.h | 21 +++------------------ 12 files changed, 27 insertions(+), 57 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 09d10bb95fda..5df4607321ca 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -607,7 +607,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q, return BLK_STS_IOERR; /* Make sure the BIO is small enough and will not get split */ - if (nr_sectors > queue_max_zone_append_sectors(q)) + if (nr_sectors > q->limits.max_zone_append_sectors) return BLK_STS_IOERR; bio->bi_opf |= REQ_NOMERGE; diff --git a/block/blk-merge.c b/block/blk-merge.c index d813d799cee7..7c1375a080ad 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -388,11 +388,10 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, struct bio *bio_split_zone_append(struct bio *bio, const struct queue_limits *lim, unsigned *nr_segs) { - unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim); int split_sectors; split_sectors = bio_split_rw_at(bio, lim, nr_segs, - max_sectors << SECTOR_SHIFT); + lim->max_zone_append_sectors << SECTOR_SHIFT); if (WARN_ON_ONCE(split_sectors > 0)) split_sectors = -EINVAL; return bio_submit_split(bio, split_sectors); diff --git a/block/blk-settings.c b/block/blk-settings.c index 5ee3d6d1448d..5cb69d85af0e 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -91,17 +91,16 @@ static int blk_validate_zoned_limits(struct queue_limits *lim) if (lim->zone_write_granularity < lim->logical_block_size) lim->zone_write_granularity = lim->logical_block_size; - if (lim->max_zone_append_sectors) { - /* - * The Zone Append size is limited by the maximum I/O size - * and the zone size given that it can't span zones. - */ - lim->max_zone_append_sectors = - min3(lim->max_hw_sectors, - lim->max_zone_append_sectors, - lim->chunk_sectors); - } - + /* + * The Zone Append size is limited by the maximum I/O size and the zone + * size given that it can't span zones. + * + * If no max_hw_zone_append_sectors limit is provided, the block layer + * will emulate it, else we're also bound by the hardware limit. 
+ */ + lim->max_zone_append_sectors = + min_not_zero(lim->max_hw_zone_append_sectors, + min(lim->chunk_sectors, lim->max_hw_sectors)); return 0; } @@ -527,8 +526,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, b->max_write_zeroes_sectors); - t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t), - queue_limits_max_zone_append_sectors(b)); + t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors, + b->max_hw_zone_append_sectors); t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 741b95dfdbf6..d9f22122ae2f 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -131,6 +131,7 @@ QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors) QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors) QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors) QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors) +QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors) #define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field) \ static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \ @@ -178,18 +179,6 @@ static ssize_t queue_max_discard_sectors_store(struct gendisk *disk, return ret; } -/* - * For zone append queue_max_zone_append_sectors does not just return the - * underlying queue limits, but actually contains a calculation. Because of - * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here. - */ -static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page) -{ - return sprintf(page, "%llu\n", - (u64)queue_max_zone_append_sectors(disk->queue) << - SECTOR_SHIFT); -} - static ssize_t queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count) { @@ -479,7 +468,7 @@ QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes"); QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes"); QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes"); -QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes"); +QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes"); QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity"); QUEUE_RO_ENTRY(queue_zoned, "zoned"); @@ -607,7 +596,7 @@ static struct attribute *queue_attrs[] = { &queue_atomic_write_unit_max_entry.attr, &queue_write_same_max_entry.attr, &queue_max_write_zeroes_sectors_entry.attr, - &queue_zone_append_max_entry.attr, + &queue_max_zone_append_sectors_entry.attr, &queue_zone_write_granularity_entry.attr, &queue_rotational_entry.attr, &queue_zoned_entry.attr, diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 9bc768b2ca56..0d5f9bf95229 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -166,7 +166,7 @@ int null_init_zoned_dev(struct nullb_device *dev, lim->features |= BLK_FEAT_ZONED; lim->chunk_sectors = dev->zone_size_sects; - lim->max_zone_append_sectors = dev->zone_append_max_sectors; + lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors; lim->max_open_zones = dev->zone_max_open; lim->max_active_zones = dev->zone_max_active; return 0; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 59951e7c2593..8d938b2b41ee 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2270,7 +2270,7 @@ static 
int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) lim.features |= BLK_FEAT_ZONED; lim.max_active_zones = p->max_active_zones; lim.max_open_zones = p->max_open_zones; - lim.max_zone_append_sectors = p->max_zone_append_sectors; + lim.max_hw_zone_append_sectors = p->max_zone_append_sectors; } if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) { diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 194417abc105..0e99a4714928 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -784,7 +784,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk, wg, v); return -ENODEV; } - lim->max_zone_append_sectors = v; + lim->max_hw_zone_append_sectors = v; dev_dbg(&vdev->dev, "max append sectors = %u\n", v); return 0; diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index c0d41c36e06e..20edd3fabbab 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -344,7 +344,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); } else { set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); - lim->max_zone_append_sectors = 0; + lim->max_hw_zone_append_sectors = 0; } /* @@ -379,7 +379,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, if (!zlim.mapped_nr_seq_zones) { lim->max_open_zones = 0; lim->max_active_zones = 0; - lim->max_zone_append_sectors = 0; + lim->max_hw_zone_append_sectors = 0; lim->zone_write_granularity = 0; lim->chunk_sectors = 0; lim->features &= ~BLK_FEAT_ZONED; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 6a15873055b9..c26cb7d3a2e5 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -636,7 +636,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) if (head->ids.csi == NVME_CSI_ZNS) lim.features |= BLK_FEAT_ZONED; else - lim.max_zone_append_sectors = 0; + lim.max_hw_zone_append_sectors = 0; head->disk = blk_alloc_disk(&lim, ctrl->numa_node); if (IS_ERR(head->disk)) diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 9a06f9d98cd6..382949e18c6a 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -111,7 +111,7 @@ void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, lim->features |= BLK_FEAT_ZONED; lim->max_open_zones = zi->max_open_zones; lim->max_active_zones = zi->max_active_zones; - lim->max_zone_append_sectors = ns->ctrl->max_zone_append; + lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append; lim->chunk_sectors = ns->head->zsze = nvme_lba_to_sect(ns->head, zi->zone_size); } diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index ee2b74238758..de5c54c057ec 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -634,8 +634,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, lim->max_open_zones = sdkp->zones_max_open; lim->max_active_zones = 0; lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks); - /* Enable block layer zone append emulation */ - lim->max_zone_append_sectors = 0; return 0; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7bfc877e159e..6d1413bd69a5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -375,6 +375,7 @@ struct queue_limits { unsigned int max_user_discard_sectors; unsigned int max_secure_erase_sectors; unsigned int max_write_zeroes_sectors; + unsigned int max_hw_zone_append_sectors; unsigned int max_zone_append_sectors; unsigned int 
discard_granularity; unsigned int discard_alignment; @@ -1204,25 +1205,9 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned int -queue_limits_max_zone_append_sectors(const struct queue_limits *l) -{ - unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors); - - return min_not_zero(l->max_zone_append_sectors, max_sectors); -} - -static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q) -{ - if (!blk_queue_is_zoned(q)) - return 0; - - return queue_limits_max_zone_append_sectors(&q->limits); -} - static inline bool queue_emulates_zone_append(struct request_queue *q) { - return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors; + return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors; } static inline bool bdev_emulates_zone_append(struct block_device *bdev) @@ -1233,7 +1218,7 @@ static inline bool bdev_emulates_zone_append(struct block_device *bdev) static inline unsigned int bdev_max_zone_append_sectors(struct block_device *bdev) { - return queue_max_zone_append_sectors(bdev_get_queue(bdev)); + return bdev_limits(bdev)->max_zone_append_sectors; } static inline unsigned int bdev_max_segments(struct block_device *bdev) -- 2.51.0 From 1e79892e76a78f33deecc895086bef92d8147183 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 7 Oct 2024 10:48:04 +0200 Subject: [PATCH 13/16] md/raid5-ppl: Use atomic64_inc_return() in ppl_new_iounit() Use atomic64_inc_return(&ref) instead of atomic64_add_return(1, &ref) to use the optimized implementation and ease register pressure around the primitive on targets that implement an optimized variant. Signed-off-by: Uros Bizjak Cc: Song Liu Cc: Yu Kuai Reviewed-by: Yu Kuai Reviewed-by: Artur Paszkiewicz Link: https://lore.kernel.org/r/20241007084831.48067-1-ubizjak@gmail.com Signed-off-by: Song Liu --- drivers/md/raid5-ppl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index a70cbec12ed0..37c4da5311ca 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -258,7 +258,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log, memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED); pplhdr->signature = cpu_to_le32(ppl_conf->signature); - io->seq = atomic64_add_return(1, &ppl_conf->seq); + io->seq = atomic64_inc_return(&ppl_conf->seq); pplhdr->generation = cpu_to_le64(io->seq); return io; -- 2.51.0 From 4abfce19c7fb090c93e6677e26b2c709ca6f6fd8 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Thu, 31 Oct 2024 11:31:08 +0800 Subject: [PATCH 14/16] md: add a new helper rdev_blocked() The helper will be used in later patches for raid1/raid10/raid5; the difference is that a Faulty rdev with an unacknowledged bad block will not be considered blocked.
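As a sketch of the intended use (hypothetical write path, modelled on the raid1/raid10/raid5 callers converted in later patches):

/*
 * Wait only when the rdev is genuinely blocked; a Faulty rdev with
 * unacknowledged bad blocks is skipped instead.
 * md_wait_for_blocked_rdev() drops the nr_pending reference taken here.
 */
static bool example_wait_if_blocked(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!rdev_blocked(rdev))
		return false;

	atomic_inc(&rdev->nr_pending);
	md_wait_for_blocked_rdev(rdev, mddev);
	return true;
}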
Signed-off-by: Yu Kuai Tested-by: Mariusz Tkaczyk Link: https://lore.kernel.org/r/20241031033114.3845582-2-yukuai1@huaweicloud.com Signed-off-by: Song Liu --- drivers/md/md.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/md/md.h b/drivers/md/md.h index 5d2e6bd58e4d..4ba93af36126 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -1002,6 +1002,30 @@ static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio, trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector); } +static inline bool rdev_blocked(struct md_rdev *rdev) +{ + /* + * Blocked will be set by the error handler and cleared by the daemon + * after updating the superblock; meanwhile write IO should be blocked + * to prevent reading old data after power failure. + */ + if (test_bit(Blocked, &rdev->flags)) + return true; + + /* + * A Faulty device should not be accessed anymore; there is no need to + * wait for bad blocks to be acknowledged. + */ + if (test_bit(Faulty, &rdev->flags)) + return false; + + /* rdev is blocked by badblocks. */ + if (test_bit(BlockedBadBlocks, &rdev->flags)) + return true; + + return false; +} + #define mddev_add_trace_msg(mddev, fmt, args...) \ do { \ if (!mddev_is_dm(mddev)) \ -- 2.51.0 From 50e8274855e7ab5499ff8296e09802874a3f03b1 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Thu, 31 Oct 2024 11:31:09 +0800 Subject: [PATCH 15/16] md: don't wait faulty rdev in md_wait_for_blocked_rdev() md_wait_for_blocked_rdev() is called for write IO while the rdev is blocked; however, the rdev can become faulty after being chosen for the write, and a faulty rdev should never be accessed anymore, so there is no point in waiting for a faulty rdev to be unblocked. Signed-off-by: Yu Kuai Tested-by: Mariusz Tkaczyk Link: https://lore.kernel.org/r/20241031033114.3845582-3-yukuai1@huaweicloud.com Signed-off-by: Song Liu --- drivers/md/md.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 179ee4afe937..b2a0e0a84309 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -9762,9 +9762,7 @@ EXPORT_SYMBOL(md_reap_sync_thread); void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) { sysfs_notify_dirent_safe(rdev->sysfs_state); - wait_event_timeout(rdev->blocked_wait, - !test_bit(Blocked, &rdev->flags) && - !test_bit(BlockedBadBlocks, &rdev->flags), + wait_event_timeout(rdev->blocked_wait, !rdev_blocked(rdev), msecs_to_jiffies(5000)); rdev_dec_pending(rdev, mddev); } -- 2.51.0 From 29967332ced51a15a22f11381eeebbc500ba1858 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Thu, 31 Oct 2024 11:31:10 +0800 Subject: [PATCH 16/16] md: don't record new badblocks for faulty rdev Faulty is checked before issuing IO to the rdev; however, the rdev can become faulty at any time, so it's possible that rdev_set_badblocks() will be called for a faulty rdev. In this case, mddev->sb_flags will be set and other paths can be blocked by the super block update. Since a faulty rdev will not be accessed anymore, there is no need to record new badblocks for it and force a super block update. Note that this is not a bugfix, just prevention of super block updates in some corner cases, and it will help narrow down a bug related to external metadata[1]; testing also shows that devices are removed faster in the case of IO error. 
[1] https://lore.kernel.org/all/f34452df-810b-48b2-a9b4-7f925699a9e7@linux.intel.com/ Signed-off-by: Yu Kuai Tested-by: Mariusz Tkaczyk Link: https://lore.kernel.org/r/20241031033114.3845582-4-yukuai1@huaweicloud.com Signed-off-by: Song Liu --- drivers/md/md.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/md/md.c b/drivers/md/md.c index b2a0e0a84309..bbe002ebd584 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -9791,6 +9791,17 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, { struct mddev *mddev = rdev->mddev; int rv; + + /* + * Recording new badblocks for a faulty rdev forces an unnecessary + * super block update. This is fragile for external management because + * a userspace daemon may be trying to remove this device and a + * deadlock may occur. This will probably be solved in mdadm, but it + * is safer to avoid it here. + */ + if (test_bit(Faulty, &rdev->flags)) + return 1; + if (is_new) s += rdev->new_data_offset; else -- 2.51.0