block: remove zone append special casing from the direct I/O path
author    Christoph Hellwig <hch@lst.de>
          Wed, 30 Oct 2024 05:18:51 +0000 (06:18 +0100)
committer Jens Axboe <axboe@kernel.dk>
          Thu, 31 Oct 2024 16:54:25 +0000 (10:54 -0600)
This code is unused, and all future zoned file systems should follow
the btrfs lead of splitting the bios themselves to the zoned limits
in the I/O submission handler. If they did not, they would be hit by
commit ed9832bc08db ("block: introduce folio awareness and add a
bigger size from folio"), which breaks this code whenever the zone
append limit (usually the max_hw_sectors limit) is smaller than the
largest possible folio size.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20241030051859.280923-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
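
[Editor's note: the pattern the commit message points to looks roughly
like the sketch below. This is a minimal illustration, not the actual
btrfs code: the file system builds an ordinary write bio, splits it to
the device's zone append limit in its own submission handler, and only
then flips each piece to REQ_OP_ZONE_APPEND, since bio_split() refuses
to split zone append bios. fs_submit_write() and fs_bioset are
hypothetical names, the device is assumed to be zoned with a nonzero
append limit, and the per-bio completion handling a real file system
needs (btrfs refcounts its split bios) is omitted.]

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_set fs_bioset;	/* assumed initialized with bioset_init() */

static void fs_submit_write(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int max_sectors = queue_max_zone_append_sectors(q);

	/*
	 * Split while the bio is still a plain REQ_OP_WRITE;
	 * zone append bios themselves must never be split.
	 */
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
					      &fs_bioset);

		/* switch the front piece to zone append and send it off */
		split->bi_opf &= ~REQ_OP_WRITE;
		split->bi_opf |= REQ_OP_ZONE_APPEND;
		submit_bio(split);
	}

	bio->bi_opf &= ~REQ_OP_WRITE;
	bio->bi_opf |= REQ_OP_ZONE_APPEND;
	submit_bio(bio);
}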
block/bio.c

index ac4d77c889322df0a494b5055f0089807cad6367..6a60d62a529d3a528e326b22492d8990c70a581c 100644 (file)
@@ -1206,21 +1206,12 @@ EXPORT_SYMBOL_GPL(__bio_release_pages);
 
 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 {
-       size_t size = iov_iter_count(iter);
-
        WARN_ON_ONCE(bio->bi_max_vecs);
 
-       if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-               struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-               size_t max_sectors = queue_max_zone_append_sectors(q);
-
-               size = min(size, max_sectors << SECTOR_SHIFT);
-       }
-
        bio->bi_vcnt = iter->nr_segs;
        bio->bi_io_vec = (struct bio_vec *)iter->bvec;
        bio->bi_iter.bi_bvec_done = iter->iov_offset;
-       bio->bi_iter.bi_size = size;
+       bio->bi_iter.bi_size = iov_iter_count(iter);
        bio_set_flag(bio, BIO_CLONED);
 }
 
@@ -1245,20 +1236,6 @@ static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
        return 0;
 }
 
-static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio,
-                                        size_t len, size_t offset)
-{
-       struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       bool same_page = false;
-
-       if (bio_add_hw_folio(q, bio, folio, len, offset,
-                       queue_max_zone_append_sectors(q), &same_page) != len)
-               return -EINVAL;
-       if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
-               unpin_user_folio(folio, 1);
-       return 0;
-}
-
 static unsigned int get_contig_folio_len(unsigned int *num_pages,
                                         struct page **pages, unsigned int i,
                                         struct folio *folio, size_t left,
@@ -1365,14 +1342,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
                        len = get_contig_folio_len(&num_pages, pages, i,
                                                   folio, left, offset);
 
-               if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-                       ret = bio_iov_add_zone_append_folio(bio, folio, len,
-                                       folio_offset);
-                       if (ret)
-                               break;
-               } else
-                       bio_iov_add_folio(bio, folio, len, folio_offset);
-
+               bio_iov_add_folio(bio, folio, len, folio_offset);
                offset = 0;
        }
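
[Editor's note: as context for the removal, the special casing deleted
above capped a zone append bio at queue_max_zone_append_sectors() while
mapping an iov_iter. A caller that still wanted to feed an iov_iter
into a REQ_OP_ZONE_APPEND bio through this path would now have to clamp
the iterator itself before calling bio_iov_iter_get_pages(). The helper
below is a hypothetical illustration of that, not an existing API.]

static int fs_fill_zone_append_bio(struct bio *bio, struct iov_iter *iter)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	size_t max_sectors = queue_max_zone_append_sectors(q);

	/* the iov_iter mapping helpers no longer apply this cap */
	iov_iter_truncate(iter, max_sectors << SECTOR_SHIFT);
	return bio_iov_iter_get_pages(bio, iter);
}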