btrfs: move priv off stack in btrfs_encoded_read_regular_fill_pages()
author		Mark Harmstone <maharmstone@fb.com>
		Tue, 22 Oct 2024 14:50:19 +0000 (15:50 +0100)
committer	David Sterba <dsterba@suse.com>
		Mon, 11 Nov 2024 13:34:21 +0000 (14:34 +0100)
Change btrfs_encoded_read_regular_fill_pages() so that the priv struct
is heap-allocated rather than stored on the stack, in preparation for
adding an asynchronous mode to the function.

Signed-off-by: Mark Harmstone <maharmstone@fb.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
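
For reference, the private struct being moved off the stack carries the
completion state that the diff below initializes through priv->wait,
priv->pending and priv->status. A minimal sketch of its fields, inferred
from those accesses (the exact definition in fs/btrfs/inode.c may differ):

	struct btrfs_encoded_read_private {
		wait_queue_head_t wait;		/* woken once all bios complete */
		atomic_t pending;		/* in-flight bios, plus one held by the submitter */
		blk_status_t status;		/* first I/O error observed, if any */
	};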
fs/btrfs/inode.c

index 358a282de7fea27ff9a3f7eb3484ae7cc2d9332c..ed66eafa6b52f93f4c08e4aece7184d47d82ad56 100644 (file)
@@ -9085,16 +9085,21 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
                                          struct page **pages)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
-       struct btrfs_encoded_read_private priv = {
-               .pending = ATOMIC_INIT(1),
-       };
+       struct btrfs_encoded_read_private *priv;
        unsigned long i = 0;
        struct btrfs_bio *bbio;
+       int ret;
 
-       init_waitqueue_head(&priv.wait);
+       priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
+       if (!priv)
+               return -ENOMEM;
+
+       init_waitqueue_head(&priv->wait);
+       atomic_set(&priv->pending, 1);
+       priv->status = 0;
 
        bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
-                              btrfs_encoded_read_endio, &priv);
+                              btrfs_encoded_read_endio, priv);
        bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
        bbio->inode = inode;
 
@@ -9102,11 +9107,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
                size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
 
                if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
-                       atomic_inc(&priv.pending);
+                       atomic_inc(&priv->pending);
                        btrfs_submit_bbio(bbio, 0);
 
                        bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
-                                              btrfs_encoded_read_endio, &priv);
+                                              btrfs_encoded_read_endio, priv);
                        bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
                        bbio->inode = inode;
                        continue;
@@ -9117,13 +9122,15 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
                disk_io_size -= bytes;
        } while (disk_io_size);
 
-       atomic_inc(&priv.pending);
+       atomic_inc(&priv->pending);
        btrfs_submit_bbio(bbio, 0);
 
-       if (atomic_dec_return(&priv.pending))
-               io_wait_event(priv.wait, !atomic_read(&priv.pending));
+       if (atomic_dec_return(&priv->pending))
+               io_wait_event(priv->wait, !atomic_read(&priv->pending));
        /* See btrfs_encoded_read_endio() for ordering. */
-       return blk_status_to_errno(READ_ONCE(priv.status));
+       ret = blk_status_to_errno(READ_ONCE(priv->status));
+       kfree(priv);
+       return ret;
 }
 
 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
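
The ordering comment in the hunk above refers to btrfs_encoded_read_endio(),
which is not part of this diff. Roughly, that callback records any I/O error,
drops one count from priv->pending and wakes the waiter when it hits zero. A
hedged sketch based on the calls visible in this patch, not the verbatim
kernel code:

	static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
	{
		struct btrfs_encoded_read_private *priv = bbio->private;

		if (bbio->bio.bi_status) {
			/* Paired with READ_ONCE(priv->status) after the wait above. */
			WRITE_ONCE(priv->status, bbio->bio.bi_status);
		}
		/* Last completion wakes the submitter blocked in io_wait_event(). */
		if (!atomic_dec_return(&priv->pending))
			wake_up(&priv->wait);
		bio_put(&bbio->bio);
	}

With priv on the heap, a later asynchronous mode can let this callback own the
final reference and free priv itself instead of relying on the submitter's
stack frame still being live.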