                                               pgoff_t index,
                                               unsigned long num_ra_pages)
 {
-       DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);
+       DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
        struct page *page;
 
        index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
 
 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
 {
-       DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
+       DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        pgoff_t redirty_idx = page_idx;
 
                                               pgoff_t index,
                                               unsigned long num_ra_pages)
 {
-       DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);
+       DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
        struct page *page;
 
        index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
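All three hunks above pass NULL for the new ra argument: these paths work from a bare inode with no struct file, so there is no per-file readahead state to hand over. Note in passing that the ractl captures index before the verity-metadata offset is added on the following line, so the readahead it describes appears to start at the pre-adjustment index. A minimal sketch of the intended pattern, applying the offset first (read_metadata_page() and metadata_pos() are hypothetical names; the real ext4/f2fs bodies are elided from the hunks):

	static struct page *read_metadata_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
	{
		/* Translate into the metadata region first. */
		index += metadata_pos(inode) >> PAGE_SHIFT;

		if (num_ra_pages > 1) {
			/*
			 * file and ra are both NULL: safe, because
			 * page_cache_ra_unbounded() never consults
			 * ractl->ra.
			 */
			DEFINE_READAHEAD(ractl, NULL, NULL,
					 inode->i_mapping, index);

			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
		}
		return read_mapping_page(inode->i_mapping, index, NULL);
	}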
 
  * @file: The file, used primarily by network filesystems for authentication.
  *       May be NULL if invoked internally by the filesystem.
  * @mapping: Readahead this filesystem object.
+ * @ra: File readahead state.  May be NULL.
  */
 struct readahead_control {
        struct file *file;
        struct address_space *mapping;
+       struct file_ra_state *ra;
 /* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
 };
 
-#define DEFINE_READAHEAD(rac, f, m, i)                                 \
-       struct readahead_control rac = {                                \
+#define DEFINE_READAHEAD(ractl, f, r, m, i)                            \
+       struct readahead_control ractl = {                              \
                .file = f,                                              \
                .mapping = m,                                           \
+               .ra = r,                                                \
                ._index = i,                                            \
        }
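Folding the file_ra_state pointer into the control structure means callers no longer have to thread it through every readahead function separately. For reference, DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index) now expands to the designated initializer below; the fields not named (_nr_pages, _batch_count) start out zeroed as usual:

	struct readahead_control ractl = {
		.file = file,
		.mapping = mapping,
		.ra = &file->f_ra,
		._index = index,
	};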
 
 void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
-void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct page *,
                unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
-               struct page *, unsigned long req_count);
 
 /**
  * page_cache_sync_readahead - generic file readahead
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
 {
-       DEFINE_READAHEAD(ractl, file, mapping, index);
-       page_cache_sync_ra(&ractl, ra, req_count);
+       DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+       page_cache_sync_ra(&ractl, req_count);
 }
 
 /**
                struct file_ra_state *ra, struct file *file,
                struct page *page, pgoff_t index, unsigned long req_count)
 {
-       DEFINE_READAHEAD(ractl, file, mapping, index);
-       page_cache_async_ra(&ractl, ra, page, req_count);
+       DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+       page_cache_async_ra(&ractl, page, req_count);
 }
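Both inline wrappers keep their old signatures, so existing call sites compile unchanged; the ra pointer now simply rides inside the ractl. A typical caller in a buffered-read path is untouched by this patch, e.g. (a sketch of a pre-existing call site, not part of the diff):

	/*
	 * The wrapper builds the ractl internally, so filesystems
	 * calling it need no conversion.
	 */
	page_cache_sync_readahead(mapping, &file->f_ra, file, index,
				  last_index - index);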
 
 /**
 
        struct file *file = vmf->vma->vm_file;
        struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
-       DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff);
+       DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
        struct file *fpin = NULL;
        unsigned int mmap_miss;
 
 
        if (vmf->vma->vm_flags & VM_SEQ_READ) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-               page_cache_sync_ra(&ractl, ra, ra->ra_pages);
+               page_cache_sync_ra(&ractl, ra->ra_pages);
                return fpin;
        }
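Here ra is still read from file->f_ra, but only to seed the ractl and to drive the miss heuristics later in the function; the readahead call itself no longer takes it. The asynchronous half of the fault path is presumably converted the same way (that hunk is not shown); roughly:

	/*
	 * Sketch: do_async_mmap_readahead() under the new API.  Only
	 * the marker page and the request size are passed explicitly;
	 * the ractl carries ra.  The fpin/mmap_miss handling of the
	 * real function is elided.
	 */
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);

	if (PageReadahead(page))
		page_cache_async_ra(&ractl, page, ra->ra_pages);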
 
 void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
                unsigned long lookahead_size);
-void force_page_cache_ra(struct readahead_control *, struct file_ra_state *,
-               unsigned long nr);
+void force_page_cache_ra(struct readahead_control *, unsigned long nr);
 static inline void force_page_cache_readahead(struct address_space *mapping,
                struct file *file, pgoff_t index, unsigned long nr_to_read)
 {
-       DEFINE_READAHEAD(ractl, file, mapping, index);
-       force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
+       DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
+       force_page_cache_ra(&ractl, nr_to_read);
 }
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 
  * memory at once.
  */
 void force_page_cache_ra(struct readahead_control *ractl,
-               struct file_ra_state *ra, unsigned long nr_to_read)
+               unsigned long nr_to_read)
 {
        struct address_space *mapping = ractl->mapping;
+       struct file_ra_state *ra = ractl->ra;
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages, index;
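Unlike page_cache_ra_unbounded(), force_page_cache_ra() does use the new field: just past the visible context the request is clamped to the larger of the device and per-file windows, roughly (based on the unchanged remainder of the function):

	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);

so callers must populate ractl->ra, as force_page_cache_readahead() above does with &file->f_ra.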
 
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct readahead_control *ractl,
-               struct file_ra_state *ra, bool hit_readahead_marker,
-               unsigned long req_size)
+               bool hit_readahead_marker, unsigned long req_size)
 {
        struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
+       struct file_ra_state *ra = ractl->ra;
        unsigned long max_pages = ra->ra_pages;
        unsigned long add_pages;
        unsigned long index = readahead_index(ractl);
 }
 
 void page_cache_sync_ra(struct readahead_control *ractl,
-               struct file_ra_state *ra, unsigned long req_count)
+               unsigned long req_count)
 {
        bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
         * read-ahead will do the right thing and limit the read to just the
         * requested range, which we'll set to 1 page for this case.
         */
-       if (!ra->ra_pages || blk_cgroup_congested()) {
+       if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
                if (!ractl->file)
                        return;
                req_count = 1;
 
        /* be dumb */
        if (do_forced_ra) {
-               force_page_cache_ra(ractl, ra, req_count);
+               force_page_cache_ra(ractl, req_count);
                return;
        }
 
        /* do read-ahead */
-       ondemand_readahead(ractl, ra, false, req_count);
+       ondemand_readahead(ractl, false, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
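page_cache_sync_ra() reads ractl->ra->ra_pages on entry, so it must only be handed a ractl whose ra field is set. A defensive sketch for a caller that may lack per-file state (hypothetical; the in-tree callers all know statically which case applies):

	if (ractl.ra) {
		page_cache_sync_ra(&ractl, req_count);
	} else {
		/* The unbounded API ignores ractl->ra entirely. */
		page_cache_ra_unbounded(&ractl, req_count, 0);
	}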
 
 void page_cache_async_ra(struct readahead_control *ractl,
-               struct file_ra_state *ra, struct page *page,
-               unsigned long req_count)
+               struct page *page, unsigned long req_count)
 {
        /* no read-ahead */
-       if (!ra->ra_pages)
+       if (!ractl->ra->ra_pages)
                return;
 
        /*
                return;
 
        /* do read-ahead */
-       ondemand_readahead(ractl, ra, true, req_count);
+       ondemand_readahead(ractl, true, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_ra);
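As on the sync side, page_cache_async_ra() dereferences ractl->ra immediately, so it too requires a populated ra field. In a read loop, the async entry point fires when the reader hits the marker page left by a previous readahead; a minimal sketch under the new signatures (index, req_count and the surrounding loop are assumed context):

	DEFINE_READAHEAD(ractl, file, &file->f_ra, file->f_mapping, index);

	if (!page)
		page_cache_sync_ra(&ractl, req_count);		/* cache miss */
	else if (PageReadahead(page))
		page_cache_async_ra(&ractl, page, req_count);	/* hit marker */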