return block_read_full_page(page, blkdev_get_block);
 }
 
-static int blkdev_readpages(struct file *file, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages)
+static void blkdev_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
+       mpage_readahead(rac, blkdev_get_block);
 }
 
 static int blkdev_write_begin(struct file *file, struct address_space *mapping,
 
 static const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
-       .readpages      = blkdev_readpages,
+       .readahead      = blkdev_readahead,
        .writepage      = blkdev_writepage,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
 
        return mpage_readpage(page, exfat_get_block);
 }
 
-static int exfat_readpages(struct file *file, struct address_space *mapping,
-               struct list_head *pages, unsigned int nr_pages)
+static void exfat_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
+       mpage_readahead(rac, exfat_get_block);
 }
 
 static int exfat_writepage(struct page *page, struct writeback_control *wbc)
 
 static const struct address_space_operations exfat_aops = {
        .readpage       = exfat_readpage,
-       .readpages      = exfat_readpages,
+       .readahead      = exfat_readahead,
        .writepage      = exfat_writepage,
        .writepages     = exfat_writepages,
        .write_begin    = exfat_write_begin,
 
        return mpage_readpage(page, ext2_get_block);
 }
 
-static int
-ext2_readpages(struct file *file, struct address_space *mapping,
-               struct list_head *pages, unsigned nr_pages)
+static void ext2_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
+       mpage_readahead(rac, ext2_get_block);
 }
 
 static int
 
 const struct address_space_operations ext2_aops = {
        .readpage               = ext2_readpage,
-       .readpages              = ext2_readpages,
+       .readahead              = ext2_readahead,
        .writepage              = ext2_writepage,
        .write_begin            = ext2_write_begin,
        .write_end              = ext2_write_end,
 
 const struct address_space_operations ext2_nobh_aops = {
        .readpage               = ext2_readpage,
-       .readpages              = ext2_readpages,
+       .readahead              = ext2_readahead,
        .writepage              = ext2_nobh_writepage,
        .write_begin            = ext2_nobh_write_begin,
        .write_end              = nobh_write_end,
 
        return mpage_readpage(page, fat_get_block);
 }
 
-static int fat_readpages(struct file *file, struct address_space *mapping,
-                        struct list_head *pages, unsigned nr_pages)
+static void fat_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, fat_get_block);
+       mpage_readahead(rac, fat_get_block);
 }
 
 static void fat_write_failed(struct address_space *mapping, loff_t to)
 
 static const struct address_space_operations fat_aops = {
        .readpage       = fat_readpage,
-       .readpages      = fat_readpages,
+       .readahead      = fat_readahead,
        .writepage      = fat_writepage,
        .writepages     = fat_writepages,
        .write_begin    = fat_write_begin,
 
 }
 
 /**
- * gfs2_readpages - Read a bunch of pages at once
- * @file: The file to read from
- * @mapping: Address space info
- * @pages: List of pages to read
+ * gfs2_readahead - Read a bunch of pages at once
+ * @rac: Read-ahead control structure
  *    obviously not something we'd want to do on too regular a basis.
  *    Any I/O we ignore at this time will be done via readpage later.
  * 2. We don't handle stuffed files here we let readpage do the honours.
- * 3. mpage_readpages() does most of the heavy lifting in the common case.
+ * 3. mpage_readahead() does most of the heavy lifting in the common case.
  * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
  */
 
-static int gfs2_readpages(struct file *file, struct address_space *mapping,
-                         struct list_head *pages, unsigned nr_pages)
+static void gfs2_readahead(struct readahead_control *rac)
 {
-       struct inode *inode = mapping->host;
+       struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
-       int ret;
 
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-       ret = gfs2_glock_nq(&gh);
-       if (unlikely(ret))
+       if (gfs2_glock_nq(&gh))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
-               ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
+               mpage_readahead(rac, gfs2_block_map);
        gfs2_glock_dq(&gh);
 out_uninit:
        gfs2_holder_uninit(&gh);
-       if (unlikely(gfs2_withdrawn(sdp)))
-               ret = -EIO;
-       return ret;
 }
 
 /**
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
-       .readpages = gfs2_readpages,
+       .readahead = gfs2_readahead,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
-       .readpages = gfs2_readpages,
+       .readahead = gfs2_readahead,
        .set_page_dirty = jdata_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
 
        return block_write_full_page(page, hpfs_get_block, wbc);
 }
 
-static int hpfs_readpages(struct file *file, struct address_space *mapping,
-                         struct list_head *pages, unsigned nr_pages)
+static void hpfs_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block);
+       mpage_readahead(rac, hpfs_get_block);
 }
 
 static int hpfs_writepages(struct address_space *mapping,
 const struct address_space_operations hpfs_aops = {
        .readpage = hpfs_readpage,
        .writepage = hpfs_writepage,
-       .readpages = hpfs_readpages,
+       .readahead = hpfs_readahead,
        .writepages = hpfs_writepages,
        .write_begin = hpfs_write_begin,
        .write_end = hpfs_write_end,
 
        }
 
        /*
-        * Just like mpage_readpages and block_read_full_page we always
+        * Just like mpage_readahead and block_read_full_page we always
         * return 0 and just mark the page as PageError on errors.  This
         * should be cleaned up all through the stack eventually.
         */
 
        return mpage_readpage(page, isofs_get_block);
 }
 
-static int isofs_readpages(struct file *file, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages)
+static void isofs_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, isofs_get_block);
+       mpage_readahead(rac, isofs_get_block);
 }
 
 static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
 
 static const struct address_space_operations isofs_aops = {
        .readpage = isofs_readpage,
-       .readpages = isofs_readpages,
+       .readahead = isofs_readahead,
        .bmap = _isofs_bmap
 };
 
 
        return mpage_readpage(page, jfs_get_block);
 }
 
-static int jfs_readpages(struct file *file, struct address_space *mapping,
-               struct list_head *pages, unsigned nr_pages)
+static void jfs_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, jfs_get_block);
+       mpage_readahead(rac, jfs_get_block);
 }
 
 static void jfs_write_failed(struct address_space *mapping, loff_t to)
 
 const struct address_space_operations jfs_aops = {
        .readpage       = jfs_readpage,
-       .readpages      = jfs_readpages,
+       .readahead      = jfs_readahead,
        .writepage      = jfs_writepage,
        .writepages     = jfs_writepages,
        .write_begin    = jfs_write_begin,
 
 }
 
 /*
- * support function for mpage_readpages.  The fs supplied get_block might
+ * support function for mpage_readahead.  The fs supplied get_block might
  * return an up to date buffer.  This is used to map that buffer into
  * the page, which allows readpage to avoid triggering a duplicate call
  * to get_block.
 }
 
 /**
- * mpage_readpages - populate an address space with some pages & start reads against them
- * @mapping: the address_space
- * @pages: The address of a list_head which contains the target pages.  These
- *   pages have their ->index populated and are otherwise uninitialised.
- *   The page at @pages->prev has the lowest file offset, and reads should be
- *   issued in @pages->prev to @pages->next order.
- * @nr_pages: The number of pages at *@pages
+ * mpage_readahead - start reads against pages
+ * @rac: Describes which pages to read.
  * @get_block: The filesystem's block mapper function.
  *
  * This function walks the pages and the blocks within each page, building and
  *
  * This all causes the disk requests to be issued in the correct order.
  */
-int
-mpage_readpages(struct address_space *mapping, struct list_head *pages,
-                               unsigned nr_pages, get_block_t get_block)
+void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
 {
+       struct page *page;
        struct mpage_readpage_args args = {
                .get_block = get_block,
                .is_readahead = true,
        };
-       unsigned page_idx;
-
-       for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-               struct page *page = lru_to_page(pages);
 
+       while ((page = readahead_page(rac))) {
                prefetchw(&page->flags);
-               list_del(&page->lru);
-               if (!add_to_page_cache_lru(page, mapping,
-                                       page->index,
-                                       readahead_gfp_mask(mapping))) {
-                       args.page = page;
-                       args.nr_pages = nr_pages - page_idx;
-                       args.bio = do_mpage_readpage(&args);
-               }
+               args.page = page;
+               args.nr_pages = readahead_count(rac);
+               args.bio = do_mpage_readpage(&args);
                put_page(page);
        }
-       BUG_ON(!list_empty(pages));
        if (args.bio)
                mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
-       return 0;
 }
-EXPORT_SYMBOL(mpage_readpages);
+EXPORT_SYMBOL(mpage_readahead);
 
 /*
  * This isn't called much at all
                 * Page has buffers, but they are all unmapped. The page was
                 * created by pagein or read over a hole which was handled by
                 * block_read_full_page().  If this address_space is also
-                * using mpage_readpages then this can rarely happen.
+                * using mpage_readahead then this can rarely happen.
                 */
                goto confused;
        }
 
        return mpage_readpage(page, nilfs_get_block);
 }
 
-/**
- * nilfs_readpages() - implement readpages() method of nilfs_aops {}
- * address_space_operations.
- * @file - file struct of the file to be read
- * @mapping - address_space struct used for reading multiple pages
- * @pages - the pages to be read
- * @nr_pages - number of pages to be read
- */
-static int nilfs_readpages(struct file *file, struct address_space *mapping,
-                          struct list_head *pages, unsigned int nr_pages)
+static void nilfs_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
+       mpage_readahead(rac, nilfs_get_block);
 }
 
 static int nilfs_writepages(struct address_space *mapping,
        .readpage               = nilfs_readpage,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
-       .readpages              = nilfs_readpages,
+       .readahead              = nilfs_readahead,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
 
  * grow out to a tree. If need be, detecting boundary extents could
  * trivially be added in a future version of ocfs2_get_block().
  */
-static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
-                          struct list_head *pages, unsigned nr_pages)
+static void ocfs2_readahead(struct readahead_control *rac)
 {
-       int ret, err = -EIO;
-       struct inode *inode = mapping->host;
+       int ret;
+       struct inode *inode = rac->mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
-       loff_t start;
-       struct page *last;
 
        /*
         * Use the nonblocking flag for the dlm code to avoid page
         */
        ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
        if (ret)
-               return err;
+               return;
 
-       if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
-               ocfs2_inode_unlock(inode, 0);
-               return err;
-       }
+       if (down_read_trylock(&oi->ip_alloc_sem) == 0)
+               goto out_unlock;
 
        /*
         * Don't bother with inline-data. There isn't anything
         * to read-ahead in that case anyway...
         */
        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
-               goto out_unlock;
+               goto out_up;
 
        /*
         * Check whether a remote node truncated this file - we just
         * drop out in that case as it's not worth handling here.
         */
-       last = lru_to_page(pages);
-       start = (loff_t)last->index << PAGE_SHIFT;
-       if (start >= i_size_read(inode))
-               goto out_unlock;
+       if (readahead_pos(rac) >= i_size_read(inode))
+               goto out_up;
 
-       err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);
+       mpage_readahead(rac, ocfs2_get_block);
 
-out_unlock:
+out_up:
        up_read(&oi->ip_alloc_sem);
+out_unlock:
        ocfs2_inode_unlock(inode, 0);
-
-       return err;
 }
 
 /* Note: Because we don't support holes, our allocation has
 
 const struct address_space_operations ocfs2_aops = {
        .readpage               = ocfs2_readpage,
-       .readpages              = ocfs2_readpages,
+       .readahead              = ocfs2_readahead,
        .writepage              = ocfs2_writepage,
        .write_begin            = ocfs2_write_begin,
        .write_end              = ocfs2_write_end,
 
        return block_read_full_page(page, omfs_get_block);
 }
 
-static int omfs_readpages(struct file *file, struct address_space *mapping,
-               struct list_head *pages, unsigned nr_pages)
+static void omfs_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, omfs_get_block);
+       mpage_readahead(rac, omfs_get_block);
 }
 
 static int omfs_writepage(struct page *page, struct writeback_control *wbc)
 
 const struct address_space_operations omfs_aops = {
        .readpage = omfs_readpage,
-       .readpages = omfs_readpages,
+       .readahead = omfs_readahead,
        .writepage = omfs_writepage,
        .writepages = omfs_writepages,
        .write_begin = omfs_write_begin,
 
        return mpage_readpage(page, qnx6_get_block);
 }
 
-static int qnx6_readpages(struct file *file, struct address_space *mapping,
-                  struct list_head *pages, unsigned nr_pages)
+static void qnx6_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block);
+       mpage_readahead(rac, qnx6_get_block);
 }
 
 /*
 }
 static const struct address_space_operations qnx6_aops = {
        .readpage       = qnx6_readpage,
-       .readpages      = qnx6_readpages,
+       .readahead      = qnx6_readahead,
        .bmap           = qnx6_bmap
 };
 
 
        return retval;
 }
 
-static int
-reiserfs_readpages(struct file *file, struct address_space *mapping,
-                  struct list_head *pages, unsigned nr_pages)
+static void reiserfs_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
+       mpage_readahead(rac, reiserfs_get_block);
 }
 
 /*
 const struct address_space_operations reiserfs_address_space_operations = {
        .writepage = reiserfs_writepage,
        .readpage = reiserfs_readpage,
-       .readpages = reiserfs_readpages,
+       .readahead = reiserfs_readahead,
        .releasepage = reiserfs_releasepage,
        .invalidatepage = reiserfs_invalidatepage,
        .write_begin = reiserfs_write_begin,
 
        return mpage_readpage(page, udf_get_block);
 }
 
-static int udf_readpages(struct file *file, struct address_space *mapping,
-                       struct list_head *pages, unsigned nr_pages)
+static void udf_readahead(struct readahead_control *rac)
 {
-       return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
+       mpage_readahead(rac, udf_get_block);
 }
 
 static int udf_write_begin(struct file *file, struct address_space *mapping,
 
 const struct address_space_operations udf_aops = {
        .readpage       = udf_readpage,
-       .readpages      = udf_readpages,
+       .readahead      = udf_readahead,
        .writepage      = udf_writepage,
        .writepages     = udf_writepages,
        .write_begin    = udf_write_begin,
 
 #ifdef CONFIG_BLOCK
 
 struct writeback_control;
+struct readahead_control;
 
-int mpage_readpages(struct address_space *mapping, struct list_head *pages,
-                               unsigned nr_pages, get_block_t get_block);
+void mpage_readahead(struct readahead_control *, get_block_t get_block);
 int mpage_readpage(struct page *page, get_block_t get_block);
 int mpage_writepages(struct address_space *mapping,
                struct writeback_control *wbc, get_block_t get_block);
 
                 * to the LRU. Later, when the IO completes the pages are
                 * marked uptodate and unlocked. However, the queueing
                 * could be merging multiple pages for one bio (e.g.
-                * mpage_readpages). If an allocation happens for the
+                * mpage_readahead). If an allocation happens for the
                 * second or third page, the process can end up locking
                 * the same page twice and deadlocking. Rather than
                 * trying to be clever about what pages can be locked,