www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: add an 'end' parameter to find_get_entries
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 31 Dec 2020 22:04:19 +0000 (22:04 +0000)
committer: Johannes Weiner <hannes@cmpxchg.org>
Thu, 31 Dec 2020 22:04:19 +0000 (22:04 +0000)
This simplifies the callers and leads to a more efficient implementation
since the XArray has this functionality already.

Link: https://lkml.kernel.org/r/20201112212641.27837-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/pagemap.h
mm/filemap.c
mm/shmem.c
mm/swap.c

index 644571c28c867d121033fce4163d73a128c0a0e9..f837eed042e315d0b20605279766517576499220 100644 (file)
@@ -451,8 +451,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
 }
 
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-                         unsigned int nr_entries, struct page **entries,
-                         pgoff_t *indices);
+               pgoff_t end, unsigned int nr_entries, struct page **entries,
+               pgoff_t *indices);
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
index 682a0d3f0a78731917ee9cddaa707df1223afd39..045c6b169bc27fa794ef0e2f4a888bcfdd609f95 100644 (file)
@@ -1878,6 +1878,7 @@ reset:
  * find_get_entries - gang pagecache lookup
  * @mapping:   The address_space to search
  * @start:     The starting page cache index
+ * @end:       The final page index (inclusive).
  * @nr_entries:        The maximum number of entries
  * @entries:   Where the resulting entries are placed
  * @indices:   The cache indices corresponding to the entries in @entries
@@ -1901,9 +1902,9 @@ reset:
  *
  * Return: the number of pages and shadow entries which were found.
  */
-unsigned find_get_entries(struct address_space *mapping,
-                         pgoff_t start, unsigned int nr_entries,
-                         struct page **entries, pgoff_t *indices)
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+               pgoff_t end, unsigned int nr_entries, struct page **entries,
+               pgoff_t *indices)
 {
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
@@ -1913,7 +1914,7 @@ unsigned find_get_entries(struct address_space *mapping,
                return 0;
 
        rcu_read_lock();
-       while ((page = find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) {
+       while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
                /*
                 * Terminate early on finding a THP, to allow the caller to
                 * handle it all at once; but continue if this is hugetlbfs.
index 0961c0bf162a02eb405803b7957b6a2e91afa06e..ce36ffb1837d29965aa53614497450bbbab1cfb6 100644 (file)
@@ -913,8 +913,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        struct page *page = pvec.pages[i];
 
                        index = indices[i];
-                       if (index >= end)
-                               break;
 
                        if (xa_is_value(page)) {
                                if (unfalloc)
@@ -967,9 +965,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
        while (index < end) {
                cond_resched();
 
-               pvec.nr = find_get_entries(mapping, index,
-                               min(end - index, (pgoff_t)PAGEVEC_SIZE),
-                               pvec.pages, indices);
+               pvec.nr = find_get_entries(mapping, index, end - 1,
+                               PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
@@ -982,9 +979,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        struct page *page = pvec.pages[i];
 
                        index = indices[i];
-                       if (index >= end)
-                               break;
-
                        if (xa_is_value(page)) {
                                if (unfalloc)
                                        continue;
index 2cca7141470c1368a26953078e58363c65a85593..bcb4360ff883f5486dfad9e7925c09b4986dd30c 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1060,7 +1060,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
                                pgoff_t start, unsigned nr_entries,
                                pgoff_t *indices)
 {
-       pvec->nr = find_get_entries(mapping, start, nr_entries,
+       pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
                                    pvec->pages, indices);
        return pagevec_count(pvec);
 }