mm/filemap: fix storing to a THP shadow entry
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Mon, 7 Jun 2021 20:08:45 +0000 (21:08 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Thu, 10 Jun 2021 11:37:15 +0000 (13:37 +0200)
commit 198b62f83eef1d605d70eca32759c92cdcc14175 upstream

When a THP is removed from the page cache by reclaim, we replace it with a
shadow entry that occupies all slots of the XArray previously occupied by
the THP.  If the user then accesses that page again, we only allocate a
single page, but storing it into the shadow entry replaces all entries
with that one page.  That leads to bugs like

page dumped because: VM_BUG_ON_PAGE(page_to_pgoff(page) != offset)
------------[ cut here ]------------
kernel BUG at mm/filemap.c:2529!

https://bugzilla.kernel.org/show_bug.cgi?id=206569
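
A minimal sketch (illustration only, not part of the patch) of the XArray
semantics at play, using the kernel's xarray API: reclaim leaves one
multi-index shadow entry covering all 512 slots of an order-9 THP, so a
plain store at any covered index replaces the entry for the whole range.

#include <linux/xarray.h>

static void shadow_clobber_sketch(struct xarray *xa, struct page *page)
{
	/* One entry covering indices 0-511, as reclaim leaves behind. */
	XA_STATE_ORDER(xas, xa, 0, 9);
	void *shadow = xa_mk_value(0);	/* stand-in for a workingset shadow */

	xas_lock_irq(&xas);
	xas_store(&xas, shadow);
	xas_unlock_irq(&xas);

	/*
	 * Pre-fix behaviour: storing an order-0 page at (say) index 5
	 * makes that page the entry for all 512 indices, so a later
	 * lookup at index 7 returns a page whose page_to_pgoff() is 5,
	 * tripping the VM_BUG_ON_PAGE() above.
	 */
	xa_store(xa, 5, page, GFP_KERNEL);
}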

This is hard to reproduce with mainline, but happens regularly with the
THP patchset (as so many more THPs are created).  This solution is taken
from the THP patchset.  It splits the shadow entry into order-0 pieces at
the time that we bring a new page into cache.
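
For reference, a condensed sketch (again not the patch itself) of the
two-phase split API the hunk below relies on: xas_split_alloc() allocates
the nodes the split needs and may sleep, so it runs before taking the
lock; xas_split() only wires up those pre-allocated nodes, so it cannot
fail under the irq-disabled xa_lock.  The real code below also re-checks
the order under the lock, since the entry may have been split while the
lock was dropped.

#include <linux/xarray.h>

/* Split one order-N shadow entry into 2^N order-0 pieces. */
static void split_shadow_sketch(struct xarray *xa, unsigned long index,
				gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	unsigned int order = xa_get_order(xa, index);
	void *shadow = xa_load(xa, index);

	xas_split_alloc(&xas, shadow, order, gfp);	/* may sleep */
	xas_lock_irq(&xas);
	xas_split(&xas, shadow, order);	/* uses the preallocated nodes */
	xas_unlock_irq(&xas);
	/* Error handling (xas_nomem() retry loop) elided for brevity. */
}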

Fixes: 99cb0dbd47a1 ("mm,thp: add read-only THP support for (non-shmem) FS")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Qian Cai <cai@lca.pw>
Link: https://lkml.kernel.org/r/20200903183029.14930-4-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/filemap.c

index db542b4948838b7e46d4a262d105be386e83428a..c10e237cc2c6eccdb0ae34ca2c35288b33b6d3b7 100644
@@ -856,7 +856,6 @@ noinline int __add_to_page_cache_locked(struct page *page,
        int huge = PageHuge(page);
        struct mem_cgroup *memcg;
        int error;
-       void *old;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -872,21 +871,41 @@ noinline int __add_to_page_cache_locked(struct page *page,
        get_page(page);
        page->mapping = mapping;
        page->index = offset;
+       gfp_mask &= GFP_RECLAIM_MASK;
 
        do {
+               unsigned int order = xa_get_order(xas.xa, xas.xa_index);
+               void *entry, *old = NULL;
+
+               if (order > thp_order(page))
+                       xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+                                       order, gfp_mask);
                xas_lock_irq(&xas);
-               old = xas_load(&xas);
-               if (old && !xa_is_value(old))
-                       xas_set_err(&xas, -EEXIST);
+               xas_for_each_conflict(&xas, entry) {
+                       old = entry;
+                       if (!xa_is_value(entry)) {
+                               xas_set_err(&xas, -EEXIST);
+                               goto unlock;
+                       }
+               }
+
+               if (old) {
+                       if (shadowp)
+                               *shadowp = old;
+                       /* entry may have been split before we acquired lock */
+                       order = xa_get_order(xas.xa, xas.xa_index);
+                       if (order > thp_order(page)) {
+                               xas_split(&xas, old, order);
+                               xas_reset(&xas);
+                       }
+               }
+
                xas_store(&xas, page);
                if (xas_error(&xas))
                        goto unlock;
 
-               if (xa_is_value(old)) {
+               if (old)
                        mapping->nrexceptional--;
-                       if (shadowp)
-                               *shadowp = old;
-               }
                mapping->nrpages++;
 
                /* hugetlb pages do not participate in page cache accounting */
@@ -894,7 +913,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
                        __inc_node_page_state(page, NR_FILE_PAGES);
 unlock:
                xas_unlock_irq(&xas);
-       } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+       } while (xas_nomem(&xas, gfp_mask));
 
        if (xas_error(&xas))
                goto error;