mm/filemap: Fix storing to a THP shadow entry
author Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 23 Aug 2020 20:24:20 +0000 (16:24 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 24 Aug 2020 01:58:13 +0000 (21:58 -0400)
When a THP is removed from the page cache by reclaim, we replace it with
a shadow entry that occupies all slots of the XArray previously occupied
by the THP.  If the user then accesses that page again, we only allocate
a single page, but storing it into the shadow entry replaces all entries
with that one page.  That leads to bugs like

page dumped because: VM_BUG_ON_PAGE(page_to_pgoff(page) != offset)
------------[ cut here ]------------
kernel BUG at mm/filemap.c:2529!

https://bugzilla.kernel.org/show_bug.cgi?id=206569
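
To illustrate, here is a sketch (not part of this patch) of the state that
triggers the bug; the order-9 size and the 'shadow' value are stand-ins, as
real shadow values come from the workingset code:

	XA_STATE(xas, &mapping->i_pages, offset);

	/* On reclaim, one value entry covers every slot the THP used. */
	xas_lock_irq(&xas);
	xas_set_order(&xas, offset, 9);		/* order-9 THP: 512 slots */
	xas_store(&xas, xa_mk_value(shadow));	/* 'shadow' is a stand-in */
	xas_unlock_irq(&xas);

	/*
	 * A later fault allocates a single order-0 page; before this
	 * patch, storing it replaced the one multi-index entry, so all
	 * 512 offsets appeared to cache that single page.
	 */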

This is hard to reproduce with mainline, but happens regularly with the
THP patchset (as so many more THPs are created).  This solution is taken
from the THP patchset.  It splits the shadow entry into order-0 pieces
at the time that we bring a new page into cache.
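
In XArray terms, the sequence below sketches the fix (identifiers as in the
hunk that follows; 'shadow' stands for the old multi-index value entry):

	/* Preallocate the nodes the split needs, before taking the lock. */
	order = xa_get_order(xas.xa, xas.xa_index);	/* e.g. 9 */
	if (order > thp_order(page))
		xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
				order, gfp);

	xas_lock_irq(&xas);
	/* Rewrite the one order-9 entry as 512 order-0 copies of shadow. */
	xas_split(&xas, shadow, order);
	xas_reset(&xas);
	/* The store now replaces exactly one slot; the rest keep shadow. */
	xas_store(&xas, page);
	xas_unlock_irq(&xas);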

Fixes: 99cb0dbd47a1 ("mm,thp: add read-only THP support for (non-shmem) FS")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/mm/filemap.c b/mm/filemap.c
index 1aaea26556cc7e4ab702af1e96262d67a0607cf0..1ef6b71d68a64cc1a10dfccde141bb3557139421 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -829,13 +829,12 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
 static int __add_to_page_cache_locked(struct page *page,
                                      struct address_space *mapping,
-                                     pgoff_t offset, gfp_t gfp_mask,
+                                     pgoff_t offset, gfp_t gfp,
                                      void **shadowp)
 {
        XA_STATE(xas, &mapping->i_pages, offset);
        int huge = PageHuge(page);
        int error;
-       void *old;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -846,25 +845,46 @@ static int __add_to_page_cache_locked(struct page *page,
        page->index = offset;
 
        if (!huge) {
-               error = mem_cgroup_charge(page, current->mm, gfp_mask);
+               error = mem_cgroup_charge(page, current->mm, gfp);
                if (error)
                        goto error;
        }
 
+       gfp &= GFP_RECLAIM_MASK;
+
        do {
+               unsigned int order = xa_get_order(xas.xa, xas.xa_index);
+               void *entry, *old = NULL;
+
+               if (order > thp_order(page))
+                       xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+                                       order, gfp);
                xas_lock_irq(&xas);
-               old = xas_load(&xas);
-               if (old && !xa_is_value(old))
-                       xas_set_err(&xas, -EEXIST);
+               xas_for_each_conflict(&xas, entry) {
+                       old = entry;
+                       if (!xa_is_value(entry)) {
+                               xas_set_err(&xas, -EEXIST);
+                               goto unlock;
+                       }
+               }
+
+               if (old) {
+                       if (shadowp)
+                               *shadowp = old;
+                       /* entry may have been split before we acquired lock */
+                       order = xa_get_order(xas.xa, xas.xa_index);
+                       if (order > thp_order(page)) {
+                               xas_split(&xas, old, order);
+                               xas_reset(&xas);
+                       }
+               }
+
                xas_store(&xas, page);
                if (xas_error(&xas))
                        goto unlock;
 
-               if (xa_is_value(old)) {
+               if (old)
                        mapping->nrexceptional--;
-                       if (shadowp)
-                               *shadowp = old;
-               }
                mapping->nrpages++;
 
                /* hugetlb pages do not participate in page cache accounting */
@@ -872,7 +892,7 @@ static int __add_to_page_cache_locked(struct page *page,
                        __inc_lruvec_page_state(page, NR_FILE_PAGES);
 unlock:
                xas_unlock_irq(&xas);
-       } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+       } while (xas_nomem(&xas, gfp));
 
        if (xas_error(&xas)) {
                error = xas_error(&xas);