mm: thp: replace DEBUG_VM BUG with VM_WARN when unmap fails for split
author    Yang Shi <shy828301@gmail.com>
          Wed, 2 Jun 2021 03:52:58 +0000 (13:52 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 2 Jun 2021 03:52:58 +0000 (13:52 +1000)
When debugging the bug reported by Wang Yugui [1], try_to_unmap() may
fail, but the first VM_BUG_ON_PAGE() only checks page_mapcount(), so it
may miss the failure when the head page is unmapped but another subpage
is still mapped.  The second DEBUG_VM BUG(), which checks the total
mapcount, would then catch it.  This may cause some confusion.  As this
is not a fatal issue, consolidate the two DEBUG_VM checks into a single
VM_WARN_ON_ONCE_PAGE().

[1] https://lore.kernel.org/linux-mm/20210412180659.B9E3.409509F4@e16-tech.com/

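For reference, a rough sketch of why the single page_mapped() check covers
both of the old checks: on a compound page it looks at the compound
mapcount and then at every subpage's _mapcount, so it still fires when the
head is unmapped but a tail page remains mapped (simplified from the
mm/util.c implementation around this kernel version; not verbatim):

	/* Simplified: true if any part of the (possibly compound) page is mapped. */
	bool page_mapped(struct page *page)
	{
		int i;

		if (likely(!PageCompound(page)))
			return atomic_read(&page->_mapcount) >= 0;
		page = compound_head(page);
		/* Mapped as a whole via a PMD mapping? */
		if (atomic_read(compound_mapcount_ptr(page)) >= 0)
			return true;
		if (PageHuge(page))
			return false;
		/* Otherwise check each subpage individually. */
		for (i = 0; i < compound_nr(page); i++)
			if (atomic_read(&page[i]._mapcount) >= 0)
				return true;
		return false;
	}
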
Link: https://lkml.kernel.org/r/20210526201239.3351-1-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/huge_memory.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 29ce3ef56c048f9f091acbf5263d1dee1928ce12..161c741bbbfb06fc778910c68d19ea17219cb9f3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2298,21 +2298,20 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 static void unmap_page(struct page *page)
 {
        enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
-       bool unmap_success;
 
        VM_BUG_ON_PAGE(!PageHead(page), page);
 
-       if (PageAnon(page))
+       if (PageAnon(page)) {
                try_to_migrate(page, ttu_flags);
-       else
+       } else {
                /*
                 * Don't install migration entries for file backed pages. This
                 * helps handle cases when i_size is in the middle of the page
                 * as there is no need to unmap pages beyond i_size manually.
                 */
-               unmap_success = try_to_unmap(page, ttu_flags |
-                                               TTU_IGNORE_MLOCK);
-       VM_BUG_ON_PAGE(!unmap_success, page);
+               try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK);
+       }
+       VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }
 
 static void remap_page(struct page *page, unsigned int nr)
@@ -2623,7 +2622,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        struct deferred_split *ds_queue = get_deferred_split_queue(head);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
-       int count, mapcount, extra_pins, ret;
+       int extra_pins, ret;
        pgoff_t end;
 
        VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
@@ -2682,7 +2681,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        unmap_page(head);
-       VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
        /* block interrupt reentry in xa_lock and spinlock */
        local_irq_disable();
@@ -2700,9 +2698,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
        /* Prevent deferred_split_scan() touching ->_refcount */
        spin_lock(&ds_queue->split_queue_lock);
-       count = page_count(head);
-       mapcount = total_mapcount(head);
-       if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+       if (page_ref_freeze(head, 1 + extra_pins)) {
                if (!list_empty(page_deferred_list(head))) {
                        ds_queue->split_queue_len--;
                        list_del(page_deferred_list(head));
@@ -2722,16 +2718,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                __split_huge_page(page, list, end);
                ret = 0;
        } else {
-               if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-                       pr_alert("total_mapcount: %u, page_count(): %u\n",
-                                       mapcount, count);
-                       if (PageTail(page))
-                               dump_page(head, NULL);
-                       dump_page(page, "total_mapcount(head) > 0");
-                       BUG();
-               }
                spin_unlock(&ds_queue->split_queue_lock);
-fail:          if (mapping)
+fail:
+               if (mapping)
                        xa_unlock(&mapping->i_pages);
                local_irq_enable();
                remap_page(head, thp_nr_pages(head));