mm: rmap: make try_to_unmap() void function
author	Yang Shi <shy828301@gmail.com>
	Wed, 2 Jun 2021 03:53:01 +0000 (13:53 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 2 Jun 2021 03:53:01 +0000 (13:53 +1000)
Currently try_to_unmap() returns a bool value computed from page_mapcount();
however, this may produce a false positive, since page_mapcount() doesn't
check all subpages of a compound page.  total_mapcount() could be used
instead, but it costs more since it traverses all subpages.
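
For illustration (a hypothetical sketch, not code from this patch):
suppose a THP whose PMD mapping has been removed still has one tail
page mapped by a PTE.  Then, per the reasoning above:

	page_mapcount(head);	/* can read 0 -> old code claimed success */
	total_mapcount(head);	/* reads 1, but traverses every subpage */
	page_mapped(head);	/* true, stops at the first mapped subpage */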

In fact, most callers of try_to_unmap() don't care about the return
value at all, so callers just need to check whether the page is still
mapped via page_mapped() when necessary.  And page_mapped() does bail
out early as soon as it finds a mapped subpage.
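
A minimal sketch of the resulting calling convention (mirroring the
mm/vmscan.c hunk below; as before, the caller must hold the page lock):

	try_to_unmap(page, flags);	/* void now, no return value */
	if (page_mapped(page)) {
		/* unmap failed: take the failure path */
	}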

Link: https://lkml.kernel.org/r/20210526201239.3351-2-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Suggested-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
include/linux/rmap.h
mm/memory-failure.c
mm/rmap.c
mm/vmscan.c

index 3a1ce4ef9276ad742d14b8bd9c801fc74865e1c7..55a75cbb0f6cc2ebd571dffe09f97b2e72d51cb3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -191,7 +191,7 @@ int page_referenced(struct page *, int is_locked,
                        struct mem_cgroup *memcg, unsigned long *vm_flags);
 
 bool try_to_migrate(struct page *page, enum ttu_flags flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct page *, enum ttu_flags flags);
 
 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, struct page **pages,
index b88183a5107a195aad99ea290ebd998b3664e1e8..a55409c1b5e741ab17cb62c43815938f137cd5f0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1206,7 +1206,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
        enum ttu_flags ttu = TTU_IGNORE_MLOCK;
        struct address_space *mapping;
        LIST_HEAD(tokill);
-       bool unmap_success = true;
+       bool unmap_success;
        int kill = 1, forcekill;
        struct page *hpage = *hpagep;
        bool mlocked = PageMlocked(hpage);
@@ -1269,7 +1269,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
        if (!PageHuge(hpage)) {
-               unmap_success = try_to_unmap(hpage, ttu);
+               try_to_unmap(hpage, ttu);
        } else {
                if (!PageAnon(hpage)) {
                        /*
@@ -1281,17 +1281,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                         */
                        mapping = hugetlb_page_mapping_lock_write(hpage);
                        if (mapping) {
-                               unmap_success = try_to_unmap(hpage,
-                                                    ttu|TTU_RMAP_LOCKED);
+                               try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
                                i_mmap_unlock_write(mapping);
-                       } else {
+                       } else
                                pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
-                               unmap_success = false;
-                       }
                } else {
-                       unmap_success = try_to_unmap(hpage, ttu);
+                       try_to_unmap(hpage, ttu);
                }
        }
+
+       unmap_success = !page_mapped(hpage);
        if (!unmap_success)
                pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
                       pfn, page_mapcount(hpage));
index fe062f63ef4d8c7559ca1d5c44cc4c8690692089..79393a9804f431cddf078c5cc7eb5b124ee8533e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1658,9 +1658,10 @@ static int page_not_mapped(struct page *page)
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path.  Caller must hold the page lock.
  *
- * If unmap is successful, return true. Otherwise, false.
+ * It is the callers' responsibility to check if the page is still
+ * mapped when needed.
  */
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct page *page, enum ttu_flags flags)
 {
        struct rmap_walk_control rwc = {
                .rmap_one = try_to_unmap_one,
@@ -1946,8 +1947,6 @@ bool try_to_migrate(struct page *page, enum ttu_flags flags)
                rmap_walk_locked(page, &rwc);
        else
                rmap_walk(page, &rwc);
-
-       return !page_mapcount(page) ? true : false;
 }
 
 /*
index d7c3cb8688dd4868f68fe6d713b4956b505f3c37..abd610a526983d972868f6940e5ac9e9c10fa863 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        if (unlikely(PageTransHuge(page)))
                                flags |= TTU_SPLIT_HUGE_PMD;
 
-                       if (!try_to_unmap(page, flags)) {
+                       try_to_unmap(page, flags);
+                       if (page_mapped(page)) {
                                stat->nr_unmap_fail += nr_pages;
                                if (!was_swapbacked && PageSwapBacked(page))
                                        stat->nr_lazyfree_fail += nr_pages;