mm: fix the handling of Non-LRU pages returned by follow_page()
author	Haiyue Wang <haiyue.wang@intel.com>
	Tue, 23 Aug 2022 13:58:41 +0000 (21:58 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Fri, 26 Aug 2022 05:03:27 +0000 (22:03 -0700)
The handling of Non-LRU pages returned by follow_page() jumps directly
to the next iteration without calling put_page() to drop the reference
count, even though follow_page() is called with the 'FOLL_GET' flag and
has therefore taken a reference via get_page().  Fix the zone device
page checks by handling the page reference count correctly before
returning.
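
A minimal sketch of the corrected pattern (simplified from the diffs
below, not the exact kernel code): with FOLL_GET, every early-exit path
that stops using the returned page must drop the reference that
follow_page() took.

	struct page *page;

	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))	/* no reference was taken */
		return;
	if (is_zone_device_page(page)) {
		/* drop the FOLL_GET reference before skipping the page */
		put_page(page);
		return;
	}
	/* ... use the page ... */
	put_page(page);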

And as David noted in review, "device pages are never PageKsm pages",
so drop the zone device page check from break_ksm() entirely.
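
A sketch of break_ksm()'s loop body after this change (simplified from
mm/ksm.c at this commit): a device page simply fails the PageKsm() test
and reaches the unconditional put_page(), which drops the FOLL_GET
reference, so no separate check is needed.

	page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
	if (IS_ERR_OR_NULL(page))	/* nothing to release */
		break;
	if (PageKsm(page))		/* never true for device pages */
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, NULL);
	else
		ret = VM_FAULT_WRITE;
	put_page(page);			/* always drops the FOLL_GET reference */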

Since a zone device page cannot be a transparent huge page, also drop
the redundant zone device page check from split_huge_pages_pid().
(suggested by Miaohe)
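
A sketch of the resulting flow in split_huge_pages_pid() (simplified;
the 'next:' cleanup label is assumed from the surrounding function):
zone device pages already fail is_transparent_hugepage() and fall
through to the common put_page() path.

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		continue;
	if (!is_transparent_hugepage(page))	/* also filters device pages */
		goto next;			/* 'next:' does put_page(page) */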

Link: https://lkml.kernel.org/r/20220823135841.934465-3-haiyue.wang@intel.com
Fixes: 3218f8712d6b ("mm: handling Non-LRU pages returned by vm_normal_pages")
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alex Sierra <alex.sierra@amd.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/ksm.c
mm/migrate.c

index 428c42bfd4c4d73c5062b3046c49dcaa6222128e..f6ba132ff0be36363980af9234de9639759f33cf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3001,7 +3001,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
                /* FOLL_DUMP to ignore special (like zero) pages */
                page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 
-               if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
+               if (IS_ERR_OR_NULL(page))
                        continue;
 
                if (!is_transparent_hugepage(page))
index a98bc3beb8741326f5f58f7a1a24f20a68eb2a71..e34cc21d5556ee0972d26807efe5859e5319a806 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -475,7 +475,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
                cond_resched();
                page = follow_page(vma, addr,
                                FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
-               if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
+               if (IS_ERR_OR_NULL(page))
                        break;
                if (PageKsm(page))
                        ret = handle_mm_fault(vma, addr,
@@ -560,12 +560,15 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
                goto out;
 
        page = follow_page(vma, addr, FOLL_GET);
-       if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
+       if (IS_ERR_OR_NULL(page))
                goto out;
+       if (is_zone_device_page(page))
+               goto out_putpage;
        if (PageAnon(page)) {
                flush_anon_page(vma, page, addr);
                flush_dcache_page(page);
        } else {
+out_putpage:
                put_page(page);
 out:
                page = NULL;
@@ -2321,11 +2324,13 @@ next_mm:
                        if (ksm_test_exit(mm))
                                break;
                        *page = follow_page(vma, ksm_scan.address, FOLL_GET);
-                       if (IS_ERR_OR_NULL(*page) || is_zone_device_page(*page)) {
+                       if (IS_ERR_OR_NULL(*page)) {
                                ksm_scan.address += PAGE_SIZE;
                                cond_resched();
                                continue;
                        }
+                       if (is_zone_device_page(*page))
+                               goto next_page;
                        if (PageAnon(*page)) {
                                flush_anon_page(vma, *page, ksm_scan.address);
                                flush_dcache_page(*page);
@@ -2340,6 +2345,7 @@ next_mm:
                                mmap_read_unlock(mm);
                                return rmap_item;
                        }
+next_page:
                        put_page(*page);
                        ksm_scan.address += PAGE_SIZE;
                        cond_resched();
index d74573c36573b45f68404ffc6d93bc2fbebe0cad..eb594b0db8060c9f23fb32b7af2959a08a88bcdd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1691,9 +1691,12 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                goto out;
 
        err = -ENOENT;
-       if (!page || is_zone_device_page(page))
+       if (!page)
                goto out;
 
+       if (is_zone_device_page(page))
+               goto out_putpage;
+
        err = 0;
        if (page_to_nid(page) == node)
                goto out_putpage;
@@ -1891,13 +1894,15 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                if (IS_ERR(page))
                        goto set_status;
 
-               if (page && !is_zone_device_page(page)) {
+               err = -ENOENT;
+               if (!page)
+                       goto set_status;
+
+               if (!is_zone_device_page(page))
                        err = page_to_nid(page);
-                       if (foll_flags & FOLL_GET)
-                               put_page(page);
-               } else {
-                       err = -ENOENT;
-               }
+
+               if (foll_flags & FOLL_GET)
+                       put_page(page);
 set_status:
                *status = err;