www.infradead.org Git - users/hch/misc.git/commitdiff
mm/huge_memory: convert split_huge_pages_pid() from follow_page() to folio_walk
author David Hildenbrand <david@redhat.com>
Fri, 2 Aug 2024 15:55:20 +0000 (17:55 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 2 Sep 2024 03:26:01 +0000 (20:26 -0700)
Let's remove yet another follow_page() user.  Note that we have to do the
split without holding the PTL, after folio_walk_end().  We don't care
about losing the secretmem check in follow_page().

[david@redhat.com: teach can_split_folio() that we are not holding an additional reference]
Link: https://lkml.kernel.org/r/c75d1c6c-8ea6-424f-853c-1ccda6c77ba2@redhat.com
Link: https://lkml.kernel.org/r/20240802155524.517137-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/vmscan.c

index e25d9ebfdf89aabdf86dd44bd7a187903290894f..ce44caa40eed56947d48c12ea29593e1b90e2b08 100644 (file)
@@ -314,7 +314,7 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
                unsigned long len, unsigned long pgoff, unsigned long flags,
                vm_flags_t vm_flags);
 
-bool can_split_folio(struct folio *folio, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order);
 static inline int split_huge_page(struct page *page)
@@ -470,7 +470,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
 }
 
 static inline bool
-can_split_folio(struct folio *folio, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 {
        return false;
 }
index 29e76d285e4b5cbfa8dd31d1585050870a465139..666fa675e5b64056960ac35d5c8ba2c7f8795821 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/memory-tiers.h>
 #include <linux/compat.h>
 #include <linux/pgalloc_tag.h>
+#include <linux/pagewalk.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -3017,7 +3018,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 }
 
 /* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int *pextra_pins)
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 {
        int extra_pins;
 
@@ -3029,7 +3030,8 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
                extra_pins = folio_nr_pages(folio);
        if (pextra_pins)
                *pextra_pins = extra_pins;
-       return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
+       return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
+                                       caller_pins;
 }
 
 /*
@@ -3197,7 +3199,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
         * Racy check if we can split the page, before unmap_folio() will
         * split PMDs
         */
-       if (!can_split_folio(folio, &extra_pins)) {
+       if (!can_split_folio(folio, 1, &extra_pins)) {
                ret = -EAGAIN;
                goto out_unlock;
        }
@@ -3504,7 +3506,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
         */
        for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
                struct vm_area_struct *vma = vma_lookup(mm, addr);
-               struct page *page;
+               struct folio_walk fw;
                struct folio *folio;
 
                if (!vma)
@@ -3516,13 +3518,10 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
                        continue;
                }
 
-               /* FOLL_DUMP to ignore special (like zero) pages */
-               page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
-               if (IS_ERR_OR_NULL(page))
+               folio = folio_walk_start(&fw, vma, addr, 0);
+               if (!folio)
                        continue;
 
-               folio = page_folio(page);
                if (!is_transparent_hugepage(folio))
                        goto next;
 
@@ -3536,18 +3535,24 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
                 * can be split or not. So skip the check here.
                 */
                if (!folio_test_private(folio) &&
-                   !can_split_folio(folio, NULL))
+                   !can_split_folio(folio, 0, NULL))
                        goto next;
 
                if (!folio_trylock(folio))
                        goto next;
+               folio_get(folio);
+               folio_walk_end(&fw, vma);
 
                if (!split_folio_to_order(folio, new_order))
                        split++;
 
                folio_unlock(folio);
-next:
                folio_put(folio);
+
+               cond_resched();
+               continue;
+next:
+               folio_walk_end(&fw, vma);
                cond_resched();
        }
        mmap_read_unlock(mm);
index 6b70e80ca0eea85d28b6ce9b509ad37914bde59c..96ce889ea3d0e633f9b289caa306e26636280cc4 100644 (file)
@@ -1227,7 +1227,7 @@ retry:
                                        goto keep_locked;
                                if (folio_test_large(folio)) {
                                        /* cannot split folio, skip it */
-                                       if (!can_split_folio(folio, NULL))
+                                       if (!can_split_folio(folio, 1, NULL))
                                                goto activate_locked;
                                        /*
                                         * Split partially mapped folios right away.