www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm-huge_memory-convert-split_huge_pages_pid-from-follow_page-to-folio_walk-fix
authorDavid Hildenbrand <david@redhat.com>
Tue, 6 Aug 2024 10:08:17 +0000 (12:08 +0200)
committerAndrew Morton <akpm@linux-foundation.org>
Sat, 17 Aug 2024 00:52:51 +0000 (17:52 -0700)
teach can_split_folio() that we are not holding an additional reference

Link: https://lkml.kernel.org/r/c75d1c6c-8ea6-424f-853c-1ccda6c77ba2@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/vmscan.c

index e25d9ebfdf89aabdf86dd44bd7a187903290894f..ce44caa40eed56947d48c12ea29593e1b90e2b08 100644 (file)
@@ -314,7 +314,7 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
                unsigned long len, unsigned long pgoff, unsigned long flags,
                vm_flags_t vm_flags);
 
-bool can_split_folio(struct folio *folio, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order);
 static inline int split_huge_page(struct page *page)
@@ -470,7 +470,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
 }
 
 static inline bool
-can_split_folio(struct folio *folio, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 {
        return false;
 }
index fe4f8415edfd2505e04494568fb41187b4759efd..666fa675e5b64056960ac35d5c8ba2c7f8795821 100644 (file)
@@ -3018,7 +3018,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 }
 
 /* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int *pextra_pins)
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 {
        int extra_pins;
 
@@ -3030,7 +3030,8 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
                extra_pins = folio_nr_pages(folio);
        if (pextra_pins)
                *pextra_pins = extra_pins;
-       return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
+       return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
+                                       caller_pins;
 }
 
 /*
@@ -3198,7 +3199,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
         * Racy check if we can split the page, before unmap_folio() will
         * split PMDs
         */
-       if (!can_split_folio(folio, &extra_pins)) {
+       if (!can_split_folio(folio, 1, &extra_pins)) {
                ret = -EAGAIN;
                goto out_unlock;
        }
@@ -3534,7 +3535,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
                 * can be split or not. So skip the check here.
                 */
                if (!folio_test_private(folio) &&
-                   !can_split_folio(folio, NULL))
+                   !can_split_folio(folio, 0, NULL))
                        goto next;
 
                if (!folio_trylock(folio))
index 6b70e80ca0eea85d28b6ce9b509ad37914bde59c..96ce889ea3d0e633f9b289caa306e26636280cc4 100644 (file)
@@ -1227,7 +1227,7 @@ retry:
                                        goto keep_locked;
                                if (folio_test_large(folio)) {
                                        /* cannot split folio, skip it */
-                                       if (!can_split_folio(folio, NULL))
+                                       if (!can_split_folio(folio, 1, NULL))
                                                goto activate_locked;
                                        /*
                                         * Split partially mapped folios right away.