www.infradead.org Git - users/willy/pagecache.git/commitdiff
mm/hugetlb: rename isolate_hugetlb() to folio_isolate_hugetlb()
author    David Hildenbrand <david@redhat.com>
          Mon, 13 Jan 2025 13:16:07 +0000 (14:16 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Sun, 26 Jan 2025 04:22:41 +0000 (20:22 -0800)
Let's make the function name match "folio_isolate_lru()", and add some
kernel doc.

Link: https://lkml.kernel.org/r/20250113131611.2554758-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/gup.c
mm/hugetlb.c
mm/mempolicy.c
mm/migrate.c

index 49ec2362ce926d491f89137cd86c6af2110411ab..c95ad5cd7894d512afdc82911b80f4fc5891994d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -153,7 +153,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
-bool isolate_hugetlb(struct folio *folio, struct list_head *list);
+bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list);
 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                bool *migratable_cleared);
@@ -414,7 +414,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
        return NULL;
 }
 
-static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
+static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
        return false;
 }
index 00a1269cbee0a6db3ed1d6e2bcddd412abc516dd..2cc3a9d28e70e06868d522ad8f4a1bd1dfb5357e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2344,7 +2344,7 @@ static unsigned long collect_longterm_unpinnable_folios(
                        continue;
 
                if (folio_test_hugetlb(folio)) {
-                       isolate_hugetlb(folio, movable_folio_list);
+                       folio_isolate_hugetlb(folio, movable_folio_list);
                        continue;
                }
 
index 58c2c54982079adfae08b29e3d1d044176b0c6dc..15a68996426500079077ef6cc9e2cde8805b5d36 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2812,7 +2812,7 @@ retry:
                 * Fail with -EBUSY if not possible.
                 */
                spin_unlock_irq(&hugetlb_lock);
-               isolated = isolate_hugetlb(old_folio, list);
+               isolated = folio_isolate_hugetlb(old_folio, list);
                ret = isolated ? 0 : -EBUSY;
                spin_lock_irq(&hugetlb_lock);
                goto free_new;
@@ -2897,7 +2897,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
        if (hstate_is_gigantic(h))
                return -ENOMEM;
 
-       if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
+       if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
                ret = 0;
        else if (!folio_ref_count(folio))
                ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
@@ -7421,7 +7421,24 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 
 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
 
-bool isolate_hugetlb(struct folio *folio, struct list_head *list)
+/**
+ * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
+ * @folio: the folio to isolate
+ * @list: the list to add the folio to on success
+ *
+ * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
+ * isolated/non-migratable, and moving it from the active list to the
+ * given list.
+ *
+ * Isolation will fail if @folio is not an allocated hugetlb folio, or if
+ * it is already isolated/non-migratable.
+ *
+ * On success, an additional folio reference is taken that must be dropped
+ * using folio_putback_active_hugetlb() to undo the isolation.
+ *
+ * Return: True if isolation worked, otherwise False.
+ */
+bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
        bool ret = true;
 
index f83b73236ffe7285951bab5a530c2db3dac6986b..bbaadbeeb2919066811e27656df0f64b29dc302c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -647,7 +647,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
         */
        if ((flags & MPOL_MF_MOVE_ALL) ||
            (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
-               if (!isolate_hugetlb(folio, qp->pagelist))
+               if (!folio_isolate_hugetlb(folio, qp->pagelist))
                        qp->nr_failed++;
 unlock:
        spin_unlock(ptl);
index 32cc8e0b1ccef495277369a58299e788685a5c7a..c3052877e844e68fe0f0c54f2c7a6266c1451508 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -128,7 +128,7 @@ static void putback_movable_folio(struct folio *folio)
  *
  * This function shall be used whenever the isolated pageset has been
  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
- * and isolate_hugetlb().
+ * and folio_isolate_hugetlb().
  */
 void putback_movable_pages(struct list_head *l)
 {
@@ -169,7 +169,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
        bool isolated, lru;
 
        if (folio_test_hugetlb(folio))
-               return isolate_hugetlb(folio, list);
+               return folio_isolate_hugetlb(folio, list);
 
        lru = !__folio_test_movable(folio);
        if (lru)
@@ -2200,7 +2200,7 @@ static int __add_folio_for_migration(struct folio *folio, int node,
                return -EACCES;
 
        if (folio_test_hugetlb(folio)) {
-               if (isolate_hugetlb(folio, pagelist))
+               if (folio_isolate_hugetlb(folio, pagelist))
                        return 1;
        } else if (folio_isolate_lru(folio)) {
                list_add_tail(&folio->lru, pagelist);
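
The comment fixed up in putback_movable_pages() above describes the common pairing: folios isolated with folio_isolate_hugetlb() or folio_isolate_lru() are collected on one list, and whatever migrate_pages() fails to move is handed back. A sketch of that pattern, loosely modelled on existing callers such as migrate_to_node() in mm/mempolicy.c; example_migrate_list() is hypothetical, and struct migration_target_control / alloc_migration_target() live in mm/internal.h:

#include <linux/migrate.h>
#include "internal.h"	/* migration_target_control, alloc_migration_target() */

static void example_migrate_list(struct list_head *pagelist, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_KERNEL,
	};
	int err;

	if (list_empty(pagelist))
		return;

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_SYSCALL, NULL);
	/* Anything that could not be migrated goes back where it was. */
	if (err)
		putback_movable_pages(pagelist);
}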