www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: migrate: add isolate_folio_to_list()
author Kefeng Wang <wangkefeng.wang@huawei.com>
Fri, 16 Aug 2024 09:04:34 +0000 (17:04 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Sat, 17 Aug 2024 00:53:27 +0000 (17:53 -0700)
Add isolate_folio_to_list() helper to try to isolate HugeTLB, no-LRU
movable and LRU folios to a list, which will be reused by
do_migrate_range() from memory hotplug soon, also drop the
mf_isolate_folio() since we could directly use new helper in the
soft_offline_in_use_page().

Link: https://lkml.kernel.org/r/20240816090435.888946-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/migrate.h
mm/memory-failure.c
mm/migrate.c

index 644be30b69c8684f843505aa4c1d1d00e2221afb..002e49b2ebd98ea3a00e9a46e34bbb4a3b97eb58 100644 (file)
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
                  unsigned int *ret_succeeded);
 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
 bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
        { return NULL; }
 static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
        { return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+       { return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct folio *dst, struct folio *src)
index 44a3fb43bf8c39733a6b9a9f16b37b96e9b1c83c..9520a2b256adb7f208791a2d3569efc9b0ae26eb 100644 (file)
@@ -2652,40 +2652,6 @@ EXPORT_SYMBOL(unpoison_memory);
 #undef pr_fmt
 #define pr_fmt(fmt) "Soft offline: " fmt
 
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
-       bool isolated = false;
-
-       if (folio_test_hugetlb(folio)) {
-               isolated = isolate_hugetlb(folio, pagelist);
-       } else {
-               bool lru = !__folio_test_movable(folio);
-
-               if (lru)
-                       isolated = folio_isolate_lru(folio);
-               else
-                       isolated = isolate_movable_page(&folio->page,
-                                                       ISOLATE_UNEVICTABLE);
-
-               if (isolated) {
-                       list_add(&folio->lru, pagelist);
-                       if (lru)
-                               node_stat_add_folio(folio, NR_ISOLATED_ANON +
-                                                   folio_is_file_lru(folio));
-               }
-       }
-
-       /*
-        * If we succeed to isolate the folio, we grabbed another refcount on
-        * the folio, so we can safely drop the one we got from get_any_page().
-        * If we failed to isolate the folio, it means that we cannot go further
-        * and we will return an error, so drop the reference we got from
-        * get_any_page() as well.
-        */
-       folio_put(folio);
-       return isolated;
-}
-
 /*
  * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
  * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2737,7 +2703,7 @@ static int soft_offline_in_use_page(struct page *page)
                return 0;
        }
 
-       if (mf_isolate_folio(folio, &pagelist)) {
+       if (isolate_folio_to_list(folio, &pagelist)) {
                ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
                if (!ret) {
@@ -2759,6 +2725,16 @@ static int soft_offline_in_use_page(struct page *page)
                        pfn, msg_page[huge], page_count(page), &page->flags);
                ret = -EBUSY;
        }
+
+       /*
+        * If we succeed to isolate the folio, we grabbed another refcount on
+        * the folio, so we can safely drop the one we got from get_any_page().
+        * If we failed to isolate the folio, it means that we cannot go further
+        * and we will return an error, so drop the reference we got from
+        * get_any_page() as well.
+        */
+       folio_put(folio);
+
        return ret;
 }
 
index 76cfc6c42eb38a0cacfa6d89188e724bd121e050..bb23a63742eecd87240dee44465e5d818978fc7c 100644 (file)
@@ -137,6 +137,33 @@ static void putback_movable_folio(struct folio *folio)
        folio_clear_isolated(folio);
 }
 
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+       bool isolated = false;
+
+       if (folio_test_hugetlb(folio)) {
+               isolated = isolate_hugetlb(folio, list);
+       } else {
+               bool lru = !__folio_test_movable(folio);
+
+               if (lru)
+                       isolated = folio_isolate_lru(folio);
+               else
+                       isolated = isolate_movable_page(&folio->page,
+                                                       ISOLATE_UNEVICTABLE);
+
+               if (isolated) {
+                       list_add(&folio->lru, list);
+                       if (lru)
+                               node_stat_add_folio(folio, NR_ISOLATED_ANON +
+                                                   folio_is_file_lru(folio));
+               }
+       }
+
+       return isolated;
+}
+
 /*
  * Put previously isolated pages back onto the appropriate lists
  * from where they were once taken off for compaction/migration.