www.infradead.org Git - users/willy/xarray.git/commitdiff
mm: migrate: add isolate_folio_to_list()
author Kefeng Wang <wangkefeng.wang@huawei.com>
Tue, 27 Aug 2024 11:47:27 +0000 (19:47 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Sep 2024 04:15:59 +0000 (21:15 -0700)
Add an isolate_folio_to_list() helper to try to isolate HugeTLB, no-LRU
movable, and LRU folios to a list; it will be reused by
do_migrate_range() from memory hotplug soon. Also drop
mf_isolate_folio(), since we can directly use the new helper in
soft_offline_in_use_page().

Link: https://lkml.kernel.org/r/20240827114728.3212578-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Tested-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/migrate.h
mm/memory-failure.c
mm/migrate.c

index 644be30b69c8684f843505aa4c1d1d00e2221afb..002e49b2ebd98ea3a00e9a46e34bbb4a3b97eb58 100644 (file)
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
                  unsigned int *ret_succeeded);
 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
 bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
        { return NULL; }
 static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
        { return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+       { return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct folio *dst, struct folio *src)
index 1f4f31e6d91d9fc7681fa82844dcc66f9821376b..96ce31e5a203be3e1e952cad59bf33e964135b5e 100644 (file)
@@ -2653,40 +2653,6 @@ EXPORT_SYMBOL(unpoison_memory);
 #undef pr_fmt
 #define pr_fmt(fmt) "Soft offline: " fmt
 
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
-       bool isolated = false;
-
-       if (folio_test_hugetlb(folio)) {
-               isolated = isolate_hugetlb(folio, pagelist);
-       } else {
-               bool lru = !__folio_test_movable(folio);
-
-               if (lru)
-                       isolated = folio_isolate_lru(folio);
-               else
-                       isolated = isolate_movable_page(&folio->page,
-                                                       ISOLATE_UNEVICTABLE);
-
-               if (isolated) {
-                       list_add(&folio->lru, pagelist);
-                       if (lru)
-                               node_stat_add_folio(folio, NR_ISOLATED_ANON +
-                                                   folio_is_file_lru(folio));
-               }
-       }
-
-       /*
-        * If we succeed to isolate the folio, we grabbed another refcount on
-        * the folio, so we can safely drop the one we got from get_any_page().
-        * If we failed to isolate the folio, it means that we cannot go further
-        * and we will return an error, so drop the reference we got from
-        * get_any_page() as well.
-        */
-       folio_put(folio);
-       return isolated;
-}
-
 /*
  * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
  * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2699,6 +2665,7 @@ static int soft_offline_in_use_page(struct page *page)
        struct folio *folio = page_folio(page);
        char const *msg_page[] = {"page", "hugepage"};
        bool huge = folio_test_hugetlb(folio);
+       bool isolated;
        LIST_HEAD(pagelist);
        struct migration_target_control mtc = {
                .nid = NUMA_NO_NODE,
@@ -2738,7 +2705,18 @@ static int soft_offline_in_use_page(struct page *page)
                return 0;
        }
 
-       if (mf_isolate_folio(folio, &pagelist)) {
+       isolated = isolate_folio_to_list(folio, &pagelist);
+
+       /*
+        * If we succeed to isolate the folio, we grabbed another refcount on
+        * the folio, so we can safely drop the one we got from get_any_page().
+        * If we failed to isolate the folio, it means that we cannot go further
+        * and we will return an error, so drop the reference we got from
+        * get_any_page() as well.
+        */
+       folio_put(folio);
+
+       if (isolated) {
                ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
                if (!ret) {
index 2250baa85ac210cc8ff4f126529e0a411687b819..ba4893d42618e89ab1e4677d1f94fae3ff73bb1d 100644 (file)
@@ -178,6 +178,32 @@ void putback_movable_pages(struct list_head *l)
        }
 }
 
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+       bool isolated, lru;
+
+       if (folio_test_hugetlb(folio))
+               return isolate_hugetlb(folio, list);
+
+       lru = !__folio_test_movable(folio);
+       if (lru)
+               isolated = folio_isolate_lru(folio);
+       else
+               isolated = isolate_movable_page(&folio->page,
+                                               ISOLATE_UNEVICTABLE);
+
+       if (!isolated)
+               return false;
+
+       list_add(&folio->lru, list);
+       if (lru)
+               node_stat_add_folio(folio, NR_ISOLATED_ANON +
+                                   folio_is_file_lru(folio));
+
+       return true;
+}
+
 /*
  * Restore a potential migration pte to a working pte entry
  */