mm/migrate: remove MIGRATEPAGE_UNMAP
author    David Hildenbrand <david@redhat.com>
          Mon, 11 Aug 2025 14:39:47 +0000 (16:39 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 12 Sep 2025 00:24:44 +0000 (17:24 -0700)
migrate_folio_unmap() is the only user of MIGRATEPAGE_UNMAP.  We want to
remove MIGRATEPAGE_* completely.

It's rather weird to have a generic MIGRATEPAGE_UNMAP, documented to be
returned from address-space callbacks, when it's only used for an internal
helper.

Let's start by having only a single "success" return value for
migrate_folio_unmap() -- 0 -- by moving the "folio was already freed"
check into the single caller.
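
The caller-visible contract thus becomes: 0 on success, negative errno on
failure.  As a condensed sketch of the resulting handling in
migrate_pages_batch() (error bookkeeping trimmed; see the full hunk below):

	rc = migrate_folio_unmap(get_new_folio, put_new_folio, private,
				 folio, &dst, mode, reason, ret_folios);
	switch (rc) {
	case -ENOMEM:
	case -EAGAIN:
		/* Folio stays on the "from" list. */
		break;
	case 0:
		/* Unmapped: queue src and dst for the migration phase. */
		list_move_tail(&folio->lru, &unmap_folios);
		list_add_tail(&dst->lru, &dst_folios);
		break;
	default:
		/* Any other errno: folio ends up on the ret_folios list. */
		break;
	}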

There is a remaining comment for PG_isolated, which we renamed to
PG_movable_ops_isolated recently and forgot to update.

While we might still run into that case (an isolated movable_ops page getting
freed under us) with zsmalloc, it's something we
want to get rid of soon.  So let's just focus that optimization on real
folios only for now by excluding movable_ops pages.  Note that concurrent
freeing can happen at any time and this "already freed" check is not
relevant for correctness.
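
Condensed from the hunk below, the check moved into migrate_pages_batch()
then looks roughly like this (note the added page_has_movable_ops()
exclusion):

	/* Holding the last reference: freed from under us, just drop it. */
	if (likely(!page_has_movable_ops(&folio->page)) &&
	    folio_ref_count(folio) == 1) {
		folio_clear_active(folio);
		folio_clear_unevictable(folio);
		list_del(&folio->lru);
		migrate_folio_done(folio, reason);
		stats->nr_succeeded += nr_pages;
		stats->nr_thp_succeeded += is_thp;
		continue;
	}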

Link: https://lkml.kernel.org/r/20250811143949.1117439-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: David Sterba <dsterba@suse.com>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: Dave Kleikamp <dave.kleikamp@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/migrate.h
mm/migrate.c

index 9009e27b5f44c11e2969e349c6441fec0d418b44..302f3e95faeaa9614ae302997e0533f7053a976d 100644 (file)
@@ -18,7 +18,6 @@ struct migration_target_control;
  * - zero on page migration success;
  */
 #define MIGRATEPAGE_SUCCESS            0
-#define MIGRATEPAGE_UNMAP              1
 
 /**
  * struct movable_operations - Driver page migration
index 9e5ef39ce73af0ce986c384954b10c304633d04d..4528f3f95e2eee5a9693bf445abe8f8f61a93c87 100644 (file)
@@ -1198,16 +1198,6 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
        bool locked = false;
        bool dst_locked = false;
 
-       if (folio_ref_count(src) == 1) {
-               /* Folio was freed from under us. So we are done. */
-               folio_clear_active(src);
-               folio_clear_unevictable(src);
-               /* free_pages_prepare() will clear PG_isolated. */
-               list_del(&src->lru);
-               migrate_folio_done(src, reason);
-               return MIGRATEPAGE_SUCCESS;
-       }
-
        dst = get_new_folio(src, private);
        if (!dst)
                return -ENOMEM;
@@ -1297,7 +1287,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 
        if (unlikely(page_has_movable_ops(&src->page))) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
-               return MIGRATEPAGE_UNMAP;
+               return 0;
        }
 
        /*
@@ -1327,7 +1317,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 
        if (!folio_mapped(src)) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
-               return MIGRATEPAGE_UNMAP;
+               return 0;
        }
 
 out:
@@ -1870,14 +1860,28 @@ static int migrate_pages_batch(struct list_head *from,
                                continue;
                        }
 
+                       /*
+                        * If we are holding the last folio reference, the folio
+                        * was freed from under us, so just drop our reference.
+                        */
+                       if (likely(!page_has_movable_ops(&folio->page)) &&
+                           folio_ref_count(folio) == 1) {
+                               folio_clear_active(folio);
+                               folio_clear_unevictable(folio);
+                               list_del(&folio->lru);
+                               migrate_folio_done(folio, reason);
+                               stats->nr_succeeded += nr_pages;
+                               stats->nr_thp_succeeded += is_thp;
+                               continue;
+                       }
+
                        rc = migrate_folio_unmap(get_new_folio, put_new_folio,
                                        private, folio, &dst, mode, reason,
                                        ret_folios);
                        /*
                         * The rules are:
-                        *      Success: folio will be freed
-                        *      Unmap: folio will be put on unmap_folios list,
-                        *             dst folio put on dst_folios list
+                        *      0: folio will be put on unmap_folios list,
+                        *         dst folio put on dst_folios list
                         *      -EAGAIN: stay on the from list
                         *      -ENOMEM: stay on the from list
                         *      Other errno: put on ret_folios list
@@ -1927,11 +1931,7 @@ static int migrate_pages_batch(struct list_head *from,
                                thp_retry += is_thp;
                                nr_retry_pages += nr_pages;
                                break;
-                       case MIGRATEPAGE_SUCCESS:
-                               stats->nr_succeeded += nr_pages;
-                               stats->nr_thp_succeeded += is_thp;
-                               break;
-                       case MIGRATEPAGE_UNMAP:
+                       case 0:
                                list_move_tail(&folio->lru, &unmap_folios);
                                list_add_tail(&dst->lru, &dst_folios);
                                break;