www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm, swap: cleanup swap entry allocation parameter
authorKairui Song <kasong@tencent.com>
Mon, 6 Oct 2025 20:02:35 +0000 (04:02 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:31 +0000 (21:28 -0700)
We no longer need this GFP parameter after commit 8578e0c00dcf ("mm, swap:
use the swap table for the swap cache and switch API").  Before that
commit the GFP parameter was already almost identical for all callers, so
nothing was changed by that commit.  The swap table series just moved the
GFP handling to a lower layer, making it better defined, with the
allocation mode depending on whether an atomic or sleeping allocation is
required.

Now that this parameter is no longer used, just remove it.  No behavior change.

Link: https://lkml.kernel.org/r/20251007-swap-clean-after-swap-table-p1-v1-3-74860ef8ba74@tencent.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Chris Li <chrisl@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swap.h
mm/shmem.c
mm/swapfile.c
mm/vmscan.c

index e818fbade1e2e1c3a836fcae7bbe52ab88cef253..a4b2648177359c8fab102708d243297e3238d8af 100644 (file)
@@ -462,7 +462,7 @@ static inline long get_nr_swap_pages(void)
 }
 
 extern void si_swapinfo(struct sysinfo *);
-int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
+int folio_alloc_swap(struct folio *folio);
 bool folio_free_swap(struct folio *folio);
 void put_swap_folio(struct folio *folio, swp_entry_t entry);
 extern swp_entry_t get_swap_page_of_type(int);
@@ -560,7 +560,7 @@ static inline int swp_swapcount(swp_entry_t entry)
        return 0;
 }
 
-static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
+static inline int folio_alloc_swap(struct folio *folio)
 {
        return -EINVAL;
 }
index 45f51745ad881282f336ad5920acc03fc332075d..63092cc0b1419d349feae89259a980dafab36c69 100644 (file)
@@ -1617,7 +1617,7 @@ try_split:
                folio_mark_uptodate(folio);
        }
 
-       if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+       if (!folio_alloc_swap(folio)) {
                bool first_swapped = shmem_recalc_inode(inode, 0, nr_pages);
                int error;
 
index 732e07c70ce9e52ba2fefca346f86afa89072cfa..534b21aeef5a543855809eb818c06875061fef53 100644 (file)
@@ -1425,7 +1425,7 @@ static bool swap_sync_discard(void)
  * Context: Caller needs to hold the folio lock.
  * Return: Whether the folio was added to the swap cache.
  */
-int folio_alloc_swap(struct folio *folio, gfp_t gfp)
+int folio_alloc_swap(struct folio *folio)
 {
        unsigned int order = folio_order(folio);
        unsigned int size = 1 << order;
index a306b96e6515af0d40663ebf2c650d0a8e474329..f9bc4a427889ef117d88fc53b8049529879d7f04 100644 (file)
@@ -1296,7 +1296,7 @@ retry:
                                            split_folio_to_list(folio, folio_list))
                                                goto activate_locked;
                                }
-                               if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
+                               if (folio_alloc_swap(folio)) {
                                        int __maybe_unused order = folio_order(folio);
 
                                        if (!folio_test_large(folio))
@@ -1312,7 +1312,7 @@ retry:
                                        }
 #endif
                                        count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
-                                       if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
+                                       if (folio_alloc_swap(folio))
                                                goto activate_locked_split;
                                }
                                /*