mm, swap: drop the flag TTRS_DIRECT
author		Kairui Song <kasong@tencent.com>
		Mon, 24 Feb 2025 18:02:07 +0000 (02:02 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Fri, 28 Feb 2025 01:00:31 +0000 (17:00 -0800)
This flag was added as a temporary measure to let the allocator bypass
the slot cache during freeing, so that reclaiming one slot would free
the slot immediately.

But freeing no longer goes through the slot cache, so the flag has no
effect and can be dropped.
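
For context, a condensed sketch of the two freeing paths that TTRS_DIRECT
selected between in __try_to_reclaim_swap(), reassembled from the removed
lines in the diff below (declarations and the preceding reference checks
are elided):

	if (!(flags & TTRS_DIRECT)) {
		/*
		 * Default path: free through the slot cache.  Entries
		 * became reusable only once the cache was drained.
		 */
		delete_from_swap_cache(folio);
		folio_set_dirty(folio);
		ret = nr_pages;
		goto out_unlock;
	}

	/*
	 * Direct path: drop the folio from the swap cache by hand and
	 * free the entry range immediately under the cluster lock,
	 * bypassing the slot cache and the device lock.
	 */
	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);
	folio_ref_sub(folio, nr_pages);
	folio_set_dirty(folio);

	ci = lock_cluster(si, offset);
	swap_entry_range_free(si, ci, entry, nr_pages);
	unlock_cluster(ci);
	ret = nr_pages;

With the slot cache gone from the freeing path, the default branch frees
entries just as immediately as the direct one, so only it survives.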

Link: https://lkml.kernel.org/r/20250224180212.22802-3-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swapfile.c

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5618cd1c4b030365d9cfbdb2af9fc8639b20ce1a..6f2de59c6355e60952b94787a0559349a21ac2f3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -158,8 +158,6 @@ static long swap_usage_in_pages(struct swap_info_struct *si)
 #define TTRS_UNMAPPED          0x2
 /* Reclaim the swap entry if swap is getting full */
 #define TTRS_FULL              0x4
-/* Reclaim directly, bypass the slot cache and don't touch device lock */
-#define TTRS_DIRECT            0x8
 
 static bool swap_only_has_cache(struct swap_info_struct *si,
                              unsigned long offset, int nr_pages)
@@ -257,23 +255,8 @@ again:
        if (!need_reclaim)
                goto out_unlock;
 
-       if (!(flags & TTRS_DIRECT)) {
-               /* Free through slot cache */
-               delete_from_swap_cache(folio);
-               folio_set_dirty(folio);
-               ret = nr_pages;
-               goto out_unlock;
-       }
-
-       xa_lock_irq(&address_space->i_pages);
-       __delete_from_swap_cache(folio, entry, NULL);
-       xa_unlock_irq(&address_space->i_pages);
-       folio_ref_sub(folio, nr_pages);
+       delete_from_swap_cache(folio);
        folio_set_dirty(folio);
-
-       ci = lock_cluster(si, offset);
-       swap_entry_range_free(si, ci, entry, nr_pages);
-       unlock_cluster(ci);
        ret = nr_pages;
 out_unlock:
        folio_unlock(folio);
@@ -697,7 +680,7 @@ static bool cluster_reclaim_range(struct swap_info_struct *si,
                        offset++;
                        break;
                case SWAP_HAS_CACHE:
-                       nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
+                       nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
                        if (nr_reclaim > 0)
                                offset += nr_reclaim;
                        else
@@ -849,7 +832,7 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
                        if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
                                spin_unlock(&ci->lock);
                                nr_reclaim = __try_to_reclaim_swap(si, offset,
-                                                                  TTRS_ANYWAY | TTRS_DIRECT);
+                                                                  TTRS_ANYWAY);
                                spin_lock(&ci->lock);
                                if (nr_reclaim) {
                                        offset += abs(nr_reclaim);
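
After the patch, the reclaim-flag set at the top of mm/swapfile.c reduces
to the three flags below.  The 0x1 value and the first two comments are
not visible in the hunk above and are inferred from how the flags are
used at the call sites; treat this as a sketch of the surviving
definitions rather than a verbatim quote:

	/* Reclaim the swap entry anyway if possible */
	#define TTRS_ANYWAY		0x1
	/* Reclaim the swap entry if there are no more mappings of the page */
	#define TTRS_UNMAPPED		0x2
	/* Reclaim the swap entry if swap is getting full */
	#define TTRS_FULL		0x4

Both call sites touched by this patch now pass plain TTRS_ANYWAY; the
reclaim behavior is unchanged, since freeing is immediate on either
former path.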