mm: swap: add an adaptive full cluster cache reclaim
author     Kairui Song <kasong@tencent.com>          Wed, 31 Jul 2024 06:49:21 +0000 (23:49 -0700)
committer  Andrew Morton <akpm@linux-foundation.org> Sat, 17 Aug 2024 00:52:47 +0000 (17:52 -0700)
Link all full clusters with one full list, and reclaim from it when the
allocation has run out of all usable clusters.

There are many reasons a folio can end up in the swap cache while having
no swap count reference.  So the best way to search for such slots is
still by iterating over the swap clusters.

With the list used as an LRU, iterating from the oldest cluster and
keeping them rotating is a very doable and clean way to free up clusters
that are potentially no longer in use.
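
To illustrate the rotation idea outside the kernel, here is a minimal
userspace C sketch (not the kernel implementation): full clusters sit on
a circular list, the oldest one is scanned for slots held only by the
swap cache, then rotated to the tail.  struct cluster, the hand-rolled
list helpers and the cache_only flag below are simplified stand-ins for
struct swap_cluster_info, list_head and SWAP_HAS_CACHE.

    /* Minimal userspace sketch of the full-cluster LRU rotation (NOT kernel code). */
    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CLUSTERS       4
    #define SLOTS_PER_CLUSTER 8

    struct cluster {
            struct cluster *prev, *next;
            bool cache_only[SLOTS_PER_CLUSTER]; /* slot held only by the swap cache */
    };

    /* circular list head: an empty list points at itself */
    static struct cluster full_head = { &full_head, &full_head };

    static void full_list_add_tail(struct cluster *c)
    {
            c->prev = full_head.prev;
            c->next = &full_head;
            full_head.prev->next = c;
            full_head.prev = c;
    }

    static void full_list_del(struct cluster *c)
    {
            c->prev->next = c->next;
            c->next->prev = c->prev;
    }

    /* Scan the oldest full cluster for reclaimable slots, rotate it to the tail. */
    static int reclaim_one_full_cluster(void)
    {
            struct cluster *c = full_head.next;
            int reclaimed = 0;

            if (c == &full_head)            /* list is empty */
                    return 0;

            full_list_del(c);
            full_list_add_tail(c);          /* rotate: oldest becomes newest */

            for (int i = 0; i < SLOTS_PER_CLUSTER; i++) {
                    if (c->cache_only[i]) {
                            c->cache_only[i] = false;   /* drop the cache-only slot */
                            reclaimed++;
                    }
            }
            return reclaimed;
    }

    int main(void)
    {
            static struct cluster clusters[NR_CLUSTERS];

            for (int i = 0; i < NR_CLUSTERS; i++) {
                    clusters[i].cache_only[0] = (i % 2 == 0); /* some slots reclaimable */
                    full_list_add_tail(&clusters[i]);
            }
            printf("reclaimed %d slot(s) from the oldest full cluster\n",
                   reclaim_one_full_cluster());
            return 0;
    }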

On any allocation failure, try to reclaim and rotate only one cluster.
This is adaptive: high order allocations can tolerate a fallback, so this
avoids latency and gives the full cluster list a fair chance to get
reclaimed.  It relieves the usage pressure for the fallback order 0
allocation or a follow-up high order allocation.

If the swap device is getting very full, reclaim more aggressively to
ensure no OOM will happen.  This ensures an order 0 heavy workload won't
go OOM, as order 0 allocations won't fail if any cluster still has free
space.
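
As a rough sketch of that adaptive budget (the function name and the
cluster size here are illustrative, not the kernel's): normally only one
full cluster is scanned per failed allocation, but once free swap drops
to a single cluster or less the budget scales with the number of in-use
pages, matching the intent of swap_reclaim_full_clusters() in the diff
below.

    #include <stdio.h>

    #define SWAPFILE_CLUSTER 256    /* slots per cluster; illustrative value */

    /* How many full clusters to scan after a failed allocation. */
    static long full_cluster_scan_budget(long free_swap_pages, long inuse_pages)
    {
            if (free_swap_pages <= SWAPFILE_CLUSTER)        /* device nearly full */
                    return inuse_pages / SWAPFILE_CLUSTER;  /* scan proportionally */
            return 1;                                       /* normal: rotate one cluster */
    }

    int main(void)
    {
            printf("plenty of swap left: scan %ld cluster(s)\n",
                   full_cluster_scan_budget(100000, 50000));
            printf("nearly full: scan %ld cluster(s)\n",
                   full_cluster_scan_budget(100, 50000));
            return 0;
    }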

Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-9-cb9c148b9297@kernel.org
Signed-off-by: Kairui Song <kasong@tencent.com>
Reported-by: Barry Song <21cnbao@gmail.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Kairui Song <ryncsn@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swap.h
mm/swapfile.c

index e372122e4f4599c53ec1c6a936e58f92a3a3692f..476354391a24c0b9b6b7c7df2a98ab0a1c3c2406 100644 (file)
@@ -297,6 +297,7 @@ struct swap_info_struct {
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct list_head free_clusters; /* free clusters list */
+       struct list_head full_clusters; /* full clusters list */
        struct list_head nonfull_clusters[SWAP_NR_ORDERS];
                                        /* list of cluster that contains at least one free slot */
        struct list_head frag_clusters[SWAP_NR_ORDERS];
index ffb663e6009414327c57f82f6dc3d1940e5fff5e..38f35aaf1fc2422ee3bbd0790c20a66d429ce7a7 100644 (file)
@@ -440,10 +440,7 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 
        VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
-       if (ci->flags & CLUSTER_FLAG_NONFULL)
-               list_move_tail(&ci->list, &si->discard_clusters);
-       else
-               list_add_tail(&ci->list, &si->discard_clusters);
+       list_move_tail(&ci->list, &si->discard_clusters);
        ci->flags = 0;
        schedule_work(&si->discard_work);
 }
@@ -453,10 +450,7 @@ static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info
        lockdep_assert_held(&si->lock);
        lockdep_assert_held(&ci->lock);
 
-       if (ci->flags & CLUSTER_FLAG_NONFULL)
-               list_move_tail(&ci->list, &si->free_clusters);
-       else
-               list_add_tail(&ci->list, &si->free_clusters);
+       list_move_tail(&ci->list, &si->free_clusters);
        ci->flags = CLUSTER_FLAG_FREE;
        ci->order = 0;
 }
@@ -576,12 +570,9 @@ static void dec_cluster_info_page(struct swap_info_struct *p,
 
        if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
                VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
-               if (ci->flags & CLUSTER_FLAG_FRAG) {
+               if (ci->flags & CLUSTER_FLAG_FRAG)
                        p->frag_cluster_nr[ci->order]--;
-                       list_move_tail(&ci->list, &p->nonfull_clusters[ci->order]);
-               } else {
-                       list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]);
-               }
+               list_move_tail(&ci->list, &p->nonfull_clusters[ci->order]);
                ci->flags = CLUSTER_FLAG_NONFULL;
        }
 }
@@ -674,7 +665,7 @@ static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
                          (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
                if (ci->flags & CLUSTER_FLAG_FRAG)
                        si->frag_cluster_nr[ci->order]--;
-               list_del(&ci->list);
+               list_move_tail(&ci->list, &si->full_clusters);
                ci->flags = 0;
        }
 }
@@ -718,6 +709,46 @@ done:
        return offset;
 }
 
+static void swap_reclaim_full_clusters(struct swap_info_struct *si)
+{
+       long to_scan = 1;
+       unsigned long offset, end;
+       struct swap_cluster_info *ci;
+       unsigned char *map = si->swap_map;
+       int nr_reclaim, total_reclaimed = 0;
+
+       if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER)
+               to_scan = si->inuse_pages / SWAPFILE_CLUSTER;
+
+       while (!list_empty(&si->full_clusters)) {
+               ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list);
+               list_move_tail(&ci->list, &si->full_clusters);
+               offset = cluster_offset(si, ci);
+               end = min(si->max, offset + SWAPFILE_CLUSTER);
+               to_scan--;
+
+               while (offset < end) {
+                       if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
+                               spin_unlock(&si->lock);
+                               nr_reclaim = __try_to_reclaim_swap(si, offset,
+                                                                  TTRS_ANYWAY | TTRS_DIRECT);
+                               spin_lock(&si->lock);
+                               if (nr_reclaim > 0) {
+                                       offset += nr_reclaim;
+                                       total_reclaimed += nr_reclaim;
+                                       continue;
+                               } else if (nr_reclaim < 0) {
+                                       offset += -nr_reclaim;
+                                       continue;
+                               }
+                       }
+                       offset++;
+               }
+               if (to_scan <= 0 || total_reclaimed)
+                       break;
+       }
+}
+
 /*
  * Try to get swap entries with specified order from current cpu's swap entry
  * pool (a cluster). This might involve allocating a new cluster for current CPU
@@ -826,7 +857,15 @@ new_cluster:
                                goto done;
                }
        }
+
 done:
+       /* Try reclaim from full clusters if device is nearfull */
+       if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) {
+               swap_reclaim_full_clusters(si);
+               if (!found && !order && si->pages != si->inuse_pages)
+                       goto new_cluster;
+       }
+
        cluster->next[order] = offset;
        return found;
 }
@@ -3118,6 +3157,7 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
        nr_good_pages = maxpages - 1;   /* omit header page */
 
        INIT_LIST_HEAD(&p->free_clusters);
+       INIT_LIST_HEAD(&p->full_clusters);
        INIT_LIST_HEAD(&p->discard_clusters);
 
        for (i = 0; i < SWAP_NR_ORDERS; i++) {