mm/shmem, swap: remove redundant error handling for replacing folio
author Kairui Song <kasong@tencent.com>
Wed, 10 Sep 2025 16:08:27 +0000 (00:08 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:54 +0000 (17:25 -0700)
Shmem may replace a folio in the swap cache if the cached one doesn't fit
the swapin's GFP zone.  When doing so, shmem has already double-checked
that the swap cache folio is locked, still has the swap cache flag set,
and contains the wanted swap entry.  So it is impossible to fail due to an
XArray mismatch.  There is even a comment for that.
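
To make that invariant concrete, here is a hypothetical helper (the real
checks live inline in shmem_swapin_folio(), not behind a helper with this
name) sketching the re-check the swapin path performs under the folio lock;
with these conditions holding, the XArray slots for the entry can only point
at the old folio:

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/swap.h>

	/* Illustrative sketch only, not the exact mainline code. */
	static bool shmem_swapin_folio_rechecked(struct folio *folio,
						 swp_entry_t entry)
	{
		/* The caller already holds the folio lock here. */
		WARN_ON_ONCE(!folio_test_locked(folio));

		/* Still in the swap cache, and still the wanted entry? */
		return folio_test_swapcache(folio) &&
		       folio->swap.val == entry.val;
	}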

Delete the defensive error handling path and add a WARN_ON instead: if that
ever happens, something has broken the basic principle of how the swap cache
works, and we should catch and fix it.
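
For reference, a sketch of the idea rather than the exact hunk below:
xas_store() returns the entry that was previously stored at the index, so
storing the new folio and asserting on the displaced entry does the same
job as the deleted xas_load()/-ENOENT path, just without a recovery branch:

	/* Under xa_lock_irq(&swap_mapping->i_pages), folio lock held. */
	for (i = 0; i < nr_pages; i++) {
		/* The displaced entry must be the old folio. */
		WARN_ON_ONCE(xas_store(&xas, new) != old);
		xas_next(&xas);
	}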

Link: https://lkml.kernel.org/r/20250910160833.3464-10-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index 410f27bc4752962ea321d7af9dffa02417cfdb7a..5f395fab489cda8c9e5c7ef3562fc84a1a337806 100644
@@ -1661,13 +1661,13 @@ try_split:
                }
 
                /*
-                * The delete_from_swap_cache() below could be left for
+                * The swap_cache_del_folio() below could be left for
                 * shrink_folio_list()'s folio_free_swap() to dispose of;
                 * but I'm a little nervous about letting this folio out of
                 * shmem_writeout() in a hybrid half-tmpfs-half-swap state
                 * e.g. folio_mapping(folio) might give an unexpected answer.
                 */
-               delete_from_swap_cache(folio);
+               swap_cache_del_folio(folio);
                goto redirty;
        }
        if (nr_pages > 1)
@@ -2045,7 +2045,7 @@ retry:
        new->swap = entry;
 
        memcg1_swapin(entry, nr_pages);
-       shadow = get_shadow_from_swap_cache(entry);
+       shadow = swap_cache_get_shadow(entry);
        if (shadow)
                workingset_refault(new, shadow);
        folio_add_lru(new);
@@ -2121,35 +2121,17 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
        /* Swap cache still stores N entries instead of a high-order entry */
        xa_lock_irq(&swap_mapping->i_pages);
        for (i = 0; i < nr_pages; i++) {
-               void *item = xas_load(&xas);
-
-               if (item != old) {
-                       error = -ENOENT;
-                       break;
-               }
-
-               xas_store(&xas, new);
+               WARN_ON_ONCE(xas_store(&xas, new) != old);
                xas_next(&xas);
        }
-       if (!error) {
-               mem_cgroup_replace_folio(old, new);
-               shmem_update_stats(new, nr_pages);
-               shmem_update_stats(old, -nr_pages);
-       }
        xa_unlock_irq(&swap_mapping->i_pages);
 
-       if (unlikely(error)) {
-               /*
-                * Is this possible?  I think not, now that our callers
-                * check both the swapcache flag and folio->private
-                * after getting the folio lock; but be defensive.
-                * Reverse old to newpage for clear and free.
-                */
-               old = new;
-       } else {
-               folio_add_lru(new);
-               *foliop = new;
-       }
+       mem_cgroup_replace_folio(old, new);
+       shmem_update_stats(new, nr_pages);
+       shmem_update_stats(old, -nr_pages);
+
+       folio_add_lru(new);
+       *foliop = new;
 
        folio_clear_swapcache(old);
        old->private = NULL;
@@ -2183,7 +2165,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
        nr_pages = folio_nr_pages(folio);
        folio_wait_writeback(folio);
        if (!skip_swapcache)
-               delete_from_swap_cache(folio);
+               swap_cache_del_folio(folio);
        /*
         * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
         * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
@@ -2422,7 +2404,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
                folio->swap.val = 0;
                swapcache_clear(si, swap, nr_pages);
        } else {
-               delete_from_swap_cache(folio);
+               swap_cache_del_folio(folio);
        }
        folio_mark_dirty(folio);
        swap_free_nr(swap, nr_pages);