mm/swap_state: update zswap LRU's protection range with the folio locked
author Nhat Pham <nphamcs@gmail.com>
Mon, 5 Feb 2024 23:24:42 +0000 (15:24 -0800)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 20 Feb 2024 22:20:48 +0000 (14:20 -0800)
When a folio is swapped in, the protection size of the corresponding zswap
LRU is incremented, so that the zswap shrinker is more conservative with
its reclaiming action.  This field is embedded within the struct lruvec,
so updating it requires looking up the folio's memcg and lruvec.  However,
however, this lookup can currently happen after the folio is unlocked, for
instance when a new folio is allocated and swap_read_folio() unlocks it
before returning.  Once the folio is unlocked, there is no stability
guarantee for the binding between the folio and its memcg and lruvec:

* A folio's memcg and lruvec can be freed between the lookup and the
  update, leading to a UAF.
* Folio migration can clear the now-unlocked folio's memcg_data, which
  redirects the zswap LRU protection size update to the root memcg
  instead of the original memcg.  This was recently caught by syzbot
  thanks to a warning in the inlined folio_lruvec() call (see the
  sketch of the racy ordering below).
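
For illustration, a minimal sketch of the pre-fix ordering in the readahead
paths, distilled from the removed lines in the diff below (not verbatim
kernel code; the surrounding code differs slightly per path):

	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, false, NULL);	/* drops the folio lock */
	/*
	 * The folio may now be unlocked: migration can clear its memcg_data,
	 * and the memcg/lruvec can be freed, so the folio_lruvec() lookup
	 * inside zswap_folio_swapin() is no longer safe.
	 */
	zswap_folio_swapin(folio);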

To prevent this, move the zswap LRU protection range update above the
swap_read_folio() call, and perform it only when a new page is allocated.
The folio is then still locked while its lruvec is looked up.
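
The resulting ordering, summarized from the hunks applied to both readahead
paths below; the protection update now runs while the freshly allocated
folio is still locked:

	if (unlikely(page_allocated)) {
		/* New folio, still locked: its memcg/lruvec binding is stable. */
		zswap_folio_swapin(folio);
		swap_read_folio(folio, false, NULL);
	}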

[nphamcs@gmail.com: add VM_WARN_ON_ONCE() to zswap_folio_swapin()]
Link: https://lkml.kernel.org/r/20240206180855.3987204-1-nphamcs@gmail.com
[nphamcs@gmail.com: remove unneeded if (folio) checks]
Link: https://lkml.kernel.org/r/20240206191355.83755-1-nphamcs@gmail.com
Link: https://lkml.kernel.org/r/20240205232442.3240571-1-nphamcs@gmail.com
Fixes: b5ba474f3f51 ("zswap: shrink zswap pool based on memory pressure")
Reported-by: syzbot+17a611d10af7d18a7092@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/000000000000ae47f90610803260@google.com/
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Reviewed-by: Chengming Zhou <zhouchengming@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swap_state.c
mm/zswap.c

diff --git a/mm/swap_state.c b/mm/swap_state.c
index e671266ad77241f461a17cbb2e486fe48a423f69..7255c01a1e4e16d758186019f904e70a7890a5cc 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -680,9 +680,10 @@ skip:
        /* The page was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                        &page_allocated, false);
-       if (unlikely(page_allocated))
+       if (unlikely(page_allocated)) {
+               zswap_folio_swapin(folio);
                swap_read_folio(folio, false, NULL);
-       zswap_folio_swapin(folio);
+       }
        return folio;
 }
 
@@ -855,9 +856,10 @@ skip:
        /* The folio was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
                                        &page_allocated, false);
-       if (unlikely(page_allocated))
+       if (unlikely(page_allocated)) {
+               zswap_folio_swapin(folio);
                swap_read_folio(folio, false, NULL);
-       zswap_folio_swapin(folio);
+       }
        return folio;
 }
 
diff --git a/mm/zswap.c b/mm/zswap.c
index d2423247acfd64c9759774ef94ed682d1fc8422d..36903d938c15e20ecbe5ea6c9fb6e12046f28253 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -377,10 +377,9 @@ void zswap_folio_swapin(struct folio *folio)
 {
        struct lruvec *lruvec;
 
-       if (folio) {
-               lruvec = folio_lruvec(folio);
-               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-       }
+       VM_WARN_ON_ONCE(!folio_test_locked(folio));
+       lruvec = folio_lruvec(folio);
+       atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 }
 
 /*********************************