mm-vmscan-simplify-the-folio-refcount-check-in-pageout-fix
author	Baolin Wang <baolin.wang@linux.alibaba.com>
	Mon, 22 Sep 2025 06:02:28 +0000 (14:02 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Wed, 15 Oct 2025 04:28:22 +0000 (21:28 -0700)
remove warning and comment, per Hugh

Link: https://lkml.kernel.org/r/392a9ca3-31ac-4447-bd44-3c656d63e4ca@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 4907e255857a6cb02c9996051c2cdb9d7809dd0b..aadbee50a851a207b369bfe4708981b3bf67bcf8 100644
@@ -689,16 +689,8 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
         * A freeable shmem or swapcache folio is referenced only by the
         * caller that isolated the folio and the page cache.
         */
-       if (folio_ref_count(folio) != 1 + folio_nr_pages(folio))
+       if (folio_ref_count(folio) != 1 + folio_nr_pages(folio) || !mapping)
                return PAGE_KEEP;
-       if (!mapping) {
-               /*
-                * We should no longer have dirty folios with clean buffers and
-                * a NULL mapping. However, let's be careful for now.
-                */
-               VM_WARN_ON_FOLIO(true, folio);
-               return PAGE_KEEP;
-       }
 
        if (!shmem_mapping(mapping) && !folio_test_anon(folio))
                return PAGE_ACTIVATE;