mm: vmscan: simplify the logic for activating dirty file folios
Author:     Baolin Wang <baolin.wang@linux.alibaba.com>
AuthorDate: Fri, 17 Oct 2025 07:53:07 +0000 (15:53 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 22 Oct 2025 01:51:41 +0000 (18:51 -0700)
After commit 6b0dfabb3555 ("fs: Remove aops->writepage"), we no longer
attempt to write back filesystem folios through reclaim.

However, shrink_folio_list() still contains some logic related to
writeback control of dirty file folios.  The original logic was that,
for direct reclaim, or when folio_test_reclaim() is false, or when the
PGDAT_DIRTY flag is not set, dirty file folios would be activated
directly to avoid being scanned again; otherwise, we would try to write
back the dirty file folios.  However, since we can no longer write back
dirty folios from reclaim, the dirty file folios end up being activated
anyway.

Additionally, under the original logic, if we continued trying to write
back dirty file folios, we would also check the references flag,
sc->may_writepage, and may_enter_fs(), which could leave dirty file
folios on the inactive list.  This is unreasonable: even if these dirty
folios are scanned again, we still cannot clean them.

Therefore, the checks on these dirty file folios appear to be redundant
and can be removed.  Dirty file folios should be directly moved to the
active list to avoid being scanned again.  Since we set the PG_reclaim
flag for the dirty folios, once the writeback is completed, they will be
moved back to the tail of the inactive list to be retried for quick
reclaim.

Link: https://lkml.kernel.org/r/ba5c49955fd93c6850bcc19abf0e02e1573768aa.1760687075.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmzone.h
mm/vmscan.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7fb7331c57250782a464a9583c6ea4867f4ffdab..4398e027f450ec4d1e783467eca4e711c3b6e1d0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1060,10 +1060,6 @@ struct zone {
 } ____cacheline_internodealigned_in_smp;
 
 enum pgdat_flags {
-       PGDAT_DIRTY,                    /* reclaim scanning has recently found
-                                        * many dirty file pages at the tail
-                                        * of the LRU.
-                                        */
        PGDAT_WRITEBACK,                /* reclaim scanning has recently found
                                         * many pages under writeback
                                         */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 320831e78b92fd520329510879a50d3a0d16eea2..92f4ca99b73c9dab87c64a3d1f64a7f05661b364 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1387,21 +1387,7 @@ retry:
 
                mapping = folio_mapping(folio);
                if (folio_test_dirty(folio)) {
-                       /*
-                        * Only kswapd can writeback filesystem folios
-                        * to avoid risk of stack overflow. But avoid
-                        * injecting inefficient single-folio I/O into
-                        * flusher writeback as much as possible: only
-                        * write folios when we've encountered many
-                        * dirty folios, and when we've already scanned
-                        * the rest of the LRU for clean folios and see
-                        * the same dirty folios again (with the reclaim
-                        * flag set).
-                        */
-                       if (folio_is_file_lru(folio) &&
-                           (!current_is_kswapd() ||
-                            !folio_test_reclaim(folio) ||
-                            !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
+                       if (folio_is_file_lru(folio)) {
                                /*
                                 * Immediately reclaim when written back.
                                 * Similar in principle to folio_deactivate()
@@ -1410,7 +1396,8 @@ retry:
                                 */
                                node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
                                                nr_pages);
-                               folio_set_reclaim(folio);
+                               if (!folio_test_reclaim(folio))
+                                       folio_set_reclaim(folio);
 
                                goto activate_locked;
                        }
@@ -6105,11 +6092,6 @@ again:
                if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
                        set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
-               /* Allow kswapd to start writing pages during reclaim.*/
-               if (sc->nr.unqueued_dirty &&
-                       sc->nr.unqueued_dirty == sc->nr.file_taken)
-                       set_bit(PGDAT_DIRTY, &pgdat->flags);
-
                /*
                 * If kswapd scans pages marked for immediate
                 * reclaim and under writeback (nr_immediate), it
@@ -6850,7 +6832,6 @@ static void clear_pgdat_congested(pg_data_t *pgdat)
 
        clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
        clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
-       clear_bit(PGDAT_DIRTY, &pgdat->flags);
        clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
 }