www.infradead.org Git - users/hch/misc.git/commitdiff
mm: split out a writeout helper from pageout
author: Christoph Hellwig <hch@lst.de>
Mon, 5 May 2025 13:35:03 +0000 (15:35 +0200)
committer: Christoph Hellwig <hch@lst.de>
Wed, 14 May 2025 04:51:23 +0000 (06:51 +0200)
Move the code to write back swap / shmem folios into a self-contained
helper to prepare for refactoring it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
mm/vmscan.c

index 0eda493fc383c1f145982d77c2526851624252ae..52e6eee4d8966cdf7844ed37fa0d2263977f59be 100644 (file)
@@ -652,14 +652,55 @@ typedef enum {
        PAGE_CLEAN,
 } pageout_t;
 
+static pageout_t writeout(struct folio *folio, struct address_space *mapping,
+               struct swap_iocb **plug, struct list_head *folio_list)
+{
+       struct writeback_control wbc = {
+               .sync_mode = WB_SYNC_NONE,
+               .nr_to_write = SWAP_CLUSTER_MAX,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
+               .for_reclaim = 1,
+               .swap_plug = plug,
+       };
+       int res;
+
+       folio_set_reclaim(folio);
+
+       /*
+        * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
+        * or we failed to allocate contiguous swap entries.
+        */
+       if (shmem_mapping(mapping)) {
+               if (folio_test_large(folio))
+                       wbc.list = folio_list;
+               res = shmem_writeout(folio, &wbc);
+       } else {
+               res = swap_writeout(folio, &wbc);
+       }
+
+       if (res < 0)
+               handle_write_error(mapping, folio, res);
+       if (res == AOP_WRITEPAGE_ACTIVATE) {
+               folio_clear_reclaim(folio);
+               return PAGE_ACTIVATE;
+       }
+
+       /* synchronous write? */
+       if (!folio_test_writeback(folio))
+               folio_clear_reclaim(folio);
+
+       trace_mm_vmscan_write_folio(folio);
+       node_stat_add_folio(folio, NR_VMSCAN_WRITE);
+       return PAGE_SUCCESS;
+}
+
 /*
  * pageout is called by shrink_folio_list() for each dirty folio.
  */
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
                         struct swap_iocb **plug, struct list_head *folio_list)
 {
-       int (*writeout)(struct folio *, struct writeback_control *);
-
        /*
         * We no longer attempt to writeback filesystem folios here, other
         * than tmpfs/shmem.  That's taken care of in page-writeback.
@@ -690,51 +731,12 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
                }
                return PAGE_KEEP;
        }
-       if (shmem_mapping(mapping))
-               writeout = shmem_writeout;
-       else if (folio_test_anon(folio))
-               writeout = swap_writeout;
-       else
-               return PAGE_ACTIVATE;
-
-       if (folio_clear_dirty_for_io(folio)) {
-               int res;
-               struct writeback_control wbc = {
-                       .sync_mode = WB_SYNC_NONE,
-                       .nr_to_write = SWAP_CLUSTER_MAX,
-                       .range_start = 0,
-                       .range_end = LLONG_MAX,
-                       .for_reclaim = 1,
-                       .swap_plug = plug,
-               };
 
-               /*
-                * The large shmem folio can be split if CONFIG_THP_SWAP is
-                * not enabled or contiguous swap entries are failed to
-                * allocate.
-                */
-               if (shmem_mapping(mapping) && folio_test_large(folio))
-                       wbc.list = folio_list;
-
-               folio_set_reclaim(folio);
-               res = writeout(folio, &wbc);
-               if (res < 0)
-                       handle_write_error(mapping, folio, res);
-               if (res == AOP_WRITEPAGE_ACTIVATE) {
-                       folio_clear_reclaim(folio);
-                       return PAGE_ACTIVATE;
-               }
-
-               if (!folio_test_writeback(folio)) {
-                       /* synchronous write? */
-                       folio_clear_reclaim(folio);
-               }
-               trace_mm_vmscan_write_folio(folio);
-               node_stat_add_folio(folio, NR_VMSCAN_WRITE);
-               return PAGE_SUCCESS;
-       }
-
-       return PAGE_CLEAN;
+       if (!shmem_mapping(mapping) && !folio_test_anon(folio))
+               return PAGE_ACTIVATE;
+       if (!folio_clear_dirty_for_io(folio))
+               return PAGE_CLEAN;
+       return writeout(folio, mapping, plug, folio_list);
 }
 
 /*