From: Christoph Hellwig
Date: Mon, 5 May 2025 13:35:03 +0000 (+0200)
Subject: mm: split out a writeout helper from pageout
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=5296d08e94fbf54d2dc63292e743e593d7f7d8d2;p=users%2Fhch%2Fmisc.git

mm: split out a writeout helper from pageout

Move the code to write back swap / shmem folios into a self-contained
helper to prepare for refactoring it.

Signed-off-by: Christoph Hellwig
---

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0eda493fc383..52e6eee4d896 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -652,14 +652,55 @@ typedef enum {
 	PAGE_CLEAN,
 } pageout_t;
 
+static pageout_t writeout(struct folio *folio, struct address_space *mapping,
+		struct swap_iocb **plug, struct list_head *folio_list)
+{
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_NONE,
+		.nr_to_write = SWAP_CLUSTER_MAX,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+		.for_reclaim = 1,
+		.swap_plug = plug,
+	};
+	int res;
+
+	folio_set_reclaim(folio);
+
+	/*
+	 * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
+	 * or we failed to allocate contiguous swap entries.
+	 */
+	if (shmem_mapping(mapping)) {
+		if (folio_test_large(folio))
+			wbc.list = folio_list;
+		res = shmem_writeout(folio, &wbc);
+	} else {
+		res = swap_writeout(folio, &wbc);
+	}
+
+	if (res < 0)
+		handle_write_error(mapping, folio, res);
+	if (res == AOP_WRITEPAGE_ACTIVATE) {
+		folio_clear_reclaim(folio);
+		return PAGE_ACTIVATE;
+	}
+
+	/* synchronous write? */
+	if (!folio_test_writeback(folio))
+		folio_clear_reclaim(folio);
+
+	trace_mm_vmscan_write_folio(folio);
+	node_stat_add_folio(folio, NR_VMSCAN_WRITE);
+	return PAGE_SUCCESS;
+}
+
 /*
  * pageout is called by shrink_folio_list() for each dirty folio.
  */
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 			 struct swap_iocb **plug, struct list_head *folio_list)
 {
-	int (*writeout)(struct folio *, struct writeback_control *);
-
 	/*
 	 * We no longer attempt to writeback filesystem folios here, other
 	 * than tmpfs/shmem. That's taken care of in page-writeback.
@@ -690,51 +731,12 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		}
 		return PAGE_KEEP;
 	}
-	if (shmem_mapping(mapping))
-		writeout = shmem_writeout;
-	else if (folio_test_anon(folio))
-		writeout = swap_writeout;
-	else
-		return PAGE_ACTIVATE;
-
-	if (folio_clear_dirty_for_io(folio)) {
-		int res;
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = SWAP_CLUSTER_MAX,
-			.range_start = 0,
-			.range_end = LLONG_MAX,
-			.for_reclaim = 1,
-			.swap_plug = plug,
-		};
-		/*
-		 * The large shmem folio can be split if CONFIG_THP_SWAP is
-		 * not enabled or contiguous swap entries are failed to
-		 * allocate.
-		 */
-		if (shmem_mapping(mapping) && folio_test_large(folio))
-			wbc.list = folio_list;
-
-		folio_set_reclaim(folio);
-		res = writeout(folio, &wbc);
-		if (res < 0)
-			handle_write_error(mapping, folio, res);
-		if (res == AOP_WRITEPAGE_ACTIVATE) {
-			folio_clear_reclaim(folio);
-			return PAGE_ACTIVATE;
-		}
-
-		if (!folio_test_writeback(folio)) {
-			/* synchronous write? */
-			folio_clear_reclaim(folio);
-		}
-		trace_mm_vmscan_write_folio(folio);
-		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
-		return PAGE_SUCCESS;
-	}
-
-	return PAGE_CLEAN;
+	if (!shmem_mapping(mapping) && !folio_test_anon(folio))
+		return PAGE_ACTIVATE;
+	if (!folio_clear_dirty_for_io(folio))
+		return PAGE_CLEAN;
+	return writeout(folio, mapping, plug, folio_list);
 }
 
 /*
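
A note on the writeback_control the new helper builds: the sketch below
is the initializer from the first hunk with editorial glosses added as
comments; the comments are not part of the patch itself.

	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,		/* opportunistic reclaim writeback: don't wait for I/O */
		.nr_to_write	= SWAP_CLUSTER_MAX,	/* cap each call at one swap cluster's worth of pages */
		.range_start	= 0,			/* consider the whole range ... */
		.range_end	= LLONG_MAX,		/* ... of the mapping */
		.for_reclaim	= 1,			/* tell the writeout path it runs from memory reclaim */
		.swap_plug	= plug,			/* batch swap I/O through the caller's plug */
	};

With this setup, writeout() hands shmem folios to shmem_writeout(),
passing folio_list via wbc.list so a large folio can be split back onto
the list when contiguous swap entries cannot be allocated, and hands
everything else, i.e. anonymous folios, to swap_writeout().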