mm/vmscan: convert reclaim_clean_pages_from_list() to folios
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Fri, 17 Jun 2022 15:42:44 +0000 (16:42 +0100)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
		Wed, 20 Jul 2022 00:15:08 +0000 (20:15 -0400)
Patch series "nvert much of vmscan to folios"

vmscan always operates on folios since it puts the pages on the LRU list.
Switching all of these functions from pages to folios saves 1483 bytes of
text by removing all the baggage around calling compound_head() and
similar functions.
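
To see where that baggage comes from, here is a simplified sketch of the
pattern (the sketch_ helpers are illustrative, not the kernel's actual
macro-generated page-flags.h tests): a page-based flag test must resolve
the head page of a potentially compound page on every call, while the
folio-based test starts from the head, so the lookup disappears.

	/* Illustrative sketch only; the real tests are macro-generated. */
	static inline bool sketch_PageDirty(struct page *page)
	{
		/* hidden head-page lookup on every test */
		return test_bit(PG_dirty, &compound_head(page)->flags);
	}

	static inline bool sketch_folio_test_dirty(struct folio *folio)
	{
		/* a folio is never a tail page, so no lookup is needed */
		return test_bit(PG_dirty, &folio->flags);
	}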

This patch (of 5):

This is a straightforward conversion that removes several hidden calls
to compound_head(), saving 330 bytes of kernel text.
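
The one new helper the conversion needs is __folio_test_movable(), added
in page-flags.h below as a mirror of __PageMovable().  Both test the tag
bits stored in the low bits of the mapping pointer; for reference, these
are the relevant definitions from include/linux/page-flags.h as of this
patch:

	#define PAGE_MAPPING_ANON	0x1	/* mapping points at an anon_vma */
	#define PAGE_MAPPING_MOVABLE	0x2	/* non-LRU movable page */
	#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)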

Link: https://lkml.kernel.org/r/20220617154248.700416-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20220617154248.700416-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/vmscan.c

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a5263a21b72f659e4c9406ca7429e1561dd19fee..109dda5a72a9bbfcfab278cd26bb98aa3e2fe45c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -676,6 +676,12 @@ static __always_inline bool PageAnon(struct page *page)
        return folio_test_anon(page_folio(page));
 }
 
+static __always_inline bool __folio_test_movable(const struct folio *folio)
+{
+       return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+                       PAGE_MAPPING_MOVABLE;
+}
+
 static __always_inline int __PageMovable(struct page *page)
 {
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cd2201ba6d7c822f2c8cd67373803479769615a9..439a7e8499758c40427613e948090c9066bccd29 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2047,7 +2047,7 @@ keep:
 }
 
 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
-                                           struct list_head *page_list)
+                                           struct list_head *folio_list)
 {
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
@@ -2055,16 +2055,16 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
        };
        struct reclaim_stat stat;
        unsigned int nr_reclaimed;
-       struct page *page, *next;
-       LIST_HEAD(clean_pages);
+       struct folio *folio, *next;
+       LIST_HEAD(clean_folios);
        unsigned int noreclaim_flag;
 
-       list_for_each_entry_safe(page, next, page_list, lru) {
-               if (!PageHuge(page) && page_is_file_lru(page) &&
-                   !PageDirty(page) && !__PageMovable(page) &&
-                   !PageUnevictable(page)) {
-                       ClearPageActive(page);
-                       list_move(&page->lru, &clean_pages);
+       list_for_each_entry_safe(folio, next, folio_list, lru) {
+               if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
+                   !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
+                   !folio_test_unevictable(folio)) {
+                       folio_clear_active(folio);
+                       list_move(&folio->lru, &clean_folios);
                }
        }
 
@@ -2075,11 +2075,11 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
         * change in the future.
         */
        noreclaim_flag = memalloc_noreclaim_save();
-       nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
+       nr_reclaimed = shrink_page_list(&clean_folios, zone->zone_pgdat, &sc,
                                        &stat, true);
        memalloc_noreclaim_restore(noreclaim_flag);
 
-       list_splice(&clean_pages, page_list);
+       list_splice(&clean_folios, folio_list);
        mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
                            -(long)nr_reclaimed);
        /*