mm: remove PageUnevictable
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Wed, 21 Aug 2024 19:34:38 +0000 (20:34 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Wed, 4 Sep 2024 04:15:44 +0000 (21:15 -0700)
There is only one caller of PageUnevictable() left; convert it to call
folio_test_unevictable() and remove all the page accessors.

Link: https://lkml.kernel.org/r/20240821193445.2294269-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
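
For context, a rough sketch of the folio accessor that FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE) provides, next to the page wrapper this patch removes. The bodies below are simplified for illustration and are not the literal macro expansions; the real definitions are generated by the macro families in include/linux/page-flags.h.

/* Simplified sketch, not the exact code generated by the macros. */
static __always_inline bool folio_test_unevictable(const struct folio *folio)
{
	/* PG_unevictable is tracked in the head page, i.e. folio->flags. */
	return test_bit(PG_unevictable, &folio->flags);
}

/* The legacy page wrapper being dropped: it only reached the folio anyway. */
static __always_inline int PageUnevictable(const struct page *page)
{
	return folio_test_unevictable(page_folio(page));
}

With the last caller converted to test the folio directly, the page wrappers add nothing but an extra page_folio() lookup, so the PAGEFLAG() declaration can become FOLIO_FLAG(), which emits only the folio accessors.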
include/linux/page-flags.h
mm/huge_memory.c

include/linux/page-flags.h
index b31d2c50b6f0891f1675e1c3a471f493979393f3..2150502195d5da814f2d3c52ceabfcc1cbcec359 100644
@@ -580,9 +580,9 @@ FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
 FOLIO_FLAG_FALSE(swapcache)
 #endif
 
-PAGEFLAG(Unevictable, unevictable, PF_HEAD)
-       __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
-       TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
+FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
+       __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
+       FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
 
 #ifdef CONFIG_MMU
 PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
mm/huge_memory.c
index ef6d4f617c4af2ede0ef7128e7bab275430e56ba..8dfb912ebdaa27e4d72a20965e0657238c9c32d2 100644
@@ -2932,25 +2932,25 @@ static void remap_page(struct folio *folio, unsigned long nr)
        }
 }
 
-static void lru_add_page_tail(struct page *head, struct page *tail,
+static void lru_add_page_tail(struct folio *folio, struct page *tail,
                struct lruvec *lruvec, struct list_head *list)
 {
-       VM_BUG_ON_PAGE(!PageHead(head), head);
-       VM_BUG_ON_PAGE(PageLRU(tail), head);
+       VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+       VM_BUG_ON_FOLIO(PageLRU(tail), folio);
        lockdep_assert_held(&lruvec->lru_lock);
 
        if (list) {
                /* page reclaim is reclaiming a huge page */
-               VM_WARN_ON(PageLRU(head));
+               VM_WARN_ON(folio_test_lru(folio));
                get_page(tail);
                list_add_tail(&tail->lru, list);
        } else {
                /* head is still on lru (and we have it frozen) */
-               VM_WARN_ON(!PageLRU(head));
-               if (PageUnevictable(tail))
+               VM_WARN_ON(!folio_test_lru(folio));
+               if (folio_test_unevictable(folio))
                        tail->mlock_count = 0;
                else
-                       list_add_tail(&tail->lru, &head->lru);
+                       list_add_tail(&tail->lru, &folio->lru);
                SetPageLRU(tail);
        }
 }
@@ -3049,7 +3049,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
         * pages to show after the currently processed elements - e.g.
         * migrate_pages
         */
-       lru_add_page_tail(head, page_tail, lruvec, list);
+       lru_add_page_tail(folio, page_tail, lruvec, list);
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,