TRACE_EVENT(mm_lru_activate,
 
-       TP_PROTO(struct page *page),
+       TP_PROTO(struct folio *folio),
 
-       TP_ARGS(page),
+       TP_ARGS(folio),
 
        TP_STRUCT__entry(
-               __field(struct page *,  page    )
+               __field(struct folio *, folio   )
                __field(unsigned long,  pfn     )
        ),
 
        TP_fast_assign(
-               __entry->page   = page;
-               __entry->pfn    = page_to_pfn(page);
+               __entry->folio  = folio;
+               __entry->pfn    = folio_pfn(folio);
        ),
 
-       /* Flag format is based on page-types.c formatting for pagemap */
-       TP_printk("page=%p pfn=0x%lx", __entry->page, __entry->pfn)
-
+       TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
 );
 
 #endif /* _TRACE_PAGEMAP_H */
 
                      page_is_file_lru(page), thp_nr_pages(page));
 }
 
-static void __activate_page(struct page *page, struct lruvec *lruvec)
+static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
 {
-       if (!PageActive(page) && !PageUnevictable(page)) {
-               int nr_pages = thp_nr_pages(page);
+       if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
+               long nr_pages = folio_nr_pages(folio);
 
-               del_page_from_lru_list(page, lruvec);
-               SetPageActive(page);
-               add_page_to_lru_list(page, lruvec);
-               trace_mm_lru_activate(page);
+               lruvec_del_folio(lruvec, folio);
+               folio_set_active(folio);
+               lruvec_add_folio(lruvec, folio);
+               trace_mm_lru_activate(folio);
 
                __count_vm_events(PGACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
                                     nr_pages);
        }
 }
 
 #ifdef CONFIG_SMP
+static void __activate_page(struct page *page, struct lruvec *lruvec)
+{
+       return __folio_activate(page_folio(page), lruvec);
+}
+
 static void activate_page_drain(int cpu)
 {
        struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
 
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page);
 }
 
 static bool need_activate_page_drain(int cpu)
 {
        return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }
 
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
 {
-       page = compound_head(page);
-       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+       if (folio_test_lru(folio) && !folio_test_active(folio) &&
+           !folio_test_unevictable(folio)) {
                struct pagevec *pvec;
 
+               folio_get(folio);
                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.activate_page);
-               get_page(page);
-               if (pagevec_add_and_need_flush(pvec, page))
+               if (pagevec_add_and_need_flush(pvec, &folio->page))
                        pagevec_lru_move_fn(pvec, __activate_page);
                local_unlock(&lru_pvecs.lock);
        }
 }
 
 #else
 static inline void activate_page_drain(int cpu)
 {
 }
 
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        struct lruvec *lruvec;
 
-       page = &folio->page;
-       if (TestClearPageLRU(page)) {
+       if (folio_test_clear_lru(folio)) {
                lruvec = folio_lruvec_lock_irq(folio);
-               __activate_page(page, lruvec);
+               __folio_activate(folio, lruvec);
                unlock_page_lruvec_irq(lruvec);
-               SetPageLRU(page);
+               folio_set_lru(folio);
        }
 }
 #endif
                 * LRU on the next drain.
                 */
                if (PageLRU(page))
-                       activate_page(page);
+                       folio_activate(page_folio(page));
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);