mm/huge_memory: Remove page_deferred_list()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Tue, 6 Sep 2022 12:57:28 +0000 (08:57 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Tue, 3 Jan 2023 04:00:56 +0000 (23:00 -0500)
Use folio->_deferred_list directly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
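
As an illustration of the pattern this change adopts, below is a minimal userspace sketch (not kernel code): struct folio, struct deferred_split, and the list helpers are simplified stand-ins for the kernel's types, but the queueing logic mirrors how deferred_split_huge_page() and free_transhuge_page() now touch folio->_deferred_list directly instead of going through page_deferred_list().

/*
 * Minimal userspace sketch of using a folio's _deferred_list directly.
 * The types and helpers here are simplified stand-ins for the kernel's
 * struct folio, struct deferred_split and <linux/list.h>; locking and
 * the page-to-folio conversion are omitted.
 */
#include <stdio.h>
#include <stdbool.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Unlink and reinitialise, like the kernel's list_del_init(). */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

/* Simplified stand-ins for the structures touched by the patch. */
struct folio { struct list_head _deferred_list; };
struct deferred_split {
	struct list_head split_queue;
	unsigned long split_queue_len;
};

/* Queue a folio for deferred split, as deferred_split_huge_page() does. */
static void queue_folio(struct deferred_split *ds_queue, struct folio *folio)
{
	if (list_empty(&folio->_deferred_list)) {
		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
		ds_queue->split_queue_len++;
	}
}

/* Remove a queued folio again, as free_transhuge_page() does. */
static void unqueue_folio(struct deferred_split *ds_queue, struct folio *folio)
{
	if (!list_empty(&folio->_deferred_list)) {
		ds_queue->split_queue_len--;
		list_del_init(&folio->_deferred_list);
	}
}

int main(void)
{
	struct deferred_split ds_queue = { .split_queue_len = 0 };
	struct folio folio;

	INIT_LIST_HEAD(&ds_queue.split_queue);
	INIT_LIST_HEAD(&folio._deferred_list);

	queue_folio(&ds_queue, &folio);
	printf("queued:   split_queue_len=%lu\n", ds_queue.split_queue_len);
	unqueue_folio(&ds_queue, &folio);
	printf("unqueued: split_queue_len=%lu\n", ds_queue.split_queue_len);
	return 0;
}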
include/linux/huge_mm.h
mm/huge_memory.c

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index aacfcb02606f45704336c00a93035921e9c6ccf2..b9978978a160965135b194de67f71e787691d972 100644 (file)
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -293,14 +293,6 @@ static inline bool thp_migration_supported(void)
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
 }
 
-static inline struct list_head *page_deferred_list(struct page *page)
-{
-       struct folio *folio = (struct folio *)page;
-
-       VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
-       return &folio->_deferred_list;
-}
-
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 13fcb349b3f56d397a1c248ceda3b7b54d82769e..05fa633b7cb7421ba06c05ff285cc594b56ce628 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -580,12 +580,10 @@ static inline struct deferred_split *get_deferred_split_queue(struct page *page)
 
 void prep_transhuge_page(struct page *page)
 {
-       /*
-        * we use page->mapping and page->index in second tail page
-        * as list_head: assuming THP order >= 2
-        */
+       struct folio *folio = (struct folio *)page;
 
-       INIT_LIST_HEAD(page_deferred_list(page));
+       VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
+       INIT_LIST_HEAD(&folio->_deferred_list);
        set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 }
 
@@ -2804,13 +2802,14 @@ out:
 
 void free_transhuge_page(struct page *page)
 {
+       struct folio *folio = (struct folio *)page;
        struct deferred_split *ds_queue = get_deferred_split_queue(page);
        unsigned long flags;
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-       if (!list_empty(page_deferred_list(page))) {
+       if (!list_empty(&folio->_deferred_list)) {
                ds_queue->split_queue_len--;
-               list_del(page_deferred_list(page));
+               list_del(&folio->_deferred_list);
        }
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
        free_compound_page(page);
@@ -2818,35 +2817,36 @@ void free_transhuge_page(struct page *page)
 
 void deferred_split_huge_page(struct page *page)
 {
+       struct folio *folio = page_folio(page);
        struct deferred_split *ds_queue = get_deferred_split_queue(page);
 #ifdef CONFIG_MEMCG
-       struct mem_cgroup *memcg = page_memcg(compound_head(page));
+       struct mem_cgroup *memcg = folio_memcg(folio);
 #endif
        unsigned long flags;
 
-       VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+       VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
 
        /*
         * The try_to_unmap() in page reclaim path might reach here too,
         * this may cause a race condition to corrupt deferred split queue.
-        * And, if page reclaim is already handling the same page, it is
+        * And, if page reclaim is already handling the same folio, it is
         * unnecessary to handle it again in shrinker.
         *
-        * Check PageSwapCache to determine if the page is being
-        * handled by page reclaim since THP swap would add the page into
+        * Check the swapcache flag to determine if the folio is being
+        * handled by page reclaim since THP swap would add the folio into
         * swap cache before calling try_to_unmap().
         */
-       if (PageSwapCache(page))
+       if (folio_test_swapcache(folio))
                return;
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-       if (list_empty(page_deferred_list(page))) {
+       if (list_empty(&folio->_deferred_list)) {
                count_vm_event(THP_DEFERRED_SPLIT_PAGE);
-               list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
+               list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG
                if (memcg)
-                       set_shrinker_bit(memcg, page_to_nid(page),
+                       set_shrinker_bit(memcg, folio_nid(folio),
                                         deferred_split_shrinker.id);
 #endif
        }