www.infradead.org Git - users/willy/pagecache.git/commitdiff
mm/huge_memory: Convert get_deferred_split_queue() to take a folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 6 Sep 2022 14:26:39 +0000 (10:26 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 04:00:56 +0000 (23:00 -0500)
Removes a few calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/huge_memory.c

index 05fa633b7cb7421ba06c05ff285cc594b56ce628..3daa8c6e387ab2011c7d815e83c3359f48ebdc2c 100644 (file)
@@ -559,10 +559,11 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_MEMCG
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline
+struct deferred_split *get_deferred_split_queue(struct folio *folio)
 {
-       struct mem_cgroup *memcg = page_memcg(compound_head(page));
-       struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+       struct mem_cgroup *memcg = folio_memcg(folio);
+       struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
 
        if (memcg)
                return &memcg->deferred_split_queue;
@@ -570,9 +571,10 @@ static inline struct deferred_split *get_deferred_split_queue(struct page *page)
                return &pgdat->deferred_split_queue;
 }
 #else
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline
+struct deferred_split *get_deferred_split_queue(struct folio *folio)
 {
-       struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+       struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
 
        return &pgdat->deferred_split_queue;
 }
@@ -2652,7 +2654,7 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct folio *folio = page_folio(page);
-       struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
+       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
        XA_STATE(xas, &folio->mapping->i_pages, folio->index);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
@@ -2803,7 +2805,7 @@ out:
 void free_transhuge_page(struct page *page)
 {
        struct folio *folio = (struct folio *)page;
-       struct deferred_split *ds_queue = get_deferred_split_queue(page);
+       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
        unsigned long flags;
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
@@ -2818,7 +2820,7 @@ void free_transhuge_page(struct page *page)
 void deferred_split_huge_page(struct page *page)
 {
        struct folio *folio = page_folio(page);
-       struct deferred_split *ds_queue = get_deferred_split_queue(page);
+       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 #ifdef CONFIG_MEMCG
        struct mem_cgroup *memcg = folio_memcg(folio);
 #endif