mm: convert free_zone_device_page to free_zone_device_folio
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 5 Apr 2024 15:32:27 +0000 (16:32 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:44 +0000 (20:56 -0700)
Both callers already have a folio; pass it in and save a few calls to
compound_head().

Link: https://lkml.kernel.org/r/20240405153228.2563754-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/memremap.c
mm/swap.c
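
A minimal user-space model of the saving the commit message describes (the struct layouts and helpers below are simplified stand-ins for illustration, not the kernel's real definitions): every page-based helper such as PageAnon() must first call compound_head() to normalise a possible tail page to its head, whereas a folio is by definition never a tail page, so folio helpers read its fields directly.

/* Illustrative model only -- simplified types, not kernel code. */
#include <stdio.h>

struct page {
	unsigned long flags;
	struct page *head;	/* stand-in for the compound_head encoding */
};

struct folio {			/* a folio is always a head page */
	struct page page;
};

/* page API: must re-derive the head page on every call */
static int page_anon(const struct page *page)
{
	return page->head->flags & 1;
}

/* folio API: the head lookup already happened at page_folio() time */
static int folio_anon(const struct folio *folio)
{
	return folio->page.flags & 1;
}

int main(void)
{
	struct page head = { .flags = 1 };
	struct folio *folio = (struct folio *)&head;

	head.head = &head;	/* a head page points at itself */
	printf("%d %d\n", page_anon(&head), folio_anon(folio));
	return 0;
}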

index 8fd41f889a9587c91096e5a8de8ab02b38fa9a96..22152e0c8494a88d6a2c3cb56261b59a6220ce02 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1165,7 +1165,7 @@ void __vunmap_range_noflush(unsigned long start, unsigned long end);
 int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
                      unsigned long addr, int page_nid, int *flags);
 
-void free_zone_device_page(struct page *page);
+void free_zone_device_folio(struct folio *folio);
 int migrate_device_coherent_page(struct page *page);
 
 /*
index 9e9fb1972fff0af257d8b278012b44d7c7820bcd..e1776693e2eaed999bc6d57d431f8f1b7690f2d7 100644 (file)
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -456,21 +456,23 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 }
 EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
-void free_zone_device_page(struct page *page)
+void free_zone_device_folio(struct folio *folio)
 {
-       if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
+       if (WARN_ON_ONCE(!folio->page.pgmap->ops ||
+                       !folio->page.pgmap->ops->page_free))
                return;
 
-       mem_cgroup_uncharge(page_folio(page));
+       mem_cgroup_uncharge(folio);
 
        /*
         * Note: we don't expect anonymous compound pages yet. Once supported
         * and we could PTE-map them similar to THP, we'd have to clear
         * PG_anon_exclusive on all tail pages.
         */
-       VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
-       if (PageAnon(page))
-               __ClearPageAnonExclusive(page);
+       if (folio_test_anon(folio)) {
+               VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+               __ClearPageAnonExclusive(folio_page(folio, 0));
+       }
 
        /*
         * When a device managed page is freed, the folio->mapping field
@@ -481,20 +483,20 @@ void free_zone_device_page(struct page *page)
         *
         * For other types of ZONE_DEVICE pages, migration is either
         * handled differently or not done at all, so there is no need
-        * to clear page->mapping.
+        * to clear folio->mapping.
         */
-       page->mapping = NULL;
-       page->pgmap->ops->page_free(page);
+       folio->mapping = NULL;
+       folio->page.pgmap->ops->page_free(folio_page(folio, 0));
 
-       if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
-           page->pgmap->type != MEMORY_DEVICE_COHERENT)
+       if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE &&
+           folio->page.pgmap->type != MEMORY_DEVICE_COHERENT)
                /*
-                * Reset the page count to 1 to prepare for handing out the page
+                * Reset the refcount to 1 to prepare for handing out the page
                 * again.
                 */
-               set_page_count(page, 1);
+               folio_set_count(folio, 1);
        else
-               put_dev_pagemap(page->pgmap);
+               put_dev_pagemap(folio->page.pgmap);
 }
 
 void zone_device_page_init(struct page *page)
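
Two details in the converted function above are worth noting. First, struct folio deliberately embeds a struct page as its first member, so folio->page.pgmap loads the same field the old page->pgmap did. Second, the ->page_free() callback still takes a struct page *, which is why the head page is recovered with folio_page(folio, 0). A hedged sketch of that layout assumption follows; folio_head_page() is a made-up name used only here, and the real kernel spells this folio_page(folio, 0).

/* Illustrative only: simplified types showing why the folio and its
 * head page alias the same memory, including the pgmap pointer. */
struct dev_pagemap;

struct page {
	struct dev_pagemap *pgmap;
};

struct folio {
	struct page page;	/* first member: same address as the folio */
};

/* what folio_page(folio, 0) reduces to for a head page */
static inline struct page *folio_head_page(struct folio *folio)
{
	return &folio->page;
}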
index 4f3964c983d8d1a87e6a1a9a18437e06d4e8018e..8ae5cd4ed180bd06d3be6f379751db1cc8b0f21b 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -115,7 +115,7 @@ static void page_cache_release(struct folio *folio)
 void __folio_put(struct folio *folio)
 {
        if (unlikely(folio_is_zone_device(folio))) {
-               free_zone_device_page(&folio->page);
+               free_zone_device_folio(folio);
                return;
        } else if (folio_test_hugetlb(folio)) {
                free_huge_folio(folio);
@@ -983,7 +983,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
                        if (put_devmap_managed_page_refs(&folio->page, nr_refs))
                                continue;
                        if (folio_ref_sub_and_test(folio, nr_refs))
-                               free_zone_device_page(&folio->page);
+                               free_zone_device_folio(folio);
                        continue;
                }
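
For reference, a rough sketch of how a ZONE_DEVICE folio reaches the new function after this commit: the final reference drop falls through to __folio_put(), whose zone-device branch (the mm/swap.c hunk above) now dispatches by folio. The kernel helpers are declared as bare stubs below so only the shape is visible; folio_put_sketch() is a hypothetical name, not a kernel function.

/* Illustrative call chain only -- the real kernel code batches puts
 * and takes other paths (hugetlb, LRU) that are elided here. */
struct folio;

int folio_put_testzero(struct folio *folio);	/* --refcount == 0 ? */
int folio_is_zone_device(const struct folio *folio);
void free_zone_device_folio(struct folio *folio);

static void folio_put_sketch(struct folio *folio)
{
	if (!folio_put_testzero(folio))
		return;			/* other references remain */
	if (folio_is_zone_device(folio))
		free_zone_device_folio(folio);	/* the new folio-typed free */
	/* ... hugetlb and normal-folio freeing elided ... */
}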