www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/zone_device: rename page_free callback to folio_free
authorBalbir Singh <balbirs@nvidia.com>
Wed, 1 Oct 2025 06:56:53 +0000 (16:56 +1000)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:36 +0000 (21:28 -0700)
Change page_free to folio_free to make the folio support for
zone device-private more consistent. The PCI P2PDMA callback
has also been changed to folio_free() as a result.

For drivers that do not support folios (yet), the folio is
converted back into a page via &folio->page and the page is used
as is, in the current callback implementation.

Link: https://lkml.kernel.org/r/20251001065707.920170-3-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/mm/memory-model.rst
arch/powerpc/kvm/book3s_hv_uvmem.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/drm_pagemap.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/pci/p2pdma.c
include/linux/memremap.h
lib/test_hmm.c
mm/memremap.c

index 5f3eafbbc52034f66579ddf7876f1f3640ba5fcf..7957122039e876758567566d226267af7d3e0c0b 100644 (file)
@@ -165,7 +165,7 @@ The users of `ZONE_DEVICE` are:
 * pmem: Map platform persistent memory to be used as a direct-I/O target
   via DAX mappings.
 
-* hmm: Extend `ZONE_DEVICE` with `->page_fault()` and `->page_free()`
+* hmm: Extend `ZONE_DEVICE` with `->page_fault()` and `->folio_free()`
   event callbacks to allow a device-driver to coordinate memory management
   events related to device-memory, typically GPU memory. See
   Documentation/mm/hmm.rst.
index 91f763410673f3164f41b6961793b99a11629cec..e5000bef90f2ae8f84ea9bffd3d1aaf9781e98f4 100644 (file)
@@ -1014,8 +1014,9 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
  * to a normal PFN during H_SVM_PAGE_OUT.
  * Gets called with kvm->arch.uvmem_lock held.
  */
-static void kvmppc_uvmem_page_free(struct page *page)
+static void kvmppc_uvmem_folio_free(struct folio *folio)
 {
+       struct page *page = &folio->page;
        unsigned long pfn = page_to_pfn(page) -
                        (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
        struct kvmppc_uvmem_page_pvt *pvt;
@@ -1034,7 +1035,7 @@ static void kvmppc_uvmem_page_free(struct page *page)
 }
 
 static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
-       .page_free = kvmppc_uvmem_page_free,
+       .folio_free = kvmppc_uvmem_folio_free,
        .migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
 };
 
index f6198e66dc5af61b7f2cd0b2dfc7673c292c89a4..6f1617436f4bad1d9b108e4ebf685460f07e39ec 100644 (file)
@@ -568,8 +568,9 @@ out:
        return r < 0 ? r : 0;
 }
 
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
 {
+       struct page *page = &folio->page;
        struct svm_range_bo *svm_bo = page->zone_device_data;
 
        if (svm_bo) {
@@ -1009,7 +1010,7 @@ out_mmput:
 }
 
 static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
-       .page_free              = svm_migrate_page_free,
+       .folio_free             = svm_migrate_folio_free,
        .migrate_to_ram         = svm_migrate_to_ram,
 };
 
index 46a8edb279dccc2bb7d6bd9e4b1e239f8850cd53..37d7cfbbb3e8a6c0403938c2625de27812810797 100644 (file)
@@ -752,15 +752,15 @@ err_out:
 }
 
 /**
- * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
- * @page: Pointer to the page
+ * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
+ * @folio: Pointer to the folio
  *
  * This function is a callback used to put the GPU SVM zone device data
  * associated with a page when it is being released.
  */
-static void drm_pagemap_page_free(struct page *page)
+static void drm_pagemap_folio_free(struct folio *folio)
 {
-       drm_pagemap_zdd_put(page->zone_device_data);
+       drm_pagemap_zdd_put(folio->page.zone_device_data);
 }
 
 /**
@@ -788,7 +788,7 @@ static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
 }
 
 static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
-       .page_free = drm_pagemap_page_free,
+       .folio_free = drm_pagemap_folio_free,
        .migrate_to_ram = drm_pagemap_migrate_to_ram,
 };
 
index 53cc1926b9da58ab8d19f425735fb5911271edd5..d34288ebe7d2361dcaf5a3bd8a3add5dfd60e871 100644 (file)
@@ -108,8 +108,9 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
        return chunk->bo->offset + off;
 }
 
-static void nouveau_dmem_page_free(struct page *page)
+static void nouveau_dmem_folio_free(struct folio *folio)
 {
+       struct page *page = &folio->page;
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;
 
@@ -220,7 +221,7 @@ done:
 }
 
 static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
-       .page_free              = nouveau_dmem_page_free,
+       .folio_free             = nouveau_dmem_folio_free,
        .migrate_to_ram         = nouveau_dmem_migrate_to_ram,
 };
 
index 78e108e47254ab629edb4eb7ef438a1cdca43fad..ee74b75d3e1f2c00fba5f33905fa493136e0ff82 100644 (file)
@@ -200,8 +200,9 @@ static const struct attribute_group p2pmem_group = {
        .name = "p2pmem",
 };
 
-static void p2pdma_page_free(struct page *page)
+static void p2pdma_folio_free(struct folio *folio)
 {
+       struct page *page = &folio->page;
        struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
        /* safe to dereference while a reference is held to the percpu ref */
        struct pci_p2pdma *p2pdma =
@@ -214,7 +215,7 @@ static void p2pdma_page_free(struct page *page)
 }
 
 static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
-       .page_free = p2pdma_page_free,
+       .folio_free = p2pdma_folio_free,
 };
 
 static void pci_p2pdma_release(void *data)
index d2487a19cba2a420bc7a8a921a714547505b5672..cd28d1666801730eecdafb93c57e590411fcfcad 100644 (file)
@@ -77,11 +77,11 @@ enum memory_type {
 
 struct dev_pagemap_ops {
        /*
-        * Called once the page refcount reaches 0.  The reference count will be
+        * Called once the folio refcount reaches 0.  The reference count will be
         * reset to one by the core code after the method is called to prepare
-        * for handing out the page again.
+        * for handing out the folio again.
         */
-       void (*page_free)(struct page *page);
+       void (*folio_free)(struct folio *folio);
 
        /*
         * Used for private (un-addressable) device memory only.  Must migrate
index 24d82121cde8a13de188f8ae7658f0c0ea3b3d88..9dbf265d103634f75ef981d551a1a96b681f9aee 100644 (file)
@@ -1374,8 +1374,9 @@ static const struct file_operations dmirror_fops = {
        .owner          = THIS_MODULE,
 };
 
-static void dmirror_devmem_free(struct page *page)
+static void dmirror_devmem_free(struct folio *folio)
 {
+       struct page *page = &folio->page;
        struct page *rpage = BACKING_PAGE(page);
        struct dmirror_device *mdevice;
 
@@ -1438,7 +1439,7 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 }
 
 static const struct dev_pagemap_ops dmirror_devmem_ops = {
-       .page_free      = dmirror_devmem_free,
+       .folio_free     = dmirror_devmem_free,
        .migrate_to_ram = dmirror_devmem_fault,
 };
 
index e45dfb568710976f34bdbf35eeed71181efd3731..4c2e0d68eb279871b782f5aa60f2aba8c42eab8a 100644 (file)
@@ -289,8 +289,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
-               if (!pgmap->ops->page_free) {
-                       WARN(1, "Missing page_free method\n");
+               if (!pgmap->ops->folio_free) {
+                       WARN(1, "Missing folio_free method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
@@ -299,8 +299,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                }
                break;
        case MEMORY_DEVICE_COHERENT:
-               if (!pgmap->ops->page_free) {
-                       WARN(1, "Missing page_free method\n");
+               if (!pgmap->ops->folio_free) {
+                       WARN(1, "Missing folio_free method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
@@ -453,9 +453,9 @@ void free_zone_device_folio(struct folio *folio)
        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
        case MEMORY_DEVICE_COHERENT:
-               if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
+               if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free))
                        break;
-               pgmap->ops->page_free(&folio->page);
+               pgmap->ops->folio_free(folio);
                percpu_ref_put_many(&folio->pgmap->ref, nr);
                break;
 
@@ -472,9 +472,9 @@ void free_zone_device_folio(struct folio *folio)
                break;
 
        case MEMORY_DEVICE_PCI_P2PDMA:
-               if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
+               if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free))
                        break;
-               pgmap->ops->page_free(folio_page(folio, 0));
+               pgmap->ops->folio_free(folio);
                break;
        }
 }