www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/memremap: remove unused get_dev_pagemap() parameter
author: Alistair Popple <apopple@nvidia.com>
Wed, 3 Sep 2025 22:59:26 +0000 (08:59 +1000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:52 +0000 (17:25 -0700)
GUP no longer uses get_dev_pagemap().  As it was the only user of the
get_dev_pagemap() pgmap caching feature it can be removed.

Link: https://lkml.kernel.org/r/20250903225926.34702-2-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memremap.h
mm/memory-failure.c
mm/memory_hotplug.c
mm/memremap.c

index aa1b6aa877a029c58d86b18df387341e6a568b07..e5951ba12a2825ab7816a96de07bb7e982e80936 100644 (file)
@@ -211,8 +211,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-               struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
 
 unsigned long memremap_compat_align(void);
@@ -234,8 +233,7 @@ static inline void devm_memunmap_pages(struct device *dev,
 {
 }
 
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-               struct dev_pagemap *pgmap)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
 {
        return NULL;
 }
index 2a95b41e0535e47cd68b2929fbd18560370768d6..6d9134e3d115e04c043a92bab071ea092b76f0e0 100644 (file)
@@ -2194,7 +2194,7 @@ int memory_failure(unsigned long pfn, int flags)
                        goto unlock_mutex;
 
                if (pfn_valid(pfn)) {
-                       pgmap = get_dev_pagemap(pfn, NULL);
+                       pgmap = get_dev_pagemap(pfn);
                        put_ref_page(pfn, flags);
                        if (pgmap) {
                                res = memory_failure_dev_pagemap(pfn, flags,
index 74318c7877156734c6e78d6d1dfb80d96aea64bc..883b8e4d51ba84f08f04892b1a8a375b423e8698 100644 (file)
@@ -375,7 +375,7 @@ struct page *pfn_to_online_page(unsigned long pfn)
         * the section may be 'offline' but 'valid'. Only
         * get_dev_pagemap() can determine sub-section online status.
         */
-       pgmap = get_dev_pagemap(pfn, NULL);
+       pgmap = get_dev_pagemap(pfn);
        put_dev_pagemap(pgmap);
 
        /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
index a2d4bb88f64b62e0bed76d37a13722d682807b55..46cb1b0b6f72238b1688a823b97ad9bd95903bea 100644 (file)
@@ -153,14 +153,14 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
                                "altmap not supported for multiple ranges\n"))
                return -EINVAL;
 
-       conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start));
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }
 
-       conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end));
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
@@ -397,26 +397,12 @@ EXPORT_SYMBOL_GPL(devm_memunmap_pages);
 /**
  * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
  * @pfn: page frame number to lookup page_map
- * @pgmap: optional known pgmap that already has a reference
- *
- * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
- * is non-NULL but does not cover @pfn the reference to it will be released.
  */
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-               struct dev_pagemap *pgmap)
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
 {
+       struct dev_pagemap *pgmap;
        resource_size_t phys = PFN_PHYS(pfn);
 
-       /*
-        * In the cached case we're already holding a live reference.
-        */
-       if (pgmap) {
-               if (phys >= pgmap->range.start && phys <= pgmap->range.end)
-                       return pgmap;
-               put_dev_pagemap(pgmap);
-       }
-
-       /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))