mm: move follow_phys to arch/x86/mm/pat/memtype.c
author    Christoph Hellwig <hch@lst.de>
          Sun, 24 Mar 2024 23:45:42 +0000 (07:45 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 26 Apr 2024 03:56:12 +0000 (20:56 -0700)
follow_phys is only used by two callers in arch/x86/mm/pat/memtype.c.
Move it there and hardcode the two arguments that get the same values
passed by both callers.

[david@redhat.com: conflict resolutions]
Link: https://lkml.kernel.org/r/20240403212131.929421-4-david@redhat.com
Link: https://lkml.kernel.org/r/20240324234542.2038726-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fei Li <fei1.li@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/x86/mm/pat/memtype.c
include/linux/mm.h
mm/memory.c

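For orientation before reading the diff: the interface change amounts to dropping the address and flags parameters, which the callers in the PAT code always pass as vma->vm_start and 0. A before/after sketch of the prototype and of the call site in get_pat_info(), taken from the hunks below:

	/* before: exported from mm/memory.c, declared in <linux/mm.h> */
	int follow_phys(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags, unsigned long *prot,
			resource_size_t *phys);
	...
	if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {

	/* after: static helper local to arch/x86/mm/pat/memtype.c */
	static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
			resource_size_t *phys);
	...
	if (!follow_phys(vma, &prot, paddr)) {
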
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 36b603d0cddefc7b208873e8d435cdf1ddbe20d9..d01c3b0bd6eb1cf2f33dc60d6e0e5ec5127ee863 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -39,6 +39,7 @@
 #include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/fs.h>
 #include <linux/rbtree.h>
 
@@ -947,6 +948,32 @@ static void free_pfn_range(u64 paddr, unsigned long size)
                memtype_free(paddr, paddr + size);
 }
 
+static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
+               resource_size_t *phys)
+{
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return -EINVAL;
+
+       if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
+               return -EINVAL;
+
+       pte = ptep_get(ptep);
+
+       /* Never return PFNs of anon folios in COW mappings. */
+       if (vm_normal_folio(vma, vma->vm_start, pte)) {
+               pte_unmap_unlock(ptep, ptl);
+               return -EINVAL;
+       }
+
+       *prot = pgprot_val(pte_pgprot(pte));
+       *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+       pte_unmap_unlock(ptep, ptl);
+       return 0;
+}
+
 static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
                pgprot_t *pgprot)
 {
@@ -964,7 +991,7 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
         * detect the PFN. If we need the cachemode as well, we're out of luck
         * for now and have to fail fork().
         */
-       if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
+       if (!follow_phys(vma, &prot, paddr)) {
                if (pgprot)
                        *pgprot = __pgprot(prot);
                return 0;
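
(Not part of the commit.) With the declaration removed from <linux/mm.h> in the next hunk, code outside the PAT implementation that wants the physical address behind a VM_IO/VM_PFNMAP mapping has to walk the page table itself via the still-exported follow_pte(). A minimal, hypothetical sketch mirroring the helper above (lookup_pfnmap_phys is an illustrative name, not a kernel API):

	/* Hypothetical helper, not part of this commit or the kernel API. */
	static int lookup_pfnmap_phys(struct vm_area_struct *vma,
				      unsigned long addr, resource_size_t *phys)
	{
		pte_t *ptep, pte;
		spinlock_t *ptl;

		if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
			return -EINVAL;

		/* Look up the PTE under its page-table lock. */
		if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
			return -EINVAL;
		pte = ptep_get(ptep);

		*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}
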
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cb8e6158b577570164994cd4f6c661edcd2ac9ba..5dc65618e3863b94c276b4c622e61d57af6b89cc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2424,8 +2424,6 @@ int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte(struct mm_struct *mm, unsigned long address,
               pte_t **ptepp, spinlock_t **ptlp);
-int follow_phys(struct vm_area_struct *vma, unsigned long address,
-               unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                        void *buf, int len, int write);
 
diff --git a/mm/memory.c b/mm/memory.c
index 652370703e41a15bf3cab382f6f4d49b140d9afa..62ee4a15092ae011e594492ff1db1acb15c080b2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5928,38 +5928,6 @@ out:
 EXPORT_SYMBOL_GPL(follow_pte);
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-int follow_phys(struct vm_area_struct *vma,
-               unsigned long address, unsigned int flags,
-               unsigned long *prot, resource_size_t *phys)
-{
-       int ret = -EINVAL;
-       pte_t *ptep, pte;
-       spinlock_t *ptl;
-
-       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-               goto out;
-
-       if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
-               goto out;
-       pte = ptep_get(ptep);
-
-       /* Never return PFNs of anon folios in COW mappings. */
-       if (vm_normal_folio(vma, address, pte))
-               goto unlock;
-
-       if ((flags & FOLL_WRITE) && !pte_write(pte))
-               goto unlock;
-
-       *prot = pgprot_val(pte_pgprot(pte));
-       *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
-
-       ret = 0;
-unlock:
-       pte_unmap_unlock(ptep, ptl);
-out:
-       return ret;
-}
-
 /**
  * generic_access_phys - generic implementation for iomem mmap access
  * @vma: the vma to access