From cbea8536d933d546ceb1005bf9c04f9d01da8092 Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Mon, 26 Aug 2024 16:43:46 -0400
Subject: [PATCH] mm/x86/pat: use the new follow_pfnmap API

Use the new API that can understand huge pfn mappings.

Link: https://lkml.kernel.org/r/20240826204353.2228736-13-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: Alexander Gordeev
Cc: Alex Williamson
Cc: Aneesh Kumar K.V
Cc: Catalin Marinas
Cc: Christian Borntraeger
Cc: David Hildenbrand
Cc: Gavin Shan
Cc: Gerald Schaefer
Cc: Heiko Carstens
Cc: Jason Gunthorpe
Cc: Matthew Wilcox
Cc: Niklas Schnelle
Cc: Paolo Bonzini
Cc: Ryan Roberts
Cc: Sean Christopherson
Cc: Sven Schnelle
Cc: Vasily Gorbik
Cc: Will Deacon
Cc: Zi Yan
Signed-off-by: Andrew Morton
---
 arch/x86/mm/pat/memtype.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 1fa0bf6ed295..f73b5ce270b3 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -951,23 +951,20 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
 		resource_size_t *phys)
 {
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
+	struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start };
 
-	if (follow_pte(vma, vma->vm_start, &ptep, &ptl))
+	if (follow_pfnmap_start(&args))
 		return -EINVAL;
-	pte = ptep_get(ptep);
 
-	/* Never return PFNs of anon folios in COW mappings. */
-	if (vm_normal_folio(vma, vma->vm_start, pte)) {
-		pte_unmap_unlock(ptep, ptl);
+	if (!args.special) {
+		follow_pfnmap_end(&args);
 		return -EINVAL;
 	}
 
-	*prot = pgprot_val(pte_pgprot(pte));
-	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
-	pte_unmap_unlock(ptep, ptl);
+	*prot = pgprot_val(args.pgprot);
+	*phys = (resource_size_t)args.pfn << PAGE_SHIFT;
+	follow_pfnmap_end(&args);
 	return 0;
 }
 
 static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
-- 
2.49.0
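
[Editor's note, not part of the patch: the diff above shows the calling
convention of the follow_pfnmap API as used here: populate a struct
follow_pfnmap_args with the VMA and address, call follow_pfnmap_start(),
consume the output fields (pfn, pgprot, special) while the walk is held,
then release it with follow_pfnmap_end(). A minimal sketch of a caller
follows; the function name pfnmap_lookup and its error handling are
hypothetical, written only to illustrate the start/end pairing visible
in follow_phys() above.

	/*
	 * Hypothetical helper, sketched from the pattern in follow_phys():
	 * translate an address inside a pfnmap VMA to (pfn, pgprot).
	 */
	static int pfnmap_lookup(struct vm_area_struct *vma, unsigned long addr,
				 unsigned long *pfn, pgprot_t *prot)
	{
		struct follow_pfnmap_args args = { .vma = vma, .address = addr };

		/* Fails when no pfn mapping (of any size) exists at addr. */
		if (follow_pfnmap_start(&args))
			return -EINVAL;

		/* Output fields are only stable until follow_pfnmap_end(). */
		*pfn = args.pfn;
		*prot = args.pgprot;

		follow_pfnmap_end(&args);
		return 0;
	}

follow_phys() additionally checks args.special so that it never reports
normal, refcounted pages; the sketch omits that policy check. Unlike the
old follow_pte() path, which handed back a pte_t and so could only
describe pte-level mappings, the args structure carries a bare pfn and
pgprot, which is what lets the same interface cover huge pfn mappings.]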