www.infradead.org Git - users/willy/xarray.git/commitdiff
mm/pagewalk: check pfnmap for folio_walk_start()
author: Peter Xu <peterx@redhat.com>
Mon, 26 Aug 2024 20:43:40 +0000 (16:43 -0400)
committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 17 Sep 2024 08:06:58 +0000 (01:06 -0700)
Teach folio_walk_start() to recognize special pmd/pud mappings, and fail
them properly as it means there's no folio backing them.

[peterx@redhat.com: remove some stale comments, per David]
Link: https://lkml.kernel.org/r/20240829202237.2640288-1-peterx@redhat.com
Link: https://lkml.kernel.org/r/20240826204353.2228736-7-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Niklas Schnelle <schnelle@linux.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c
mm/pagewalk.c

index 93c0c25433d021d5ec6caa9c60ed9f6e4bde9d8e..9a540e2316255fec95b397b75a7bc5e87082bb73 100644 (file)
@@ -672,11 +672,10 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 {
        unsigned long pfn = pmd_pfn(pmd);
 
-       /*
-        * There is no pmd_special() but there may be special pmds, e.g.
-        * in a direct-access (dax) mapping, so let's just replicate the
-        * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
-        */
+       /* Currently it's only used for huge pfnmaps */
+       if (unlikely(pmd_special(pmd)))
+               return NULL;
+
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
index cd79fb3b89e5ac5b6eace054ea3e0182a1594ab2..461ea3bbd8d991e74767eaae655d60231edefcc3 100644 (file)
@@ -753,7 +753,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
                fw->pudp = pudp;
                fw->pud = pud;
 
-               if (!pud_present(pud) || pud_devmap(pud)) {
+               if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) {
                        spin_unlock(ptl);
                        goto not_found;
                } else if (!pud_leaf(pud)) {