mm/memory: factor out common code from vm_normal_page_*()
Author:     David Hildenbrand <david@redhat.com>
AuthorDate: Mon, 11 Aug 2025 11:26:29 +0000 (13:26 +0200)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 12 Sep 2025 00:24:47 +0000 (17:24 -0700)
Let's reduce the code duplication and factor out the non-pte/pmd related
magic into __vm_normal_page().

To keep it simpler, check the pfn against both zero folios, which
shouldn't really make a difference.
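
For illustration only, the unified zero-folio handling amounts to the
following minimal sketch; zero_folio_pfn() is a hypothetical helper name,
while is_zero_pfn() and is_huge_zero_pfn() are the real predicates used
in the diff below:

	/*
	 * Sketch only (not part of this patch): a PFN mapping either
	 * the shared zero folio or the huge zero folio is never "normal".
	 */
	static inline bool zero_folio_pfn(unsigned long pfn)
	{
		return is_zero_pfn(pfn) || is_huge_zero_pfn(pfn);
	}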

It's an open question whether we can even hit the
!CONFIG_ARCH_HAS_PTE_SPECIAL scenario in the PMD case in practice, but it
doesn't really matter: the handling is now all unified in
__vm_normal_page().

Add kerneldoc for all involved functions.

Note that, as a byproduct, we now:
* Support the find_special_page() mechanism for PMDs as well.
* No longer check for is_huge_zero_pfn() if we have
  CONFIG_ARCH_HAS_PTE_SPECIAL and the PMD is not special; the
  VM_WARN_ON_ONCE would catch any abuse.

No functional change intended.
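
As a usage sketch (hypothetical caller, not part of this patch; only
vm_normal_page() and page_folio() are real APIs), the wrappers keep
their existing calling convention:

	static struct folio *example_pte_to_folio(struct vm_area_struct *vma,
						  unsigned long addr, pte_t pte)
	{
		/* NULL means a "special" mapping (PFNMAP, zero folio, ...). */
		struct page *page = vm_normal_page(vma, addr, pte);

		return page ? page_folio(page) : NULL;
	}

This mirrors what vm_normal_folio() does internally.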

Link: https://lkml.kernel.org/r/20250811112631.759341-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index dc0107354d37b287f12b8fc93a0e72aa3d8ce29d..78af3f243cee7330a85f80e6e110ab1779df2429 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -614,8 +614,14 @@ static void print_bad_page_map(struct vm_area_struct *vma,
 #define print_bad_pte(vma, addr, pte, page) \
        print_bad_page_map(vma, addr, pte_val(pte), page, PGTABLE_LEVEL_PTE)
 
-/*
- * vm_normal_page -- This function gets the "struct page" associated with a pte.
+/**
+ * __vm_normal_page() - Get the "struct page" associated with a page table entry.
+ * @vma: The VMA mapping the page table entry.
+ * @addr: The address where the page table entry is mapped.
+ * @pfn: The PFN stored in the page table entry.
+ * @special: Whether the page table entry is marked "special".
+ * @level: The page table level for error reporting purposes only.
+ * @entry: The page table entry value for error reporting purposes only.
  *
  * "Special" mappings do not wish to be associated with a "struct page" (either
  * it doesn't exist, or it exists but they don't want to touch it). In this
@@ -628,10 +634,10 @@ static void print_bad_page_map(struct vm_area_struct *vma,
  * Selected page table walkers (such as GUP) can still identify mappings of the
  * shared zero folios and work with the underlying "struct page".
  *
- * There are 2 broad cases. Firstly, an architecture may define a pte_special()
- * pte bit, in which case this function is trivial. Secondly, an architecture
- * may not have a spare pte bit, which requires a more complicated scheme,
- * described below.
+ * There are 2 broad cases. Firstly, an architecture may define a "special"
+ * page table entry bit, such as pte_special(), in which case this function is
+ * trivial. Secondly, an architecture may not have a spare page table
+ * entry bit, which requires a more complicated scheme, described below.
  *
  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
  * special mapping (even if there are underlying and valid "struct pages").
@@ -664,63 +670,94 @@ static void print_bad_page_map(struct vm_area_struct *vma,
  * don't have to follow the strict linearity rule of PFNMAP mappings in
  * order to support COWable mappings.
  *
+ * Return: Returns the "struct page" if this is a "normal" mapping. Returns
+ *        NULL if this is a "special" mapping.
  */
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-                           pte_t pte)
+static inline struct page *__vm_normal_page(struct vm_area_struct *vma,
+               unsigned long addr, unsigned long pfn, bool special,
+               unsigned long long entry, enum pgtable_level level)
 {
-       unsigned long pfn = pte_pfn(pte);
-
        if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
-               if (likely(!pte_special(pte)))
-                       goto check_pfn;
-               if (vma->vm_ops && vma->vm_ops->find_special_page)
-                       return vma->vm_ops->find_special_page(vma, addr);
-               if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
-                       return NULL;
-               if (is_zero_pfn(pfn))
-                       return NULL;
-
-               print_bad_pte(vma, addr, pte, NULL);
-               return NULL;
-       }
-
-       /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
-
-       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-               if (vma->vm_flags & VM_MIXEDMAP) {
-                       if (!pfn_valid(pfn))
+               if (unlikely(special)) {
+                       if (vma->vm_ops && vma->vm_ops->find_special_page)
+                               return vma->vm_ops->find_special_page(vma, addr);
+                       if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                                return NULL;
-                       if (is_zero_pfn(pfn))
-                               return NULL;
-                       goto out;
-               } else {
-                       unsigned long off;
-                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
-                       if (pfn == vma->vm_pgoff + off)
-                               return NULL;
-                       if (!is_cow_mapping(vma->vm_flags))
+                       if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
                                return NULL;
+
+                       print_bad_page_map(vma, addr, entry, NULL, level);
+                       return NULL;
                }
-       }
+               /*
+                * With CONFIG_ARCH_HAS_PTE_SPECIAL, any special page table
+                * mappings (incl. shared zero folios) are marked accordingly.
+                */
+       } else {
+               if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) {
+                       if (vma->vm_flags & VM_MIXEDMAP) {
+                               /* If it has a "struct page", it's "normal". */
+                               if (!pfn_valid(pfn))
+                                       return NULL;
+                       } else {
+                               unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
 
-       if (is_zero_pfn(pfn))
-               return NULL;
+                               /* Only CoW'ed anon folios are "normal". */
+                               if (pfn == vma->vm_pgoff + off)
+                                       return NULL;
+                               if (!is_cow_mapping(vma->vm_flags))
+                                       return NULL;
+                       }
+               }
+
+               if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
+                       return NULL;
+       }
 
-check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
-               print_bad_pte(vma, addr, pte, NULL);
+               /* Corrupted page table entry. */
+               print_bad_page_map(vma, addr, entry, NULL, level);
                return NULL;
        }
-
        /*
         * NOTE! We still have PageReserved() pages in the page tables.
-        * eg. VDSO mappings can cause them to exist.
+        * For example, VDSO mappings can cause them to exist.
         */
-out:
-       VM_WARN_ON_ONCE(is_zero_pfn(pfn));
+       VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
        return pfn_to_page(pfn);
 }
 
+/**
+ * vm_normal_page() - Get the "struct page" associated with a PTE
+ * @vma: The VMA mapping the @pte.
+ * @addr: The address where the @pte is mapped.
+ * @pte: The PTE.
+ *
+ * Get the "struct page" associated with a PTE. See __vm_normal_page()
+ * for details on "normal" and "special" mappings.
+ *
+ * Return: Returns the "struct page" if this is a "normal" mapping. Returns
+ *        NULL if this is a "special" mapping.
+ */
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                           pte_t pte)
+{
+       return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte),
+                               pte_val(pte), PGTABLE_LEVEL_PTE);
+}
+
+/**
+ * vm_normal_folio() - Get the "struct folio" associated with a PTE
+ * @vma: The VMA mapping the @pte.
+ * @addr: The address where the @pte is mapped.
+ * @pte: The PTE.
+ *
+ * Get the "struct folio" associated with a PTE. See __vm_normal_page()
+ * for details on "normal" and "special" mappings.
+ *
+ * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
+ *        NULL if this is a "special" mapping.
+ */
 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
                            pte_t pte)
 {
@@ -732,42 +769,37 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
 }
 
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
+/**
+ * vm_normal_page_pmd() - Get the "struct page" associated with a PMD
+ * @vma: The VMA mapping the @pmd.
+ * @addr: The address where the @pmd is mapped.
+ * @pmd: The PMD.
+ *
+ * Get the "struct page" associated with a PTE. See __vm_normal_page()
+ * for details on "normal" and "special" mappings.
+ *
+ * Return: Returns the "struct page" if this is a "normal" mapping. Returns
+ *        NULL if this is a "special" mapping.
+ */
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                                pmd_t pmd)
 {
-       unsigned long pfn = pmd_pfn(pmd);
-
-       if (unlikely(pmd_special(pmd)))
-               return NULL;
-
-       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-               if (vma->vm_flags & VM_MIXEDMAP) {
-                       if (!pfn_valid(pfn))
-                               return NULL;
-                       goto out;
-               } else {
-                       unsigned long off;
-                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
-                       if (pfn == vma->vm_pgoff + off)
-                               return NULL;
-                       if (!is_cow_mapping(vma->vm_flags))
-                               return NULL;
-               }
-       }
-
-       if (is_huge_zero_pfn(pfn))
-               return NULL;
-       if (unlikely(pfn > highest_memmap_pfn))
-               return NULL;
-
-       /*
-        * NOTE! We still have PageReserved() pages in the page tables.
-        * eg. VDSO mappings can cause them to exist.
-        */
-out:
-       return pfn_to_page(pfn);
+       return __vm_normal_page(vma, addr, pmd_pfn(pmd), pmd_special(pmd),
+                               pmd_val(pmd), PGTABLE_LEVEL_PMD);
 }
 
+/**
+ * vm_normal_folio_pmd() - Get the "struct folio" associated with a PMD
+ * @vma: The VMA mapping the @pmd.
+ * @addr: The address where the @pmd is mapped.
+ * @pmd: The PMD.
+ *
+ * Get the "struct folio" associated with a PTE. See __vm_normal_page()
+ * for details on "normal" and "special" mappings.
+ *
+ * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
+ *        NULL if this is a "special" mapping.
+ */
 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
                                  unsigned long addr, pmd_t pmd)
 {