mm/huge_memory: move more common code into insert_pmd()
author    David Hildenbrand <david@redhat.com>
          Mon, 11 Aug 2025 11:26:21 +0000 (13:26 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 12 Sep 2025 00:24:45 +0000 (17:24 -0700)
Patch series "mm: vm_normal_page*() improvements", v3.

Clean up and unify vm_normal_page_*() handling, also marking the huge
zero folio as special in the PMD.  Add and use vm_normal_page_pud(), and
clean up that XEN vm_ops->find_special_page thingy.

There are plans to use vm_normal_page_*() more widely soon.

This patch (of 11):

Let's clean it all up further: move the address-range check, the optional
page-table preallocation, the PMD locking, and the error unwinding that
both callers duplicate into insert_pmd() itself.

No functional change intended.

Link: https://lkml.kernel.org/r/20250811112631.759341-1-david@redhat.com
Link: https://lkml.kernel.org/r/20250811112631.759341-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
index 2b4ea5a2ce7d242f3f7ae8b9ed6aaf57064407b6..5314a89d676f1efb7f3446d52081d034fb5a5d93 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1379,15 +1379,25 @@ struct folio_or_pfn {
        bool is_folio;
 };
 
-static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
-               bool write, pgtable_t pgtable)
+               bool write)
 {
        struct mm_struct *mm = vma->vm_mm;
+       pgtable_t pgtable = NULL;
+       spinlock_t *ptl;
        pmd_t entry;
 
-       lockdep_assert_held(pmd_lockptr(mm, pmd));
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return VM_FAULT_SIGBUS;
 
+       if (arch_needs_pgtable_deposit()) {
+               pgtable = pte_alloc_one(vma->vm_mm);
+               if (!pgtable)
+                       return VM_FAULT_OOM;
+       }
+
+       ptl = pmd_lock(mm, pmd);
        if (!pmd_none(*pmd)) {
                const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
                                          fop.pfn;
@@ -1395,15 +1405,14 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
                if (write) {
                        if (pmd_pfn(*pmd) != pfn) {
                                WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
-                               return -EEXIST;
+                               goto out_unlock;
                        }
                        entry = pmd_mkyoung(*pmd);
                        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                        if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
                                update_mmu_cache_pmd(vma, addr, pmd);
                }
-
-               return -EEXIST;
+               goto out_unlock;
        }
 
        if (fop.is_folio) {
@@ -1424,11 +1433,17 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
        if (pgtable) {
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                mm_inc_nr_ptes(mm);
+               pgtable = NULL;
        }
 
        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);
-       return 0;
+
+out_unlock:
+       spin_unlock(ptl);
+       if (pgtable)
+               pte_free(mm, pgtable);
+       return VM_FAULT_NOPAGE;
 }
 
 /**
@@ -1450,9 +1465,6 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
        struct folio_or_pfn fop = {
                .pfn = pfn,
        };
-       pgtable_t pgtable = NULL;
-       spinlock_t *ptl;
-       int error;
 
        /*
         * If we had pmd_special, we could avoid all these restrictions,
@@ -1464,25 +1476,9 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
 
-       if (addr < vma->vm_start || addr >= vma->vm_end)
-               return VM_FAULT_SIGBUS;
-
-       if (arch_needs_pgtable_deposit()) {
-               pgtable = pte_alloc_one(vma->vm_mm);
-               if (!pgtable)
-                       return VM_FAULT_OOM;
-       }
-
        pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-       ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-       error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
-                          pgtable);
-       spin_unlock(ptl);
-       if (error && pgtable)
-               pte_free(vma->vm_mm, pgtable);
-
-       return VM_FAULT_NOPAGE;
+       return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write);
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
@@ -1491,35 +1487,15 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
 {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address & PMD_MASK;
-       struct mm_struct *mm = vma->vm_mm;
        struct folio_or_pfn fop = {
                .folio = folio,
                .is_folio = true,
        };
-       spinlock_t *ptl;
-       pgtable_t pgtable = NULL;
-       int error;
-
-       if (addr < vma->vm_start || addr >= vma->vm_end)
-               return VM_FAULT_SIGBUS;
 
        if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
                return VM_FAULT_SIGBUS;
 
-       if (arch_needs_pgtable_deposit()) {
-               pgtable = pte_alloc_one(vma->vm_mm);
-               if (!pgtable)
-                       return VM_FAULT_OOM;
-       }
-
-       ptl = pmd_lock(mm, vmf->pmd);
-       error = insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot,
-                          write, pgtable);
-       spin_unlock(ptl);
-       if (error && pgtable)
-               pte_free(mm, pgtable);
-
-       return VM_FAULT_NOPAGE;
+       return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write);
 }
 EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
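
The shape of this refactor is a common kernel idiom: hoist the prologue both
callers duplicated (address-range check, optional allocation, taking the lock)
and the epilogue (unlock, free-on-error) into the callee, and funnel every
locked exit through a single goto label.  Below is a minimal, self-contained
userspace sketch of the same shape; the names (insert(), struct range) are
hypothetical stand-ins, not the kernel APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct range { long start, end; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* All common work lives in the callee: validate, allocate, lock, and
 * clean up on every exit path. */
static int insert(struct range *r, long addr, bool need_buf, bool present)
{
        void *buf = NULL;
        int ret = 0;

        if (addr < r->start || addr >= r->end)
                return -1;              /* cf. VM_FAULT_SIGBUS */

        if (need_buf) {
                buf = malloc(64);       /* cf. pte_alloc_one() */
                if (!buf)
                        return -2;      /* cf. VM_FAULT_OOM */
        }

        pthread_mutex_lock(&lock);      /* cf. pmd_lock() */
        if (present)
                goto out_unlock;        /* cf. the !pmd_none() path */

        /* ... perform the insertion; on success the buffer is consumed
         * by the data structure, so it must not be freed below ... */
        buf = NULL;                     /* cf. pgtable = NULL */

out_unlock:
        pthread_mutex_unlock(&lock);
        free(buf);                      /* no-op once consumed (NULL) */
        return ret;                     /* cf. VM_FAULT_NOPAGE */
}

int main(void)
{
        struct range r = { .start = 0, .end = 100 };

        /* Both former callers now collapse to a single call each. */
        printf("in range: %d\n", insert(&r, 42, true, false));
        printf("oob:      %d\n", insert(&r, 200, true, false));
        return 0;
}

As in the patch, the buffer pointer doubles as ownership state: it is cleared
once the locked section consumes it, so the one cleanup path after out_unlock
can free it unconditionally.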