mm: allocate THP on hugezeropage wp-fault
author	Dev Jain <dev.jain@arm.com>
	Tue, 8 Oct 2024 06:17:46 +0000 (11:47 +0530)
committer	Andrew Morton <akpm@linux-foundation.org>
	Fri, 1 Nov 2024 04:29:04 +0000 (21:29 -0700)
Introduce do_huge_zero_wp_pmd() to handle a wp-fault on the hugezeropage
and replace it with a PMD-mapped THP.  Remember to flush the TLB entry
corresponding to the hugezeropage.  In case of failure, fall back to
splitting the PMD.
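
For context, here is a minimal userspace sketch (not part of this patch)
of the fault sequence this change targets.  It assumes THP and the huge
zero page are enabled (/sys/kernel/mm/transparent_hugepage/use_zero_page)
and that mmap() happens to return a PMD-aligned address; a real test
would over-allocate and align explicitly.

    #include <string.h>
    #include <sys/mman.h>

    #define SZ_2M	(2UL * 1024 * 1024)

    int main(void)
    {
    	char *buf = mmap(NULL, SZ_2M, PROT_READ | PROT_WRITE,
    			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	if (buf == MAP_FAILED)
    		return 1;
    	madvise(buf, SZ_2M, MADV_HUGEPAGE);	/* hint THP for this range */

    	/* Read fault: the kernel may map the huge zero page here. */
    	volatile char c = buf[0];
    	(void)c;

    	/*
    	 * Write fault on the huge zero page: with this patch, the PMD
    	 * is replaced by a freshly allocated THP instead of splitting
    	 * the PMD.
    	 */
    	memset(buf, 1, SZ_2M);

    	munmap(buf, SZ_2M);
    	return 0;
    }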

Link: https://lkml.kernel.org/r/20241008061746.285961-3-dev.jain@arm.com
Signed-off-by: Dev Jain <dev.jain@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@gentwo.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a6f53ed005921d1e2e0cdc05f3e2e74bb7527cce..d1c0055aa805f68dfc46478c6b0ad28732103c78 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1781,6 +1781,38 @@ unlock:
        spin_unlock(vmf->ptl);
 }
 
+static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
+{
+       unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+       struct vm_area_struct *vma = vmf->vma;
+       struct mmu_notifier_range range;
+       struct folio *folio;
+       vm_fault_t ret = 0;
+
+       folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
+       if (unlikely(!folio))
+               return VM_FAULT_FALLBACK;
+
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
+                               haddr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
+       vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+       if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
+               goto release;
+       ret = check_stable_address_space(vma->vm_mm);
+       if (ret)
+               goto release;
+       (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
+       map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+       goto unlock;
+release:
+       folio_put(folio);
+unlock:
+       spin_unlock(vmf->ptl);
+       mmu_notifier_invalidate_range_end(&range);
+       return ret;
+}
+
 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 {
        const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
@@ -1793,8 +1825,15 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
        vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
        VM_BUG_ON_VMA(!vma->anon_vma, vma);
 
-       if (is_huge_zero_pmd(orig_pmd))
+       if (is_huge_zero_pmd(orig_pmd)) {
+               vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
+
+               if (!(ret & VM_FAULT_FALLBACK))
+                       return ret;
+
+               /* Fallback to splitting PMD if THP cannot be allocated */
                goto fallback;
+       }
 
        spin_lock(vmf->ptl);