mm/fork: accept huge pfnmap entries
author Peter Xu <peterx@redhat.com>
Mon, 26 Aug 2024 20:43:41 +0000 (16:43 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 17 Sep 2024 08:06:58 +0000 (01:06 -0700)
Teach the fork code to properly copy pfnmaps for the pmd/pud levels.  The
pud case is much easier, though the write bit still needs to be persisted
for writable, shared pud mappings such as PFNMAP ones; otherwise a
follow-up write in either the parent or the child process will trigger a
write fault.

Do the same for the pmd level.
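
The rule applied at both levels reduces to a single predicate: only CoW
mappings whose entry is currently writable are write-protected at fork,
while a writable shared pfnmap keeps its write bit.  A minimal sketch for
illustration only (fork_needs_wrprotect() is not part of the patch; it
builds on the existing is_cow_mapping() helper, with pmd_write()/pud_write()
supplying the second argument):

/*
 * Illustrative only, not part of the patch: the fork-time predicate the
 * commit applies at both the pmd and pud levels.
 */
static inline bool fork_needs_wrprotect(struct vm_area_struct *vma,
					bool entry_writable)
{
	/* Only CoW mappings need the write bit cleared at fork time. */
	return is_cow_mapping(vma->vm_flags) && entry_writable;
}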

Link: https://lkml.kernel.org/r/20240826204353.2228736-8-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Niklas Schnelle <schnelle@linux.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 7ab9a171e3d01ad7ad5ce4bcd4dc7d089603eb75..2a73efea02d73787c6cf717bebacaa435525cd8a 100644
@@ -1583,6 +1583,24 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pgtable_t pgtable = NULL;
        int ret = -ENOMEM;
 
+       pmd = pmdp_get_lockless(src_pmd);
+       if (unlikely(pmd_special(pmd))) {
+               dst_ptl = pmd_lock(dst_mm, dst_pmd);
+               src_ptl = pmd_lockptr(src_mm, src_pmd);
+               spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+               /*
+                * No need to recheck the pmd, it can't change with write
+                * mmap lock held here.
+                *
+                * Meanwhile, making sure it's not a CoW VMA with writable
+                * mapping, otherwise it means either the anon page wrongly
+                * applied special bit, or we made the PRIVATE mapping be
+                * able to wrongly write to the backend MMIO.
+                */
+               VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
+               goto set_pmd;
+       }
+
        /* Skip if can be re-fill on fault */
        if (!vma_is_anonymous(dst_vma))
                return 0;
@@ -1664,7 +1682,9 @@ out_zero_page:
        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        if (!userfaultfd_wp(dst_vma))
                pmd = pmd_clear_uffd_wp(pmd);
-       pmd = pmd_mkold(pmd_wrprotect(pmd));
+       pmd = pmd_wrprotect(pmd);
+set_pmd:
+       pmd = pmd_mkold(pmd);
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 
        ret = 0;
@@ -1710,8 +1730,11 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * TODO: once we support anonymous pages, use
         * folio_try_dup_anon_rmap_*() and split if duplicating fails.
         */
-       pudp_set_wrprotect(src_mm, addr, src_pud);
-       pud = pud_mkold(pud_wrprotect(pud));
+       if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
+               pudp_set_wrprotect(src_mm, addr, src_pud);
+               pud = pud_wrprotect(pud);
+       }
+       pud = pud_mkold(pud);
        set_pud_at(dst_mm, addr, dst_pud, pud);
 
        ret = 0;
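
For context, a hypothetical userspace scenario that this path covers at
fork time (the device node, mapping size and offset below are placeholders;
whether the driver actually installs a huge pfnmap depends on the driver
and on alignment):

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device whose mmap is backed by a huge pfnmap. */
	int fd = open("/dev/some_pfnmap_device", O_RDWR);
	if (fd < 0)
		return 1;

	/* 2MB, shared and writable, so a pmd-level pfnmap is possible. */
	volatile uint32_t *bar = mmap(NULL, 2UL << 20,
				      PROT_READ | PROT_WRITE,
				      MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED)
		return 1;

	if (fork() == 0) {
		/*
		 * With this commit, the pmd copied at fork keeps its write
		 * bit for the shared mapping, so this store does not take
		 * an extra write fault in the child.
		 */
		bar[0] = 0x1;
		_exit(0);
	}
	bar[1] = 0x2;	/* likewise in the parent */
	return 0;
}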