mm-hugetlb-fix-uffd-wr-protection-for-cow-optimization-path-v2
author    Peter Xu <peterx@redhat.com>
          Tue, 21 Mar 2023 18:58:42 +0000 (14:58 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
          Tue, 28 Mar 2023 23:24:01 +0000 (16:24 -0700)
v2

Link: https://lkml.kernel.org/r/ZBzOqwF2wrHgBVZb@x1n
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
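
For context, hugetlb_wp() is the hugetlb write-fault (CoW/unshare) handler. The
change below makes it return early when the PTE still carries the uffd-wp bit,
so a write to a wr-protected hugetlb page is reported to the userfaultfd
monitor instead of being resolved in-kernel by the CoW optimization path. What
follows is a minimal userspace sketch of the protect/fault/resolve cycle this
patch is about; it is illustrative, not the reproducer from the report, and it
assumes a kernel with uffd-wp support for hugetlbfs
(UFFD_FEATURE_WP_HUGETLBFS_SHMEM, v5.19+), one free 2MB hugepage, and
permission to call userfaultfd():

/*
 * uffd-wp on a private anonymous hugetlb mapping: protect a populated
 * hugepage, write to it, and resolve the resulting wr-protect fault
 * from a monitor thread.  Build with: cc -pthread uffd-wp-hugetlb.c
 */
#define _GNU_SOURCE
#include <err.h>
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define LEN	(2UL << 20)		/* assumes a 2M hugepage size */

static int uffd;

/* Wait for the wr-protect fault, then drop the protection to wake the writer. */
static void *monitor(void *arg)
{
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
	struct uffd_msg msg;

	if (poll(&pfd, 1, -1) < 0 || read(uffd, &msg, sizeof(msg)) != sizeof(msg))
		err(1, "read uffd event");
	if (msg.event != UFFD_EVENT_PAGEFAULT ||
	    !(msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP))
		errx(1, "expected a uffd-wp write fault");

	printf("uffd-wp fault at %llx, un-protecting\n",
	       (unsigned long long)msg.arg.pagefault.address);

	struct uffdio_writeprotect wp = {
		.range = { .start = msg.arg.pagefault.address & ~(__u64)(LEN - 1),
			   .len = LEN },
		.mode = 0,	/* clear WP; default mode also wakes the waiter */
	};
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
		err(1, "UFFDIO_WRITEPROTECT (resolve)");
	return NULL;
}

int main(void)
{
	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	if (uffd < 0)
		err(1, "userfaultfd");

	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP |
			    UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
	};
	if (ioctl(uffd, UFFDIO_API, &api))
		err(1, "UFFDIO_API");

	char *mem = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (mem == MAP_FAILED)
		err(1, "mmap(MAP_HUGETLB)");	/* needs a reserved hugepage */

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)mem, .len = LEN },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		err(1, "UFFDIO_REGISTER");

	mem[0] = 1;			/* populate while still writable */

	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)mem, .len = LEN },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
		err(1, "UFFDIO_WRITEPROTECT (protect)");

	pthread_t thr;
	pthread_create(&thr, NULL, monitor, NULL);

	mem[0] = 2;	/* must block until the monitor clears uffd-wp */
	pthread_join(thr, NULL);
	printf("write completed after userspace resolved the fault\n");
	return 0;
}

The final write is the case the patch cares about: the kernel must hand that
fault to the monitor rather than short-circuit it in hugetlb_wp().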
mm/hugetlb.c

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 61ee4930e68634845bc479e8a3f910415fba3d8a..9aea548b665c84264a79180e3930c6fc0ce07971 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5478,7 +5478,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
                       struct folio *pagecache_folio, spinlock_t *ptl)
 {
        const bool unshare = flags & FAULT_FLAG_UNSHARE;
-       pte_t pte, newpte;
+       pte_t pte = huge_ptep_get(ptep);
        struct hstate *h = hstate_vma(vma);
        struct page *old_page;
        struct folio *new_folio;
@@ -5487,6 +5487,17 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long haddr = address & huge_page_mask(h);
        struct mmu_notifier_range range;
 
+       /*
+        * Never handle CoW for uffd-wp protected pages.  It should be only
+        * handled when the uffd-wp protection is removed.
+        *
+        * Note that only the CoW optimization path can trigger this and
+        * get skipped, because hugetlb_fault() will always resolve the
+        * uffd-wp bit first.
+        */
+       if (huge_pte_uffd_wp(pte))
+               return 0;
+
        /*
         * hugetlb does not support FOLL_FORCE-style write faults that keep the
         * PTE mapped R/O such as maybe_mkwrite() would do.
@@ -5500,7 +5511,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
                return 0;
        }
 
-       pte = huge_ptep_get(ptep);
        old_page = pte_page(pte);
 
        delayacct_wpcopy_start();
@@ -5622,10 +5632,8 @@ retry_avoidcopy:
                mmu_notifier_invalidate_range(mm, range.start, range.end);
                page_remove_rmap(old_page, vma, true);
                hugepage_add_new_anon_rmap(new_folio, vma, haddr);
-               newpte = make_huge_pte(vma, &new_folio->page, !unshare);
-               if (huge_pte_uffd_wp(pte))
-                       newpte = huge_pte_mkuffd_wp(newpte);
-               set_huge_pte_at(mm, haddr, ptep, newpte);
+               set_huge_pte_at(mm, haddr, ptep,
+                               make_huge_pte(vma, &new_folio->page, !unshare));
                folio_set_hugetlb_migratable(new_folio);
                /* Make the old page be freed below */
                new_folio = page_folio(old_page);
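
A note on the rest of the change: with the early return in place, hugetlb_wp()
can no longer run with a uffd-wp protected PTE, so carrying the uffd-wp bit
over into the new PTE (the newpte/huge_pte_mkuffd_wp() code removed in the last
hunk) became dead code and is dropped; make_huge_pte() now feeds
set_huge_pte_at() directly. Reading the PTE with huge_ptep_get() at declaration
time (first hunk) is what makes its uffd-wp bit available to the new check
before the unshare/CoW logic runs.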