www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/hugetlb: fix uffd wr-protection for CoW optimization path
author Peter Xu <peterx@redhat.com>
Tue, 21 Mar 2023 19:18:40 +0000 (15:18 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 28 Mar 2023 23:24:01 +0000 (16:24 -0700)
This patch fixes an issue that a hugetlb uffd-wr-protected mapping can be
writable even with uffd-wp bit set.  It only happens with hugetlb private
mappings, when someone firstly wr-protects a missing pte (which will
install a pte marker), then a write to the same page without any prior
access to the page.

Userfaultfd-wp trap for hugetlb was implemented in hugetlb_fault() before
reaching hugetlb_wp() to avoid taking more locks that userfault won't
need.  However there's one CoW optimization path that can trigger
hugetlb_wp() inside hugetlb_no_page(), which will bypass the trap.

This patch skips hugetlb_wp() for CoW and retries the fault if uffd-wp bit
is detected.  The new path will only trigger in the CoW optimization path
because generic hugetlb_fault() (e.g.  when a present pte was
wr-protected) will resolve the uffd-wp bit already.  Also make sure
anonymous UNSHARE won't be affected and can still be resolved, IOW only
skip CoW not CoR.

This patch will be needed for v5.19+ hence copy stable.

Link: https://lkml.kernel.org/r/20230321191840.1897940-1-peterx@redhat.com
Fixes: 166f3ecc0daf ("mm/hugetlb: hook page faults for uffd write protection")
Signed-off-by: Peter Xu <peterx@redhat.com>
Reported-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
Tested-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 07abcb6eb203044e39ed9cb839023774c53f3a8a..61ee4930e68634845bc479e8a3f910415fba3d8a 100644 (file)
@@ -5478,7 +5478,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
                       struct folio *pagecache_folio, spinlock_t *ptl)
 {
        const bool unshare = flags & FAULT_FLAG_UNSHARE;
-       pte_t pte;
+       pte_t pte, newpte;
        struct hstate *h = hstate_vma(vma);
        struct page *old_page;
        struct folio *new_folio;
@@ -5622,8 +5622,10 @@ retry_avoidcopy:
                mmu_notifier_invalidate_range(mm, range.start, range.end);
                page_remove_rmap(old_page, vma, true);
                hugepage_add_new_anon_rmap(new_folio, vma, haddr);
-               set_huge_pte_at(mm, haddr, ptep,
-                               make_huge_pte(vma, &new_folio->page, !unshare));
+               newpte = make_huge_pte(vma, &new_folio->page, !unshare);
+               if (huge_pte_uffd_wp(pte))
+                       newpte = huge_pte_mkuffd_wp(newpte);
+               set_huge_pte_at(mm, haddr, ptep, newpte);
                folio_set_hugetlb_migratable(new_folio);
                /* Make the old page be freed below */
                new_folio = page_folio(old_page);