mm/hugetlb: only drop uffd-wp special pte if required
author     Peter Xu <peterx@redhat.com>
           Thu, 14 Apr 2022 19:16:51 +0000 (12:16 -0700)
committer  Liam R. Howlett <Liam.Howlett@oracle.com>
           Thu, 14 Apr 2022 21:49:52 +0000 (17:49 -0400)
As with shmem uffd-wp special ptes, only drop the uffd-wp special swap pte
if unmapping an entire vma or synchronized such that faults cannot race
with the unmap operation.  This requires passing zap_flags all the way down
to the lowest-level hugetlb unmap routine: __unmap_hugepage_range.
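
For reference, these are the resulting prototypes (abridged from the
hunks below; parameter names taken from the mm/hugetlb.c definitions,
only the trailing zap_flags argument is new):

	void __unmap_hugepage_range_final(struct mmu_gather *tlb,
					  struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  struct page *ref_page, unsigned long zap_flags);
	void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
				  unsigned long end, struct page *ref_page,
				  unsigned long zap_flags);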

In general, unmap calls originating in hugetlbfs code will pass the
ZAP_FLAG_DROP_MARKER flag, as synchronization is in place to prevent
faults.  The exception is hole punch, which first unmaps without any
synchronization.  Later, when hole punch actually removes the page from
the file, it checks whether there was a subsequent fault and, if so,
takes the hugetlb fault mutex while unmapping again.  This second unmap
will pass in ZAP_FLAG_DROP_MARKER.
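
A condensed sketch of that hole-punch flow (illustrative only, stitched
together from the fs/hugetlbfs/inode.c hunks below; locking and error
handling trimmed):

	/* Phase 1: unmap without synchronization against faults, so
	 * keep any uffd-wp markers in place (zap_flags == 0). */
	hugetlb_vmdelete_list(&mapping->i_mmap, hole_start >> PAGE_SHIFT,
			      hole_end >> PAGE_SHIFT, 0);

	/* Phase 2: when actually removing a page from the file, take the
	 * hugetlb fault mutex and unmap again if a fault raced in --
	 * this time dropping the markers for good. */
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	hugetlb_vmdelete_list(&mapping->i_mmap,
			      index * pages_per_huge_page(h),
			      (index + 1) * pages_per_huge_page(h),
			      ZAP_FLAG_DROP_MARKER);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);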

The justification for "whether to apply ZAP_FLAG_DROP_MARKER when
unmapping a hugetlb range" is (IMHO): we should never reach a state in
which a page fault could erroneously fault in a wr-protected page-cache
page as writable, even for an extremely short period.  That could happen
if e.g. we passed ZAP_FLAG_DROP_MARKER when hugetlbfs_punch_hole() calls
hugetlb_vmdelete_list(): if a page faults after that call and before
remove_inode_hugepages() is executed, the page cache can be mapped
writable again in that small racy window, which can cause unexpected
data to be overwritten.
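
To make the race concrete, a hypothetical interleaving if the first,
unsynchronized unmap did drop the markers:

	/*
	 * CPU 0: hugetlbfs_punch_hole()     CPU 1: write fault
	 * hugetlb_vmdelete_list(...,
	 *     ZAP_FLAG_DROP_MARKER)         wp marker is gone, so the fault
	 *                                   maps the page-cache page writable
	 *                                   and the write lands in it
	 * remove_inode_hugepages(...)       page removed; the racing write
	 *                                   was never caught by uffd-wp
	 */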

Link: https://lkml.kernel.org/r/20220405014915.14873-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/hugetlbfs/inode.c
include/linux/hugetlb.h
mm/hugetlb.c
mm/memory.c

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 99c7477cee5c2b003934a86c4bec9d4a75fa08d9..8b5b9df2be7d8a4ef3a894aba5a571add6b71041 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -404,7 +404,8 @@ static void remove_huge_page(struct page *page)
 }
 
 static void
-hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
+hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
+                     unsigned long zap_flags)
 {
        struct vm_area_struct *vma;
 
@@ -438,7 +439,7 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
                }
 
                unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
-                                                                       NULL);
+                                    NULL, zap_flags);
        }
 }
 
@@ -516,7 +517,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                mutex_lock(&hugetlb_fault_mutex_table[hash]);
                                hugetlb_vmdelete_list(&mapping->i_mmap,
                                        index * pages_per_huge_page(h),
-                                       (index + 1) * pages_per_huge_page(h));
+                                       (index + 1) * pages_per_huge_page(h),
+                                       ZAP_FLAG_DROP_MARKER);
                                i_mmap_unlock_write(mapping);
                        }
 
@@ -582,7 +584,8 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
        i_mmap_lock_write(mapping);
        i_size_write(inode, offset);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
-               hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
+               hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
+                                     ZAP_FLAG_DROP_MARKER);
        i_mmap_unlock_write(mapping);
        remove_inode_hugepages(inode, offset, LLONG_MAX);
 }
@@ -615,8 +618,8 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                i_mmap_lock_write(mapping);
                if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                        hugetlb_vmdelete_list(&mapping->i_mmap,
-                                               hole_start >> PAGE_SHIFT,
-                                               hole_end  >> PAGE_SHIFT);
+                                             hole_start >> PAGE_SHIFT,
+                                             hole_end >> PAGE_SHIFT, 0);
                i_mmap_unlock_write(mapping);
                remove_inode_hugepages(inode, hole_start, hole_end);
                inode_unlock(inode);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f1143f1fb4443bedf4615b1406843f7f87b45e40..9bbdd97f611a5fb5e7ac7f018f03ae1f894043a1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -143,11 +143,12 @@ long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
 void unmap_hugepage_range(struct vm_area_struct *,
-                         unsigned long, unsigned long, struct page *);
+                         unsigned long, unsigned long, struct page *,
+                         unsigned long);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
-                         struct page *ref_page);
+                         struct page *ref_page, unsigned long zap_flags);
 void hugetlb_report_meminfo(struct seq_file *);
 int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 void hugetlb_show_meminfo(void);
@@ -406,7 +407,8 @@ static inline unsigned long hugetlb_change_protection(
 
 static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
-                       unsigned long end, struct page *ref_page)
+                       unsigned long end, struct page *ref_page,
+                       unsigned long zap_flags)
 {
        BUG();
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f0a0f80b318066244931c97152e8d1d31f8c4d9f..33c768776edbbe79061a19c13e63975d1c257128 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4962,7 +4962,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 
 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
-                                  struct page *ref_page)
+                                  struct page *ref_page, unsigned long zap_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -5018,7 +5018,18 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
                 * unmapped and its refcount is dropped, so just clear pte here.
                 */
                if (unlikely(!pte_present(pte))) {
-                       huge_pte_clear(mm, address, ptep, sz);
+                       /*
+                        * If the pte was wr-protected by uffd-wp in any of the
+                        * swap forms, meanwhile the caller does not want to
+                        * drop the uffd-wp bit in this zap, then replace the
+                        * pte with a marker.
+                        */
+                       if (pte_swp_uffd_wp_any(pte) &&
+                           !(zap_flags & ZAP_FLAG_DROP_MARKER))
+                               set_huge_pte_at(mm, address, ptep,
+                                               make_pte_marker(PTE_MARKER_UFFD_WP));
+                       else
+                               huge_pte_clear(mm, address, ptep, sz);
                        spin_unlock(ptl);
                        continue;
                }
@@ -5046,7 +5057,11 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
                tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
                if (huge_pte_dirty(pte))
                        set_page_dirty(page);
-
+               /* Leave a uffd-wp pte marker if needed */
+               if (huge_pte_uffd_wp(pte) &&
+                   !(zap_flags & ZAP_FLAG_DROP_MARKER))
+                       set_huge_pte_at(mm, address, ptep,
+                                       make_pte_marker(PTE_MARKER_UFFD_WP));
                hugetlb_count_sub(pages_per_huge_page(h), mm);
                page_remove_rmap(page, vma, true);
 
@@ -5080,9 +5095,10 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma, unsigned long start,
-                         unsigned long end, struct page *ref_page)
+                         unsigned long end, struct page *ref_page,
+                         unsigned long zap_flags)
 {
-       __unmap_hugepage_range(tlb, vma, start, end, ref_page);
+       __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
 
        /*
         * Clear this flag so that x86's huge_pmd_share page_table_shareable
@@ -5098,12 +5114,13 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-                         unsigned long end, struct page *ref_page)
+                         unsigned long end, struct page *ref_page,
+                         unsigned long zap_flags)
 {
        struct mmu_gather tlb;
 
        tlb_gather_mmu(&tlb, vma->vm_mm);
-       __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+       __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
        tlb_finish_mmu(&tlb);
 }
 
@@ -5158,7 +5175,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                        unmap_hugepage_range(iter_vma, address,
-                                            address + huge_page_size(h), page);
+                                            address + huge_page_size(h), page, 0);
        }
        i_mmap_unlock_write(mapping);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 1df7fda07c860f7fa964a0c7ed3c8b46e1052d65..b91989cc3a94a3c8d5f00bfda06b5cc9d104f3df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1684,8 +1684,11 @@ static void unmap_single_vma(struct mmu_gather *tlb,
                         * safe to do nothing in this case.
                         */
                        if (vma->vm_file) {
+                               unsigned long zap_flags = details ?
+                                   details->zap_flags : 0;
                                i_mmap_lock_write(vma->vm_file->f_mapping);
-                               __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
+                               __unmap_hugepage_range_final(tlb, vma, start, end,
+                                                            NULL, zap_flags);
                                i_mmap_unlock_write(vma->vm_file->f_mapping);
                        }
                } else