www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/mmap: prevent pagefault handler from racing with mmu_notifier registration
Author: Suren Baghdasaryan <surenb@google.com>
Mon, 23 May 2022 16:40:59 +0000 (09:40 -0700)
Committer: Suren Baghdasaryan <surenb@google.com>
Wed, 23 Nov 2022 02:09:46 +0000 (02:09 +0000)
Page fault handlers might need to fire MMU notifications while a new
notifier is being registered. Modify mm_take_all_locks to write-lock all
VMAs and prevent this race with fault handlers that would hold VMA locks.
VMAs are locked before i_mmap_rwsem and anon_vma to keep the same
locking order as in page fault handlers.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
mm/mmap.c

index 50f6494d2178d070d830ae644ef962e2577e288b..5e2193f59a8d647b3769b7832d1d009d1ae91aba 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3571,6 +3571,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * of mm/rmap.c:
  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
  *     hugetlb mapping);
+ *   - all vmas marked locked
  *   - all i_mmap_rwsem locks;
  *   - all anon_vma->rwseml
  *
@@ -3596,6 +3597,7 @@ int mm_take_all_locks(struct mm_struct *mm)
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (signal_pending(current))
                        goto out_unlock;
+               vma_write_lock(vma);
                if (vma->vm_file && vma->vm_file->f_mapping &&
                                is_vm_hugetlb_page(vma))
                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
@@ -3682,6 +3684,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
                if (vma->vm_file && vma->vm_file->f_mapping)
                        vm_unlock_mapping(vma->vm_file->f_mapping);
        }
+       vma_write_unlock_mm(mm);
 
        mutex_unlock(&mm_all_locks_mutex);
 }