mm: write-lock VMAs before removing them from VMA tree
author Suren Baghdasaryan <surenb@google.com>
Sat, 11 Jun 2022 05:15:49 +0000 (22:15 -0700)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 4 Jan 2023 20:59:25 +0000 (15:59 -0500)
Write-locking VMAs before isolating them ensures that page fault
handlers don't operate on isolated VMAs.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
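For context, a minimal sketch of the ordering this change establishes between VMA removal and the per-VMA-lock page fault path. Only vma_write_lock() appears in this patch; the fault-side helpers below (vma_read_trylock(), vma_read_unlock()) are assumed names used purely for illustration:

/*
 * Illustrative sketch, not kernel code as-is.  The removal side now does
 *
 *	vma_write_lock(vma);
 *	... erase the VMA from the maple tree ...
 *
 * so a fault handler that cannot read-lock the VMA falls back to the
 * mmap_lock path and never operates on an isolated VMA.
 */
static vm_fault_t fault_under_vma_lock(struct vm_area_struct *vma,
				       struct vm_fault *vmf)
{
	vm_fault_t ret;

	if (!vma_read_trylock(vma))	/* assumed helper name */
		return VM_FAULT_RETRY;	/* fall back to the mmap_lock path */

	/*
	 * Holding the VMA read lock here means removal has not started:
	 * the VMA is still in the tree and will not be freed under us.
	 */
	ret = handle_mm_fault(vma, vmf->address, vmf->flags, NULL);

	vma_read_unlock(vma);		/* assumed helper name */
	return ret;
}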
mm/mmap.c
mm/nommu.c

index eeb8b098443af6e27ca2329ba21c0305928806c7..77b9c127e4480d18db28335d3aac13bb7bae3037 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -446,6 +446,7 @@ void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
  */
 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
 {
+       vma_write_lock(vma);
        trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1);
        mas->index = vma->vm_start;
        mas->last = vma->vm_end - 1;
@@ -2301,6 +2302,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 static inline int munmap_sidetree(struct vm_area_struct *vma,
                                   struct ma_state *mas_detach)
 {
+       vma_write_lock(vma);
        mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
        if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
                return -ENOMEM;
index b3154357ced52285efef952b615643009ff9f435..7ae91337ef14fbb7fca5b9e378abfdf9acf200ad 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -552,6 +552,7 @@ void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
 
 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
 {
+       vma_write_lock(vma);
        mas->index = vma->vm_start;
        mas->last = vma->vm_end - 1;
        mas_store_prealloc(mas, NULL);
@@ -1551,6 +1552,10 @@ void exit_mmap(struct mm_struct *mm)
        mmap_write_lock(mm);
        for_each_vma(vmi, vma) {
                cleanup_vma_from_mm(vma);
+               /*
+                * No need to lock VMA because this is the only mm user and no
+                * page fault handler can race with it.
+                */
                delete_vma(mm, vma);
                cond_resched();
        }
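
For context on the exit_mmap() hunk above: exit_mmap() is only reached once the last mm_users reference has been dropped, so no task can still take a page fault against this mm, which is why delete_vma() is called here without write-locking each VMA. A rough sketch of that reasoning, with the teardown details elided (this is not the kernel's exact call chain, and the function name is only for illustration):

/*
 * Rough sketch: exit_mmap() runs only after the final mm user is gone,
 * so no page fault handler can race with the VMA removal it performs.
 */
void mmput_sketch(struct mm_struct *mm)	/* assumed name for illustration */
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		/* No remaining users => no concurrent page faults. */
		exit_mmap(mm);
		/* ... the rest of the mm teardown is elided ... */
	}
}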