mm/mmap: Refactor locking out of __vma_adjust()
author Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 5 Aug 2022 20:07:25 +0000 (16:07 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Mon, 15 Aug 2022 14:46:43 +0000 (10:46 -0400)
Move the locking done around VMA alterations out of __vma_adjust() and into
the new lock_vma() and unlock_vma() helpers so the same sequence can be used
elsewhere.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
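
For readers of the patch, a minimal sketch of the calling pattern the new
helpers establish.  lock_vma(), unlock_vma() and struct vma_locking are
introduced below; the mutation step in the middle, and locals such as vma,
mas and mm, are illustrative only:

    /*
     * Sketch: fill a vma_locking struct, take the rmap locks, alter
     * the VMA, then drop the locks and finish the rmap bookkeeping.
     */
    struct vma_locking vl;

    memset(&vl, 0, sizeof(vl));     /* unused fields must stay NULL */
    vl.vma = vma;
    vl.file = vma->vm_file;
    if (vl.file)
            vl.mapping = vl.file->f_mapping;
    vl.anon_vma = vma->anon_vma;

    lock_vma(&vl);                  /* locks taken, interval trees detached */
    /* ... alter vm_start/vm_end/vm_pgoff and update the maple tree ... */
    unlock_vma(&vl, &mas, mm);      /* trees reinserted, locks dropped */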
mm/internal.h
mm/mmap.c

index 15e8cb1188320e08dae59c1a6fd4b3d537840125..e8dc14625b2b6e2c5fa2c890538a0583d5207461 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -860,4 +860,17 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
 
 DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 
+/*
+ * VMA lock generalization
+ */
+struct vma_locking {
+       struct vm_area_struct *vma;      /* The VMA being altered */
+       struct vm_area_struct *adj_next; /* The next VMA, when it is altered too */
+       struct file *file;               /* The backing file, if any */
+       struct address_space *mapping;   /* file->f_mapping, set when file is */
+       struct anon_vma *anon_vma;       /* The anon_vma, if any */
+       struct vm_area_struct *insert;   /* A split VMA to link in, or NULL */
+       struct vm_area_struct *remove;   /* A VMA to unlink and free, or NULL */
+       struct vm_area_struct *remove2;  /* A second VMA to remove, or NULL */
+};
 #endif /* __MM_INTERNAL_H */
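
A design note on the struct above: lock_vma() and unlock_vma() test each
optional field against NULL before acting on it, so zero-initialization is
what selects which branches run.  A sketch for a purely anonymous VMA
(locals illustrative), assuming the helpers added below:

    struct vma_locking vl = { 0 };  /* file, mapping, insert, remove stay NULL */

    vl.vma = vma;
    vl.anon_vma = vma->anon_vma;
    lock_vma(&vl);                  /* only the anon_vma branch runs */
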
index 3191d0da07131de243f2af5f2afe23f7d10e8a05..13c0390565aabe2f9fed942a066067631ba9d2ec 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -606,6 +606,125 @@ nomem:
        return -ENOMEM;
 }
 
+/*
+ * lock_vma() - Helper function for locking VMAs prior to altering them
+ * @vl: The initialized vma_locking struct
+ */
+static inline void lock_vma(struct vma_locking *vl)
+{
+       if (vl->file) {
+               uprobe_munmap(vl->vma, vl->vma->vm_start, vl->vma->vm_end);
+
+               if (vl->adj_next)
+                       uprobe_munmap(vl->adj_next, vl->adj_next->vm_start,
+                                     vl->adj_next->vm_end);
+
+               i_mmap_lock_write(vl->mapping);
+               if (vl->insert && vl->insert->vm_file) {
+                       /*
+                        * Put into interval tree now, so instantiated pages
+                        * are visible to arm/parisc __flush_dcache_page
+                        * throughout; but we cannot insert into address
+                        * space until vma start or end is updated.
+                        */
+                       __vma_link_file(vl->insert,
+                                       vl->insert->vm_file->f_mapping);
+               }
+       }
+
+       if (vl->anon_vma) {
+               anon_vma_lock_write(vl->anon_vma);
+               anon_vma_interval_tree_pre_update_vma(vl->vma);
+               if (vl->adj_next)
+                       anon_vma_interval_tree_pre_update_vma(vl->adj_next);
+       }
+
+       if (vl->file) {
+               flush_dcache_mmap_lock(vl->mapping);
+               vma_interval_tree_remove(vl->vma, &vl->mapping->i_mmap);
+               if (vl->adj_next)
+                       vma_interval_tree_remove(vl->adj_next,
+                                                &vl->mapping->i_mmap);
+       }
+}
+
+/*
+ * unlock_vma() - Helper function for unlocking VMAs after altering them
+ * @vl: The vma_locking struct
+ * @mas: The maple state struct
+ * @mm: The mm_struct
+ */
+static inline void unlock_vma(struct vma_locking *vl, struct ma_state *mas,
+                             struct mm_struct *mm)
+{
+       if (vl->file) {
+               if (vl->adj_next)
+                       vma_interval_tree_insert(vl->adj_next,
+                                                &vl->mapping->i_mmap);
+               vma_interval_tree_insert(vl->vma, &vl->mapping->i_mmap);
+               flush_dcache_mmap_unlock(vl->mapping);
+       }
+
+       if (vl->remove && vl->file) {
+               __remove_shared_vm_struct(vl->remove, vl->file, vl->mapping);
+               if (vl->remove2)
+                       __remove_shared_vm_struct(vl->remove2, vl->file,
+                                                 vl->mapping);
+       } else if (vl->insert) {
+               /*
+                * split_vma has split insert from vma, and needs
+                * us to insert it before dropping the locks
+                * (it may either follow vma or precede it).
+                */
+               mas_reset(mas);
+               vma_mas_store(vl->insert, mas);
+               mm->map_count++;
+       }
+
+       if (vl->anon_vma) {
+               anon_vma_interval_tree_post_update_vma(vl->vma);
+               if (vl->adj_next)
+                       anon_vma_interval_tree_post_update_vma(vl->adj_next);
+               anon_vma_unlock_write(vl->anon_vma);
+       }
+
+       if (vl->file) {
+               i_mmap_unlock_write(vl->mapping);
+               uprobe_mmap(vl->vma);
+
+               if (vl->adj_next)
+                       uprobe_mmap(vl->adj_next);
+       }
+
+       if (vl->remove) {
+again:
+               if (vl->file) {
+                       uprobe_munmap(vl->remove, vl->remove->vm_start,
+                                     vl->remove->vm_end);
+                       fput(vl->file);
+               }
+               if (vl->remove->anon_vma)
+                       anon_vma_merge(vl->vma, vl->remove);
+               mm->map_count--;
+               mpol_put(vma_policy(vl->remove));
+               if (!vl->remove2)
+                       BUG_ON(vl->vma->vm_end < vl->remove->vm_end);
+               vm_area_free(vl->remove);
+
+               /*
+                * In mprotect's case 6 (see comments on vma_merge),
+                * we must remove next_next too.
+                */
+               if (vl->remove2) {
+                       vl->remove = vl->remove2;
+                       vl->remove2 = NULL;
+                       goto again;
+               }
+       }
+       if (vl->insert && vl->file)
+               uprobe_mmap(vl->insert);
+}
+
 /*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
@@ -618,10 +737,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        struct vm_area_struct *expand)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
+       struct vm_area_struct *next_next = NULL;
+       struct vm_area_struct *next = find_vma(mm, vma->vm_end);
        struct vm_area_struct *orig_vma = vma;
-       struct address_space *mapping = NULL;
-       struct rb_root_cached *root = NULL;
        struct anon_vma *anon_vma = NULL;
        struct file *file = vma->vm_file;
        bool vma_changed = false;
@@ -629,6 +747,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        int remove_next = 0;
        MA_STATE(mas, &mm->mm_mt, 0, 0);
        struct vm_area_struct *exporter = NULL, *importer = NULL;
+       struct vma_locking vma_lock;
 
        if (next && !insert) {
                if (end >= next->vm_end) {
@@ -724,39 +843,22 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                           anon_vma != next->anon_vma);
 
        vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
-       if (file) {
-               mapping = file->f_mapping;
-               root = &mapping->i_mmap;
-               uprobe_munmap(vma, vma->vm_start, vma->vm_end);
 
-               if (adjust_next)
-                       uprobe_munmap(next, next->vm_start, next->vm_end);
-
-               i_mmap_lock_write(mapping);
-               if (insert && insert->vm_file) {
-                       /*
-                        * Put into interval tree now, so instantiated pages
-                        * are visible to arm/parisc __flush_dcache_page
-                        * throughout; but we cannot insert into address
-                        * space until vma start or end is updated.
-                        */
-                       __vma_link_file(insert, insert->vm_file->f_mapping);
-               }
-       }
-
-       if (anon_vma) {
-               anon_vma_lock_write(anon_vma);
-               anon_vma_interval_tree_pre_update_vma(vma);
-               if (adjust_next)
-                       anon_vma_interval_tree_pre_update_vma(next);
+       memset(&vma_lock, 0, sizeof(vma_lock));
+       vma_lock.vma = vma;
+       vma_lock.anon_vma = anon_vma;
+       vma_lock.file = file;
+       if (adjust_next)
+               vma_lock.adj_next = next;
+       if (file)
+               vma_lock.mapping = file->f_mapping;
+       vma_lock.insert = insert;
+       if (remove_next) {
+               vma_lock.remove = next;
+               vma_lock.remove2 = next_next;
        }
 
-       if (file) {
-               flush_dcache_mmap_lock(mapping);
-               vma_interval_tree_remove(vma, root);
-               if (adjust_next)
-                       vma_interval_tree_remove(next, root);
-       }
+       lock_vma(&vma_lock);
 
        if (start != vma->vm_start) {
                if ((vma->vm_start < start) &&
@@ -792,70 +894,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                vma_mas_store(next, &mas);
        }
 
-       if (file) {
-               if (adjust_next)
-                       vma_interval_tree_insert(next, root);
-               vma_interval_tree_insert(vma, root);
-               flush_dcache_mmap_unlock(mapping);
-       }
-
-       if (remove_next && file) {
-               __remove_shared_vm_struct(next, file, mapping);
-               if (remove_next == 2)
-                       __remove_shared_vm_struct(next_next, file, mapping);
-       } else if (insert) {
-               /*
-                * split_vma has split insert from vma, and needs
-                * us to insert it before dropping the locks
-                * (it may either follow vma or precede it).
-                */
-               mas_reset(&mas);
-               vma_mas_store(insert, &mas);
-               mm->map_count++;
-       }
-
-       if (anon_vma) {
-               anon_vma_interval_tree_post_update_vma(vma);
-               if (adjust_next)
-                       anon_vma_interval_tree_post_update_vma(next);
-               anon_vma_unlock_write(anon_vma);
-       }
-
-       if (file) {
-               i_mmap_unlock_write(mapping);
-               uprobe_mmap(vma);
-
-               if (adjust_next)
-                       uprobe_mmap(next);
-       }
-
-       if (remove_next) {
-again:
-               if (file) {
-                       uprobe_munmap(next, next->vm_start, next->vm_end);
-                       fput(file);
-               }
-               if (next->anon_vma)
-                       anon_vma_merge(vma, next);
-               mm->map_count--;
-               mpol_put(vma_policy(next));
-               if (remove_next != 2)
-                       BUG_ON(vma->vm_end < next->vm_end);
-               vm_area_free(next);
-
-               /*
-                * In mprotect's case 6 (see comments on vma_merge),
-                * we must remove next_next too.
-                */
-               if (remove_next == 2) {
-                       remove_next = 1;
-                       next = next_next;
-                       goto again;
-               }
-       }
-       if (insert && file)
-               uprobe_mmap(insert);
-
+       unlock_vma(&vma_lock, &mas, mm);
        mas_destroy(&mas);
        validate_mm(mm);
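
As a worked example of the removal path: in mprotect's case 6 (vma_merge()
collapsing vma, next and next_next into one), __vma_adjust() fills in both
remove fields and unlock_vma() then frees the two VMAs back to back via its
again: label.  A sketch of that flow, using the names from this patch:

    /* __vma_adjust() side: queue both VMAs for removal. */
    vma_lock.remove = next;         /* unlinked and freed first */
    vma_lock.remove2 = next_next;   /* promoted to ->remove, freed second */

    unlock_vma(&vma_lock, &mas, mm);
    /*
     * unlock_vma() handles ->remove (uprobe_munmap, fput, anon_vma_merge,
     * map_count, mempolicy, vm_area_free), then sees ->remove2 is set,
     * shifts it into ->remove, clears ->remove2 and loops once more,
     * matching what the old remove_next == 2 goto did.
     */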