return -ENOMEM;
}
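+
+/*
+ * lock_vma() and unlock_vma() below operate on a vma_locking struct that
+ * is assumed to be defined elsewhere in this series. Based on the fields
+ * the two helpers use, a minimal sketch of that struct would be:
+ *
+ *	struct vma_locking {
+ *		struct vm_area_struct *vma;
+ *		struct vm_area_struct *adj_next;
+ *		struct vm_area_struct *insert;
+ *		struct vm_area_struct *remove;
+ *		struct vm_area_struct *remove2;
+ *		struct file *file;
+ *		struct address_space *mapping;
+ *		struct anon_vma *anon_vma;
+ *	};
+ */
+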
+/*
+ * lock_vma() - Helper function for locking VMAs prior to altering them
+ * @vl: The initialized vma_locking struct
+ */
+static inline void lock_vma(struct vma_locking *vl)
+{
+ if (vl->file) {
+ uprobe_munmap(vl->vma, vl->vma->vm_start, vl->vma->vm_end);
+
+ if (vl->adj_next)
+ uprobe_munmap(vl->adj_next, vl->adj_next->vm_start,
+ vl->adj_next->vm_end);
+
+ i_mmap_lock_write(vl->mapping);
+ if (vl->insert && vl->insert->vm_file) {
+ /*
+ * Put into interval tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(vl->insert,
+ vl->insert->vm_file->f_mapping);
+ }
+ }
+
+ if (vl->anon_vma) {
+ anon_vma_lock_write(vl->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vl->vma);
+ if (vl->adj_next)
+ anon_vma_interval_tree_pre_update_vma(vl->adj_next);
+ }
+
+ if (vl->file) {
+ flush_dcache_mmap_lock(vl->mapping);
+ vma_interval_tree_remove(vl->vma, &vl->mapping->i_mmap);
+ if (vl->adj_next)
+ vma_interval_tree_remove(vl->adj_next,
+ &vl->mapping->i_mmap);
+ }
+}
+
+/*
+ * unlock_vma() - Helper function for unlocking VMAs after they have been altered
+ * @vl: The vma_locking struct
+ * @mas: The maple state struct
+ * @mm: The mm_struct
+ */
+static inline void unlock_vma(struct vma_locking *vl, struct ma_state *mas,
+ struct mm_struct *mm)
+{
+ if (vl->file) {
+ if (vl->adj_next)
+ vma_interval_tree_insert(vl->adj_next,
+ &vl->mapping->i_mmap);
+ vma_interval_tree_insert(vl->vma, &vl->mapping->i_mmap);
+ flush_dcache_mmap_unlock(vl->mapping);
+ }
+
+ if (vl->remove && vl->file) {
+ __remove_shared_vm_struct(vl->remove, vl->file, vl->mapping);
+ if (vl->remove2)
+ __remove_shared_vm_struct(vl->remove2, vl->file,
+ vl->mapping);
+ } else if (vl->insert) {
+ /*
+ * split_vma has split insert from vma, and needs
+ * us to insert it before dropping the locks
+ * (it may either follow vma or precede it).
+ */
+ mas_reset(mas);
+ vma_mas_store(vl->insert, mas);
+ mm->map_count++;
+ }
+
+ if (vl->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vl->vma);
+ if (vl->adj_next)
+ anon_vma_interval_tree_post_update_vma(vl->adj_next);
+ anon_vma_unlock_write(vl->anon_vma);
+ }
+
+ if (vl->file) {
+ i_mmap_unlock_write(vl->mapping);
+ uprobe_mmap(vl->vma);
+
+ if (vl->adj_next)
+ uprobe_mmap(vl->adj_next);
+ }
+
+ if (vl->remove) {
+again:
+ if (vl->file) {
+ uprobe_munmap(vl->remove, vl->remove->vm_start,
+ vl->remove->vm_end);
+ fput(vl->file);
+ }
+ if (vl->remove->anon_vma)
+ anon_vma_merge(vl->vma, vl->remove);
+ mm->map_count--;
+ mpol_put(vma_policy(vl->remove));
+ if (!vl->remove2)
+ BUG_ON(vl->vma->vm_end < vl->remove->vm_end);
+ vm_area_free(vl->remove);
+
+		/*
+		 * In mprotect's case 6 (see comments on vma_merge),
+		 * the second VMA (vl->remove2) must be removed too.
+		 */
+ if (vl->remove2) {
+ vl->remove = vl->remove2;
+ vl->remove2 = NULL;
+ goto again;
+ }
+ }
+ if (vl->insert && vl->file)
+ uprobe_mmap(vl->insert);
+}
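+
+/*
+ * A minimal usage sketch for the pair (hypothetical caller; error
+ * handling omitted, and new_end is illustrative):
+ *
+ *	struct vma_locking vl;
+ *
+ *	memset(&vl, 0, sizeof(vl));
+ *	vl.vma = vma;
+ *	vl.anon_vma = vma->anon_vma;
+ *	vl.file = vma->vm_file;
+ *	if (vl.file)
+ *		vl.mapping = vl.file->f_mapping;
+ *
+ *	lock_vma(&vl);
+ *	vma->vm_end = new_end;
+ *	vma_mas_store(vma, &mas);
+ *	unlock_vma(&vl, &mas, mm);
+ */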
+
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
+ struct vm_area_struct *next_next = NULL;
+ struct vm_area_struct *next = find_vma(mm, vma->vm_end);
struct vm_area_struct *orig_vma = vma;
- struct address_space *mapping = NULL;
- struct rb_root_cached *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
bool vma_changed = false;
int remove_next = 0;
MA_STATE(mas, &mm->mm_mt, 0, 0);
struct vm_area_struct *exporter = NULL, *importer = NULL;
+ struct vma_locking vma_lock;
if (next && !insert) {
if (end >= next->vm_end) {
anon_vma != next->anon_vma);
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
- if (file) {
- mapping = file->f_mapping;
- root = &mapping->i_mmap;
- uprobe_munmap(vma, vma->vm_start, vma->vm_end);
- if (adjust_next)
- uprobe_munmap(next, next->vm_start, next->vm_end);
-
- i_mmap_lock_write(mapping);
- if (insert && insert->vm_file) {
- /*
- * Put into interval tree now, so instantiated pages
- * are visible to arm/parisc __flush_dcache_page
- * throughout; but we cannot insert into address
- * space until vma start or end is updated.
- */
- __vma_link_file(insert, insert->vm_file->f_mapping);
- }
- }
-
- if (anon_vma) {
- anon_vma_lock_write(anon_vma);
- anon_vma_interval_tree_pre_update_vma(vma);
- if (adjust_next)
- anon_vma_interval_tree_pre_update_vma(next);
+ memset(&vma_lock, 0, sizeof(vma_lock));
+ vma_lock.vma = vma;
+ vma_lock.anon_vma = anon_vma;
+ vma_lock.file = file;
+ if (adjust_next)
+ vma_lock.adj_next = next;
+ if (file)
+ vma_lock.mapping = file->f_mapping;
+ vma_lock.insert = insert;
+ if (remove_next) {
+ vma_lock.remove = next;
+ vma_lock.remove2 = next_next;
}
- if (file) {
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_remove(vma, root);
- if (adjust_next)
- vma_interval_tree_remove(next, root);
- }
+ lock_vma(&vma_lock);
if (start != vma->vm_start) {
if ((vma->vm_start < start) &&
vma_mas_store(next, &mas);
}
- if (file) {
- if (adjust_next)
- vma_interval_tree_insert(next, root);
- vma_interval_tree_insert(vma, root);
- flush_dcache_mmap_unlock(mapping);
- }
-
- if (remove_next && file) {
- __remove_shared_vm_struct(next, file, mapping);
- if (remove_next == 2)
- __remove_shared_vm_struct(next_next, file, mapping);
- } else if (insert) {
- /*
- * split_vma has split insert from vma, and needs
- * us to insert it before dropping the locks
- * (it may either follow vma or precede it).
- */
- mas_reset(&mas);
- vma_mas_store(insert, &mas);
- mm->map_count++;
- }
-
- if (anon_vma) {
- anon_vma_interval_tree_post_update_vma(vma);
- if (adjust_next)
- anon_vma_interval_tree_post_update_vma(next);
- anon_vma_unlock_write(anon_vma);
- }
-
- if (file) {
- i_mmap_unlock_write(mapping);
- uprobe_mmap(vma);
-
- if (adjust_next)
- uprobe_mmap(next);
- }
-
- if (remove_next) {
-again:
- if (file) {
- uprobe_munmap(next, next->vm_start, next->vm_end);
- fput(file);
- }
- if (next->anon_vma)
- anon_vma_merge(vma, next);
- mm->map_count--;
- mpol_put(vma_policy(next));
- if (remove_next != 2)
- BUG_ON(vma->vm_end < next->vm_end);
- vm_area_free(next);
-
- /*
- * In mprotect's case 6 (see comments on vma_merge),
- * we must remove next_next too.
- */
- if (remove_next == 2) {
- remove_next = 1;
- next = next_next;
- goto again;
- }
- }
- if (insert && file)
- uprobe_mmap(insert);
-
+ unlock_vma(&vma_lock, &mas, mm);
mas_destroy(&mas);
validate_mm(mm);