goto success;
}
+ mas_lock(&mas);
mas_set(&mas, newbrk);
brkvma = mas_walk(&mas);
if (brkvma) { // munmap necessary, there is something at newbrk.
success:
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
+ mas_unlock(&mas);
if (downgraded)
mmap_read_unlock(mm);
else
return brk;
out:
+ mas_unlock(&mas);
mmap_write_unlock(mm);
return origbrk;
}
}
if (i != mm->map_count) {
pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
+ mt_dump(mas.tree);
bug = 1;
}
{
MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
+ mas_lock(&mas);
vma_mas_link(mm, vma, &mas);
+ mas_unlock(&mas);
}
/*
struct vm_area_struct *vma)
{
MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
+ mas_lock(&mas);
BUG_ON(mas_find(&mas, vma->vm_end - 1));
mas_reset(&mas);
vma_mas_store(vma, &mas);
mm->map_count++;
+ mas_unlock(&mas);
}
inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
bool vma_changed = false;
long adjust_next = 0;
int remove_next = 0;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
validate_mm(mm);
+ mas_lock(&mas);
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
}
if (start != vma->vm_start) {
- if (vma->vm_start < start)
- vma_mt_szero(mm, vma->vm_start, start);
- else
+ if (vma->vm_start < start) {
+ mas_set_range(&mas, vma->vm_start, start - 1);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ } else
vma_changed = true;
vma->vm_start = start;
}
if (end != vma->vm_end) {
- if (vma->vm_end > end)
- vma_mt_szero(mm, end, vma->vm_end);
- else
+ if (vma->vm_end > end) {
+ mas_set_range(&mas, end, vma->vm_end - 1);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ } else
vma_changed = true;
vma->vm_end = end;
}
- if (vma_changed)
- vma_mt_store(mm, vma);
+ if (vma_changed) {
+ mas_reset(&mas);
+ vma_mas_store(vma, &mas);
+ }
vma->vm_pgoff = pgoff;
if (adjust_next) {
next->vm_start += adjust_next;
next->vm_pgoff += adjust_next >> PAGE_SHIFT;
- vma_mt_store(mm, next);
+ mas_reset(&mas);
+ vma_mas_store(next, &mas);
}
if (file) {
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
- __insert_vm_struct(mm, insert);
+ mas_reset(&mas);
+ vma_mas_store(insert, &mas);
+ mm->map_count++;
}
if (anon_vma) {
if (insert && file)
uprobe_mmap(insert);
+ mas_unlock(&mas);
validate_mm(mm);
return 0;
}
max);
tlb_finish_mmu(&tlb, start, end);
}
+
+/*
+ * vma_shrink() - Shrink @vma to span @start - @end, optionally storing @insert.
+ *
+ * Does not support inserting a new vma and modifying the other side of the
+ * vma.  On return, @mas will point to @insert or to the newly zeroed area.
+ */
+static inline
+int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *insert)
+#if 0
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *expand = NULL, *next = vma_next(mm, vma), *orig_vma = vma;
+ struct address_space *mapping = NULL;
+ struct rb_root_cached *root = NULL;
+ struct anon_vma *anon_vma = NULL;
+ struct file *file = vma->vm_file;
+ bool vma_changed = false;
+ long adjust_next = 0;
+ int remove_next = 0;
+
+ validate_mm(mm);
+ if (next && !insert) {
+ struct vm_area_struct *exporter = NULL, *importer = NULL;
+
+ if (end >= next->vm_end) {
+ /*
+ * vma expands, overlapping all the next, and
+ * perhaps the one after too (mprotect case 6).
+ * The only other cases that gets here are
+ * case 1, case 7 and case 8.
+ */
+ if (next == expand) {
+ /*
+ * The only case where we don't expand "vma"
+ * and we expand "next" instead is case 8.
+ */
+ VM_WARN_ON(end != next->vm_end);
+ /*
+ * remove_next == 3 means we're
+ * removing "vma" and that to do so we
+ * swapped "vma" and "next".
+ */
+ remove_next = 3;
+ VM_WARN_ON(file != next->vm_file);
+ swap(vma, next);
+ } else {
+ VM_WARN_ON(expand != vma);
+ /*
+ * case 1, 6, 7, remove_next == 2 is case 6,
+ * remove_next == 1 is case 1 or 7.
+ */
+ remove_next = 1 + (end > next->vm_end);
+ VM_WARN_ON(remove_next == 2 &&
+ end != vma_next(mm, next)->vm_end);
+ /* trim end to next, for case 6 first pass */
+ end = next->vm_end;
+ }
+
+ exporter = next;
+ importer = vma;
+
+ /*
+ * If next doesn't have anon_vma, import from vma after
+ * next, if the vma overlaps with it.
+ */
+ if (remove_next == 2 && !next->anon_vma)
+ exporter = vma_next(mm, next);
+
+ } else if (end > next->vm_start) {
+ /*
+ * vma expands, overlapping part of the next:
+ * mprotect case 5 shifting the boundary up.
+ */
+ adjust_next = (end - next->vm_start);
+ exporter = next;
+ importer = vma;
+ VM_WARN_ON(expand != importer);
+ } else if (end < vma->vm_end) {
+ /*
+ * vma shrinks, and !insert tells it's not
+ * split_vma inserting another: so it must be
+ * mprotect case 4 shifting the boundary down.
+ */
+ adjust_next = -(vma->vm_end - end);
+ exporter = vma;
+ importer = next;
+ VM_WARN_ON(expand != importer);
+ }
+
+ /*
+ * Easily overlooked: when mprotect shifts the boundary,
+ * make sure the expanding vma has anon_vma set if the
+ * shrinking vma had, to cover any anon pages imported.
+ */
+ if (exporter && exporter->anon_vma && !importer->anon_vma) {
+ int error;
+
+ importer->anon_vma = exporter->anon_vma;
+ error = anon_vma_clone(importer, exporter);
+ if (error)
+ return error;
+ }
+ }
+again:
+ vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
+
+ if (file) {
+ mapping = file->f_mapping;
+ root = &mapping->i_mmap;
+ uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+ if (adjust_next)
+ uprobe_munmap(next, next->vm_start, next->vm_end);
+
+ i_mmap_lock_write(mapping);
+ if (insert) {
+ /*
+ * Put into interval tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(insert);
+ }
+ }
+
+ anon_vma = vma->anon_vma;
+ if (!anon_vma && adjust_next)
+ anon_vma = next->anon_vma;
+ if (anon_vma) {
+ VM_WARN_ON(adjust_next && next->anon_vma &&
+ anon_vma != next->anon_vma);
+ anon_vma_lock_write(anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ if (adjust_next)
+ anon_vma_interval_tree_pre_update_vma(next);
+ }
+
+ if (file) {
+ flush_dcache_mmap_lock(mapping);
+ vma_interval_tree_remove(vma, root);
+ if (adjust_next)
+ vma_interval_tree_remove(next, root);
+ }
+
+ if (start != vma->vm_start) {
+ if (vma->vm_start < start) {
+ mas_set_range(mas, vma->vm_start, start - 1);
+ mas_store_gfp(mas, NULL, GFP_KERNEL);
+ } else
+ vma_changed = true;
+ vma->vm_start = start;
+ }
+ if (end != vma->vm_end) {
+ if (vma->vm_end > end) {
+ mas_set_range(mas, end, vma->vm_end - 1);
+ mas_store_gfp(mas, NULL, GFP_KERNEL);
+ } else
+ vma_changed = true;
+ vma->vm_end = end;
+ }
+
+ if (vma_changed) {
+ mas_reset(mas);
+ vma_mas_store(vma, mas);
+ }
+
+ vma->vm_pgoff = pgoff;
+ if (adjust_next) {
+ next->vm_start += adjust_next;
+ next->vm_pgoff += adjust_next >> PAGE_SHIFT;
+ mas_reset(mas);
+ vma_mas_store(next, mas);
+ }
+
+ if (file) {
+ if (adjust_next)
+ vma_interval_tree_insert(next, root);
+ vma_interval_tree_insert(vma, root);
+ flush_dcache_mmap_unlock(mapping);
+ }
+
+ if (remove_next && file) {
+ __remove_shared_vm_struct(next, file, mapping);
+ } else if (insert) {
+ /*
+ * split_vma has split insert from vma, and needs
+ * us to insert it before dropping the locks
+ * (it may either follow vma or precede it).
+ */
+ mas_reset(mas);
+ vma_mas_store(insert, mas);
+ mm->map_count++;
+ }
+
+ if (anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ if (adjust_next)
+ anon_vma_interval_tree_post_update_vma(next);
+ anon_vma_unlock_write(anon_vma);
+ }
+
+ if (file) {
+ i_mmap_unlock_write(mapping);
+ uprobe_mmap(vma);
+
+ if (adjust_next)
+ uprobe_mmap(next);
+ }
+
+ if (remove_next) {
+ if (file) {
+ uprobe_munmap(next, next->vm_start, next->vm_end);
+ fput(file);
+ }
+ if (next->anon_vma)
+ anon_vma_merge(vma, next);
+ mm->map_count--;
+ mpol_put(vma_policy(next));
+ vm_area_free(next);
+ /*
+ * In mprotect's case 6 (see comments on vma_merge),
+ * we must remove another next too. It would clutter
+ * up the code too much to do both in one go.
+ */
+ if (remove_next != 3) {
+ /*
+ * If "next" was removed and vma->vm_end was
+ * expanded (up) over it, in turn
+ * "next->vm_prev->vm_end" changed and the
+ * "vma->vm_next" gap must be updated.
+ */
+ next = vma_next(mm, vma);
+ } else {
+ /*
+ * For the scope of the comment "next" and
+ * "vma" considered pre-swap(): if "vma" was
+ * removed, next->vm_start was expanded (down)
+ * over it and the "next" gap must be updated.
+ * Because of the swap() the post-swap() "vma"
+ * actually points to pre-swap() "next"
+ * (post-swap() "next" as opposed is now a
+ * dangling pointer).
+ */
+ next = vma;
+ }
+ if (remove_next == 2) {
+ remove_next = 1;
+ end = next->vm_end;
+ goto again;
+ }
+ }
+ if (insert && file)
+ uprobe_mmap(insert);
+
+ validate_mm(mm);
+ return 0;
+}
+#else
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct address_space *mapping = NULL;
+ struct rb_root_cached *root = NULL;
+ struct anon_vma *anon_vma = NULL;
+ struct file *file = vma->vm_file;
+ unsigned long old_end = vma->vm_end, old_start = vma->vm_start;
+
+ validate_mm(mm);
+ printk("%s: %px start %lu - %lu => %lu - %lu\n", __func__, vma, vma->vm_start,
+ vma->vm_end, start, end);
+
+ vma_adjust_trans_huge(vma, start, end, 0);
+ if (file) {
+ mapping = file->f_mapping;
+ root = &mapping->i_mmap;
+ uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+ i_mmap_lock_write(mapping);
+ /*
+ * Put into interval tree now, so instantiated pages are visible
+ * to arm/parisc __flush_dcache_page throughout; but we cannot
+ * insert into address space until vma start or end is updated.
+ */
+
+ if (insert) {
+ printk("insert %lu - %lu to interval\n",
+ insert->vm_start, insert->vm_end);
+ __vma_link_file(insert);
+ printk("Done\n");
+ }
+ }
+
+ anon_vma = vma->anon_vma;
+ if (anon_vma) {
+ anon_vma_lock_write(anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ }
+
+ if (file) {
+ flush_dcache_mmap_lock(mapping);
+ printk("remove %lu - %lu to interval\n",
+ vma->vm_start, vma->vm_end);
+ vma_interval_tree_remove(vma, root);
+ printk("Done\n");
+ }
+
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_pgoff = pgoff;
+ if (!insert) {
+
+ /* If vm_start changed, and the insert does not end at the old
+ * start, then that area needs to be zeroed
+ */
+ if (old_start != vma->vm_start) {
+ mas->last = end;
+ mas_store_gfp(mas, NULL, GFP_KERNEL);
+ }
+
+ /* If vm_end changed, and the insert does not start at the new
+ * end, then that area needs to be zeroed
+ */
+ if (old_end != vma->vm_end) {
+ mas->index = end;
+ mas->last = old_end;
+ mas_store_gfp(mas, NULL, GFP_KERNEL);
+ }
+ }
+
+ if (file) {
+ printk("insert %lu - %lu to interval\n",
+ vma->vm_start, vma->vm_end);
+ vma_interval_tree_insert(vma, root);
+ printk("Done\n");
+ flush_dcache_mmap_unlock(mapping);
+ }
+
+ if (insert) { /* Store the new vma in the maple tree and count it. */
+ vma_mas_store(insert, mas);
+ mm->map_count++;
+ }
+
+ if (anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ anon_vma_unlock_write(anon_vma);
+ }
+
+ if (file) {
+ i_mmap_unlock_write(mapping);
+ uprobe_mmap(vma);
+ if (insert)
+ uprobe_mmap(insert);
+ }
+
+ printk("%s: %px end\n", __func__, vma);
+ validate_mm(mm);
+ return 0;
+}
+#endif
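+/*
+ * Usage sketch (illustrative only, not taken from a caller in this patch):
+ * trimming the tail of an existing vma back to a hypothetical new_end, with a
+ * local maple state @mas already pointing at @vma and mmap_lock held for
+ * writing:
+ *
+ *	error = vma_shrink(&mas, vma, vma->vm_start, new_end,
+ *			   vma->vm_pgoff, NULL);
+ *	if (error)
+ *		return error;
+ */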
+
+/*
+ * mas_split_vma() - Split the VMA into two.
+ *
+ * @mm: The mm_struct
+ * @mas: The maple state - must point to the vma being altered
+ * @vma: The vma to split
+ * @addr: The address at which to split @vma
+ * @new_below: Add the new vma at the lower address (the first part) of @vma.
+ *
+ * Note: The @mas must point to the vma that is being split or MAS_START.
+ * Upon return, @mas points to the new VMA.
+ */
+int mas_split_vma(struct mm_struct *mm, struct ma_state *mas,
+ struct vm_area_struct *vma, unsigned long addr, int new_below)
+{
+ struct vm_area_struct *new;
+ int err;
+
+ validate_mm(mm);
+ if (vma->vm_ops && vma->vm_ops->may_split) {
+ err = vma->vm_ops->may_split(vma, addr);
+ if (err)
+ return err;
+ }
+
+ new = vm_area_dup(vma);
+ if (!new)
+ return -ENOMEM;
+
+ if (new_below)
+ new->vm_end = addr;
+ else {
+ new->vm_start = addr;
+ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+ }
+
+ err = vma_dup_policy(vma, new);
+ if (err)
+ goto out_free_vma;
+
+ err = anon_vma_clone(new, vma);
+ if (err)
+ goto out_free_mpol;
+
+ if (new->vm_file)
+ get_file(new->vm_file);
+
+ if (new->vm_ops && new->vm_ops->open)
+ new->vm_ops->open(new);
+
+ if (new_below)
+ err = vma_shrink(mas, vma, addr, vma->vm_end, vma->vm_pgoff +
+ ((addr - new->vm_start) >> PAGE_SHIFT), new);
+ else
+ err = vma_shrink(mas, vma, vma->vm_start, addr, vma->vm_pgoff,
+ new);
+
+ validate_mm(mm);
+ /* Success. */
+ if (!err)
+ return 0;
+
+ /* Clean everything up if vma_shrink() failed. */
+ if (new->vm_ops && new->vm_ops->close)
+ new->vm_ops->close(new);
+ if (new->vm_file)
+ fput(new->vm_file);
+ unlink_anon_vmas(new);
+ out_free_mpol:
+ mpol_put(vma_policy(new));
+ out_free_vma:
+ vm_area_free(new);
+ return err;
+}
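+/*
+ * Usage sketch (illustrative only), mirroring the pattern used by
+ * do_mas_align_munmap() below: with @mas pointing at @vma, split it at the
+ * start of the region to be unmapped, then re-walk to pick up the new vma
+ * at @start:
+ *
+ *	error = mas_split_vma(mm, mas, vma, start, 0);
+ *	if (error)
+ *		return error;
+ *	prev = vma;
+ *	vma = mas_walk(mas);
+ */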
+
/*
* __split_vma() bypasses sysctl_max_map_count checking. We use this where it
* has already been checked or doesn't make sense to fail.
return __split_vma(mm, vma, addr, new_below);
}
-static inline unsigned long detach_range(struct mm_struct *mm,
- struct ma_state *src, struct ma_state *dst,
- struct vm_area_struct **vma, struct vm_area_struct *prev)
+
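+/*
+ * detach_range() - Remove the vmas covering @dst->index to @dst->last from
+ * the mm and collect them in the detached tree @dst.  Iterates backwards
+ * with mas_prev(), munlocking any mlock()ed vmas on the way, then replaces
+ * the removed range in the mm's tree with NULL and decrements
+ * @mm->map_count by the number of vmas detached.  On return, @*vma points
+ * to the lowest vma that was detached.
+ */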
+static inline void detach_range(struct mm_struct *mm, struct ma_state *mas,
+ struct ma_state *dst, struct vm_area_struct **vma)
{
+ unsigned long start = dst->index;
+ unsigned long end = dst->last;
int count = 0;
- struct ma_state mas;
- /*
- * unlock any mlock()ed ranges before detaching vmas, count the number
- * of VMAs to be dropped, and return the tail entry of the affected
- * area.
- */
- mas = *src;
do {
- BUG_ON((*vma)->vm_start < src->index);
- BUG_ON((*vma)->vm_end > (src->last + 1));
count++;
+ *vma = mas_prev(mas, start);
+ BUG_ON((*vma)->vm_start < start);
+ BUG_ON((*vma)->vm_end > end + 1);
if ((*vma)->vm_flags & VM_LOCKED) {
mm->locked_vm -= vma_pages(*vma);
munlock_vma_pages_all(*vma);
}
+
vma_mas_store(*vma, dst);
- } while ((*vma = mas_find(&mas, src->last)) != NULL);
+ } while ((*vma)->vm_start > start);
- mas_set(&mas, src->last + 1);
/* Drop removed area from the tree */
- *vma = mas_find(&mas, ULONG_MAX);
- mas_lock(src);
- mas_store_gfp(src, NULL, GFP_KERNEL);
- mas_unlock(src);
+ mas->last = end;
+ mas_store_gfp(mas, NULL, GFP_KERNEL);
/* Decrement map_count */
mm->map_count -= count;
- /* Set the upper limit */
- if (!(*vma))
- return USER_PGTABLES_CEILING;
-
- validate_mm(mm);
- return (*vma)->vm_start;
}
/* do_mas_align_munmap() - munmap the aligned region from @start to @end.
struct mm_struct *mm, unsigned long start, unsigned long end,
struct list_head *uf, bool downgrade)
{
- struct vm_area_struct *prev, *last;
+ struct vm_area_struct *prev, *last, *next = NULL;
struct maple_tree mt_detach;
- unsigned long max;
- MA_STATE(dst, NULL, start, start);
+ unsigned long max = USER_PGTABLES_CEILING;
+ MA_STATE(dst, NULL, start, end - 1);
struct ma_state tmp;
/* we have start < vma->vm_end */
validate_mm(mm);
- /* arch_unmap() might do unmaps itself. */
+ /* arch_unmap() might do unmaps itself. */
arch_unmap(mm, start, end);
/*
if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
- error = __split_vma(mm, vma, start, 0);
+ printk("Split start %lu-%lu\n", vma->vm_start, vma->vm_end);
+ error = mas_split_vma(mm, mas, vma, start, 0);
if (error)
return error;
prev = vma;
- // Split invalidated node, reset.
- mas_set_range(mas, start, end - 1);
+ vma = mas_walk(mas);
} else {
tmp = *mas;
prev = mas_prev(&tmp, 0);
}
- if (vma->vm_end >= end)
+ if (end < vma->vm_end) {
last = vma;
- else {
- tmp = *mas;
- mas_set(&tmp, end - 1);
- last = mas_walk(&tmp);
+ } else {
+ mas_set(mas, end - 1);
+ last = mas_walk(mas);
}
/* Does it split the last one? */
if (last && end < last->vm_end) {
- int error = __split_vma(mm, last, end, 1);
+ int error;
+ printk("Split end %lu-%lu\n", vma->vm_start, vma->vm_end);
+ error = mas_split_vma(mm, mas, last, end, 1);
if (error)
return error;
- // Split invalidated node, reset.
- mas_set_range(mas, start, end - 1);
}
-
- if (mas->node == MAS_START)
- vma = mas_find(mas, end - 1);
+ next = mas_next(mas, ULONG_MAX);
if (unlikely(uf)) {
/*
}
/* Point of no return */
+ if (next)
+ max = next->vm_start;
+
mtree_init(&mt_detach, MAPLE_ALLOC_RANGE);
dst.tree = &mt_detach;
- mas->last = end - 1;
- max = detach_range(mm, mas, &dst, &vma, prev);
+ detach_range(mm, mas, &dst, &vma);
/*
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
* down_read(mmap_lock) and collide with the VMA we are about to unmap.
*/
if (downgrade) {
- if (vma && (vma->vm_flags & VM_GROWSDOWN))
+ if (next && (next->vm_flags & VM_GROWSDOWN))
downgrade = false;
else if (prev && (prev->vm_flags & VM_GROWSUP))
downgrade = false;
}
/* Unmap the region */
- mas_set(&dst, start);
- tmp = dst;
- vma = mas_find(&dst, end - 1); // head of list.
unmap_region(mm, vma, &dst, start, end, prev, max);
/* Statistics and freeing VMAs */
- dst = tmp;
+ mas_set(&dst, start);
remove_mt(mm, &dst);
mtree_destroy(&mt_detach);
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{
+ int ret;
MA_STATE(mas, &mm->mm_mt, start, start);
- return do_mas_munmap(&mas, mm, start, len, uf, false);
+
+ mas_lock(&mas);
+ ret = do_mas_munmap(&mas, mm, start, len, uf, false);
+ mas_unlock(&mas);
+ return ret;
}
unsigned long mmap_region(struct file *file, unsigned long addr,
struct ma_state ma_prev, tmp;
MA_STATE(mas, &mm->mm_mt, addr, end - 1);
- validate_mm(mm);
/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
return -ENOMEM;
}
+ mas_lock(&mas);
+ validate_mm(mm);
/* Unmap any existing mapping in the area */
- if (do_mas_munmap(&mas, mm, addr, len, uf, false))
+ if (do_mas_munmap(&mas, mm, addr, len, uf, false)) {
+ mas_unlock(&mas);
return -ENOMEM;
+ }
/*
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
charged = len >> PAGE_SHIFT;
- if (security_vm_enough_memory_mm(mm, charged))
+ if (security_vm_enough_memory_mm(mm, charged)) {
+ mas_unlock(&mas);
return -ENOMEM;
+ }
vm_flags |= VM_ACCOUNT;
}
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
*/
- WARN_ON_ONCE(addr != vma->vm_start);
if (addr != vma->vm_start) {
+ WARN_ON_ONCE(addr != vma->vm_start);
addr = vma->vm_start;
mas_set_range(&mas, addr, end - 1);
}
vma->vm_flags |= VM_SOFTDIRTY;
vma_set_page_prot(vma);
-
validate_mm(mm);
+ mas_unlock(&mas);
+
return addr;
unmap_and_free_vma:
unacct_error:
if (charged)
vm_unacct_memory(charged);
+ mas_unlock(&mas);
return error;
}
if (mmap_write_lock_killable(mm))
return -EINTR;
-
+ mas_lock(&mas);
ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);
+ mas_unlock(&mas);
/*
* Returning 1 indicates mmap_lock is downgraded.
* But 1 is not legal return value of vm_munmap() and munmap(), reset
if (!vma->vm_file)
goto out;
+ mas_lock(&mas);
if (start + size > vma->vm_end) {
struct vm_area_struct *prev, *next;
prot, flags, pgoff, &populate, NULL);
fput(file);
out:
+ mas_unlock(&mas);
mmap_write_unlock(mm);
if (populate)
mm_populate(ret, populate);
}
vma->vm_end = addr + len;
vma->vm_flags |= VM_SOFTDIRTY;
- mas_lock(ma_prev);
if (mas_store_gfp(ma_prev, vma, GFP_KERNEL)) {
mas_unlock(ma_prev);
goto mas_mod_fail;
}
- mas_unlock(ma_prev);
if (vma->anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
anon_vma_unlock_write(vma->anon_vma);
return -EINTR;
// This vma left intentionally blank.
+ mas_lock(&mas);
mas_walk(&mas);
ret = do_brk_flags(&mas, &mas, &vma, addr, len, flags);
populate = ((mm->def_flags & VM_LOCKED) != 0);
+ mas_unlock(&mas);
mmap_write_unlock(mm);
if (populate && !ret)
mm_populate_vma(vma, addr, addr + len);
mmap_write_unlock(mm);
}
+ mas_lock(&mas);
if (mm->locked_vm) {
mas_for_each(&mas, vma, ULONG_MAX) {
if (vma->vm_flags & VM_LOCKED) {
arch_exit_mmap(mm);
vma = mas_find(&mas, ULONG_MAX);
- if (!vma) /* Can happen if dup_mmap() received an OOM */
+ if (!vma) { /* Can happen if dup_mmap() received an OOM */
+ mas_unlock(&mas);
return;
+ }
lru_add_drain();
flush_cache_mm(mm);
cond_resched();
}
+ mas_unlock(&mas);
trace_exit_mmap(mm);
mtree_destroy(&mm->mm_mt);
vm_unacct_memory(nr_accounted);
BUG_ON(mmap_read_trylock(mm));
mutex_lock(&mm_all_locks_mutex);
+ mas_lock(&mas);
mas_for_each(&mas, vma, ULONG_MAX) {
if (signal_pending(current))
vm_lock_anon_vma(mm, avc->anon_vma);
}
+ mas_unlock(&mas);
return 0;
out_unlock:
BUG_ON(mmap_read_trylock(mm));
BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+ mas_lock(&mas);
+
mas_for_each(&mas, vma, ULONG_MAX) {
if (vma->anon_vma)
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
}
mutex_unlock(&mm_all_locks_mutex);
+ mas_unlock(&mas);
}
/*