if (mmap_write_lock_killable(mm))
return -EINTR;
+ mas_lock(&mas);
origbrk = mm->brk;
goto out;
mm->brk = brk;
-
success:
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
if (downgraded)
mmap_read_unlock(mm);
- else
+ else {
+ mas_unlock(&mas);
mmap_write_unlock(mm);
+ }
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
out:
+ mas_unlock(&mas);
mmap_write_unlock(mm);
return origbrk;
}
{
MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
+ mas_lock(&mas);
vma_mas_link(mm, vma, &mas);
+ mas_unlock(&mas);
}
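The hunk above shows the locking pattern this series applies throughout mmap.c: an on-stack maple state is declared with MA_STATE() over the VMA's inclusive range [vm_start, vm_end - 1], and the tree is only touched between mas_lock() and mas_unlock(). A minimal sketch of that pattern, using only the generic maple tree API; the helper name below is hypothetical and not part of the patch:

#include <linux/maple_tree.h>

/*
 * Illustrative only, not part of the patch: store an entry over an
 * inclusive range under the maple state spinlock.  Mirrors the
 * MA_STATE()/mas_lock()/mas_unlock() wrapping around vma_mas_link()
 * above; the function name is hypothetical.
 */
static int mtree_demo_store(struct maple_tree *mt, void *entry,
			    unsigned long first, unsigned long last)
{
	MA_STATE(mas, mt, first, last);	/* range is inclusive of @last */
	int ret;

	mas_lock(&mas);			/* take the tree's internal lock */
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
	mas_unlock(&mas);

	return ret;
}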
/*
do {
count++;
*vma = mas_prev(mas, start);
- BUG_ON((*vma)->vm_start < start);
- BUG_ON((*vma)->vm_end > end + 1);
vma_mas_store(*vma, dst);
if ((*vma)->vm_flags & VM_LOCKED) {
mm->locked_vm -= vma_pages(*vma);
}
/* Point of no return */
- mas_lock(mas);
if (next)
max = next->vm_start;
mtree_init(&mt_detach, MAPLE_ALLOC_RANGE);
dst.tree = &mt_detach;
detach_range(mm, mas, &dst, &vma);
- mas_unlock(mas);
/*
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
downgrade = false;
else if (prev && (prev->vm_flags & VM_GROWSUP))
downgrade = false;
- else
+ else {
+ mas_unlock(mas);
mmap_write_downgrade(mm);
+ }
}
/* Unmap the region */
* This function takes a @mas that is in the correct state to remove the
* mapping(s). The @len will be aligned and any arch_unmap work will be
* performed.
+ * Note: May unlock the maple state if the mmap_lock is downgraded.
+ *
+ * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise.
+ *
*/
int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
int ret;
MA_STATE(mas, &mm->mm_mt, start, start);
+ mas_lock(&mas);
ret = do_mas_munmap(&mas, mm, start, len, uf, false);
+ mas_unlock(&mas);
return ret;
}
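Per the comment above, do_mas_munmap() returns a negative errno on failure, 1 when the unmap succeeded and the mmap write lock was downgraded to a read lock (the maple state is already unlocked on that path), and 0 on plain success. A hedged caller-side sketch of that contract, mirroring the downgrade handling shown further down in this patch; the wrapper name is hypothetical and userfaultfd completion is omitted:

/* Illustrative caller of do_mas_munmap(); not part of the patch. */
static int demo_munmap(struct mm_struct *mm, unsigned long start,
		       size_t len, struct list_head *uf)
{
	MA_STATE(mas, &mm->mm_mt, start, start);
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	mas_lock(&mas);
	ret = do_mas_munmap(&mas, mm, start, len, uf, true);
	if (ret == 1) {
		/*
		 * Success and the write lock was downgraded; the maple
		 * state was already unlocked inside do_mas_munmap().
		 */
		mmap_read_unlock(mm);
		ret = 0;
	} else {
		mas_unlock(&mas);
		mmap_write_unlock(mm);
	}
	return ret;
}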
unsigned long merge_start = addr, merge_end = end;
unsigned long max = USER_PGTABLES_CEILING;
pgoff_t vm_pgoff;
- int error;
+ int error = -ENOMEM;
struct ma_state ma_prev, tmp;
MA_STATE(mas, &mm->mm_mt, addr, end - 1);
+ mas_lock(&mas);
/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
if (!may_expand_vm(mm, vm_flags,
(len >> PAGE_SHIFT) - nr_pages))
- return -ENOMEM;
+ goto no_mem;
}
validate_mm(mm);
/* Unmap any existing mapping in the area */
- if (do_mas_munmap(&mas, mm, addr, len, uf, false)) {
- return -ENOMEM;
- }
+ if (do_mas_munmap(&mas, mm, addr, len, uf, false))
+ goto no_mem;
/*
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
charged = len >> PAGE_SHIFT;
- if (security_vm_enough_memory_mm(mm, charged)) {
- return -ENOMEM;
- }
+ if (security_vm_enough_memory_mm(mm, charged))
+ goto no_mem;
vm_flags |= VM_ACCOUNT;
}
* not unmapped, but the maps are removed from the list.
*/
vma = vm_area_alloc(mm);
- if (!vma) {
- error = -ENOMEM;
+ if (!vma)
goto unacct_error;
- }
vma->vm_start = addr;
vma->vm_end = end;
vma->vm_flags |= VM_SOFTDIRTY;
vma_set_page_prot(vma);
validate_mm(mm);
+ mas_unlock(&mas);
return addr;
unmap_and_free_vma:
unacct_error:
if (charged)
vm_unacct_memory(charged);
+no_mem:
+ mas_unlock(&mas);
return error;
}
if (mmap_write_lock_killable(mm))
return -EINTR;
+ mas_lock(&mas);
ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);
/*
* Returning 1 indicates mmap_lock is downgraded.
if (ret == 1) {
mmap_read_unlock(mm);
ret = 0;
- } else
+ } else {
+ mas_unlock(&mas);
mmap_write_unlock(mm);
+ }
userfaultfd_unmap_complete(mm, &uf);
return ret;
if (mmap_write_lock_killable(mm))
return -EINTR;
+ rcu_read_lock();
mas_set(&mas, start);
vma = mas_walk(&mas);
prot, flags, pgoff, &populate, NULL);
fput(file);
out:
+ rcu_read_unlock();
mmap_write_unlock(mm);
if (populate)
mm_populate(ret, populate);
* @oldbrk: The end of the address to unmap
* @uf: The userfaultfd list_head
*
- * Returns: 0 on success.
+ * Returns: 0 on success, 1 on success and downgraded write lock, negative
+ * otherwise.
* unmaps a partial VMA mapping. Does not handle alignment, downgrades lock if
* possible.
*/
munlock_vma_pages_range(&unmap, newbrk, oldbrk);
}
+ mas_unlock(mas);
mmap_write_downgrade(mm);
unmap_region(mm, &unmap, mas, newbrk, oldbrk, vma,
next ? next->vm_start : 0);
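The helper documented above (do_brk_munmap() in this series, though the name is not visible in this hunk) shrinks the brk mapping in place: it munlocks the trailing pages if needed, drops the maple state lock, downgrades mmap_lock to a read lock, and unmaps the region. A hedged sketch of how brk() is expected to consume its 0 / 1 / negative return, matching the downgraded/success/out flow at the top of this patch; the exact signature and local names are assumptions:

	/*
	 * Illustrative only: brk()-side handling of the partial-unmap
	 * helper's return value.  Names and signature are assumed.
	 */
	ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
	if (ret == 1) {		/* unmapped; write lock downgraded to read */
		downgraded = true;
		goto success;
	} else if (ret)		/* negative errno: failure, still write-locked */
		goto out;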
anon_vma_lock_write(vma->anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
}
- mas_lock(ma_prev);
vma->vm_end = addr + len;
vma->vm_flags |= VM_SOFTDIRTY;
- if (mas_store_gfp(ma_prev, vma, GFP_KERNEL)) {
- mas_unlock(ma_prev);
+ if (mas_store_gfp(ma_prev, vma, GFP_KERNEL))
goto mas_mod_fail;
- }
if (vma->anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
if (mmap_write_lock_killable(mm))
return -EINTR;
+ mas_lock(&mas);
// This vma left intentionally blank.
mas_walk(&mas);
ret = do_brk_flags(&mas, &mas, &vma, addr, len, flags);
populate = ((mm->def_flags & VM_LOCKED) != 0);
+ mas_unlock(&mas);
mmap_write_unlock(mm);
if (populate && !ret)
mm_populate(addr, len);
arch_exit_mmap(mm);
+ mas_lock(&mas);
vma = mas_find(&mas, ULONG_MAX);
if (!vma) { /* Can happen if dup_mmap() received an OOM */
- rcu_read_unlock();
+ mas_unlock(&mas);
return;
}
unmap_vmas(&tlb, vma, &mas, 0, -1);
free_pgtables(&tlb, &mas2, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb);
+ mas_unlock(&mas);
/*
* Walk the list again, actually closing and freeing it,
*/
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
- if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
- return -ENOMEM;
+ MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
+
+ mas_lock(&mas);
+ if (mas_find(&mas, vma->vm_end - 1))
+ goto no_mem;
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
- return -ENOMEM;
+ goto no_mem;
+
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
}
- vma_link(mm, vma);
+ mas_reset(&mas);
+ vma_mas_link(mm, vma, &mas);
+ mas_unlock(&mas);
return 0;
+
+no_mem:
+ mas_unlock(&mas);
+ return -ENOMEM;
}
/*