bool populate;
bool downgraded = false;
LIST_HEAD(uf);
- MA_STATE(mas, &mm->mm_mt, mm->start_brk, mm->start_brk);
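+ /* Maple tree state for mm->mm_mt; repositioned with mas_set() before use */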
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
if (mmap_write_lock_killable(mm))
return -EINTR;
mm->brk = brk;
goto success;
}
- if (mm->brk > mm->start_brk)
- mas_set(&mas, mm->brk - 1);
- brkvma = mas_walk(&mas);
- if (brkvma) {
+ mas_set(&mas, newbrk);
+ brkvma = mas_walk(&mas);
+ if (brkvma) { /* munmap necessary: something is mapped at newbrk */
/*
* Always allow shrinking brk.
* do_brk_munmap() may downgrade mmap_lock to read.
*/
- if (brk <= mm->brk) { // newbrk < oldbrk
- int ret;
- /*
- * mm->brk must to be protected by write mmap_lock.
- * do_brk_munmap() may downgrade the lock, so update it
- * before calling do_brk_munmap().
- */
- mm->brk = brk;
- ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
- if (ret == 1) {
- downgraded = true;
- goto success;
- } else if (!ret)
- goto success;
-
- mm->brk = origbrk;
- goto out;
- }
- next = brkvma->vm_next;
- } else {
- next = mas_next(&mas, ULONG_MAX);
- mas_prev(&mas, 0);
+ int ret;
+ /*
+ * mm->brk must be protected by write mmap_lock.
+ * do_brk_munmap() may downgrade the lock, so update it
+ * before calling do_brk_munmap().
+ */
+ mm->brk = brk;
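+ /* The region being unmapped ends at oldbrk; point the maple range end at its last byte */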
+ mas.last = oldbrk - 1;
+ ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
+ if (ret == 1) {
+ downgraded = true;
+ goto success;
+ } else if (!ret)
+ goto success;
+
+ mm->brk = origbrk;
+ goto out;
}
-
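+ /* Nothing is mapped at newbrk, so look up the next vma above it */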
+ next = mas_next(&mas, ULONG_MAX);
/* Check against existing mmap mappings. */
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
- if (do_brk_flags(&mas, &brkvma, oldbrk, newbrk-oldbrk, 0) < 0)
+ brkvma = mas_prev(&mas, mm->start_brk);
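+ /* Find the vma preceding the expansion range so do_brk_flags() can try to grow it */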
+ if (do_brk_flags(&mas, &brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
goto out;
mm->brk = brk;
/* Shrink the vma: move its end from oldbrk down to newbrk, carving out the munmap area */
vma_adjust_trans_huge(vma, vma->vm_start, newbrk, 0);
- anon_vma_lock_write(vma->anon_vma);
- anon_vma_interval_tree_pre_update_vma(vma);
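+ /* anon_vma may still be NULL if the brk region has never been faulted in */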
+ if (vma->anon_vma) {
+ anon_vma_lock_write(vma->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ }
vma->vm_end = newbrk;
if (vma_mas_remove(&unmap, mas))
goto mas_store_fail;
- anon_vma_interval_tree_post_update_vma(vma);
- anon_vma_unlock_write(vma->anon_vma);
+ if (vma->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
+ }
unmap_pages = vma_pages(&unmap);
if (unmap.vm_flags & VM_LOCKED) {
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
- if (brkvma && *brkvma) {
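+ /* The new or expanded mapping will end at addr + len; record its last byte */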
+ mas->last = addr + len - 1;
+ if (*brkvma) {
vma = *brkvma;
/* Expand the existing vma if possible; almost never a singular
* list, so this will almost always fail. */
- if (list_is_singular(&vma->anon_vma_chain) &&
- ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
- anon_vma_lock_write(vma->anon_vma);
- anon_vma_interval_tree_pre_update_vma(vma);
- vma->vm_end = addr + len;
+
+ if ((!vma->anon_vma ||
+ list_is_singular(&vma->anon_vma_chain)) &&
+ ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
mas->index = vma->vm_start;
- mas->last = vma->vm_end - 1;
+
+ vma_adjust_trans_huge(vma, addr, addr + len, 0);
+ if (vma->anon_vma) {
+ anon_vma_lock_write(vma->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ }
+ vma->vm_end = addr + len;
+ vma->vm_flags |= VM_SOFTDIRTY;
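+ /* Store the expanded vma over [vma->vm_start, addr + len - 1], replacing its old entry */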
if (mas_store_gfp(mas, vma, GFP_KERNEL))
goto mas_mod_fail;
- anon_vma_interval_tree_post_update_vma(vma);
- anon_vma_unlock_write(vma->anon_vma);
+ if (vma->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
+ }
+ khugepaged_enter_vma_merge(vma, flags);
goto out;
}
prev = vma;
}
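+ /* Expansion was not possible: position the maple state at addr for a new vma */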
+ mas->index = addr;
+ mas_walk(mas);
/* create a vma struct for an anonymous mapping */
vma = vm_area_alloc(mm);
if (!prev)
prev = mas_prev(mas, 0);
+
__vma_link_list(mm, vma, prev);
mm->map_count++;
*brkvma = vma;