* Always allow shrinking brk.
* do_brk_munmap() may downgrade mmap_lock to read.
*/
- if (brk <= mm->brk) {
+ if (brk <= mm->brk) { /* newbrk <= oldbrk */
int ret;
/*
* mm->brk must be protected by write mmap_lock.
vma_mt_store(mm, vma);
}
-/* vma_mt_brk() - Change the brk vma.
- * */
-void vma_mt_brk(struct vm_area_struct *vma, unsigned long new_end)
-{
- bool store = false;
-
- if (vma->anon_vma) {
- anon_vma_lock_write(vma->anon_vma);
- anon_vma_interval_tree_pre_update_vma(vma);
- }
-
- // changing the back.
- if (vma->vm_end > new_end) {
- vma_mt_szero(vma->vm_mm, new_end, vma->vm_end);
- vmacache_invalidate(vma->vm_mm);
- } else
- store = true;
-
- vma->vm_end = new_end;
- if (store)
- vma_mt_store(vma->vm_mm, vma);
-
-
- if (vma->anon_vma) {
- anon_vma_interval_tree_post_update_vma(vma);
- anon_vma_unlock_write(vma->anon_vma);
- }
-}
-
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev)
{
// Shrink the vma's end from oldbrk down to the newbrk of the munmap area
vma_adjust_trans_huge(vma, vma->vm_start, newbrk, 0);
- vma_mt_brk(vma, newbrk);
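+ /* Resizing the vma must update its anon_vma interval tree under the lock */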
+ if (vma->anon_vma) {
+ anon_vma_lock_write(vma->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ }
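+ /* Clear the maple tree over [newbrk, old vm_end) before shrinking vm_end */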
+ vma_mt_szero(vma->vm_mm, newbrk, vma->vm_end);
+ vma->vm_end = newbrk;
+ vmacache_invalidate(vma->vm_mm);
+ if (vma->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
+ }
unmap_pages = vma_pages(&unmap);
if (unmap.vm_flags & VM_LOCKED) {
vma = *brkvma;
/* Fast path, expand the existing vma if possible */
if (vma && ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
- vma_mt_brk(vma, addr + len);
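+ /* Expand the vma in place and store the new range in the maple tree */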
+ vma->vm_end = addr + len;
+ vma_mt_store(vma->vm_mm, vma);
goto out;
}
}