return next;
}
-static int do_brk_munmap(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
+static bool do_brk_munmap(struct vm_area_struct *vma, unsigned long newbrk,
+ unsigned long oldbrk);
static int do_brk_flags(unsigned long addr, unsigned long request,
struct vm_area_struct *vma, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
goto success;
}
- /*
- * Always allow shrinking brk.
- * do_brk_munmap() may downgrade mmap_lock to read.
- */
brkvma = find_vma_intersection(mm, mm->start_brk, mm->brk);
if (brkvma) {
+ /*
+ * Always allow shrinking brk.
+ * do_brk_munmap() may downgrade mmap_lock to read.
+ */
if (brk <= mm->brk) {
- int ret;
-
/*
			 * mm->brk must be protected by write mmap_lock.
* do_brk_munmap() may downgrade the lock, so update it
-			 * before downgrading mmap_lock. if do_brk_munmap() fails,
- * mm->brk will be restored from origbrk.
+ * before calling do_brk_munmap().
*/
mm->brk = brk;
- ret = do_brk_munmap(brkvma, newbrk, brkvma->vm_end);
- if (ret < 0) {
- mm->brk = origbrk;
- goto out;
- } else if (ret == 1) {
- downgraded = true;
- }
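+			/*
+			 * do_brk_munmap() returns true when it has downgraded
+			 * mmap_lock to read.
+			 */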
+ downgraded = do_brk_munmap(brkvma, newbrk, oldbrk);
goto success;
}
-
next = brkvma->vm_next;
} else {
next = find_vma(mm, mm->brk);
vma_mt_store(mm, vma);
}
-void vma_mt_modify(struct vm_area_struct *vma, unsigned long new_start,
- unsigned long new_end)
+/*
+ * vma_mt_brk() - Change the end of the brk vma.
+ * @vma: The brk vma to modify
+ * @new_end: The new end address of the vma
+ */
+void vma_mt_brk(struct vm_area_struct *vma, unsigned long new_end)
{
- if (vma->anon_vma) {
- anon_vma_interval_tree_pre_update_vma(vma);
- anon_vma_lock_write(vma->anon_vma);
- }
- // Shrinking front.
- if (vma->vm_start < new_start) {
- vma_mt_szero(vma->vm_mm, vma->vm_start, new_start);
- }
+ bool store = false;
+
+	if (vma->anon_vma) {
+		anon_vma_lock_write(vma->anon_vma);
+		anon_vma_interval_tree_pre_update_vma(vma);
+	}
- // Shrinking back.
- if (vma->vm_end > new_end)
+	/* Shrinking the end of the vma: zero the removed range in the tree. */
+ if (vma->vm_end > new_end) {
vma_mt_szero(vma->vm_mm, new_end, vma->vm_end);
+ vmacache_invalidate(vma->vm_mm);
+ } else
+ store = true;
- vma->vm_pgoff += (new_end - new_start) >> PAGE_SHIFT;
- vma->vm_start = new_start;
vma->vm_end = new_end;
- vma_mt_store(vma->vm_mm, vma);
- if (vma->anon_vma) {
- anon_vma_interval_tree_post_update_vma(vma);
- anon_vma_unlock_write(vma->anon_vma);
- }
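+	/*
+	 * When growing, the expanded range is written to the maple tree only
+	 * after vm_end has been updated above.
+	 */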
+ if (store)
+ vma_mt_store(vma->vm_mm, vma);
+
+	if (vma->anon_vma) {
+		anon_vma_interval_tree_post_update_vma(vma);
+		anon_vma_unlock_write(vma->anon_vma);
+	}
}
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
/*
 * do_brk_munmap() - Unmap a partial vma.
* @vma: The vma to be modified
- * @start: the start of the address to unmap
- * @end: The end of the address to unmap
+ * @newbrk: the new brk; start of the range to unmap
+ * @oldbrk: the old brk; end of the range to unmap
*
- * Returns: 0 on success.
+ * Returns: true if mmap_lock was downgraded to read, false otherwise.
* unmaps a partial VMA mapping. Does not handle alignment, downgrades lock if
* possible.
*/
-int do_brk_munmap(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
+static bool do_brk_munmap(struct vm_area_struct *vma, unsigned long newbrk,
+ unsigned long oldbrk)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct unmap;
- unsigned long unmap_pages = 0;
- int ret = 0;
+ unsigned long unmap_pages;
+	bool downgrade = true;
- arch_unmap(mm, start, end);
+ arch_unmap(mm, newbrk, oldbrk);
- if (vma->vm_ops && vma->vm_ops->split) {
- ret = vma->vm_ops->split(vma, start);
- if (ret)
- return ret;
- }
+	if (vma->vm_start == newbrk) {	/* remove the entire mapping */
+ struct vm_area_struct *prev = vma->vm_prev;
- memset(&unmap, 0, sizeof(struct vm_area_struct));
- INIT_LIST_HEAD(&unmap.anon_vma_chain);
- ret = vma_dup_policy(vma, &unmap);
- if (ret)
- return ret;
+ if (mm->locked_vm)
+ unlock_range(vma, oldbrk);
- if (mm->locked_vm)
- unlock_range(vma, end);
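+		/*
+		 * Only downgrade the lock when detach_vmas_to_be_unmapped()
+		 * allows it; it refuses when the area is next to a
+		 * VM_GROWSDOWN or VM_GROWSUP vma.
+		 */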
+ if (!detach_vmas_to_be_unmapped(mm, vma, prev, oldbrk))
+ downgrade = false;
+ else
+ mmap_write_downgrade(mm);
+
+ unmap_region(mm, vma, prev, newbrk, oldbrk);
+ /* Fix up all other VM information */
+ remove_vma_list(mm, vma);
+ goto munmap_full_vma;
+ }
- vma_mt_modify(vma, vma->vm_start, start);
- unmap.vm_mm = vma->vm_mm;
- unmap.vm_start = start;
- unmap.vm_end = end;
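+	/*
+	 * Build a temporary vma describing only [newbrk, oldbrk) so that
+	 * unmap_region() tears down just the tail of the brk vma.
+	 */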
+ vma_init(&unmap, mm);
+ unmap.vm_start = newbrk;
+ unmap.vm_end = oldbrk;
unmap.vm_flags = vma->vm_flags;
- ret = 1;
- if (vma->vm_flags & VM_GROWSDOWN)
- ret = 0;
+
+ unmap_pages = vma_pages(&unmap);
+
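+	/* Unaccount and munlock any mlocked pages in the range being removed. */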
+ if (vma->vm_flags & VM_LOCKED) {
+ mm->locked_vm -= unmap_pages;
+ munlock_vma_pages_range(vma, newbrk, oldbrk);
+ }
+
+	/* Shrink the brk vma: move its end from oldbrk down to newbrk. */
+ vma_mt_brk(vma, newbrk);
+
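+	/*
+	 * Do not downgrade mmap_lock when next to a VM_GROWSDOWN or
+	 * VM_GROWSUP vma: such vmas can change size under the read lock and
+	 * collide with the area being unmapped.
+	 */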
+ if (vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN))
+ downgrade = false;
if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
- ret = 0;
+ downgrade = false;
- if (ret)
+ if (downgrade)
mmap_write_downgrade(mm);
- unmap_region(mm, &unmap, vma, start, end);
- unmap_pages = vma_pages(&unmap);
- vm_stat_account(mm, vma->vm_flags, -unmap_pages);
- if (vma->vm_flags & VM_ACCOUNT)
+
+ unmap_region(mm, &unmap, vma, newbrk, oldbrk);
+ /* Statistics */
+ vm_stat_account(mm, unmap.vm_flags, -unmap_pages);
+ if (unmap.vm_flags & VM_ACCOUNT)
vm_unacct_memory(unmap_pages);
+
+munmap_full_vma:
validate_mm_mt(mm);
- return ret;
+ return downgrade;
}
/*
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *prev = NULL;
- pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
unsigned long mapped_addr;
validate_mm_mt(mm);
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
- if (vma && flags == vma->vm_flags) {
- vma_mt_modify(vma, vma->vm_start, addr+len);
+	/* Fast path: expand the existing vma if possible. */
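+	/*
+	 * VM_SOFTDIRTY is ignored here so that soft-dirty tracking does not
+	 * prevent expanding an otherwise compatible vma.
+	 */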
+ if (vma && ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+ vma_mt_brk(vma, addr + len);
goto out;
}
- /*
- * create a vma struct for an anonymous mapping
- */
+ /* create a vma struct for an anonymous mapping */
prev = vma;
vma = vm_area_alloc(mm);
if (!vma) {
vma_set_anonymous(vma);
vma->vm_start = addr;
vma->vm_end = addr + len;
- vma->vm_pgoff = pgoff;
+ vma->vm_pgoff = addr >> PAGE_SHIFT;
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
if (!prev)