return next;
}
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
- struct list_head *uf);
+static int do_brk_flags(unsigned long addr, unsigned long request,
+ struct vm_area_struct *vma, unsigned long flags,
+ struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long retval;
unsigned long newbrk, oldbrk, origbrk;
struct mm_struct *mm = current->mm;
- struct vm_area_struct *next;
+ struct vm_area_struct *vma_brk, *next;
unsigned long min_brk;
bool populate;
bool downgraded = false;
}
/* Check against existing mmap mappings. */
- next = find_vma(mm, oldbrk);
+ next = find_vma_prev(mm, oldbrk, &vma_brk);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
- if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
+ if (do_brk_flags(oldbrk, newbrk-oldbrk, vma_brk, 0, &uf) < 0)
goto out;
mm->brk = brk;
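
For context, the program break this syscall adjusts is the same one userspace manipulates through brk(2)/sbrk(2). A minimal userspace sketch (not part of the patch) showing the break move when the heap grows:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *start = sbrk(0);	/* current program break */

	/* Grow the heap by one page; the kernel extends the brk VMA. */
	if (sbrk(4096) == (void *)-1) {
		perror("sbrk");
		return 1;
	}
	printf("break moved from %p to %p\n", start, sbrk(0));
	return 0;
}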
flush_dcache_mmap_unlock(mapping);
}
}
+
/* Private
* vma_mt_erase() - erase a VMA entry from the maple tree.
*
GFP_KERNEL);
}
+void vma_mt_modify(struct vm_area_struct *vma, unsigned long new_start,
+ unsigned long new_end)
+{
+ /* Shrinking front. */
+ if (vma->vm_start < new_start)
+ vma_mt_szero(vma->vm_mm, vma->vm_start, new_start);
+
+ /* Shrinking back. */
+ if (vma->vm_end > new_end)
+ vma_mt_szero(vma->vm_mm, new_end, vma->vm_end);
+
+ vma->vm_start = new_start;
+ vma->vm_end = new_end;
+ vma_mt_store(vma->vm_mm, vma);
+}
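
As a sanity check of the new helper, here is a small userspace model (hypothetical, not kernel code) mirroring vma_mt_modify(): the ranges handed to vma_mt_szero() are exactly the pieces of the old span that fall outside [new_start, new_end):

#include <stdio.h>

struct fake_vma { unsigned long vm_start, vm_end; };

/* Stand-in for vma_mt_szero(): report the range being cleared. */
static void szero(unsigned long start, unsigned long end)
{
	printf("zeroing [%#lx, %#lx)\n", start, end);
}

static void modify(struct fake_vma *vma, unsigned long new_start,
		   unsigned long new_end)
{
	if (vma->vm_start < new_start)	/* shrinking the front */
		szero(vma->vm_start, new_start);
	if (vma->vm_end > new_end)	/* shrinking the back */
		szero(new_end, vma->vm_end);
	vma->vm_start = new_start;
	vma->vm_end = new_end;
}

int main(void)
{
	struct fake_vma vma = { 0x1000, 0x5000 };

	/* Shrink one page off each end: expect two zeroed ranges. */
	modify(&vma, 0x2000, 0x4000);
	return 0;
}

Note that shrinking only ever removes entries; growth needs no erase, since the final vma_mt_store() writes the VMA back over the (possibly larger) new range.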
+
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev)
{
EXPORT_SYMBOL(get_unmapped_area);
/**
- * find_vma() - Find the VMA for a given address, or the next vma. May return
- * NULL in the case of no vma at addr or above
+ * find_vma() - Find the VMA for a given address, or the next vma.
* @mm: The mm_struct to check
* @addr: The address
*
}
/*
- * this is really a simplified "do_mmap". it only handles
- * anonymous maps. eventually we may be able to do some
- * brk-specific accounting here.
+ * do_brk_flags() - Extend the brk VMA if the flags match.
+ * @addr: The start address
+ * @len: The length of the increase
+ * @vma: The vma to extend, or NULL
+ * @flags: The VMA flags
+ * @uf: The userfaultfd unmap list
+ *
+ * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the
+ * flags do not match, then create a new anonymous VMA. Eventually we may be
+ * able to do some brk-specific accounting here.
*/
static int do_brk_flags(unsigned long addr, unsigned long len,
- unsigned long flags, struct list_head *uf)
+ struct vm_area_struct *vma, unsigned long flags,
+ struct list_head *uf)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev;
+ struct vm_area_struct *prev = NULL;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
unsigned long mapped_addr;
if (error)
return error;
- /* Clear old maps, set up prev and uf */
- if (munmap_vma_range(mm, addr, len, &prev, uf))
- return -ENOMEM;
-
- /* Check against address space limits *after* clearing old maps... */
+ /* Check against address space limits by the changed size */
if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
return -ENOMEM;
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
- /* Can we just expand an old private anonymous mapping? */
- vma = vma_merge(mm, prev, addr, addr + len, flags,
- NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
- if (vma)
+ if (vma && flags == vma->vm_flags) {
+ vma_mt_modify(vma, vma->vm_start, addr + len);
goto out;
+ }
/*
* create a vma struct for an anonymous mapping
*/
+ prev = vma;
vma = vm_area_alloc(mm);
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
+ if (!prev)
+ find_vma_prev(mm, addr, &prev);
vma_link(mm, vma, prev);
out:
perf_event_mmap(vma);
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = do_brk_flags(addr, len, flags, &uf);
+ ret = do_brk_flags(addr, len, NULL, flags, &uf);
populate = ((mm->def_flags & VM_LOCKED) != 0);
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
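
One observable effect of the fast path in do_brk_flags(): consecutive brk growth with matching flags widens the existing [heap] VMA instead of creating and merging a new one. A rough userspace check (assuming a Linux system with /proc) is to print the [heap] line of /proc/self/maps around two sbrk calls and watch only the end address change:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void dump_heap_line(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[heap]"))
			fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	sbrk(4096);
	dump_heap_line();	/* one [heap] mapping */
	sbrk(4096);
	dump_heap_line();	/* same mapping, larger end address */
	return 0;
}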