/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
-{
- return __vma_adjust(vma, start, end, pgoff, insert, NULL);
-}
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
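+/*
+ * vma_adjust() takes the vma to "expand" directly; the old "insert"
+ * argument is gone, so a split vma must now be linked by the caller
+ * (an inference from the split_vma comments removed below).
+ */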
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- * are necessary. The "insert" vma (if any) is to be inserted
- * before we drop the necessary locks.
+ * are necessary.
*/
-int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand)
+int vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
MA_STATE(mas, &mm->mm_mt, 0, 0);
struct vm_area_struct *exporter = NULL, *importer = NULL;
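+ /*
+  * mas tracks a position in mm's maple tree; the vma_mas_szero()
+  * calls below use it to clear ranges this vma no longer covers.
+  */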
- if (next && !insert) {
+ if (next) {
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
VM_WARN_ON(expand != importer);
} else if (end < vma->vm_end) {
/*
- * vma shrinks, and !insert tells it's not
- * split_vma inserting another: so it must be
- * mprotect case 4 shifting the boundary down.
+ * vma shrinks, so it must be mprotect case 4 shifting
+ * the boundary down.
*/
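+ /*
+  * adjust_next is negative here, so next->vm_start will later be
+  * pulled down to "end" (assuming the usual adjust_next handling,
+  * which this excerpt elides).
+  */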
adjust_next = -(vma->vm_end - end);
exporter = vma;
uprobe_munmap(next, next->vm_start, next->vm_end);
i_mmap_lock_write(mapping);
- if (insert && insert->vm_file) {
- /*
- * Put into interval tree now, so instantiated pages
- * are visible to arm/parisc __flush_dcache_page
- * throughout; but we cannot insert into address
- * space until vma start or end is updated.
- */
- __vma_link_file(insert, insert->vm_file->f_mapping);
- }
}
anon_vma = vma->anon_vma;
}
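+ /*
+  * Boundary rewrites: when an edge moves inward, the stale part of
+  * the range is cleared from the maple tree with vma_mas_szero();
+  * when it moves outward, vma_changed defers the store until the
+  * vma fields themselves have been updated (store elided here).
+  */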
if (start != vma->vm_start) {
- if ((vma->vm_start < start) &&
- (!insert || (insert->vm_end != start))) {
+ if (vma->vm_start < start) {
vma_mas_szero(&mas, vma->vm_start, start);
- VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
} else {
vma_changed = true;
}
}
if (end != vma->vm_end) {
if (vma->vm_end > end) {
- if (!insert || (insert->vm_start != end)) {
- vma_mas_szero(&mas, end, vma->vm_end);
- mas_reset(&mas);
- VM_WARN_ON(insert &&
- insert->vm_end < vma->vm_end);
- }
+ vma_mas_szero(&mas, end, vma->vm_end);
+ mas_reset(&mas);
} else {
vma_changed = true;
}
flush_dcache_mmap_unlock(mapping);
}
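+ /*
+  * next is being removed outright: drop it from the file's interval
+  * tree before the locks are released.
+  */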
- if (remove_next && file) {
+ if (remove_next && file)
__remove_shared_vm_struct(next, file, mapping);
- } else if (insert) {
- /*
- * split_vma has split insert from vma, and needs
- * us to insert it before dropping the locks
- * (it may either follow vma or precede it).
- */
- mas_reset(&mas);
- vma_mas_store(insert, &mas);
- mm->map_count++;
- }
if (anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
goto again;
}
}
- if (insert && file) {
- uprobe_mmap(insert);
- }
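+ /*
+  * mas_destroy() frees any maple nodes still preallocated for the
+  * writes above; validate_mm() is a debug-only consistency check.
+  */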
mas_destroy(&mas);
validate_mm(mm);
is_mergeable_anon_vma(prev->anon_vma,
next->anon_vma, NULL)) {
/* cases 1, 6 */
- err = __vma_adjust(prev, prev->vm_start,
- next->vm_end, prev->vm_pgoff, NULL,
- prev);
+ err = vma_adjust(prev, prev->vm_start, next->vm_end,
+ prev->vm_pgoff, prev);
} else /* cases 2, 5, 7 */
- err = __vma_adjust(prev, prev->vm_start,
- end, prev->vm_pgoff, NULL, prev);
+ err = vma_adjust(prev, prev->vm_start, end,
+ prev->vm_pgoff, prev);
if (err)
return NULL;
khugepaged_enter_vma_merge(prev, vm_flags);
anon_vma, file, pgoff+pglen,
vm_userfaultfd_ctx, anon_name)) {
if (prev && addr < prev->vm_end) /* case 4 */
- err = __vma_adjust(prev, prev->vm_start,
- addr, prev->vm_pgoff, NULL, next);
+ err = vma_adjust(prev, prev->vm_start, addr,
+ prev->vm_pgoff, next);
else { /* cases 3, 8 */
- err = __vma_adjust(area, addr, next->vm_end,
- next->vm_pgoff - pglen, NULL, next);
+ err = vma_adjust(area, addr, next->vm_end,
+ next->vm_pgoff - pglen, next);
/*
* In case 3 area is already equal to next and
* this is a noop, but in case 8 "area" has
mas_store(&mas, start);
mm->map_count++;
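+ /*
+  * Maple tree ranges are inclusive, hence "vm_end - 1"; this store
+  * replaces the single entry that covered the range (see the
+  * map_count comment below).
+  */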
mas_set_range(&mas, end->vm_start, end->vm_end - 1);
- mas_store_prealloc(&mas, end);
+ mas_store(&mas, end);
/* map_count is fine here since one vma was just overwritten */
BUG_ON(start->vm_end != end->vm_start);