struct mmu_gather tlb;
+ MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
BUG_ON(new_start > new_end);
/*
* cover the whole range: [new_start, old_end)
*/
- if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff))
+ if (vma_expand(&mas, vma, new_start, old_end, vma->vm_pgoff, NULL))
return -ENOMEM;
tlb_finish_mmu(&tlb);
/*
- * Shrink the vma to just the new range. Always succeeds.
+ * Shrink the vma to just the new range.
*/
- vma_adjust(vma, new_start, new_end, vma->vm_pgoff);
+ if (vma_shrink(&mas, vma, new_start, new_end, vma->vm_pgoff))
+ return -ENOMEM;
return 0;
}
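For readers skimming the diff, here is a condensed sketch of the new calling convention used above. It is not part of the patch: shift_vma_sketch() is a hypothetical helper name, the page-table move done by the real shift_arg_pages() is elided, and it assumes the MA_STATE() initializer from <linux/maple_tree.h> plus the vma_expand()/vma_shrink() prototypes introduced below. One maple state is declared over mm->mm_mt and reused for both calls, and the formerly infallible shrink can now return -ENOMEM because it may need to allocate maple tree nodes.

/*
 * Condensed sketch only; hypothetical helper, not part of the patch.
 */
static int shift_vma_sketch(struct vm_area_struct *vma,
			    unsigned long new_start, unsigned long new_end,
			    unsigned long old_end)
{
	MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);

	/* Grow the VMA so it covers both the old and the new location. */
	if (vma_expand(&mas, vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/* ... move page tables and free the old range here ... */

	/* Trim the VMA back down to the relocated range; may allocate. */
	if (vma_shrink(&mas, vma, new_start, new_end, vma->vm_pgoff))
		return -ENOMEM;

	return 0;
}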
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff)
-{
- return __vma_adjust(vma, start, end, pgoff, NULL);
-}
+extern int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *next);
+extern int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
*
* Returns: 0 on success
*/
-inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next)
+int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *next)
{
bool remove_next = false;
return -ENOMEM;
}
+/*
+ * vma_shrink() - Reduce an existing VMA's memory area
+ * @mas: The maple state
+ * @vma: The VMA to modify
+ * @start: The new start
+ * @end: The new end
+ * @pgoff: The new page offset
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff)
+{
+ struct vma_locking vma_lock;
+
+ WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+
+ if (mas_preallocate(mas, vma, GFP_KERNEL))
+ return -ENOMEM;
+
+ init_vma_lock(&vma_lock, vma);
+ vma_adjust_trans_huge(vma, start, end, 0);
+ lock_vma(&vma_lock);
+
+ if (vma->vm_start < start)
+ vma_mas_szero(mas, vma->vm_start, start);
+
+ if (vma->vm_end > end)
+ vma_mas_szero(mas, end, vma->vm_end);
+
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_pgoff = pgoff;
+ unlock_vma(&vma_lock, mas, vma->vm_mm);
+ validate_mm(vma->vm_mm);
+ return 0;
+}
+
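vma_shrink() relies on vma_mas_szero() to drop the trimmed-off ranges from the maple tree; that helper is not part of this excerpt. Roughly, it stores NULL over the half-open range so those addresses no longer resolve to the VMA. A paraphrased sketch follows (the in-tree helper may do slightly more, e.g. tracing; vma_mas_szero_sketch is a name chosen here for illustration):

/*
 * Paraphrased sketch of what the vma_mas_szero() calls above do.  Maple
 * tree ranges use an inclusive last index, hence end - 1, and the nodes
 * were already preallocated by mas_preallocate() in vma_shrink().
 */
static inline void vma_mas_szero_sketch(struct ma_state *mas,
					unsigned long start, unsigned long end)
{
	mas_set_range(mas, start, end - 1);
	mas_store_prealloc(mas, NULL);
}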
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
lock_vma(&vma_lock);
- if (vma->vm_start < start) {
- vma_mas_szero(&mas, vma->vm_start, start);
- } else if (start != vma->vm_start) {
+ if (start < vma->vm_start || end > vma->vm_end)
vma_changed = true;
- }
-
- if (vma->vm_end > end) {
- vma_mas_szero(&mas, end, vma->vm_end);
- } else if (end != vma->vm_end) {
- vma_changed = true;
- }
vma->vm_start = start;
vma->vm_end = end;
if (adjust_next) {
next->vm_start += adjust_next;
next->vm_pgoff += adjust_next >> PAGE_SHIFT;
- vma_mas_store(next, &mas);
+ if (adjust_next < 0) {
+ BUG_ON(vma_changed);
+ vma_mas_store(next, &mas);
+ }
}
unlock_vma(&vma_lock, &mas, mm);
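The reworked adjust_next handling appears to lean on how maple tree range stores interact with overlapping entries: when the vma expands forward (adjust_next > 0), storing the grown vma over its new range already truncates next's old head, so next does not need to be written again; only when next grows downward (adjust_next < 0) does its new head need an explicit store, and in that case the vma cannot also have expanded, hence the BUG_ON(vma_changed). A small illustrative sketch of that overwrite behaviour, using the external mtree_* API rather than the internal mas_* calls in the patch (names and addresses are invented for the demo):

#include <linux/gfp.h>
#include <linux/maple_tree.h>

/* Illustrative only: a range store overwrites just the overlapping part. */
static DEFINE_MTREE(demo_mt);

static void maple_overwrite_demo(void *vma_a, void *vma_b)
{
	/* vma_a maps 0x1000-0x2fff, vma_b maps 0x3000-0x4fff. */
	mtree_store_range(&demo_mt, 0x1000, 0x2fff, vma_a, GFP_KERNEL);
	mtree_store_range(&demo_mt, 0x3000, 0x4fff, vma_b, GFP_KERNEL);

	/* Grow vma_a over the first page that used to belong to vma_b. */
	mtree_store_range(&demo_mt, 0x1000, 0x3fff, vma_a, GFP_KERNEL);

	/* vma_b is still found for the rest of its old range. */
	WARN_ON(mtree_load(&demo_mt, 0x4000) != vma_b);
}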