}
/*
- * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
- * has already been checked or doesn't make sense to fail.
+ * vma_replace() - Replace one VMA with two new VMAs.
+ * @vma: The VMA to be replaced
+ * @start: The new VMA covering the lower part of @vma's range
+ * @end: The new VMA covering the upper part of @vma's range
+ *
+ * @start and @end must cover the entire range of @vma; leaving a portion of
+ * @vma uncovered is currently not supported.
+ */
+static inline int vma_replace(struct vm_area_struct *vma,
+ struct vm_area_struct *start, struct vm_area_struct *end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct address_space *mapping = NULL;
+ struct anon_vma *anon_vma = vma->anon_vma;
+ struct file *file = vma->vm_file;
+ MA_STATE(mas, &vma->vm_mm->mm_mt, start->vm_start, start->vm_end - 1);
+
+ if (mas_preallocate(&mas, vma, GFP_KERNEL))
+ return -ENOMEM;
+
+ vma_adjust_trans_huge(vma, vma->vm_start, end->vm_start, 0);
+ if (file) {
+ mapping = file->f_mapping;
+ uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+ i_mmap_lock_write(mapping);
+ /*
+ * Put into interval tree now, so instantiated pages are visible
+ * to arm/parisc __flush_dcache_page throughout; but we cannot
+ * insert into address space until vma vm_start or vm_end is
+ * updated.
+ */
+ __vma_link_file(start, start->vm_file->f_mapping);
+ __vma_link_file(end, end->vm_file->f_mapping);
+ }
+
+ if (anon_vma)
+ unlink_anon_vmas(vma);
+
+ mas_store(&mas, start);
+ mm->map_count++;
+ mas_set_range(&mas, end->vm_start, end->vm_end - 1);
+ mas_store_prealloc(&mas, end);
+ /* One map_count++ is enough since the two new VMAs replaced a single vma */
+ BUG_ON(start->vm_end != end->vm_start);
+
+ if (file) {
+ __remove_shared_vm_struct(vma, file, mapping);
+ i_mmap_unlock_write(mapping);
+ uprobe_mmap(start);
+ uprobe_mmap(end);
+ }
+
+ remove_vma(vma);
+ validate_mm(mm);
+ return 0;
+}
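
The tree update above relies on a preallocate-then-store pattern: nodes are reserved once, the lower VMA is written over the first part of the old range, and the same maple state is then repointed at the adjoining range so the second write can consume the preallocation. A minimal sketch of that pattern on a bare maple tree, assuming the caller already holds the tree's lock and the three-argument mas_preallocate() used in this series; replace_entry_with_two() is purely illustrative and not part of the patch:

#include <linux/gfp.h>
#include <linux/maple_tree.h>

/* Illustration only: mirrors vma_replace()'s two-store sequence. */
static int replace_entry_with_two(struct maple_tree *mt,
		void *lower, unsigned long l_start, unsigned long l_last,
		void *upper, unsigned long u_last)
{
	MA_STATE(mas, mt, l_start, l_last);

	if (mas_preallocate(&mas, lower, GFP_KERNEL))
		return -ENOMEM;

	/* Overwrite the low part of whatever currently spans the range. */
	mas_store(&mas, lower);

	/* Repoint the state and let the second store use the preallocation. */
	mas_set_range(&mas, l_last + 1, u_last);
	mas_store_prealloc(&mas, upper);
	return 0;
}
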
+
+/*
+ * __split_vma() - Split one VMA into two new VMAs.
+ * @mm: The mm_struct
+ * @vma: Pointer to the VMA to be split
+ * @addr: The address at which to split the VMA
+ * @new_below: When set, @vma is updated to point to the VMA above @addr on
+ * return; otherwise @vma is updated to point to the VMA below @addr.
+ *
+ * Note: __split_vma() bypasses sysctl_max_map_count checking. We use this
+ * where it has already been checked or doesn't make sense to fail.
*/
-int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+int __split_vma(struct mm_struct *mm, struct vm_area_struct **vma,
unsigned long addr, int new_below)
{
- struct vm_area_struct *new;
+ struct vm_area_struct *start, *end;
int err;
validate_mm_mt(mm);
- if (vma->vm_ops && vma->vm_ops->may_split) {
- err = vma->vm_ops->may_split(vma, addr);
+ if ((*vma)->vm_ops && (*vma)->vm_ops->may_split) {
+ err = (*vma)->vm_ops->may_split(*vma, addr);
if (err)
return err;
}
- new = vm_area_dup(vma);
- if (!new)
- return -ENOMEM;
+ err = -ENOMEM;
+ start = vm_area_dup(*vma);
+ if (!start)
+ goto no_start;
- if (new_below)
- new->vm_end = addr;
- else {
- new->vm_start = addr;
- new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
- }
+ end = vm_area_dup(*vma);
+ if (!end)
+ goto no_end;
+
+ start->vm_end = addr;
+ end->vm_start = addr;
+ end->vm_pgoff += ((addr - start->vm_start) >> PAGE_SHIFT);
- err = vma_dup_policy(vma, new);
+ err = vma_dup_policy(*vma, start);
if (err)
- goto out_free_vma;
+ goto no_start_policy;
- err = anon_vma_clone(new, vma);
+ err = vma_dup_policy(*vma, end);
if (err)
- goto out_free_mpol;
+ goto no_end_policy;
- if (new->vm_file)
- get_file(new->vm_file);
+ err = anon_vma_clone(start, *vma);
+ if (err)
+ goto no_start_anon;
- if (new->vm_ops && new->vm_ops->open)
- new->vm_ops->open(new);
+ err = anon_vma_clone(end, *vma);
+ if (err)
+ goto no_end_anon;
+
+ if (start->vm_file) {
+ get_file(start->vm_file);
+ get_file(end->vm_file);
+ }
+
+ if (start->vm_ops && start->vm_ops->open) {
+ start->vm_ops->open(start);
+ end->vm_ops->open(end);
+ }
+
+ err = vma_replace(*vma, start, end);
+ if (err)
+ goto no_replace;
+
if (new_below)
- err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
- ((addr - new->vm_start) >> PAGE_SHIFT), new);
+ *vma = end;
else
- err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
-
- /* Success. */
- if (!err)
- return 0;
+ *vma = start;
+ return 0;
- /* Clean everything up if vma_adjust failed. */
- if (new->vm_ops && new->vm_ops->close)
- new->vm_ops->close(new);
- if (new->vm_file)
- fput(new->vm_file);
- unlink_anon_vmas(new);
- out_free_mpol:
- mpol_put(vma_policy(new));
- out_free_vma:
- vm_area_free(new);
+no_replace:
+ /* Clean everything up if vma_replace failed. */
+ if (start->vm_ops && start->vm_ops->close) {
+ start->vm_ops->close(start);
+ end->vm_ops->close(end);
+ }
+ if (start->vm_file) {
+ fput(start->vm_file);
+ fput(end->vm_file);
+ }
+ unlink_anon_vmas(end);
+no_end_anon:
+ unlink_anon_vmas(start);
+no_start_anon:
+ mpol_put(vma_policy(end));
+no_end_policy:
+ mpol_put(vma_policy(start));
+no_start_policy:
+ vm_area_free(end);
+no_end:
+ vm_area_free(start);
+no_start:
validate_mm_mt(mm);
return err;
}
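
Because the original VMA is destroyed and replaced by two new ones, callers now pass a pointer to their VMA pointer and must keep using whatever it points to on return. A minimal sketch of a hypothetical caller, assuming only the __split_vma() signature above; trim_to_addr() is illustrative and not part of the patch:

/* Illustration only: split @vma at @addr and keep working on the lower piece. */
static int trim_to_addr(struct mm_struct *mm, struct vm_area_struct **vma,
		unsigned long addr)
{
	int err = __split_vma(mm, vma, addr, 0);

	if (err)
		return err;

	/* *vma now ends at @addr; the VMA above @addr remains in the tree. */
	return 0;
}
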
-/*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
+/**
+ * split_vma() - Split one VMA and replace it with two new VMAs
+ * @mm: The mm_struct
+ * @vma: Pointer to the VMA to be split
+ * @addr: The address at which to split the VMA
+ * @new_below: When set, @vma is updated to point to the VMA above @addr on
+ * return; otherwise @vma is updated to point to the VMA below @addr.
+ *
+ * Return: 0 on success, a negative errno otherwise.
*/
-int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+int split_vma(struct mm_struct *mm, struct vm_area_struct **vma,
unsigned long addr, int new_below)
{
if (mm->map_count >= sysctl_max_map_count)
* mas_pause() is not needed since mas->index needs to be set
* differently than vma->vm_end anyways.
*/
- error = __split_vma(mm, vma, start, 1);
+ error = __split_vma(mm, &vma, start, 1);
if (error)
return error;
if (next->vm_end > end) {
int error;
- error = __split_vma(mm, next, end, 0);
+ error = __split_vma(mm, &next, end, 0);
if (error)
return error;
+ if (next->vm_start == start)
+ vma = next;
mas_set(mas, end);
}
count++;