}
return gap;
}
-#if defined(CONFIG_DEBUG_VM_RB)
+
+#ifdef CONFIG_DEBUG_VM_RB
static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
unsigned long max = vma_compute_gap(vma), subtree_gap;
	return max;
}
+
static void validate_mm(struct mm_struct *mm)
{
int bug = 0;
}
if (highest_address != mm->highest_vm_end) {
pr_emerg("mm->highest_vm_end %lx, found %lx\n",
- mm->highest_vm_end, highest_address);
+ mm->highest_vm_end, highest_address);
bug = 1;
}
i = browse_rb(mm);
mm->map_count++;
validate_mm(mm);
}
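/*
 * Illustrative note (added for clarity, not part of the patch): with
 * CONFIG_DEBUG_VM_RB enabled, the validate_mm() call above cross-checks
 * the VMA list against the rbtree, roughly:
 *
 *	i = browse_rb(mm);
 *	if (i != mm->map_count) {
 *		pr_emerg("map_count %d rb %d\n", mm->map_count, i);
 *		bug = 1;
 *	}
 *	VM_BUG_ON_MM(bug, mm);
 */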
+
/*
* Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree. It has already been inserted into the interval tree.
 */
if (find_vma_links(mm, vma->vm_start, vma->vm_end,
&prev, &rb_link, &rb_parent))
BUG();
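	/*
	 * Note (added for clarity): find_vma_links() fails only when
	 * [vm_start, vm_end) overlaps an existing VMA; in this insert
	 * path the range is known to be free, hence the BUG().
	 */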
-
__vma_link(mm, vma, prev, rb_link, rb_parent);
mm->map_count++;
}
} else {
VM_WARN_ON(expand != vma);
/*
- * case 1, 6, 7: remove_next == 2 is case 6,
+ * case 1, 6, 7, remove_next == 2 is case 6,
* remove_next == 1 is case 1 or 7.
*/
remove_next = 1 + (end > next->vm_end);
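			/*
			 * Worked example (added for clarity): the comparison
			 * collapses the case analysis into arithmetic:
			 *
			 *	end >  next->vm_end  =>  remove_next == 2 (case 6)
			 *	end <= next->vm_end  =>  remove_next == 1 (case 1 or 7)
			 */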
} else {
if (start_changed)
vma_gap_update(vma);
-
if (end_changed) {
if (!next)
mm->highest_vm_end = vm_end_gap(vma);
if (file) {
i_mmap_unlock_write(mapping);
uprobe_mmap(vma);
+
if (adjust_next)
uprobe_mmap(next);
}
/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
return -ENOMEM;
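	/*
	 * Note (illustrative): on success, munmap_vma_range() has unmapped
	 * anything overlapping [addr, addr + len) and returned the rbtree
	 * insertion point for the new mapping via prev/rb_link/rb_parent.
	 */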
+
/*
* Private writable mapping: check memory availability
*/
	VM_BUG_ON_VMA((unsigned long)ret != (unsigned long)mt_ret, ret);
return ret;
}
-
EXPORT_SYMBOL(find_vma);
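/*
 * Sketch of the dual-lookup pattern validated above (the helper names
 * rb_find_vma()/mt_find_vma() are assumptions based on this patch, shown
 * for illustration only):
 *
 *	struct vm_area_struct *ret = rb_find_vma(mm, addr);
 *	struct vm_area_struct *mt_ret = mt_find_vma(mm, addr);
 *
 *	VM_BUG_ON_VMA((unsigned long)ret != (unsigned long)mt_ret, ret);
 *	return ret;
 */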
-
/**
 * mt_find_vma_prev() - Find the VMA for a given address, or the next VMA,
 * and set %pprev to the previous VMA, if any.
/*
* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
*/
-
struct vm_area_struct *
rb_find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
} else {
struct rb_node *rb_node = rb_last(&mm->mm_rb);
- *pprev = rb_node ?
- rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
+ *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
}
return vma;
}
-
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
VM_BUG_ON_VMA(mt_prev != *pprev, *pprev);
return ret;
}
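/*
 * Usage sketch (illustrative, not from the patch): a typical caller
 * locating an insertion point:
 *
 *	struct vm_area_struct *vma, *prev;
 *
 *	vma = find_vma_prev(mm, addr, &prev);
 *
 * Here vma is the first VMA with vm_end > addr (or NULL), and prev is
 * the VMA immediately before it (or NULL).
 */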
+
/*
* Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
if (address < mmap_min_addr)
return -EPERM;
-
/* Enforce stack_guard_gap */
prev = vma->vm_prev;
/* Check that both stack segments have the same anon_vma? */
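	/*
	 * Abridged sketch of the check that follows in the grow-down path
	 * (paraphrased from mainline; details may differ in this tree):
	 *
	 *	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
	 *			vma_is_accessible(prev)) {
	 *		if (address - prev->vm_end < stack_guard_gap)
	 *			return -ENOMEM;
	 *	}
	 */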
if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
return -ENOMEM;
-
/* Check against address space limits *after* clearing old maps... */
if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
return -ENOMEM;
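	/*
	 * Note (added for clarity): may_expand_vm() roughly checks that
	 * the projected address-space size stays within RLIMIT_AS, e.g.:
	 *
	 *	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
	 *		return false;
	 */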