}
#endif
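+/*
+ * Look up the VMA covering @addr, growing the adjacent stack VMA into the
+ * gap if need be.  @write_locked says whether the caller already holds
+ * mmap_lock for writing; if not, the lock may be temporarily taken for
+ * writing to do the expansion and is downgraded back to a read lock
+ * before returning.
+ */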
+#ifdef CONFIG_STACK_GROWSUP
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+ unsigned long addr, bool write_locked)
+{
+ struct vm_area_struct *vma, *prev;
+ int err;
+
+ addr &= PAGE_MASK;
+ vma = find_vma_prev(mm, addr, &prev);
+ if (vma && (vma->vm_start <= addr))
+ return vma;
+ if (!prev)
+ return NULL;
+ if (write_locked) {
+ err = expand_stack(prev, addr, true);
+ } else {
+ mmap_read_unlock(mm);
+ mmap_write_lock(mm);
+ vma = find_vma_prev(mm, addr, &prev);
+ if (vma && (vma->vm_start <= addr)) {
+ mmap_write_downgrade(mm);
+ return vma;
+ }
+ /* prev may have gone away while mmap_lock was dropped */
+ err = -ENOMEM;
+ if (prev)
+ err = expand_stack(prev, addr, true);
+ mmap_write_downgrade(mm);
+ }
+ if (err)
+ return NULL;
+ if (prev->vm_flags & VM_LOCKED)
+ populate_vma_page_range(prev, addr, prev->vm_end, NULL);
+ return prev;
+}
+#else
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+ unsigned long addr, bool write_locked)
+{
+ struct vm_area_struct *vma;
+ unsigned long start;
+ int err;
+
+ addr &= PAGE_MASK;
+ vma = find_vma(mm, addr);
+ if (!vma)
+ return NULL;
+ if (vma->vm_start <= addr)
+ return vma;
+ start = vma->vm_start;
+ err = expand_stack(vma, addr, write_locked);
+ if (unlikely(err)) {
+ if (err != -EAGAIN)
+ return NULL;
+ if (!upgrade_mmap_lock_carefully(mm, NULL))
+ return NULL;
+ vma = find_vma(mm, addr);
+ if (!vma || expand_stack(vma, addr, true)) {
+ mmap_write_downgrade(mm);
+ return NULL;
+ }
+ mmap_write_downgrade(mm);
+ }
+ if (vma->vm_flags & VM_LOCKED)
+ populate_vma_page_range(vma, addr, start, NULL);
+ return vma;
+}
+#endif
+
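+/* Plain find_extend_vma(): the caller holds mmap_lock only for reading */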
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm,
+ unsigned long addr)
+{
+ return find_extend_vma_locked(mm, addr, false);
+}
+EXPORT_SYMBOL_GPL(find_extend_vma);
+
#ifdef CONFIG_PER_VMA_LOCK
/*
* Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
return expand_upwards(vma, address, write_locked);
}
-struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
- unsigned long addr, bool write_locked)
-{
- struct vm_area_struct *vma, *prev;
- int err;
-
- addr &= PAGE_MASK;
- vma = find_vma_prev(mm, addr, &prev);
- if (vma && (vma->vm_start <= addr))
- return vma;
- if (!prev)
- return NULL;
- if (write_locked) {
- err = expand_stack(prev, addr, true);
- } else {
- mmap_read_unlock(mm);
- mmap_write_lock(mm);
- vma = find_vma_prev(mm, addr, &prev);
- if (vma && (vma->vm_start <= addr)) {
- mmap_write_downgrade(mm);
- return vma;
- }
- if (prev)
- err = expand_stack(prev, addr, true);
- mmap_write_downgrade(mm);
- }
- if (err)
- return NULL;
- if (prev->vm_flags & VM_LOCKED)
- populate_vma_page_range(prev, addr, prev->vm_end, NULL);
- return prev;
-}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address,
bool write_locked)
return -EINVAL;
return expand_downwards(vma, address, write_locked);
}
-
-struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
- unsigned long addr, bool write_locked)
-{
- struct vm_area_struct *vma;
- unsigned long start;
- int err;
-
- addr &= PAGE_MASK;
- vma = find_vma(mm, addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- start = vma->vm_start;
- err = expand_stack(vma, addr, write_locked);
- if (unlikely(err)) {
- if (err != -EAGAIN)
- return NULL;
- if (!upgrade_mmap_lock_carefully(mm, NULL))
- return NULL;
- vma = find_vma(mm, addr);
- if (!vma || expand_stack(vma, addr, true)) {
- mmap_write_downgrade(mm);
- return NULL;
- }
- mmap_write_downgrade(mm);
- }
- if (vma->vm_flags & VM_LOCKED)
- populate_vma_page_range(vma, addr, start, NULL);
- return vma;
-}
#endif
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm,
- unsigned long addr)
-{
- return find_extend_vma_locked(mm, addr, false);
-}
-EXPORT_SYMBOL_GPL(find_extend_vma);
-
/*
* Ok - we have the memory areas we should free on a maple tree so release them,
* and do the vma updates.