www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
move find_extend_vma() to memory.c
author: Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 16 Jun 2023 15:36:55 +0000 (11:36 -0400)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 16 Jun 2023 15:38:49 +0000 (11:38 -0400)
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/memory.c
mm/mmap.c

index cb35e52bbd1e86846c03e50bf01ecb4605effe10..44d10fa417be0ab17b1d0ccc915ac3d27b0d834b 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5367,6 +5367,80 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
 }
 #endif
 
+#ifdef CONFIG_STACK_GROWSUP
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+               unsigned long addr, bool write_locked)
+{
+       struct vm_area_struct *vma, *prev;
+       int err;
+
+       addr &= PAGE_MASK;
+       vma = find_vma_prev(mm, addr, &prev);
+       if (vma && (vma->vm_start <= addr))
+               return vma;
+       if (!prev)
+               return NULL;
+       if (write_locked) {
+               err = expand_stack(prev, addr, true);
+       } else {
+               mmap_read_unlock(mm);
+               mmap_write_lock(mm);
+               vma = find_vma_prev(mm, addr, &prev);
+               if (vma && (vma->vm_start <= addr)) {
+                       mmap_write_downgrade(mm);
+                       return vma;
+               }
+               if (prev)
+                       err = expand_stack(prev, addr, true);
+               mmap_write_downgrade(mm);
+       }
+       if (err)
+               return NULL;
+       if (prev->vm_flags & VM_LOCKED)
+               populate_vma_page_range(prev, addr, prev->vm_end, NULL);
+       return prev;
+}
+#else
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+               unsigned long addr, bool write_locked)
+{
+       struct vm_area_struct *vma;
+       unsigned long start;
+       int err;
+
+       addr &= PAGE_MASK;
+       vma = find_vma(mm, addr);
+       if (!vma)
+               return NULL;
+       if (vma->vm_start <= addr)
+               return vma;
+       start = vma->vm_start;
+       err = expand_stack(vma, addr, write_locked);
+       if (unlikely(err)) {
+               if (err != -EAGAIN)
+                       return NULL;
+               if (!upgrade_mmap_lock_carefully(mm, NULL))
+                       return NULL;
+               vma = find_vma(mm, addr);
+               if (!vma || expand_stack(vma, addr, true)) {
+                       mmap_write_downgrade(mm);
+                       return NULL;
+               }
+               mmap_write_downgrade(mm);
+       }
+       if (vma->vm_flags & VM_LOCKED)
+               populate_vma_page_range(vma, addr, start, NULL);
+       return vma;
+}
+#endif
+
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm,
+               unsigned long addr)
+{
+       return find_extend_vma_locked(mm, addr, false);
+}
+EXPORT_SYMBOL_GPL(find_extend_vma);
+
 #ifdef CONFIG_PER_VMA_LOCK
 /*
  * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
index 11d6af071c4d04b002b662b07520b13c8292c12f..c7c5e0928d4829d3f5147b0a6ea421540f2d329b 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2137,38 +2137,6 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address,
        return expand_upwards(vma, address, write_locked);
 }
 
-struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
-               unsigned long addr, bool write_locked)
-{
-       struct vm_area_struct *vma, *prev;
-       int err;
-
-       addr &= PAGE_MASK;
-       vma = find_vma_prev(mm, addr, &prev);
-       if (vma && (vma->vm_start <= addr))
-               return vma;
-       if (!prev)
-               return NULL;
-       if (write_locked) {
-               err = expand_stack(prev, addr, true);
-       } else {
-               mmap_read_unlock(mm);
-               mmap_write_lock(mm);
-               vma = find_vma_prev(mm, addr, &prev);
-               if (vma && (vma->vm_start <= addr)) {
-                       mmap_write_downgrade(mm);
-                       return vma;
-               }
-               if (prev)
-                       err = expand_stack(prev, addr, true);
-               mmap_write_downgrade(mm);
-       }
-       if (err)
-               return NULL;
-       if (prev->vm_flags & VM_LOCKED)
-               populate_vma_page_range(prev, addr, prev->vm_end, NULL);
-       return prev;
-}
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address,
                bool write_locked)
@@ -2177,47 +2145,8 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address,
                return -EINVAL;
        return expand_downwards(vma, address, write_locked);
 }
-
-struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
-               unsigned long addr, bool write_locked)
-{
-       struct vm_area_struct *vma;
-       unsigned long start;
-       int err;
-
-       addr &= PAGE_MASK;
-       vma = find_vma(mm, addr);
-       if (!vma)
-               return NULL;
-       if (vma->vm_start <= addr)
-               return vma;
-       start = vma->vm_start;
-       err = expand_stack(vma, addr, write_locked);
-       if (unlikely(err)) {
-               if (err != -EAGAIN)
-                       return NULL;
-               if (!upgrade_mmap_lock_carefully(mm, NULL))
-                       return NULL;
-               vma = find_vma(mm, addr);
-               if (!vma || expand_stack(vma, addr, true)) {
-                       mmap_write_downgrade(mm);
-                       return NULL;
-               }
-               mmap_write_downgrade(mm);
-       }
-       if (vma->vm_flags & VM_LOCKED)
-               populate_vma_page_range(vma, addr, start, NULL);
-       return vma;
-}
 #endif
 
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm,
-               unsigned long addr)
-{
-       return find_extend_vma_locked(mm, addr, false);
-}
-EXPORT_SYMBOL_GPL(find_extend_vma);
-
 /*
  * Ok - we have the memory areas we should free on a maple tree so release them,
  * and do the vma updates.