www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
exec: Use VMA iterator instead of linked list
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Mon, 4 Jan 2021 19:45:37 +0000 (14:45 -0500)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 26 Nov 2021 19:50:32 +0000 (14:50 -0500)
Remove a use of the vm_next list by doing the initial lookup with the
VMA iterator and then using it to find the next entry.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
fs/exec.c

index fee18b63ed35cf3fcbae7e244c638af6f3a990a1..f033745c148a88591c95c81f528097636be3e3ab 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -680,6 +680,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
+       VMA_ITERATOR(vmi, mm, new_start);
+       struct vm_area_struct *next;
        struct mmu_gather tlb;
 
        BUG_ON(new_start > new_end);
@@ -688,7 +690,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
         * ensure there are no vmas between where we want to go
         * and where we are
         */
-       if (vma != find_vma(mm, new_start))
+       if (vma != vma_next(&vmi))
                return -EFAULT;
 
        /*
@@ -707,12 +709,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
+       next = vma_next(&vmi);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+                       next ? next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
@@ -721,7 +724,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                 * for the others its just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+                       next ? next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb);