www.infradead.org Git - users/willy/linux.git/commitdiff
fs/exec: Use vma_next() instead of linked list
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Mon, 4 Jan 2021 19:45:37 +0000 (14:45 -0500)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 20 Oct 2021 20:00:33 +0000 (16:00 -0400)
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
fs/exec.c

index c618a55a20d3fe57f1fbc86767fa57742b9d9425..75d68ff856a16821977cce9e6a39ac7359b5ac57 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -680,6 +680,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
+       struct vm_area_struct *next;
        struct mmu_gather tlb;
 
        BUG_ON(new_start > new_end);
@@ -707,12 +708,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
+       next = vma_next(mm, vma);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+                       next ? next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
@@ -721,7 +723,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                 * for the others its just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+                       next ? next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb);