x86: remove vma linked list walks
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 22 Aug 2022 15:06:16 +0000 (15:06 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Aug 2022 05:03:15 +0000 (22:03 -0700)
Use the VMA iterator instead.

Link: https://lkml.kernel.org/r/20220822150128.1562046-36-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
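
Both call sites below replace the old "for (vma = mm->mmap; vma; vma = vma->vm_next)" linked-list walk with the VMA iterator. A minimal sketch of the new pattern, using only VMA_ITERATOR() and for_each_vma() as they appear in the diff; the walk_all_vmas() helper is hypothetical, added purely for illustration:

	#include <linux/mm.h>

	/* Hypothetical helper, not part of this commit: visit every VMA
	 * in @mm in ascending address order via the VMA iterator. */
	static void walk_all_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		VMA_ITERATOR(vmi, mm, 0);	/* third argument: start address */

		mmap_read_lock(mm);		/* iteration requires mmap_lock held */
		for_each_vma(vmi, vma) {
			/* each vma spans [vma->vm_start, vma->vm_end) */
		}
		mmap_read_unlock(mm);
	}
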
arch/x86/entry/vdso/vma.c

index 1000d457c3321e2caf3c9428e7961b0e1c572458..6292b960037b79d50d187d0982455df8ecd51fb4 100644
@@ -127,17 +127,17 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 {
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
+       VMA_ITERATOR(vmi, mm, 0);
 
        mmap_read_lock(mm);
-
-       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+       for_each_vma(vmi, vma) {
                unsigned long size = vma->vm_end - vma->vm_start;
 
                if (vma_is_special_mapping(vma, &vvar_mapping))
                        zap_page_range(vma, vma->vm_start, size);
        }
-
        mmap_read_unlock(mm);
+
        return 0;
 }
 #else
@@ -354,6 +354,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
+       VMA_ITERATOR(vmi, mm, 0);
 
        mmap_write_lock(mm);
        /*
@@ -363,7 +364,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
         * We could search vma near context.vdso, but it's a slowpath,
         * so let's explicitly check all VMAs to be completely sure.
         */
-       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+       for_each_vma(vmi, vma) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                                vma_is_special_mapping(vma, &vvar_mapping)) {
                        mmap_write_unlock(mm);
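
For reference, for_each_vma() is a thin wrapper around vma_next(); roughly (an assumption about the exact definition in <linux/mm.h> at this point in the series, so check the tree for the authoritative form):

	#define for_each_vma(__vmi, __vma) \
		while (((__vma) = vma_next(&(__vmi))) != NULL)

Because the iterator object carries its own position, each step is a lookup in the mm's maple tree rather than a vm_next pointer chase, which is what allows later patches in this series to drop the per-VMA linked list entirely.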