www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
arch/parisc: Remove mmap linked list from kernel/cache
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Mon, 4 Jan 2021 19:25:19 +0000 (14:25 -0500)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 1 Oct 2021 17:55:46 +0000 (13:55 -0400)
Start using the maple tree

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
arch/parisc/kernel/cache.c

index 39e02227e231007040c37868aa23e4c4517703d9..f6316aafba107e0804b792d02e357ab0edb28378 100644 (file)
@@ -519,9 +519,13 @@ static inline unsigned long mm_total_size(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
        unsigned long usize = 0;
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
 
-       for (vma = mm->mmap; vma; vma = vma->vm_next)
+       rcu_read_lock();
+       mas_for_each(&mas, vma, ULONG_MAX)
                usize += vma->vm_end - vma->vm_start;
+       rcu_read_unlock();
+
        return usize;
 }
 
@@ -547,6 +551,7 @@ void flush_cache_mm(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
        pgd_t *pgd;
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
 
        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big.  */
@@ -559,17 +564,20 @@ void flush_cache_mm(struct mm_struct *mm)
        }
 
        if (mm->context == mfsp(3)) {
-               for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               rcu_read_lock();
+               mas_for_each(&mas, vma, ULONG_MAX) {
                        flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
                        if (vma->vm_flags & VM_EXEC)
                                flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
                        flush_tlb_range(vma, vma->vm_start, vma->vm_end);
                }
+               rcu_read_unlock();
                return;
        }
 
        pgd = mm->pgd;
-       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+       rcu_read_lock();
+       mas_for_each(&mas, vma, ULONG_MAX) {
                unsigned long addr;
 
                for (addr = vma->vm_start; addr < vma->vm_end;
@@ -589,6 +597,7 @@ void flush_cache_mm(struct mm_struct *mm)
                        }
                }
        }
+       rcu_read_unlock();
 }
 
 void flush_cache_range(struct vm_area_struct *vma,