From 465662a9c094ae81e363dfb1c428e96e19946028 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Mon, 21 Dec 2020 21:00:35 -0500
Subject: [PATCH] mm/mmap: Linked list fallout

Clean up the maple state handling left over from the removal of the vma
linked list: pass the first VMA into detach_range() rather than
re-walking the tree for it, add sanity checks on the range being
detached, drop the mas_reset() calls made redundant by
mas_set()/mas_set_range(), only re-walk the tree once a split has
invalidated the maple state, and set mas->last to cover the full range
before calling do_mas_align_munmap().  Also remove a commented-out
debug printk and split an overlong line in do_mas_munmap().

Signed-off-by: Liam R. Howlett
---
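A quick note on the pattern the mas_reset() removals rely on:
mas_set() and mas_set_range() re-initialise the maple state themselves,
so a separate mas_reset() beforehand is redundant, and a fresh
mas_walk() is only needed once a split has pushed the state back to
MAS_START.  The fragment below is a rough, hypothetical sketch of that
pattern, simplified from do_mas_align_munmap() in the diff;
needs_split() is a made-up placeholder, not an mm helper, and error
handling is elided:

	MA_STATE(mas, &mm->mm_mt, start, end - 1);
	struct vm_area_struct *vma;

	vma = mas_walk(&mas);	/* first VMA overlapping [start, end - 1] */
	if (needs_split(vma)) {
		/* Splitting invalidates the node the state points at... */
		__split_vma(mm, vma, start, 0);
		/* ...so re-seed the range, which also resets mas.node. */
		mas_set_range(&mas, start, end - 1);
	}
	/* Re-walk only if the state went back to MAS_START. */
	if (mas.node == MAS_START)
		vma = mas_walk(&mas);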
 mm/mmap.c | 72 ++++++++++++++++++++++++++-----------------------
 1 file changed, 34 insertions(+), 38 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index d3c321796ec4..cab5192e5a12 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2279,10 +2279,9 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 static inline unsigned long detach_range(struct mm_struct *mm,
-		struct ma_state *src, struct ma_state *dst,
-		struct vm_area_struct *prev, struct vm_area_struct **last)
+		struct ma_state *src, struct ma_state *dst, struct vm_area_struct *vma,
+		struct vm_area_struct *prev, struct vm_area_struct **last)
 {
-	struct vm_area_struct *tmp;
 	int count = 0;
 	struct ma_state mas;
 
@@ -2292,28 +2291,31 @@ static inline unsigned long detach_range(struct mm_struct *mm,
 	 * area.
 	 */
 	mas = *src;
-	mas_set(&mas, src->index);
-	mas_for_each(&mas, tmp, src->last) {
-		*last = tmp;
+	mas.last = src->index;
+	do {
+		BUG_ON(vma->vm_start < src->index);
+		BUG_ON(vma->vm_end > (src->last + 1));
+		*last = vma;
 		count++;
-		if (tmp->vm_flags & VM_LOCKED) {
-			mm->locked_vm -= vma_pages(tmp);
-			munlock_vma_pages_all(tmp);
+		if (vma->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(vma);
+			munlock_vma_pages_all(vma);
 		}
-		vma_mas_store(tmp, dst);
-	}
+		vma_mas_store(vma, dst);
+	} while ((vma = mas_find(&mas, src->last)) != NULL);
 
-	/* Decrement map_count */
-	mm->map_count -= count;
 	/* Find the one after the series before overwrite */
-	tmp = mas_find(&mas, ULONG_MAX);
+	mas.index = mas.last = src->last + 1;
+	vma = mas_find(&mas, -1);
 	/* Drop removed area from the tree */
 	mas_store_gfp(src, NULL, GFP_KERNEL);
+	/* Decrement map_count */
+	mm->map_count -= count;
 	/* Set the upper limit */
-	if (!tmp)
+	if (!vma)
 		return USER_PGTABLES_CEILING;
-	return tmp->vm_start;
+	return vma->vm_start;
 }
 
 /* do_mas_align_munmap() - munmap the aligned region from @start to @end.
@@ -2361,9 +2363,7 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			return error;
 		prev = vma;
 		// Split invalidated node, reset.
-		mas->index = start;
-		mas_reset(mas);
-		vma = mas_walk(mas);
+		mas_set_range(mas, start, end - 1);
 	} else {
 		tmp = *mas;
 		prev = mas_prev(&tmp, 0);
@@ -2373,7 +2373,6 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		last = vma;
 	else {
 		tmp = *mas;
-		mas_reset(&tmp);
 		mas_set(&tmp, end - 1);
 		last = mas_walk(&tmp);
 	}
@@ -2384,11 +2383,12 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		if (error)
 			return error;
 		// Split invalidated node, reset.
-		mas->index = start;
-		mas_reset(mas);
-		vma = mas_walk(mas);
+		mas_set_range(mas, start, end - 1);
 	}
+	if (mas->node == MAS_START)
+		vma = mas_walk(mas);
 
 	if (unlikely(uf)) {
 		/*
@@ -2407,7 +2407,7 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	}
 
 	/* Point of no return */
-	max = detach_range(mm, mas, &dst, prev, &last);
+	max = detach_range(mm, mas, &dst, vma, prev, &last);
 
 	/*
 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
@@ -2423,14 +2423,14 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		mmap_write_downgrade(mm);
 	}
 
-	mas_reset(&dst);
+	/* Unmap the region */
 	mas_set(&dst, start);
+	tmp = dst;
 	vma = mas_walk(&dst);
 	unmap_region(mm, vma, &dst, start, end, prev, max);
 
-	/* Fix up all other VM information */
-	mas_reset(&dst);
-	mas_set(&dst, start);
+	/* Statistics and freeing VMAs */
+	dst = tmp;
 	remove_mt(mm, &dst);
 
 	return downgrade ? 1 : 0;
@@ -2457,7 +2457,8 @@ int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
 	unsigned long end;
 	struct vm_area_struct *vma;
 
-	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
+	if ((offset_in_page(start)) || (start > TASK_SIZE) ||
+	    (len > TASK_SIZE - start))
 		return -EINVAL;
 
 	end = start + PAGE_ALIGN(len);
@@ -2472,8 +2473,7 @@ int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
 	if (!vma)
 		return 0;
-//	printk("vma found at %lx %lu\n", vma->vm_start, vma->vm_end);
-	mas_set_range(mas, start, end - 1);
+	mas->last = end - 1;
 	return do_mas_align_munmap(mas, vma, mm, start, end, uf, downgrade);
 }
 
 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
@@ -2535,7 +2535,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		vm_flags |= VM_ACCOUNT;
 	}
 
-	mas_reset(&mas);
 	mas_set_range(&mas, addr, end - 1);
 	if (vm_flags & VM_SPECIAL) {
 		ma_prev = mas;
@@ -2675,7 +2674,8 @@ cannot_expand:
 
 	// Very likely a shorter walk.
 	mas = ma_prev;
-	mas_set_range(&mas, addr, end - 1);
+	mas.last = end - 1;
+	mas.index = addr;
 	mas_walk(&mas);
 	vma_mas_link(mm, vma, &mas);
 
@@ -2888,6 +2888,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	arch_unmap(mm, newbrk, oldbrk);
 
 	if (likely(vma->vm_start >= newbrk)) { // remove entire mapping(s)
+		mas->last = oldbrk - 1;
 		ret = do_mas_align_munmap(mas, vma, mm, newbrk, oldbrk, uf, true);
 		goto munmap_full_vma;
 	}
@@ -3135,7 +3136,6 @@ void exit_mmap(struct mm_struct *mm)
 				munlock_vma_pages_all(vma);
 			}
 		}
-		mas_reset(&mas);
 		mas_set(&mas, FIRST_USER_ADDRESS);
 	}
@@ -3146,7 +3146,6 @@ void exit_mmap(struct mm_struct *mm)
 		return;
 
 	mas2 = mas;
-	mas_reset(&mas);
 	mas_set(&mas, FIRST_USER_ADDRESS);
 
 	lru_add_drain();
@@ -3162,7 +3161,6 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	mas_reset(&mas);
 	mas_set(&mas, 0);
 	mas_for_each(&mas, vma, -1) {
 		if (vma->vm_flags & VM_ACCOUNT)
@@ -3573,7 +3571,6 @@ int mm_take_all_locks(struct mm_struct *mm)
 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
 	}
-	mas_reset(&mas);
 	mas_set(&mas, 0);
 	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (signal_pending(current))
 			goto out_unlock;
@@ -3583,7 +3580,6 @@ int mm_take_all_locks(struct mm_struct *mm)
 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
 	}
-	mas_reset(&mas);
 	mas_set(&mas, 0);
 	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (signal_pending(current))
 			goto out_unlock;
-- 
2.50.1