From: Liam R. Howlett <Liam.Howlett@Oracle.com>
Date: Tue, 24 Nov 2020 19:50:43 +0000 (-0500)
Subject: mm/mmap: Add do_mas_munmap() and wrapper for __do_munmap()
X-Git-Tag: howlett/maple_spf/20210128~48
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=78417cc49e7f06c4e22b586adb7f2565a17069b4;p=users%2Fjedix%2Flinux-maple.git

mm/mmap: Add do_mas_munmap() and wrapper for __do_munmap()

To avoid extra tree work, it is necessary to support passing in a maple
state to key functions.  Start this work with __do_munmap().

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---

diff --git a/mm/mmap.c b/mm/mmap.c
index 11562bece7a4..965d75ce6f3d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2456,34 +2456,24 @@ static inline int unlock_range(struct vm_area_struct *start,
 	return count;
 }
 
-/* Munmap is split into 2 main parts -- this part which finds
- * what needs doing, and the areas themselves, which do the
- * work.  This now handles partial unmappings.
- * Jeremy Fitzhardinge <jeremy@goop.org>
+
+/* do_mas_align_munmap() - munmap the aligned region from @start to @end.
+ *
+ * @mas: The maple_state, ideally set up to alter the correct tree location.
+ * @vma: The starting vm_area_struct
+ * @mm: The mm_struct
+ * @start: The aligned start address to munmap.
+ * @end: The aligned end address to munmap.
+ * @uf: The userfaultfd list_head
+ * @downgrade: Set to true to attempt a write downgrade of the mmap_sem
+ *
+ *
  */
-int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
-		struct list_head *uf, bool downgrade)
+int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+		struct mm_struct *mm, unsigned long start,
+		unsigned long end, struct list_head *uf, bool downgrade)
 {
-	unsigned long end;
-	struct vm_area_struct *vma, *prev, *last;
-	MA_STATE(mas, &mm->mm_mt, start, start);
-
-	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
-		return -EINVAL;
-
-	end = start + PAGE_ALIGN(len);
-	if (end == start)
-		return -EINVAL;
-
-	/* arch_unmap() might do unmaps itself.  */
-	arch_unmap(mm, start, end);
-
-	/* Find the first overlapping VMA */
-	vma = mas_find(&mas, end - 1);
-	if (!vma)
-		return 0;
-
-	mas.last = end - 1;
+	struct vm_area_struct *prev, *last;
 	/* we have start < vma->vm_end  */
 
 	/*
@@ -2508,8 +2498,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 			return error;
 		prev = vma;
 		vma = vma_next(mm, prev);
-		mas.index = start;
-		mas_reset(&mas);
+		mas->index = start;
+		mas_reset(mas);
 	} else {
 		prev = vma->vm_prev;
 	}
@@ -2525,7 +2515,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 		if (error)
 			return error;
 		vma = vma_next(mm, prev);
-		mas_reset(&mas);
+		mas_reset(mas);
 	}
 
 
@@ -2552,7 +2542,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	 */
 	mm->map_count -= unlock_range(vma, &last, end);
 	/* Drop removed area from the tree */
-	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+	mas_store_gfp(mas, NULL, GFP_KERNEL);
 
 	/* Detach vmas from the MM linked list */
 	vma->vm_prev = NULL;
@@ -2589,6 +2579,59 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	return downgrade ? 1 : 0;
 }
 
+/*
+ * do_mas_munmap() - munmap a given range.
+ * @mas: The maple state
+ * @mm: The mm_struct
+ * @start: The start address to munmap
+ * @len: The length of the range to munmap
+ * @uf: The userfaultfd list_head
+ * @downgrade: set to true if the user wants to attempt to write_downgrade the
+ * mmap_sem
+ *
+ * This function takes a @mas that is in the correct state to remove the
+ * mapping(s).  The @len will be aligned and any arch_unmap work will be
+ * performed.
+ */
+int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+		  unsigned long start, size_t len, struct list_head *uf,
+		  bool downgrade)
+{
+	unsigned long end;
+	struct vm_area_struct *vma;
+
+	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
+		return -EINVAL;
+
+	end = start + PAGE_ALIGN(len);
+	if (end == start)
+		return -EINVAL;
+
+	/* arch_unmap() might do unmaps itself.  */
+	arch_unmap(mm, start, end);
+
+	/* Find the first overlapping VMA */
+	vma = mas_find(mas, end - 1);
+	if (!vma)
+		return 0;
+
+	mas->last = end - 1;
+	return do_mas_align_munmap(mas, vma, mm, start, end, uf, downgrade);
+}
+
+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+		struct list_head *uf, bool downgrade)
+{
+	MA_STATE(mas, &mm->mm_mt, start, start);
+	return do_mas_munmap(&mas, mm, start, len, uf, downgrade);
+}
+
+/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
+ * @mm: The mm_struct
+ * @start: The start address to munmap
+ * @len: The length to be munmapped.
+ * @uf: The userfaultfd list_head
+ */
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	      struct list_head *uf)
 {
@@ -2626,7 +2669,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 	/* Unmap any existing mapping in the area */
-	if (do_munmap(mm, addr, len, uf))
+	if (do_mas_munmap(&mas, mm, addr, len, uf, false))
 		return -ENOMEM;
 
 	/*
@@ -2998,7 +3041,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		mas_set(mas, newbrk);
 		if (vma->vm_start != newbrk)
 			mas_reset(mas); // cause a re-walk for the first overlap.
-		ret = __do_munmap(mm, newbrk, oldbrk - newbrk, uf, true);
+		ret = do_mas_munmap(mas, mm, newbrk, oldbrk - newbrk, uf, true);
 		goto munmap_full_vma;
 	}
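
Usage note (illustrative, not part of the patch): the point of
do_mas_munmap() is that a caller which already holds mmap_sem for
writing can set up a maple state once and hand it in, rather than going
through __do_munmap(), which declares a fresh MA_STATE and so forces an
extra walk of the tree. The hypothetical helper below is a minimal
sketch of the pattern the mmap_region() hunk above uses; the name
example_unmap() does not exist in the kernel.

	static int example_unmap(struct mm_struct *mm, unsigned long addr,
				 size_t len, struct list_head *uf)
	{
		/* Maple state starting at [addr, addr] in mm's tree. */
		MA_STATE(mas, &mm->mm_mt, addr, addr);

		/*
		 * do_mas_munmap() reuses @mas for both the overlapping-VMA
		 * lookup (mas_find()) and the NULL store that erases the
		 * range (mas_store_gfp() in do_mas_align_munmap()), saving
		 * the re-walk that __do_munmap() would incur.  Passing
		 * downgrade=false keeps mmap_sem held for write throughout.
		 */
		return do_mas_munmap(&mas, mm, addr, len, uf, false);
	}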