From 06227f64b90a3fb439e8f5ac7861de14f8189f76 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Tue, 15 Dec 2020 20:56:21 -0500
Subject: [PATCH] mm/mmap: linked list work.. part many+2 of many

Drop the linked list versions in favour of the maple tree ones and keep
the original names: fold unmap_vmas_mt() into unmap_vmas() and
free_mt_pgtables() into free_pgtables().  Both now take a struct
ma_state and iterate VMAs through the maple tree instead of the vm_next
list.  Convert zap_page_range() to a maple state walk as well and
update the unmap_region(), exit_mmap() and brk callers.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 include/linux/mm.h |  2 --
 mm/internal.h      |  6 +---
 mm/memory.c        | 73 +++++++++-------------------------------------
 mm/mmap.c          | 15 +++++-----
 4 files changed, 21 insertions(+), 75 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 68eb6204fa38..a9ad566f24fd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1647,8 +1647,6 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		unsigned long start, unsigned long end);
-void unmap_vmas_mt(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		struct ma_state *mas, unsigned long start, unsigned long end);
 
 struct mmu_notifier_range;
diff --git a/mm/internal.h b/mm/internal.h
index 4cfb9b23ddef..39fe96678504 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -36,11 +36,7 @@ void page_writeback_init(void);
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		unsigned long floor, unsigned long ceiling);
-
-void free_mt_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		struct ma_state *mas,
+void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		unsigned long floor, unsigned long ceiling);
 
 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
diff --git a/mm/memory.c b/mm/memory.c
index 1375638621ab..18246038e731 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -387,49 +387,15 @@ void free_pgd_range(struct mmu_gather *tlb,
 	} while (pgd++, addr = next, addr != end);
 }
 
-void free_mt_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
-		struct ma_state *mas, unsigned long floor, unsigned long ceiling)
+void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
+		unsigned long floor, unsigned long ceiling)
 {
 	struct ma_state ma_next = *mas;
+	struct vm_area_struct *vma;
 
-	ceiling--;
-	mas_find(&ma_next, ceiling);
-	mas_for_each(mas, vma, ceiling) {
-		struct vm_area_struct *next = mas_find(&ma_next, ceiling);
-		unsigned long addr = vma->vm_start;
-
-		/*
-		 * Hide vma from rmap and truncate_pagecache before freeing
-		 * pgtables
-		 */
-		unlink_anon_vmas(vma);
-		unlink_file_vma(vma);
-
-		if (is_vm_hugetlb_page(vma)) {
-			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next ? next->vm_start : ceiling);
-		} else {
-			/*
-			 * Optimization: gather nearby vmas into one call down
-			 */
-			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
-			       && !is_vm_hugetlb_page(next)) {
-				next = mas_find(&ma_next, ceiling);
-				vma = mas_find(mas, ceiling);
-				unlink_anon_vmas(vma);
-				unlink_file_vma(vma);
-			}
-			free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next ? next->vm_start : ceiling);
-		}
-	}
-}
-
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
-		unsigned long floor, unsigned long ceiling)
-{
-	while (vma) {
-		struct vm_area_struct *next = vma->vm_next;
+	mas_find(&ma_next, ceiling - 1);
+	mas_for_each(mas, vma, ceiling - 1) {
+		struct vm_area_struct *next = mas_find(&ma_next, ceiling - 1);
 		unsigned long addr = vma->vm_start;
 
 		/*
@@ -448,15 +414,14 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 */
 		while (next && next->vm_start <= vma->vm_end + PMD_SIZE
 		       && !is_vm_hugetlb_page(next)) {
-			vma = next;
-			next = vma->vm_next;
+			next = mas_find(&ma_next, ceiling - 1);
+			vma = mas_find(mas, ceiling - 1);
 			unlink_anon_vmas(vma);
 			unlink_file_vma(vma);
 		}
 		free_pgd_range(tlb, addr, vma->vm_end,
 			floor, next ? next->vm_start : ceiling);
 		}
-		vma = next;
 	}
 }
 
@@ -1512,19 +1477,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	}
 }
 
-void unmap_vmas_mt(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, struct ma_state *mas,
-		unsigned long start_addr, unsigned long end_addr)
-{
-	struct mmu_notifier_range range;
-
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
-				start_addr, end_addr);
-	mmu_notifier_invalidate_range_start(&range);
-	mas_for_each(mas, vma, end_addr - 1)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
-	mmu_notifier_invalidate_range_end(&range);
-}
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
@@ -1544,15 +1496,15 @@ void unmap_vmas_mt(struct mmu_gather *tlb,
  * drops the lock and schedules.
  */
 void unmap_vmas(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr)
+		struct vm_area_struct *vma, struct ma_state *mas,
+		unsigned long start_addr, unsigned long end_addr)
 {
 	struct mmu_notifier_range range;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
 				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
-	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+	mas_for_each(mas, vma, end_addr - 1)
 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
@@ -1570,6 +1522,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 {
 	struct mmu_notifier_range range;
 	struct mmu_gather tlb;
+	MA_STATE(mas, &vma->vm_mm->mm_mt, start, start);
 
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -1577,7 +1530,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
-	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
+	mas_for_each(&mas, vma, range.end - 1)
 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb, start, range.end);
diff --git a/mm/mmap.c b/mm/mmap.c
index 9e5f29013dbd..c270aba9c263 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -234,7 +234,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	mas_set(&mas, newbrk);
 	brkvma = mas_walk(&mas);
 	ma_next = mas;
-	next = mas_next(&mas, newbrk + PAGE_SIZE + stack_guard_gap);
+	next = mas_next(&ma_next, -1);
 	if (brkvma) { // munmap necessary, there is something at newbrk.
 		/*
 		 * Always allow shrinking brk.
@@ -2248,12 +2248,13 @@ static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *prev, unsigned long max)
 {
 	struct mmu_gather tlb;
+	struct ma_state ma_pgtb = *mas;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
-	unmap_vmas_mt(&tlb, vma, mas, start, end);
-	free_pgtables(&tlb, vma,
+	unmap_vmas(&tlb, vma, mas, start, end);
+	free_pgtables(&tlb, &ma_pgtb,
 		prev ? prev->vm_end : FIRST_USER_ADDRESS,
 		max);
 	tlb_finish_mmu(&tlb, start, end);
@@ -3189,7 +3190,7 @@ void exit_mmap(struct mm_struct *mm)
 	}
 
 	if (mm->locked_vm) {
-		mas_for_each(&mas, vma, ULONG_MAX) {
+		mas_for_each(&mas, vma, -1) {
 			if (vma->vm_flags & VM_LOCKED) {
 				mm->locked_vm -= vma_pages(vma);
 				munlock_vma_pages_all(vma);
@@ -3210,12 +3211,10 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use 0 here to ensure all VMAs in the mm are unmapped */
-//	unmap_vmas(&tlb, vma, 0, -1);
-	unmap_vmas_mt(&tlb, vma, &mas, 0, -1);
+	unmap_vmas(&tlb, vma, &mas, 0, -1);
 	mas_reset(&mas);
 	mas_set(&mas, FIRST_USER_ADDRESS);
-//	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
-	free_mt_pgtables(&tlb, vma, &mas, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+	free_pgtables(&tlb, &mas, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
 	/*
-- 
2.50.1
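
For reference, the calling convention that results is the one unmap_region()
uses above: set up one maple state for unmap_vmas() and take a copy for
free_pgtables(), because the first walk consumes the state.  A minimal
caller sketch, illustrative only (mm, vma, prev, start, end and max stand in
for the caller's context; locking elided):

	struct mmu_gather tlb;
	MA_STATE(mas, &mm->mm_mt, start, start);
	struct ma_state ma_pgtb = mas;	/* copy: unmap_vmas() consumes mas */

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, &mas, start, end);	/* zap the page mappings */
	free_pgtables(&tlb, &ma_pgtb,			/* then free empty page tables */
		      prev ? prev->vm_end : FIRST_USER_ADDRESS, max);
	tlb_finish_mmu(&tlb, start, end);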