} while (pgd++, addr = next, addr != end);
}
-void free_mt_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
- struct ma_state *mas, unsigned long floor, unsigned long ceiling)
+void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
+ unsigned long floor, unsigned long ceiling)
{
struct ma_state ma_next = *mas;
+ struct vm_area_struct *vma;
- ceiling--;
- mas_find(&ma_next, ceiling);
- mas_for_each(mas, vma, ceiling) {
- struct vm_area_struct *next = mas_find(&ma_next, ceiling);
- unsigned long addr = vma->vm_start;
-
- /*
- * Hide vma from rmap and truncate_pagecache before freeing
- * pgtables
- */
- unlink_anon_vmas(vma);
- unlink_file_vma(vma);
-
- if (is_vm_hugetlb_page(vma)) {
- hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
- floor, next ? next->vm_start : ceiling);
- } else {
- /*
- * Optimization: gather nearby vmas into one call down
- */
- while (next && next->vm_start <= vma->vm_end + PMD_SIZE
- && !is_vm_hugetlb_page(next)) {
- next = mas_find(&ma_next, ceiling);
- vma = mas_find(mas, ceiling);
- unlink_anon_vmas(vma);
- unlink_file_vma(vma);
- }
- free_pgd_range(tlb, addr, vma->vm_end,
- floor, next ? next->vm_start : ceiling);
- }
- }
-}
-
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long floor, unsigned long ceiling)
-{
- while (vma) {
- struct vm_area_struct *next = vma->vm_next;
+ mas_find(&ma_next, ceiling - 1);
+ mas_for_each(mas, vma, ceiling - 1) {
+ struct vm_area_struct *next = mas_find(&ma_next, ceiling - 1);
unsigned long addr = vma->vm_start;
/*
 * Optimization: gather nearby vmas into one call down
 */
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
&& !is_vm_hugetlb_page(next)) {
- vma = next;
- next = vma->vm_next;
+ next = mas_find(&ma_next, ceiling - 1);
+ vma = mas_find(mas, ceiling - 1);
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}
free_pgd_range(tlb, addr, vma->vm_end,
floor, next ? next->vm_start : ceiling);
}
- vma = next;
}
}
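
With the _mt variant gone, free_pgtables() takes a maple tree iterator instead
of the head of the VMA list. A minimal sketch of the resulting calling
convention, mirroring the exit_mmap() hunk further down ('tlb' and 'mm' are
assumed to be set up by the caller in the usual tlb_gather_mmu() way):

	MA_STATE(mas, &mm->mm_mt, FIRST_USER_ADDRESS, FIRST_USER_ADDRESS);

	/* the walk consumes 'mas', so position it at the start of the range */
	free_pgtables(&tlb, &mas, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
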
-void unmap_vmas_mt(struct mmu_gather *tlb,
- struct vm_area_struct *vma, struct ma_state *mas,
- unsigned long start_addr, unsigned long end_addr)
-{
- struct mmu_notifier_range range;
-
- mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
- start_addr, end_addr);
- mmu_notifier_invalidate_range_start(&range);
- mas_for_each(mas, vma, end_addr - 1)
- unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
- mmu_notifier_invalidate_range_end(&range);
-}
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlb: address of the caller's struct mmu_gather
* drops the lock and schedules.
*/
void unmap_vmas(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start_addr,
- unsigned long end_addr)
+ struct vm_area_struct *vma, struct ma_state *mas,
+ unsigned long start_addr, unsigned long end_addr)
{
struct mmu_notifier_range range;
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
- for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+ mas_for_each(mas, vma, end_addr - 1)
unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
mmu_notifier_invalidate_range_end(&range);
}
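
unmap_vmas() now walks the maple tree as well; the incoming @vma is only used
for vma->vm_mm when setting up the mmu notifier range. A sketch of a ranged
caller, assuming 'tlb', 'mm', 'start' and 'end' are set up as in unmap_region()
below (the mas_set() rewind is an assumption about how a caller keeps the first
VMA in the walk, not something taken from the patch):

	MA_STATE(mas, &mm->mm_mt, start, start);
	struct vm_area_struct *vma;

	vma = mas_find(&mas, end - 1);	/* first VMA overlapping the range */
	mas_set(&mas, start);		/* rewind so the walk revisits it */
	if (vma)
		unmap_vmas(&tlb, vma, &mas, start, end);
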
{
struct mmu_notifier_range range;
struct mmu_gather tlb;
+ MA_STATE(mas, &vma->vm_mm->mm_mt, start, start);
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
start, start + size);
tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
- for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
+ mas_for_each(&mas, vma, range.end - 1)
unmap_single_vma(&tlb, vma, start, range.end, NULL);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, start, range.end);
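
Throughout these conversions the loop bound becomes "end - 1" because the maple
tree iterators take an inclusive limit index, whereas the old list walks
compared against an exclusive end address. A small sketch of the convention
('mm', 'start' and 'end' are placeholders):

	MA_STATE(mas, &mm->mm_mt, start, start);
	struct vm_area_struct *vma;

	mas_for_each(&mas, vma, end - 1) {
		/* only VMAs with vm_start < end are returned here */
	}
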
mas_set(&mas, newbrk);
brkvma = mas_walk(&mas);
ma_next = mas;
- next = mas_next(&mas, newbrk + PAGE_SIZE + stack_guard_gap);
+ next = mas_next(&ma_next, -1);
if (brkvma) { // munmap necessary, there is something at newbrk.
/*
* Always allow shrinking brk.
struct vm_area_struct *prev, unsigned long max)
{
struct mmu_gather tlb;
+ struct ma_state ma_pgtb = *mas;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
- unmap_vmas_mt(&tlb, vma, mas, start, end);
- free_pgtables(&tlb, vma,
+ unmap_vmas(&tlb, vma, mas, start, end);
+ free_pgtables(&tlb, &ma_pgtb,
prev ? prev->vm_end : FIRST_USER_ADDRESS,
max);
tlb_finish_mmu(&tlb, start, end);
}
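
The copy in unmap_region() above matters because unmap_vmas() walks *mas to the
end of the range; free_pgtables() therefore gets a snapshot taken before the
walk. A minimal restatement of that sequence with the rationale as comments
(same names as the hunk):

	struct ma_state ma_pgtb = *mas;	/* snapshot: unmap_vmas() advances mas */

	unmap_vmas(&tlb, vma, mas, start, end);
	free_pgtables(&tlb, &ma_pgtb,
		      prev ? prev->vm_end : FIRST_USER_ADDRESS, max);
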
if (mm->locked_vm) {
- mas_for_each(&mas, vma, ULONG_MAX) {
+ mas_for_each(&mas, vma, -1) {
if (vma->vm_flags & VM_LOCKED) {
mm->locked_vm -= vma_pages(vma);
munlock_vma_pages_all(vma);
tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use 0 here to ensure all VMAs in the mm are unmapped */
-// unmap_vmas(&tlb, vma, 0, -1);
- unmap_vmas_mt(&tlb, vma, &mas, 0, -1);
+ unmap_vmas(&tlb, vma, &mas, 0, -1);
mas_reset(&mas);
mas_set(&mas, FIRST_USER_ADDRESS);
-// free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
- free_mt_pgtables(&tlb, vma, &mas, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+ free_pgtables(&tlb, &mas, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, 0, -1);
/*