{
struct ma_state ma_next = *mas;
- mas_find(&ma_next, ceiling - 1);
- mas_for_each(mas, vma, ceiling - 1) {
- struct vm_area_struct *next = mas_find(&ma_next, ceiling - 1);
+ ceiling--;
+ mas_find(&ma_next, ceiling);
+ mas_for_each(mas, vma, ceiling) {
+ struct vm_area_struct *next = mas_find(&ma_next, ceiling);
unsigned long addr = vma->vm_start;
/*
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
&& !is_vm_hugetlb_page(next)) {
- next = mas_find(&ma_next, ceiling - 1);
- vma = mas_find(mas, ceiling - 1);
+ next = mas_find(&ma_next, ceiling);
+ vma = mas_find(mas, ceiling);
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}
/* Set the upper limit */
if (!tmp) {
mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
- return mm->highest_vm_end;
+ return USER_PGTABLES_CEILING;
}
return tmp->vm_start;
struct mmu_gather tlb;
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ MA_STATE(mas, &mm->mm_mt, FIRST_USER_ADDRESS, FIRST_USER_ADDRESS);
/* mm's last user has gone, and it's about to be pulled down */
mmu_notifier_release(mm);
}
}
mas_reset(&mas);
- mas_set(&mas, 0);
+ mas_set(&mas, FIRST_USER_ADDRESS);
}
arch_exit_mmap(mm);
flush_cache_mm(mm);
tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
- mas_reset(&mas);
- mas_set(&mas, FIRST_USER_ADDRESS);
/* Use 0 here to ensure all VMAs in the mm are unmapped */
// unmap_vmas(&tlb, vma, 0, -1);
unmap_vmas_mt(&tlb, vma, &mas, 0, -1);