void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *vma, unsigned long floor,
- unsigned long ceiling)
+ unsigned long ceiling, unsigned long start_t)
{
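/*
 * @start_t is a position in @mt rather than necessarily an address:
 * exit paths tearing down the address-keyed mm_mt pass vma->vm_end as
 * before, while munmap passes a slot number into its detached tree.
 */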
- MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
+ MA_STATE(mas, mt, start_t, start_t);
do {
unsigned long addr = vma->vm_start;
*/
void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *vma, unsigned long start_addr,
- unsigned long end_addr)
+ unsigned long end_addr, unsigned long start_t,
+ unsigned long end_t)
{
struct mmu_notifier_range range;
struct zap_details details = {
/* Careful - we need to zap private pages too! */
.even_cows = true,
};
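/*
 * @start_t seeds the tree walk and @end_t - 1 bounds mas_find().  Both
 * are positions in @mt, which may be the address-keyed mm_mt or a
 * detached side tree keyed by detach order, so they are passed
 * separately from the virtual address range being zapped.
 */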
- MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
+ MA_STATE(mas, mt, start_t, start_t);
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
do {
unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
- } while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
+ } while ((vma = mas_find(&mas, end_t - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
}
static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
struct vm_area_struct *vma, struct vm_area_struct *prev,
struct vm_area_struct *next, unsigned long start,
- unsigned long end);
+ unsigned long end, unsigned long start_t, unsigned long end_t);
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
*/
static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
struct vm_area_struct *vma, struct vm_area_struct *prev,
- struct vm_area_struct *next,
- unsigned long start, unsigned long end)
+ struct vm_area_struct *next, unsigned long start,
+ unsigned long end, unsigned long start_t, unsigned long end_t)
{
struct mmu_gather tlb;
lru_add_drain();
tlb_gather_mmu(&tlb, mm);
update_hiwater_rss(mm);
- unmap_vmas(&tlb, mt, vma, start, end);
+ unmap_vmas(&tlb, mt, vma, start, end, start_t, end_t);
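/*
 * free_pgtables() takes the tree position separately: floor/ceiling stay
 * virtual addresses while start_t is a slot in @mt.  For the detached
 * tree the slots are small integers; since every VMA spans at least a
 * page, any address-based ceiling lies above them, so the internal walk
 * (assumed to still be bounded by ceiling - 1, not shown in this hunk)
 * reaches every entry.
 */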
free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
- next ? next->vm_start : USER_PGTABLES_CEILING);
+ next ? next->vm_start : USER_PGTABLES_CEILING, start_t);
tlb_finish_mmu(&tlb);
}
return __split_vma(mm, vma, addr, new_below);
}
-static inline int munmap_sidetree(struct vm_area_struct *vma,
+static inline int munmap_sidetree(struct vm_area_struct *vma, int count,
struct ma_state *mas_detach)
{
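/*
 * Store the detached VMA at slot @count instead of under its address
 * range: the side tree is keyed by detach order, so later walks do not
 * depend on the VMA's vm_start/vm_end.
 */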
- mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
+ mas_set(mas_detach, count);
if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
return -ENOMEM;
mas_set(mas, end);
split = mas_prev(mas, 0);
- error = munmap_sidetree(split, &mas_detach);
+ error = munmap_sidetree(split, count, &mas_detach);
if (error)
goto munmap_sidetree_failed;
vma = split;
break;
}
- error = munmap_sidetree(next, &mas_detach);
+ error = munmap_sidetree(next, count, &mas_detach);
if (error)
goto munmap_sidetree_failed;
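/*
 * Assuming @count starts at 0 and is incremented after each
 * munmap_sidetree() call (the increment sits outside these hunks), the
 * detached VMAs occupy slots 0..count-1 of mt_detach in munmap order.
 */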
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
/* Make sure no VMAs are about to be lost. */
{
- MA_STATE(test, &mt_detach, start, end - 1);
+ MA_STATE(test, &mt_detach, 0, 0);
struct vm_area_struct *vma_mas, *vma_test;
int test_count = 0;
+ unsigned long s, e;
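/*
 * The verification cursor now runs over slot numbers (bounded by count)
 * while the outer mas_for_each() still walks mm_mt by address.  s and e
 * record the first vm_start and last vm_end seen; their consumer, if
 * any, is outside these hunks.
 */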
rcu_read_lock();
- vma_test = mas_find(&test, end - 1);
+ vma_test = mas_find(&test, count);
mas_for_each(mas, vma_mas, end - 1) {
+ if (!test_count)
+ s = vma_mas->vm_start;
BUG_ON(vma_mas != vma_test);
test_count++;
- vma_test = mas_next(&test, end - 1);
+ if (test_count == count)
+ e = vma_mas->vm_end;
+ vma_test = mas_next(&test, count);
}
rcu_read_unlock();
BUG_ON(count != test_count);
mmap_write_downgrade(mm);
}
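/*
 * The first detached VMA occupies slot 0 and is passed directly as
 * @vma, so the side-tree walk starts at slot 1 and is bounded by
 * count - 1, covering the remaining detached entries exactly once.
 */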
- unmap_region(mm, &mt_detach, vma, prev, next, start, end);
+ unmap_region(mm, &mt_detach, vma, prev, next, start, end, 1, count);
/* Statistics and freeing VMAs */
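/*
 * Rewind the detached cursor to slot 0 before the accounting pass;
 * remove_mt() is assumed to iterate mas_detach to the end of the tree,
 * so every stored VMA is visited before mt_detach is destroyed.
 */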
- mas_set(&mas_detach, start);
+ mas_set(&mas_detach, 0);
remove_mt(mm, &mas_detach);
__mt_destroy(&mt_detach);
vma->vm_file = NULL;
/* Undo any partial mapping done by a device driver. */
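/*
 * Only the single partially set up VMA exists on this error path, so
 * start_t and end_t are both vma->vm_end: the cursor begins past @vma
 * and mas_find()'s limit of end_t - 1 returns nothing further, matching
 * the old single-VMA teardown.
 */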
- unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end);
+ unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end,
+ vma->vm_end, vma->vm_end);
if (file && (vm_flags & VM_SHARED))
mapping_unmap_writable(file->f_mapping);
free_vma:
tlb_gather_mmu_fullmm(&tlb, mm);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
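/*
 * exit_mmap() walks the address-keyed mm_mt, so the tree positions
 * mirror the old behaviour: start after @vma at vma->vm_end and run out
 * to ULONG_MAX.
 */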
- unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
+ unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX, vma->vm_end, ULONG_MAX);
mmap_read_unlock(mm);
/*
mmap_write_lock(mm);
mt_clear_in_rcu(&mm->mm_mt);
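/*
 * Page tables are freed from the same address-keyed tree, so the walk
 * again starts at the tree position vma->vm_end.
 */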
free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
- USER_PGTABLES_CEILING);
+ USER_PGTABLES_CEILING, vma->vm_end);
tlb_finish_mmu(&tlb);
/*