return count;
}
+
/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work. This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 */
+/*
+ * do_mas_munmap() - the body of the old __do_munmap(), now parameterized
+ * on a caller-supplied maple tree state (@mas) so callers holding an
+ * existing ma_state can reuse it instead of constructing a fresh one.
+ * Per the final statement, returns 1 when @downgrade is set and the unmap
+ * succeeded, 0 on plain success, or a negative errno (-EINVAL on a
+ * misaligned/out-of-range request).
+ */
-int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
- struct list_head *uf, bool downgrade)
+static int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool downgrade)
{
unsigned long end;
struct vm_area_struct *vma, *prev, *last;
- MA_STATE(mas, &mm->mm_mt, start, start);
if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
+/*
+ * NOTE(review): 'end' is read here and below (mas_find, mas->last) but its
+ * computation -- presumably end = start + PAGE_ALIGN(len) -- is not visible
+ * in this hunk.  Confirm it was not dropped by the refactor.
+ */
arch_unmap(mm, start, end);
/* Find the first overlapping VMA */
- vma = mas_find(&mas, end - 1);
+ vma = mas_find(mas, end - 1);
if (!vma)
return 0;
- mas.last = end - 1;
+ mas->last = end - 1;
/* we have start < vma->vm_end */
/*
return error;
prev = vma;
vma = vma_next(mm, prev);
+/* Direct member access replaces the old local 'mas.' accesses throughout. */
- mas.index = start;
- mas_reset(&mas);
+ mas->index = start;
+ mas_reset(mas);
} else {
prev = vma->vm_prev;
}
if (error)
return error;
vma = vma_next(mm, prev);
- mas_reset(&mas);
+ mas_reset(mas);
}
*/
mm->map_count -= unlock_range(vma, &last, end);
/* Drop removed area from the tree */
- mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_store_gfp(mas, NULL, GFP_KERNEL);
/* Detach vmas from the MM linked list */
vma->vm_prev = NULL;
return downgrade ? 1 : 0;
}
+/*
+ * __do_munmap() - compatibility wrapper keeping the original interface:
+ * builds a fresh maple tree state rooted at @start over mm->mm_mt and
+ * forwards all arguments to do_mas_munmap(), passing its return value
+ * through unchanged.
+ */
+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+ struct list_head *uf, bool downgrade)
+{
+ MA_STATE(mas, &mm->mm_mt, start, start);
+ return do_mas_munmap(&mas, mm, start, len, uf, downgrade);
+}
+
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{