{
struct vm_area_struct *vma;
struct vm_region *region;
- struct rb_node *p;
unsigned long bytes = 0, sbytes = 0, slack = 0, size;
-
- mmap_read_lock(mm);
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
- vma = rb_entry(p, struct vm_area_struct, vm_rb);
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
+ mmap_read_lock(mm);
+ rcu_read_lock();
+ mas_for_each(&mas, vma, ULONG_MAX) {
bytes += kobjsize(vma);
-
region = vma->vm_region;
if (region) {
size = kobjsize(region);
sbytes += kobjsize(mm);
else
bytes += kobjsize(mm);
-
+
if (current->fs && current->fs->users > 1)
sbytes += kobjsize(current->fs);
else
"Shared:\t%8lu bytes\n",
bytes, slack, sbytes);
+ rcu_read_unlock();
mmap_read_unlock(mm);
}
unsigned long task_vsize(struct mm_struct *mm)
{
struct vm_area_struct *vma;
- struct rb_node *p;
unsigned long vsize = 0;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
mmap_read_lock(mm);
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
- vma = rb_entry(p, struct vm_area_struct, vm_rb);
+ rcu_read_lock();
+ /* Walk every VMA in the maple tree and total their address spans. */
+ mas_for_each(&mas, vma, ULONG_MAX)
vsize += vma->vm_end - vma->vm_start;
- }
+ rcu_read_unlock();
mmap_read_unlock(mm);
return vsize;
}
{
struct vm_area_struct *vma;
struct vm_region *region;
- struct rb_node *p;
unsigned long size = kobjsize(mm);
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
mmap_read_lock(mm);
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
- vma = rb_entry(p, struct vm_area_struct, vm_rb);
+ rcu_read_lock();
+ mas_for_each(&mas, vma, ULONG_MAX) {
size += kobjsize(vma);
region = vma->vm_region;
if (region) {
>> PAGE_SHIFT;
*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
>> PAGE_SHIFT;
+ rcu_read_unlock();
mmap_read_unlock(mm);
size >>= PAGE_SHIFT;
size += *text + *data;
*/
static int show_map(struct seq_file *m, void *_p)
{
- struct rb_node *p = _p;
-
- return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
+ /* The seq iterator now hands us the vm_area_struct directly. */
+ return nommu_vma_show(m, _p);
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
struct mm_struct *mm;
- struct rb_node *p;
- loff_t n = *pos;
+ struct vm_area_struct *vma;
+ unsigned long addr = *pos;
+ MA_STATE(mas, &priv->mm->mm_mt, addr, addr);
+
+ /* See m_next(). Zero at the start or after lseek. */
+ if (addr == -1UL)
+ return NULL;
/* pin the task and mm whilst we play with them */
priv->task = get_proc_task(priv->inode);
return ERR_PTR(-EINTR);
}
- /* start from the Nth VMA */
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
- if (n-- == 0)
- return p;
+ /* start the next element from addr */
+ vma = mas_find(&mas, ULONG_MAX);
mmap_read_unlock(mm);
mmput(mm);
- return NULL;
+ return vma;
}
static void m_stop(struct seq_file *m, void *_vml)
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
- struct rb_node *p = _p;
+ struct vm_area_struct *vma = _p;
- (*pos)++;
- return p ? rb_next(p) : NULL;
+ /* Resume addr-based iteration at the end of this VMA; a NULL from
+ * vma_next() terminates the walk. NOTE(review): m_start() treats
+ * *pos == -1UL as "done" — confirm lseek paths set that sentinel. */
+ *pos = vma->vm_end;
+ return vma_next(vma->vm_mm, vma);
}
static const struct seq_operations proc_pid_maps_ops = {
*/
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
- struct vm_area_struct *pvma, *prev;
struct address_space *mapping;
- struct rb_node **p, *parent, *rb_prev;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
BUG_ON(!vma->vm_region);
mm->map_count++;
vma->vm_mm = mm;
/* add the VMA to the mapping */
}
/* add the VMA to the tree */
- parent = rb_prev = NULL;
- p = &mm->mm_rb.rb_node;
- while (*p) {
- parent = *p;
- pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
-
- /* sort by: start addr, end addr, VMA struct addr in that order
- * (the latter is necessary as we may get identical VMAs) */
- if (vma->vm_start < pvma->vm_start)
- p = &(*p)->rb_left;
- else if (vma->vm_start > pvma->vm_start) {
- rb_prev = parent;
- p = &(*p)->rb_right;
- } else if (vma->vm_end < pvma->vm_end)
- p = &(*p)->rb_left;
- else if (vma->vm_end > pvma->vm_end) {
- rb_prev = parent;
- p = &(*p)->rb_right;
- } else if (vma < pvma)
- p = &(*p)->rb_left;
- else if (vma > pvma) {
- rb_prev = parent;
- p = &(*p)->rb_right;
- } else
- BUG();
- }
-
- rb_link_node(&vma->vm_rb, parent, p);
- rb_insert_color(&vma->vm_rb, &mm->mm_rb);
-
- /* add VMA to the VMA list also */
- prev = NULL;
- if (rb_prev)
- prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
+ /* The maple tree keeps VMAs ordered by range, so one store replaces
+ * both the rb-tree insert and the linked-list linkage above. */
+ vma_mas_store(vma, &mas);
}
/*
struct address_space *mapping;
struct mm_struct *mm = vma->vm_mm;
struct task_struct *curr = current;
+ MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
mm->map_count--;
for (i = 0; i < VMACACHE_SIZE; i++) {
break;
}
}
-
/* remove the VMA from the mapping */
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
i_mmap_unlock_write(mapping);
}
- /* remove from the MM's tree and list */
- rb_erase(&vma->vm_rb, &mm->mm_rb);
+ vma_mas_remove(vma, &mas);
}
/*
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ MA_STATE(mas, &mm->mm_mt, addr, addr);
/* check the cache first */
vma = vmacache_find(mm, addr);
if (likely(vma))
return vma;
- /* trawl the list (there may be multiple mappings in which addr
- * resides) */
- mas_for_each(&mas, vma, ULONG_MAX) {
- if (vma->vm_start > addr)
- return NULL;
- if (vma->vm_end > addr) {
- vmacache_update(addr, vma);
- return vma;
- }
- }
+ rcu_read_lock();
+ /* mas_walk() returns the VMA covering addr, or NULL on a gap. */
+ vma = mas_walk(&mas);
+ rcu_read_unlock();
- return NULL;
+ if (vma)
+ vmacache_update(addr, vma);
+
+ return vma;
}
EXPORT_SYMBOL(find_vma);
{
struct vm_area_struct *vma;
unsigned long end = addr + len;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
-
- /* check the cache first */
- vma = vmacache_find_exact(mm, addr, end);
- if (vma)
- return vma;
+ MA_STATE(mas, &mm->mm_mt, addr, addr);
- /* trawl the list (there may be multiple mappings in which addr
- * resides) */
- mas_for_each(&mas, vma, ULONG_MAX) {
- if (vma->vm_start < addr)
- continue;
- if (vma->vm_start > addr)
- return NULL;
- if (vma->vm_end == end) {
- vmacache_update(addr, vma);
- return vma;
- }
- }
+ rcu_read_lock();
+ vma = mas_walk(&mas);
+ rcu_read_unlock();
+ /* mas_walk() returns NULL when no VMA covers addr — check before
+ * dereferencing, otherwise every lookup miss oopses. */
+ if (!vma)
+ return NULL;
+ /* only an exact [addr, end) match counts */
+ if (vma->vm_start != addr)
+ return NULL;
+ if (vma->vm_end != end)
+ return NULL;
- return NULL;
+ return vma;
}
/*