From 3b72d97a5d51e993e3e6d414c96e9db5217ef2f7 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 23 Feb 2021 11:24:23 -0500 Subject: [PATCH] mm/nommu and fs/proc/task_nommu: convert to maple tree Signed-off-by: Liam R. Howlett --- fs/proc/task_nommu.c | 55 +++++++++++++------------- mm/nommu.c | 92 +++++++++++--------------------------------- 2 files changed, 50 insertions(+), 97 deletions(-) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index a6d21fc0033c..be02e8997ddf 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -22,15 +22,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) { struct vm_area_struct *vma; struct vm_region *region; - struct rb_node *p; unsigned long bytes = 0, sbytes = 0, slack = 0, size; - - mmap_read_lock(mm); - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { - vma = rb_entry(p, struct vm_area_struct, vm_rb); + MA_STATE(mas, &mm->mm_mt, 0, 0); + mmap_read_lock(mm); + rcu_read_lock(); + mas_for_each(&mas, vma, ULONG_MAX) { bytes += kobjsize(vma); - region = vma->vm_region; if (region) { size = kobjsize(region); @@ -53,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) sbytes += kobjsize(mm); else bytes += kobjsize(mm); - + if (current->fs && current->fs->users > 1) sbytes += kobjsize(current->fs); else @@ -77,20 +75,21 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) "Shared:\t%8lu bytes\n", bytes, slack, sbytes); + rcu_read_unlock(); mmap_read_unlock(mm); } unsigned long task_vsize(struct mm_struct *mm) { struct vm_area_struct *vma; - struct rb_node *p; unsigned long vsize = 0; + MA_STATE(mas, &mm->mm_mt, 0, 0); mmap_read_lock(mm); - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { - vma = rb_entry(p, struct vm_area_struct, vm_rb); + rcu_read_lock(); + mas_for_each(&mas, vma, ULONG_MAX) vsize += vma->vm_end - vma->vm_start; - } + rcu_read_unlock(); mmap_read_unlock(mm); return vsize; } @@ -101,12 +100,12 @@ unsigned long task_statm(struct mm_struct *mm, { struct 
vm_area_struct *vma; struct vm_region *region; - struct rb_node *p; unsigned long size = kobjsize(mm); + MA_STATE(mas, &mm->mm_mt, 0, 0); mmap_read_lock(mm); - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { - vma = rb_entry(p, struct vm_area_struct, vm_rb); + rcu_read_lock(); + mas_for_each(&mas, vma, ULONG_MAX) { size += kobjsize(vma); region = vma->vm_region; if (region) { @@ -119,6 +118,7 @@ unsigned long task_statm(struct mm_struct *mm, >> PAGE_SHIFT; *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK)) >> PAGE_SHIFT; + rcu_read_unlock(); mmap_read_unlock(mm); size >>= PAGE_SHIFT; size += *text + *data; @@ -190,17 +190,20 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) */ static int show_map(struct seq_file *m, void *_p) { - struct rb_node *p = _p; - - return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb)); + return nommu_vma_show(m, _p); } static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_maps_private *priv = m->private; struct mm_struct *mm; - struct rb_node *p; - loff_t n = *pos; + struct vm_area_struct *vma; + unsigned long addr = *pos; + MA_STATE(mas, &priv->mm->mm_mt, addr, addr); + + /* See m_next(). Zero at the start or after lseek. 
*/ + if (addr == -1UL) + return NULL; /* pin the task and mm whilst we play with them */ priv->task = get_proc_task(priv->inode); @@ -216,14 +219,14 @@ static void *m_start(struct seq_file *m, loff_t *pos) return ERR_PTR(-EINTR); } - /* start from the Nth VMA */ - for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) - if (n-- == 0) - return p; + /* start the next element from addr */ + vma = mas_find(&mas, ULONG_MAX); + if (vma) + return vma; mmap_read_unlock(mm); mmput(mm); return NULL; } static void m_stop(struct seq_file *m, void *_vml) @@ -242,10 +245,10 @@ static void m_stop(struct seq_file *m, void *_vml) static void *m_next(struct seq_file *m, void *_p, loff_t *pos) { - struct rb_node *p = _p; + struct vm_area_struct *vma = _p; - (*pos)++; - return p ? rb_next(p) : NULL; + *pos = vma->vm_end; + return vma_next(vma->vm_mm, vma); } static const struct seq_operations proc_pid_maps_ops = { diff --git a/mm/nommu.c b/mm/nommu.c index 0260db903ab2..98be7864a195 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -562,13 +562,12 @@ static void put_nommu_region(struct vm_region *region) */ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) { - struct vm_area_struct *pvma, *prev; struct address_space *mapping; - struct rb_node **p, *parent, *rb_prev; + MA_STATE(mas, &mm->mm_mt, 0, 0); BUG_ON(!vma->vm_region); mm->map_count++; vma->vm_mm = mm; /* add the VMA to the mapping */ @@ -583,40 +582,7 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) } /* add the VMA to the tree */ - parent = rb_prev = NULL; - p = &mm->mm_rb.rb_node; - while (*p) { - parent = *p; - pvma = rb_entry(parent, struct vm_area_struct, vm_rb); - - /* sort by: start addr, end addr, VMA struct addr in that order * (the latter is necessary as we may get identical VMAs) */ - if (vma->vm_start < pvma->vm_start) - p = &(*p)->rb_left; - else if (vma->vm_start > pvma->vm_start) { - rb_prev = parent; - p = &(*p)->rb_right; - } else if
(vma->vm_end < pvma->vm_end) - p = &(*p)->rb_left; - else if (vma->vm_end > pvma->vm_end) { - rb_prev = parent; - p = &(*p)->rb_right; - } else if (vma < pvma) - p = &(*p)->rb_left; - else if (vma > pvma) { - rb_prev = parent; - p = &(*p)->rb_right; - } else - BUG(); - } - - rb_link_node(&vma->vm_rb, parent, p); - rb_insert_color(&vma->vm_rb, &mm->mm_rb); - - /* add VMA to the VMA list also */ - prev = NULL; - if (rb_prev) - prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); + vma_mas_store(vma, &mas); } /* @@ -628,6 +594,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) struct address_space *mapping; struct mm_struct *mm = vma->vm_mm; struct task_struct *curr = current; + MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0); mm->map_count--; for (i = 0; i < VMACACHE_SIZE; i++) { @@ -637,7 +604,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) break; } } - /* remove the VMA from the mapping */ if (vma->vm_file) { mapping = vma->vm_file->f_mapping; @@ -649,8 +615,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) i_mmap_unlock_write(mapping); } - /* remove from the MM's tree and list */ - rb_erase(&vma->vm_rb, &mm->mm_rb); + vma_mas_remove(vma, &mas); } /* @@ -673,25 +638,21 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; - MA_STATE(mas, &mm->mm_mt, 0, 0); + MA_STATE(mas, &mm->mm_mt, addr, addr); /* check the cache first */ vma = vmacache_find(mm, addr); if (likely(vma)) return vma; - /* trawl the list (there may be multiple mappings in which addr - * resides) */ - mas_for_each(&mas, vma, ULONG_MAX) { - if (vma->vm_start > addr) - return NULL; - if (vma->vm_end > addr) { - vmacache_update(addr, vma); - return vma; - } - } + rcu_read_lock(); + vma = mas_walk(&mas); + rcu_read_unlock(); - return NULL; + if (vma) + vmacache_update(addr, vma); + + return vma; } EXPORT_SYMBOL(find_vma); @@ -723,27 +684,17 @@
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, { struct vm_area_struct *vma; unsigned long end = addr + len; - MA_STATE(mas, &mm->mm_mt, 0, 0); - - /* check the cache first */ - vma = vmacache_find_exact(mm, addr, end); - if (vma) - return vma; + MA_STATE(mas, &mm->mm_mt, addr, addr); - /* trawl the list (there may be multiple mappings in which addr - * resides) */ - mas_for_each(&mas, vma, ULONG_MAX) { - if (vma->vm_start < addr) - continue; - if (vma->vm_start > addr) - return NULL; - if (vma->vm_end == end) { - vmacache_update(addr, vma); - return vma; - } - } + rcu_read_lock(); + vma = mas_walk(&mas); + rcu_read_unlock(); + if (!vma || vma->vm_start != addr) + return NULL; + if (vma->vm_end != end) + return NULL; - return NULL; + return vma; } /* -- 2.50.1