From: Liam R. Howlett
Date: Mon, 4 Jan 2021 19:58:11 +0000 (-0500)
Subject: mm/ksm: Use maple tree iterators instead of vma linked list
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=c85f3f8a2575f78e6d28ddc2cbfcb9c2ecddceca;p=users%2Fjedix%2Flinux-maple.git

mm/ksm: Use maple tree iterators instead of vma linked list

Signed-off-by: Liam R. Howlett
---

diff --git a/mm/ksm.c b/mm/ksm.c
index 08092e6f0b73..1e2218a81bfc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -975,11 +975,14 @@ static int unmerge_and_remove_all_rmap_items(void)
 						struct mm_slot, mm_list);
 	spin_unlock(&ksm_mmlist_lock);
 
-	for (mm_slot = ksm_scan.mm_slot;
-			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
+	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
+	     mm_slot = ksm_scan.mm_slot) {
+		MA_STATE(mas, &mm_slot->mm->mm_mt, 0, 0);
+
 		mm = mm_slot->mm;
 		mmap_read_lock(mm);
-		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		rcu_read_lock();
+		mas_for_each(&mas, vma, ULONG_MAX) {
 			if (ksm_test_exit(mm))
 				break;
 			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
@@ -991,6 +994,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 		}
 
 		remove_trailing_rmap_items(&mm_slot->rmap_list);
+		rcu_read_unlock();
 		mmap_read_unlock(mm);
 
 		spin_lock(&ksm_mmlist_lock);
@@ -1014,6 +1018,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 	return 0;
 
 error:
+	rcu_read_unlock();
 	mmap_read_unlock(mm);
 	spin_lock(&ksm_mmlist_lock);
 	ksm_scan.mm_slot = &ksm_mm_head;
@@ -2228,6 +2233,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
 	struct vm_area_struct *vma;
 	struct rmap_item *rmap_item;
 	int nid;
+	MA_STATE(mas, NULL, 0, 0);
 
 	if (list_empty(&ksm_mm_head.mm_list))
 		return NULL;
@@ -2285,13 +2291,15 @@ next_mm:
 	}
 
 	mm = slot->mm;
+	mas.tree = &mm->mm_mt;
+
 	mmap_read_lock(mm);
+	rcu_read_lock();
 	if (ksm_test_exit(mm))
-		vma = NULL;
-	else
-		vma = find_vma(mm, ksm_scan.address);
+		goto no_vmas;
 
-	for (; vma; vma = vma->vm_next) {
+	mas_set(&mas, ksm_scan.address);
+	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (!(vma->vm_flags & VM_MERGEABLE))
 			continue;
 		if (ksm_scan.address < vma->vm_start)
@@ -2319,6 +2327,7 @@ next_mm:
 				ksm_scan.address += PAGE_SIZE;
 			} else
 				put_page(*page);
+			rcu_read_unlock();
 			mmap_read_unlock(mm);
 			return rmap_item;
 		}
@@ -2329,6 +2338,7 @@ next_mm:
 	}
 
 	if (ksm_test_exit(mm)) {
+no_vmas:
 		ksm_scan.address = 0;
 		ksm_scan.rmap_list = &slot->rmap_list;
 	}
@@ -2357,9 +2367,11 @@ next_mm:
 		free_mm_slot(slot);
 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+		rcu_read_unlock();
 		mmap_read_unlock(mm);
 		mmdrop(mm);
 	} else {
+		rcu_read_unlock();
 		mmap_read_unlock(mm);
 		/*
 		 * mmap_read_unlock(mm) first because after
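
For reference only, not part of the patch: a minimal sketch of the iteration
pattern the hunks above adopt, replacing the vma->vm_next linked-list walk
with a maple tree walk via MA_STATE()/mas_for_each() under rcu_read_lock().
The helper name walk_vmas() is hypothetical; the locking and iterator usage
mirror what the diff itself does.

/*
 * Illustrative sketch (hypothetical helper): visit every VMA of an mm
 * through the maple tree in mm->mm_mt instead of following vm_next.
 */
#include <linux/mm.h>
#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

static void walk_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);	/* iterator state over mm->mm_mt */

	mmap_read_lock(mm);
	rcu_read_lock();		/* the maple tree walk is RCU-protected */
	mas_for_each(&mas, vma, ULONG_MAX) {
		/* was: for (vma = mm->mmap; vma; vma = vma->vm_next) */
		/* per-VMA work goes here */
	}
	rcu_read_unlock();
	mmap_read_unlock(mm);
}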