From 7e26e4e2b313cd877cc045401bd6cc59ab2f7fd1 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Wed, 2 Sep 2020 15:40:56 -0400
Subject: [PATCH] maple_tree: Fix up after rebase v5.9-rc1 and history.

Rebased maple/mainline_verbose to v5.9-rc1 then to maple3, then
overwrote maple3.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 Documentation/core-api/index.rst |  2 +-
 kernel/fork.c                    |  2 -
 lib/maple_tree.c                 | 11 ----
 mm/mmap.c                        | 96 ++++++++------------------
 4 files changed, 24 insertions(+), 87 deletions(-)

diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 63fe6e742dc0..5b2d3b269d5c 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -41,9 +41,9 @@ Library functionality that is used throughout the kernel.
    packing
    bus-virt-phys-mapping
    this_cpu_ops
-   maple-tree
    timekeeping
    errseq
+   maple-tree
 
 Concurrency primitives
 ======================
diff --git a/kernel/fork.c b/kernel/fork.c
index 5ea4b5690508..44a154f95746 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -475,8 +475,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
 	LIST_HEAD(uf);
-	MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
-	struct vm_area_struct *old_vma;
 
 	uprobe_start_dup_mmap();
 	if (mmap_write_lock_killable(oldmm)) {
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 8abcb85f3877..37097609db76 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3068,17 +3068,6 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
 	mast.orig_l = &l_mas;
 	mast.orig_r = &r_mas;
 
-	// FIXME: Is this needed?
-#if 0
-	mas_dup_state(&l_mas, mas);
-	mas->last = mas->index;
-	mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max);
-	mas->index = mas->last = l_mas.last;
-	mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max);
-	mas_dup_state(mas, &l_mas);
-#endif
-
-
 	// Set up right side.
 	mas_dup_state(&r_mas, mas);
 	r_mas.depth = mas->depth;
diff --git a/mm/mmap.c b/mm/mmap.c
index 103d81b4d43a..c78317c8fdca 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -122,7 +122,6 @@ static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
 }
 
 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
-// LRH: Needed
 void vma_set_page_prot(struct vm_area_struct *vma)
 {
 	unsigned long vm_flags = vma->vm_flags;
@@ -140,7 +139,6 @@ void vma_set_page_prot(struct vm_area_struct *vma)
 /*
  * Requires inode->i_mapping->i_mmap_rwsem
  */
-// LRH: Needed
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 		struct file *file, struct address_space *mapping)
 {
@@ -158,7 +156,6 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
  * Unlink a file-based vm structure from its interval tree, to hide
  * vma from rmap and vmtruncate before freeing its page tables.
  */
-// LRH: Needed
 void unlink_file_vma(struct vm_area_struct *vma)
 {
 	struct file *file = vma->vm_file;
@@ -174,7 +171,6 @@ void unlink_file_vma(struct vm_area_struct *vma)
 /*
 * Close a vm structure and free it, returning the next.
 */
-// LRH: Needed
 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 {
 	struct vm_area_struct *next = vma->vm_next;
@@ -189,7 +185,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	return next;
 }
 
-// LRH: Needed
 static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
 		struct list_head *uf);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
@@ -374,17 +369,14 @@ static void validate_mm(struct mm_struct *mm)
 
 	while (vma) {
 		struct anon_vma *anon_vma = vma->anon_vma;
-//		struct anon_vma_chain *avc;
+		struct anon_vma_chain *avc;
 
-//		pr_cont("vma: %lu-%lu", vma->vm_start, vma->vm_end);
 		if (anon_vma) {
-//			pr_cont(" anon");
 			anon_vma_lock_read(anon_vma);
-//			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-//				anon_vma_interval_tree_verify(avc);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				anon_vma_interval_tree_verify(avc);
 			anon_vma_unlock_read(anon_vma);
 		}
-//		pr_cont("\n");
 
 		highest_address = vm_end_gap(vma);
 		vma = vma->vm_next;
@@ -479,12 +471,15 @@ static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
 }
 
 /*
- * munmap_vma_range() - munmap VMAs that overlap a range.
+ * munmap_vma_range() - munmap VMAs that overlap the range.
  * @mm: The mm struct
 * @start: The start of the range.
 * @len: The length of the range.
 * @pprev: pointer to the pointer that will be set to previous vm_area_struct
 *
+ * Find all the vm_area_struct that overlap from @start to
+ * @start + @len and munmap them. Set @pprev to the previous vm_area_struct.
+ *
 * Returns: -ENOMEM on munmap failure or 0 on success.
 */
 static inline int
@@ -648,7 +643,6 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and the mm tree. It has already been inserted into the interval tree.
 */
-extern void mt_dump(const struct maple_tree *mt);
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
 	struct vm_area_struct *prev;
@@ -852,8 +846,6 @@ again:
 		 * us to insert it before dropping the locks
 		 * (it may either follow vma or precede it).
 		 */
-		/* maple tree store is done in the __vma_link call in this
-		 * call graph */
 		__insert_vm_struct(mm, insert);
 	} else if (end_changed && !next) {
 		mm->highest_vm_end = vm_end_gap(vma);
@@ -1076,18 +1068,6 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 * parameter) may establish ptes with the wrong permissions of NNNN
 * instead of the right permissions of XXXX.
 */
-
-//LRH:
-// p = prev, n = next, a = add, nn = next next
-// 0. Adding page over partial p, cannot merge
-// 1. Adding page between p and n, all become p
-// 2. Adding page between p and n, a merges with p
-// 3. Adding page between p and n, a merges with n
-// 4. Adding page over p, a merges with n
-// 5. Adding page over n, a merges with p
-// 6. Adding page over all of n, p-a-nn all become p
-// 7. Adding page over all of n, p-a all become p
-// 8. Adding page over all of n, a-nn all become nn.
 struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
@@ -1156,10 +1136,10 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			can_vma_merge_before(next, vm_flags,
 					     anon_vma, file, pgoff+pglen,
 					     vm_userfaultfd_ctx)) {
-		if (prev && addr < prev->vm_end){	/* case 4 */
+		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(prev, prev->vm_start,
 					 addr, prev->vm_pgoff, NULL, next);
-		}else {					/* cases 3, 8 */
+		else {					/* cases 3, 8 */
 			err = __vma_adjust(area, addr, next->vm_end,
 					next->vm_pgoff - pglen, NULL, next);
 			/*
@@ -1685,7 +1665,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	/* Clear old maps, set up prev and uf */
 	if (munmap_vma_range(mm, addr, len, &prev, uf))
 		return -ENOMEM;
-
 	/*
 	 * Private writable mapping: check memory availability
 	 */
@@ -2102,15 +2081,14 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 
 EXPORT_SYMBOL(get_unmapped_area);
 
-/*
- * find_vma() - Find the best VMA for a given address.
+/**
+ * find_vma() - Find the VMA for a given address, or the next VMA. May return
+ * %NULL in the case of no VMA at @addr or above.
 * @mm The mm_struct to check
 * @addr: The address
 *
- * Searches the user memory map for the VMA which either contains this address
- * or is the next VMA after this address.
- *
- * Returns: The VMA or %NULL if the address is higher than any mapping.
+ * Returns: The VMA associated with @addr, or the next VMA.
+ * May return %NULL in the case of no VMA at @addr or above.
 */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
@@ -2129,17 +2107,15 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 }
 EXPORT_SYMBOL(find_vma);
 
-/*
- * find_vma_prev() - Find the best VMA for a given address and the previous VMA.
+/**
+ * find_vma_prev() - Find the VMA for a given address, or the next VMA, and
+ * set %pprev to the previous VMA, if any.
 * @mm The mm_struct to check
 * @addr: The address
 * @pprev: The pointer to set to the previous VMA
 *
- * Searches the user memory map for the VMA which either contains this address
- * or is the next VMA after this address. Sets %pprev to the previous VMA or
- * NULL if this is the first VMA.
- *
- * Returns: The VMA or %NULL if the address is higher than any mapping.
+ * Returns: The VMA associated with @addr, or the next VMA.
+ * May return %NULL in the case of no VMA at @addr or above.
 */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -2356,36 +2332,10 @@ int expand_downwards(struct vm_area_struct *vma,
 				anon_vma_interval_tree_post_update_vma(vma);
 				spin_unlock(&mm->page_table_lock);
 
-		error = acct_stack_growth(vma, size, grow);
-		if (error)
-			goto no_update;
-		/*
-		 * vma_gap_update() doesn't support concurrent
-		 * updates, but we only hold a shared mmap_sem
-		 * lock here, so we need to protect against
-		 * concurrent vma expansions.
-		 * anon_vma_lock_write() doesn't help here, as
-		 * we don't guarantee that all growable vmas
-		 * in a mm share the same root anon vma.
-		 * So, we reuse mm->page_table_lock to guard
-		 * against concurrent vma expansions.
-		 */
-		spin_lock(&mm->page_table_lock);
-		if (vma->vm_flags & VM_LOCKED)
-			mm->locked_vm += grow;
-		vm_stat_account(mm, vma->vm_flags, grow);
-		anon_vma_interval_tree_pre_update_vma(vma);
-		vma->vm_start = address;
-		vma->vm_pgoff -= grow;
-		// Overwrite old entry in mtree.
-		__vma_mt_store(mm, vma);
-		anon_vma_interval_tree_post_update_vma(vma);
-		vma_gap_update(vma);
-		spin_unlock(&mm->page_table_lock);
-
-		perf_event_mmap(vma);
+				perf_event_mmap(vma);
+			}
+		}
 	}
-no_update:
 	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
@@ -2482,7 +2432,7 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 		vma = remove_vma(vma);
 	} while (vma);
 	vm_unacct_memory(nr_accounted);
-	//validate_mm(mm);
+	validate_mm(mm);
 }
 
 /*
-- 
2.50.1
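Note on the find_vma() kernel-doc updated above: the documented contract is
that find_vma() returns the first VMA whose vm_end is greater than addr,
which may be a VMA that starts entirely above addr. A minimal sketch of a
caller honoring that contract follows; vma_containing() is a hypothetical
helper for illustration only, not part of this patch:

#include <linux/mm.h>

/*
 * Hypothetical helper: return the VMA that actually contains @addr,
 * or NULL.  Caller must hold mmap_lock (read or write).
 */
static struct vm_area_struct *vma_containing(struct mm_struct *mm,
					     unsigned long addr)
{
	/* May return a VMA lying wholly above @addr... */
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* ...so a non-NULL result still needs a range check. */
	if (vma && addr < vma->vm_start)
		return NULL;	/* @addr falls in the gap below vma */
	return vma;
}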
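Likewise, for the MA_STATE() declarations that the kernel/fork.c hunk
de-duplicates: MA_STATE() declares a maple tree cursor (struct ma_state)
over mm->mm_mt, and dup_mmap()-style code walks it across every VMA. A
sketch only, assuming the mas_for_each() iterator from the maple tree API
is available at this point in the series:

#include <linux/maple_tree.h>
#include <linux/mm_types.h>

/*
 * Sketch, not part of this patch: visit every VMA in @mm through its
 * maple tree.  Caller must hold mmap_lock.
 */
static void walk_vmas_sketch(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);	/* cursor starting at index 0 */

	mas_for_each(&mas, vma, ULONG_MAX) {
		/* vma covers [vma->vm_start, vma->vm_end) */
	}
}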