From 24b660891f27e3402866534c38b21a0248bdb5a9 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Fri, 9 Feb 2024 17:15:43 -0500
Subject: [PATCH] mm: Remove the vma_iter_free and mas_destroy calls

Now that the maple tree uses per-cpu arrays for allocating, there is no
longer a need to destroy or free the allocations that would have been
stored in the maple state. This patch removes the calls to these
functions as they are no longer necessary.

Signed-off-by: Liam R. Howlett
---
 include/linux/mm.h               |  6 ----
 kernel/fork.c                    |  1 -
 lib/maple_tree.c                 | 55 +++++++++++---------------------
 mm/mmap.c                        | 14 ++------
 mm/nommu.c                       |  1 -
 tools/testing/radix-tree/linux.c |  4 ---
 6 files changed, 22 insertions(+), 59 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index bb9d90c68e4c4..65f766d70ecd1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1006,12 +1006,6 @@ static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
 	return 0;
 }
 
-/* Free any unused preallocations */
-static inline void vma_iter_free(struct vma_iterator *vmi)
-{
-	mas_destroy(&vmi->mas);
-}
-
 static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
 		struct vm_area_struct *vma)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index 0d944e92a43ff..db3b9b0469bac 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -755,7 +755,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/* a new mm has just been created */
 	retval = arch_dup_mmap(oldmm, mm);
 loop_out:
-	vma_iter_free(&vmi);
 	if (!retval) {
 		mt_set_in_rcu(vmi.mas.tree);
 	} else if (mpnt) {
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 824ed2c30455c..b1f815c2b179a 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -70,11 +70,9 @@
  * Maple state flags
  * * MA_STATE_BULK - Bulk insert mode
  * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
- * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
  */
 #define MA_STATE_BULK		1
 #define MA_STATE_REBALANCE	2
-#define MA_STATE_PREALLOC	4
 
 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
 #define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
@@ -5267,21 +5265,17 @@ void *mas_store(struct ma_state *mas, void *entry)
 	 * want to examine what happens if a single store operation was to
 	 * overwrite multiple entries within a self-balancing B-Tree.
 	 */
-	if (mas->mas_flags & MA_STATE_PREALLOC) {
-		mas_wr_prealloc_setup(&wr_mas);
-		mas_wr_store_type(&wr_mas);
-		mas_wr_store_entry(&wr_mas);
-		MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
-		return wr_mas.content;
-	}
-
 	mas_wr_store_prealloc(&wr_mas, entry);
 	WARN_ON_ONCE(mas->store_type == wr_invalid);
-	if (mas_is_err(mas))
-		return NULL;
+
+	/*
+	 * Highly unlikely, but if there is no memory this can fail. This is
+	 * expected to be preallocated, but a failure is still possible.
+	 */
+	if (unlikely(mas_is_err(mas)))
+		return wr_mas.content;
 
 	mas_wr_store_entry(&wr_mas);
-	mas_destroy(mas);
 	return wr_mas.content;
 }
 EXPORT_SYMBOL_GPL(mas_store);
@@ -5300,21 +5294,21 @@ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
 	MA_WR_STATE(wr_mas, mas, entry);
 	int ret;
 
-	trace_ma_write(__func__, mas, 0, entry);
 	mas->gfp = gfp;
+	trace_ma_write(__func__, mas, 0, entry);
 retry:
 	mas_wr_preallocate(&wr_mas, entry, gfp);
 	WARN_ON_ONCE(mas->store_type == wr_invalid);
 	if (unlikely(mas_nomem(mas, gfp)))
 		goto retry;
+
 	if (mas_is_err(mas))
 		goto out;
 
 	mas_wr_store_entry(&wr_mas);
 out:
 	ret = xa_err(mas->node);
-	mas_destroy(mas);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mas_store_gfp);
@@ -5345,8 +5339,6 @@ store:
 	mas->gfp = __GFP_NOFAIL;
 	mas_wr_store_entry(&wr_mas);
 	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
-	mas_destroy(mas);
-
 }
 EXPORT_SYMBOL_GPL(mas_store_prealloc);
@@ -5361,17 +5353,14 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc);
 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
 {
 	MA_WR_STATE(wr_mas, mas, entry);
-	int ret = 0;
+	int ret;
 
 	mas_wr_preallocate(&wr_mas, entry, gfp);
-	if (mas_is_err(mas)) {
-		ret = xa_err(mas->node);
-		mas_destroy(mas);
-		mas_reset(mas);
-		return ret;
-	}
+	if (likely(!mas_is_err(mas)))
+		return 0;
 
-	mas->mas_flags |= MA_STATE_PREALLOC;
+	ret = xa_err(mas->node);
+	mas_reset(mas);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mas_preallocate);
@@ -5402,7 +5391,8 @@ void mas_destroy(struct ma_state *mas)
 		mas->mas_flags &= ~MA_STATE_REBALANCE;
 	}
 
-	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+
+	mas->mas_flags &= ~MA_STATE_BULK;
 }
 EXPORT_SYMBOL_GPL(mas_destroy);
@@ -5454,9 +5444,6 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	/* Add working room for split (2 nodes) + new parents */
 	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
 
-	/* Detect if allocations run out */
-	mas->mas_flags |= MA_STATE_PREALLOC;
-
 	if (!mas_is_err(mas))
 		return 0;
@@ -6200,7 +6187,6 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
 		unsigned long last, void *entry, gfp_t gfp)
 {
 	MA_STATE(ms, mt, first, last);
-	int ret = 0;
 
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;
@@ -6216,11 +6202,10 @@ retry:
 		goto retry;
 	mtree_unlock(mt);
 
-	if (mas_is_err(&ms))
-		ret = xa_err(ms.node);
+	if (!mas_is_err(&ms))
+		return 0;
 
-	mas_destroy(&ms);
-	return ret;
+	return xa_err(ms.node);
 }
 EXPORT_SYMBOL(mtree_insert_range);
@@ -6276,7 +6261,6 @@ retry:
 unlock:
 	mtree_unlock(mt);
-	mas_destroy(&mas);
 	return ret;
 }
 EXPORT_SYMBOL(mtree_alloc_range);
@@ -6316,7 +6300,6 @@ retry:
 unlock:
 	mtree_unlock(mt);
-	mas_destroy(&mas);
 	return ret;
 }
 EXPORT_SYMBOL(mtree_alloc_rrange);
diff --git a/mm/mmap.c b/mm/mmap.c
index 3281287771c9c..021eaada07a06 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2006,10 +2006,8 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		return -ENOMEM;
 
 	/* We must make sure the anon_vma is allocated. */
-	if (unlikely(anon_vma_prepare(vma))) {
-		mas_destroy(&mas);
+	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	}
 
 	/* Lock the VMA before expanding to prevent concurrent page faults */
 	vma_start_write(vma);
@@ -2057,7 +2055,6 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	}
 	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma(vma, vma->vm_flags);
-	mas_destroy(&mas);
 	validate_mm(mm);
 	return error;
 }
@@ -2099,10 +2096,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 		return -ENOMEM;
 
 	/* We must make sure the anon_vma is allocated. */
-	if (unlikely(anon_vma_prepare(vma))) {
-		mas_destroy(&mas);
+	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	}
 
 	/* Lock the VMA before expanding to prevent concurrent page faults */
 	vma_start_write(vma);
@@ -2151,7 +2146,6 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 	}
 	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma(vma, vma->vm_flags);
-	mas_destroy(&mas);
 	validate_mm(mm);
 	return error;
 }
@@ -2358,7 +2352,7 @@ static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
 	err = vma_dup_policy(vma, new);
 	if (err)
-		goto out_free_vmi;
+		goto out_free_vma;
 
 	err = anon_vma_clone(new, vma);
 	if (err)
@@ -2395,8 +2389,6 @@ static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 out_free_mpol:
 	mpol_put(vma_policy(new));
-out_free_vmi:
-	vma_iter_free(vmi);
 out_free_vma:
 	vm_area_free(new);
 	return err;
diff --git a/mm/nommu.c b/mm/nommu.c
index b6dc558d31440..064169773bccc 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1219,7 +1219,6 @@ share:
 error_just_free:
 	up_write(&nommu_region_sem);
 error:
-	vma_iter_free(&vmi);
 	if (region->vm_file)
 		fput(region->vm_file);
 	kmem_cache_free(vm_region_jar, region);
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index e3fafe60696cb..f47b4fddff9a3 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -219,10 +219,6 @@ int kmem_cache_prefill_percpu_array(struct kmem_cache *s, unsigned int count,
 		gfp_t gfp)
 {
 	if (count > s->non_kernel) {
-		if ((gfp & (GFP_ATOMIC | __GFP_NOFAIL)) ==
-		    (GFP_ATOMIC | __GFP_NOFAIL))
-			return 0;
-
 		if (gfp & __GFP_DIRECT_RECLAIM) {
 			kmem_cache_set_non_kernel(s, count);
 			return 0;
-- 
2.50.1
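
A rough caller-side sketch of what this change means for users of the
preallocation API, not part of the applied diff; it is modeled on the
expand_upwards() hunk above, with the surrounding caller simplified and
hypothetical:

	MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);

	if (mas_preallocate(&mas, vma, GFP_KERNEL))
		return -ENOMEM;

	if (unlikely(anon_vma_prepare(vma))) {
		/* Before this series: drop the unused preallocation. */
		mas_destroy(&mas);
		return -ENOMEM;
	}

	mas_store_prealloc(&mas, vma);

With node allocations backed by per-cpu arrays, the error path no longer
has to call mas_destroy(); the call is simply deleted, as in the
expand_upwards() and expand_downwards() hunks.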