From: Liam R. Howlett
Date: Tue, 8 Aug 2023 18:54:27 +0000 (-0400)
Subject: maple_tree: Remove MA_STATE_PREALLOC
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=7f84e4580552db0e04848423bd12940a54b5c7a0;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Remove MA_STATE_PREALLOC

MA_STATE_PREALLOC was added to catch any writes that try to allocate
when the maple state is being used in preallocation mode.  This can
safely be removed in favour of the percpu array of nodes.

Note that mas_expected_entries() still expects no allocations during
operation, so MA_STATE_BULK can be used in place of preallocations for
this case, which is primarily used for forking.

Signed-off-by: Liam R. Howlett
Signed-off-by: Vlastimil Babka
---

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 1fbfd4a7f01d..c17c63cbef3c 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -70,11 +70,9 @@
  * Maple state flags
  * * MA_STATE_BULK		- Bulk insert mode
  * * MA_STATE_REBALANCE	- Indicate a rebalance during bulk insert
- * * MA_STATE_PREALLOC	- Preallocated nodes, WARN_ON allocation
  */
 #define MA_STATE_BULK		1
 #define MA_STATE_REBALANCE	2
-#define MA_STATE_PREALLOC	4
 
 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
 #define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
@@ -1231,12 +1229,8 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
 		return;
 
 	mas_set_alloc_req(mas, 0);
-	if (mas->mas_flags & MA_STATE_PREALLOC) {
-		if (allocated)
-			return;
-		BUG_ON(!allocated);
-		WARN_ON(!allocated);
-	}
+	if (mas->mas_flags & MA_STATE_BULK)
+		return;
 
 	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
 		node = (struct maple_alloc *)mt_alloc_one(gfp);
@@ -5485,7 +5479,6 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
 	/* node store, slot store needs one node */
 ask_now:
 	mas_node_count_gfp(mas, request, gfp);
-	mas->mas_flags |= MA_STATE_PREALLOC;
 	if (likely(!mas_is_err(mas)))
 		return 0;
 
@@ -5528,7 +5521,7 @@ void mas_destroy(struct ma_state *mas)
 		mas->mas_flags &= ~MA_STATE_REBALANCE;
 	}
 
-	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+	mas->mas_flags &= ~MA_STATE_BULK;
 
 	total = mas_allocated(mas);
 	while (total) {
@@ -5577,9 +5570,6 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	 * of nodes during the operation.
 	 */
 
-	/* Optimize splitting for bulk insert in-order */
-	mas->mas_flags |= MA_STATE_BULK;
-
 	/*
 	 * Avoid overflow, assume a gap between each entry and a trailing null.
 	 * If this is wrong, it just means allocation can happen during
@@ -5596,8 +5586,9 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	/* Add working room for split (2 nodes) + new parents */
 	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
 
-	/* Detect if allocations run out */
-	mas->mas_flags |= MA_STATE_PREALLOC;
+	/* Optimize splitting for bulk insert in-order */
+	mas->mas_flags |= MA_STATE_BULK;
+
 	if (!mas_is_err(mas))
 		return 0;
 
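
For reference, the bulk-insert pattern that mas_expected_entries() sets up
looks roughly like the sketch below.  The helper name bulk_load(), the entry
array, and the 10-wide ranges are invented for illustration; the calls
themselves (MA_STATE(), mas_expected_entries(), mas_set_range(), mas_store(),
mas_destroy()) are the existing maple tree API.  mas_expected_entries()
preallocates all needed nodes and sets MA_STATE_BULK so the stores themselves
do not allocate, and mas_destroy() must run afterwards to free unused nodes
and clear the bulk flags:

#include <linux/maple_tree.h>

/*
 * Hypothetical helper, for illustration only: bulk-load @nr in-order
 * entries into @mt.  mas_expected_entries() preallocates enough nodes
 * up front and sets MA_STATE_BULK, so the stores below do not allocate;
 * mas_destroy() then frees any leftover nodes and clears the bulk
 * flags, rebalancing the last leaf if needed.
 */
static int bulk_load(struct maple_tree *mt, void **entries, unsigned long nr)
{
	unsigned long i;
	int ret;

	MA_STATE(mas, mt, 0, 0);

	ret = mas_expected_entries(&mas, nr);	/* may fail with -ENOMEM */
	if (ret)
		return ret;

	mtree_lock(mt);
	for (i = 0; i < nr; i++) {
		/* arbitrary non-overlapping, ascending ranges */
		mas_set_range(&mas, i * 10, i * 10 + 9);
		mas_store(&mas, entries[i]);
	}
	mas_destroy(&mas);	/* mandatory after mas_expected_entries() */
	mtree_unlock(mt);

	return 0;
}

This is the pattern forking relies on in dup_mmap(), where oldmm->map_count
supplies the entry count up front.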