From: Liam R. Howlett
Date: Wed, 22 Nov 2023 16:17:31 +0000 (-0500)
Subject: maple_tree: don't use maple state for allocations
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=732e4a3c29bc3be298280c41ddaf2610da8fc618;p=users%2Fjedix%2Flinux-maple.git

maple_tree: don't use maple state for allocations

Use the per-cpu array directly.  Instead of caching pre-allocated nodes
in the maple state, pre-fill the maple_node_cache per-cpu array with
kmem_cache_prefill_percpu_array() and allocate nodes at the point of
use.  Carry the gfp flags in struct ma_state so internal callers know
how to allocate.

Signed-off-by: Liam R. Howlett
---

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index b3d63123b945..198a430c0df3 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -432,6 +432,7 @@ struct ma_state {
 	unsigned char offset;
 	unsigned char mas_flags;
 	unsigned char end;		/* The end of the node */
+	gfp_t gfp;
 };
 
 struct ma_wr_state {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f5a97dec5169..bb9d90c68e4c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -988,6 +988,7 @@ static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
 {
 	return vmi->mas.last + 1;
 }
+
 static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
 		unsigned long count)
 {
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b21c53cb085d..5e8b5d37d0fc 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -157,7 +157,7 @@ struct maple_subtree_state {
 /* Functions */
 static inline struct maple_node *mt_alloc_one(gfp_t gfp)
 {
-	return kmem_cache_alloc(maple_node_cache, gfp);
+	return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
 }
 
 static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
@@ -1128,160 +1128,6 @@ static int mas_ascend(struct ma_state *mas)
 	return 0;
 }
 
-/*
- * mas_pop_node() - Get a previously allocated maple node from the maple state.
- * @mas: The maple state
- *
- * Return: A pointer to a maple node.
- */
-static inline struct maple_node *mas_pop_node(struct ma_state *mas)
-{
-	struct maple_alloc *ret, *node = mas->alloc;
-	unsigned long total = mas_allocated(mas);
-	unsigned int req = mas_alloc_req(mas);
-
-	/* nothing or a request pending. */
-	if (WARN_ON(!total))
-		return NULL;
-
-	if (total == 1) {
-		/* single allocation in this ma_state */
-		mas->alloc = NULL;
-		ret = node;
-		goto single_node;
-	}
-
-	if (node->node_count == 1) {
-		/* Single allocation in this node. */
-		mas->alloc = node->slot[0];
-		mas->alloc->total = node->total - 1;
-		ret = node;
-		goto new_head;
-	}
-	node->total--;
-	ret = node->slot[--node->node_count];
-	node->slot[node->node_count] = NULL;
-
-single_node:
-new_head:
-	if (req) {
-		req++;
-		mas_set_alloc_req(mas, req);
-	}
-
-	memset(ret, 0, sizeof(*ret));
-	return (struct maple_node *)ret;
-}
-
-/*
- * mas_push_node() - Push a node back on the maple state allocation.
- * @mas: The maple state
- * @used: The used maple node
- *
- * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
- * requested node count as necessary.
- */
-static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
-{
-	struct maple_alloc *reuse = (struct maple_alloc *)used;
-	struct maple_alloc *head = mas->alloc;
-	unsigned long count;
-	unsigned int requested = mas_alloc_req(mas);
-
-	count = mas_allocated(mas);
-
-	reuse->request_count = 0;
-	reuse->node_count = 0;
-	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
-		head->slot[head->node_count++] = reuse;
-		head->total++;
-		goto done;
-	}
-
-	reuse->total = 1;
-	if ((head) && !((unsigned long)head & 0x1)) {
-		reuse->slot[0] = head;
-		reuse->node_count = 1;
-		reuse->total += head->total;
-	}
-
-	mas->alloc = reuse;
-done:
-	if (requested > 1)
-		mas_set_alloc_req(mas, requested - 1);
-}
-
-/*
- * mas_alloc_nodes() - Allocate nodes into a maple state
- * @mas: The maple state
- * @gfp: The GFP Flags
- */
-static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
-{
-	struct maple_alloc *node;
-	unsigned long allocated = mas_allocated(mas);
-	unsigned int requested = mas_alloc_req(mas);
-	unsigned int count;
-	void **slots = NULL;
-	unsigned int max_req = 0;
-
-	if (!requested)
-		return;
-
-	mas_set_alloc_req(mas, 0);
-	if (mas->mas_flags & MA_STATE_BULK)
-		return;
-
-	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
-		node = (struct maple_alloc *)mt_alloc_one(gfp);
-		if (!node)
-			goto nomem_one;
-
-		if (allocated) {
-			node->slot[0] = mas->alloc;
-			node->node_count = 1;
-		} else {
-			node->node_count = 0;
-		}
-
-		mas->alloc = node;
-		node->total = ++allocated;
-		requested--;
-	}
-
-	node = mas->alloc;
-	node->request_count = 0;
-	while (requested) {
-		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
-		slots = (void **)&node->slot[node->node_count];
-		max_req = min(requested, max_req);
-		count = mt_alloc_bulk(gfp, max_req, slots);
-		if (!count)
-			goto nomem_bulk;
-
-		if (node->node_count == 0) {
-			node->slot[0]->node_count = 0;
-			node->slot[0]->request_count = 0;
-		}
-
-		node->node_count += count;
-		allocated += count;
-		node = node->slot[0];
-		requested -= count;
-	}
-	mas->alloc->total = allocated;
-	return;
-
-nomem_bulk:
-	/* Clean up potential freed allocations on bulk failure */
-	memset(slots, 0, max_req * sizeof(unsigned long));
-nomem_one:
-	mas_set_alloc_req(mas, requested);
-	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
-		mas->alloc->total = allocated;
-	mas_set_err(mas, -ENOMEM);
-}
-
 /*
  * mas_free() - Free an encoded maple node
  * @mas: The maple state
@@ -1297,7 +1143,7 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
 	if (mt_in_rcu(mas->tree))
 		ma_free_rcu(tmp);
 	else
-		mas_push_node(mas, tmp);
+		mt_free_one(tmp);
 }
 
 /*
@@ -1309,12 +1155,14 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
  */
 static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
 {
-	unsigned long allocated = mas_allocated(mas);
+	int ret;
 
-	if (allocated < count) {
-		mas_set_alloc_req(mas, count - allocated);
-		mas_alloc_nodes(mas, gfp);
-	}
+	ret = kmem_cache_prefill_percpu_array(maple_node_cache, count, gfp);
+	if (!ret)
+		return;
+
+	mas_set_err(mas, ret);
+	mas_set_alloc_req(mas, count);
 }
 
 /*
@@ -1327,7 +1175,7 @@ static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
  */
 static void mas_node_count(struct ma_state *mas, int count)
 {
-	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
+	return mas_node_count_gfp(mas, count, mas->gfp);
 }
 
 /*
@@ -2344,7 +2192,7 @@ static inline void mast_ascend(struct maple_subtree_state *mast)
 static inline struct maple_enode *mas_new_ma_node(struct ma_state *mas,
 		struct maple_big_node *b_node)
 {
-	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
+	return mt_mk_node(ma_mnode_ptr(mt_alloc_one(mas->gfp)), b_node->type);
 }
 
 /*
@@ -2519,7 +2367,7 @@ static inline void mas_topiary_node(struct ma_state *mas,
 	if (in_rcu)
 		ma_free_rcu(tmp);
 	else
-		mas_push_node(mas, tmp);
+		mt_free_one(tmp);
 }
 
 /*
@@ -2910,7 +2758,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
 		count++;
 	}
 
-	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
+	l_mas.node = mt_mk_node(ma_mnode_ptr(mt_alloc_one(mas->gfp)),
 				mte_node_type(mast->orig_l->node));
 	l_mas.depth++;
 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
@@ -3030,7 +2878,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
 		if (mas_is_err(mas))
 			return;
 
-		newnode = mas_pop_node(mas);
+		newnode = mt_alloc_one(mas->gfp);
 	} else {
 		newnode = &reuse;
 	}
@@ -3087,7 +2935,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
 	mas->node = mt_mk_node(newnode, mt);
 	ma_set_meta(newnode, mt, 0, tmp);
 
-	new_left = mas_pop_node(mas);
+	new_left = mt_alloc_one(mas->gfp);
 	new_left->parent = left->parent;
 	mt = mte_node_type(l_mas.node);
 	slots = ma_slots(new_left, mt);
@@ -3100,7 +2948,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
 	/* replace parent. */
 	offset = mte_parent_slot(mas->node);
 	mt = mas_parent_type(&l_mas, l_mas.node);
-	parent = mas_pop_node(mas);
+	parent = mt_alloc_one(mas->gfp);
 	slots = ma_slots(parent, mt);
 	pivs = ma_pivots(parent, mt);
 	memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
@@ -3448,7 +3296,7 @@ static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
 	if (mas_is_err(wr_mas->mas))
 		return 0;
 
-	node = mas_pop_node(wr_mas->mas);
+	node = mt_alloc_one(wr_mas->mas->gfp);
 	node->parent = mas_mn(wr_mas->mas)->parent;
 	wr_mas->mas->node = mt_mk_node(node, b_type);
 	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
@@ -3477,7 +3325,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
 	if (unlikely(mas_is_err(mas)))
 		return 0;
 
-	node = mas_pop_node(mas);
+	node = mt_alloc_one(mas->gfp);
 	pivots = ma_pivots(node, type);
 	slots = ma_slots(node, type);
 	node->parent = ma_parent_ptr(mas_tree_parent(mas));
@@ -3749,7 +3597,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
 	if (mas_is_err(mas))
 		return 0;
 
-	node = mas_pop_node(mas);
+	node = mt_alloc_one(mas->gfp);
 	pivots = ma_pivots(node, type);
 	slots = ma_slots(node, type);
 	node->parent = ma_parent_ptr(mas_tree_parent(mas));
@@ -3901,7 +3749,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
 		if (mas_is_err(mas))
 			return false;
 
-		newnode = mas_pop_node(mas);
+		newnode = mt_alloc_one(mas->gfp);
 	} else {
 		memset(&reuse, 0, sizeof(struct maple_node));
 		newnode = &reuse;
@@ -5356,8 +5204,8 @@ reset:
  * @entry: The entry to store.
  *
  * The @mas->index and @mas->last is used to set the range for the @entry.
- * Note: The @mas should have pre-allocated entries to ensure there is memory to
- * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
+ * Note: The per-cpu array should have pre-allocated entries to ensure there is
+ * memory to store the entry.
  *
  * Return: the first entry between mas->index and mas->last or %NULL.
  */
@@ -5366,6 +5214,8 @@ void *mas_store(struct ma_state *mas, void *entry)
 	MA_WR_STATE(wr_mas, mas, entry);
 
 	trace_ma_write(__func__, mas, 0, entry);
+	mas->gfp = GFP_NOWAIT | __GFP_NOWARN;
+
 #ifdef CONFIG_DEBUG_MAPLE_TREE
 	if (MAS_WARN_ON(mas, mas->index > mas->last))
 		pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
@@ -5404,6 +5254,7 @@ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
 
 	mas_wr_store_setup(&wr_mas);
 	trace_ma_write(__func__, mas, 0, entry);
+	mas->gfp = gfp;
 retry:
 	mas_wr_store_entry(&wr_mas);
 	if (unlikely(mas_nomem(mas, gfp)))
@@ -5429,6 +5280,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
 
 	mas_wr_store_setup(&wr_mas);
 	trace_ma_write(__func__, mas, 0, entry);
+	mas->gfp = __GFP_NOFAIL;
retry:
 	mas_wr_store_entry(&wr_mas);
 	if (unlikely(mas_nomem(mas, GFP_ATOMIC | __GFP_NOFAIL)))
@@ -6223,18 +6075,19 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
 		return false;
 	}
 
+	mas->status = ma_start;
 	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
 		mtree_unlock(mas->tree);
-		mas_alloc_nodes(mas, gfp);
+		mas_node_count_gfp(mas, mas_alloc_req(mas), gfp);
 		mtree_lock(mas->tree);
 	} else {
-		mas_alloc_nodes(mas, gfp);
+		mas_node_count_gfp(mas, mas_alloc_req(mas), gfp);
 	}
 
-	if (!mas_allocated(mas))
+	mas_set_alloc_req(mas, 0);
+	if (mas_is_err(mas))
 		return false;
 
-	mas->status = ma_start;
 	return true;
 }
 
@@ -6314,6 +6167,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
 	if (index > last)
 		return -EINVAL;
 
+	mas.gfp = gfp;
 	mtree_lock(mt);
retry:
 	mas_wr_store_entry(&wr_mas);
@@ -6367,6 +6221,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
 	if (first > last)
 		return -EINVAL;
 
+	ms.gfp = gfp;
 	mtree_lock(mt);
retry:
 	mas_insert(&ms, entry);
@@ -6411,6 +6266,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
 		return -EINVAL;
 
+	mas.gfp = gfp;
 	mtree_lock(mt);
retry:
 	ret = mas_empty_area(&mas, min, max, size);
@@ -6449,6 +6305,7 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
 		return -EINVAL;
 
+	mas.gfp = gfp;
 	mtree_lock(mt);
retry:
 	ret = mas_empty_area_rev(&mas, min, max, size);
@@ -6491,6 +6348,7 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index)
 	MA_STATE(mas, mt, index, index);
 
 	trace_ma_op(__func__, &mas);
+	mas.gfp = GFP_NOWAIT | __GFP_NOWARN;
 	mtree_lock(mt);
 	entry = mas_erase(&mas);
 	mtree_unlock(mt);
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 29185ac5c727..0b4f0780e1aa 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2636,139 +2636,6 @@ static noinline void __init check_fuzzer(struct maple_tree *mt)
 	mtree_test_erase(mt, ULONG_MAX - 10);
 }
 
-/* duplicate the tree with a specific gap */
-static noinline void __init check_dup_gaps(struct maple_tree *mt,
-		unsigned long nr_entries, bool zero_start,
-		unsigned long gap)
-{
-	unsigned long i = 0;
-	struct maple_tree newmt;
-	int ret;
-	void *tmp;
-	MA_STATE(mas, mt, 0, 0);
-	MA_STATE(newmas, &newmt, 0, 0);
-	struct rw_semaphore newmt_lock;
-
-	init_rwsem(&newmt_lock);
-	mt_set_external_lock(&newmt, &newmt_lock);
-
-	if (!zero_start)
-		i = 1;
-
-	mt_zero_nr_tallocated();
-	for (; i <= nr_entries; i++)
-		mtree_store_range(mt, i*10, (i+1)*10 - gap,
-				  xa_mk_value(i), GFP_KERNEL);
-
-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
-	mt_set_non_kernel(99999);
-	down_write(&newmt_lock);
-	ret = mas_expected_entries(&newmas, nr_entries);
-	mt_set_non_kernel(0);
-	MT_BUG_ON(mt, ret != 0);
-
-	rcu_read_lock();
-	mas_for_each(&mas, tmp, ULONG_MAX) {
-		newmas.index = mas.index;
-		newmas.last = mas.last;
-		mas_store(&newmas, tmp);
-	}
-	rcu_read_unlock();
-	mas_destroy(&newmas);
-
-	__mt_destroy(&newmt);
-	up_write(&newmt_lock);
-}
-
-/* Duplicate many sizes of trees. Mainly to test expected entry values */
-static noinline void __init check_dup(struct maple_tree *mt)
-{
-	int i;
-	int big_start = 100010;
-
-	/* Check with a value at zero */
-	for (i = 10; i < 1000; i++) {
-		mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
-		check_dup_gaps(mt, i, true, 5);
-		mtree_destroy(mt);
-		rcu_barrier();
-	}
-
-	cond_resched();
-	mt_cache_shrink();
-	/* Check with a value at zero, no gap */
-	for (i = 1000; i < 2000; i++) {
-		mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
-		check_dup_gaps(mt, i, true, 0);
-		mtree_destroy(mt);
-		rcu_barrier();
-	}
-
-	cond_resched();
-	mt_cache_shrink();
-	/* Check with a value at zero and unreasonably large */
-	for (i = big_start; i < big_start + 10; i++) {
-		mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
-		check_dup_gaps(mt, i, true, 5);
-		mtree_destroy(mt);
-		rcu_barrier();
-	}
-
-	cond_resched();
-	mt_cache_shrink();
-	/* Small to medium size not starting at zero*/
-	for (i = 200; i < 1000; i++) {
-		mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
-		check_dup_gaps(mt, i, false, 5);
-		mtree_destroy(mt);
-		rcu_barrier();
-	}
-
-	cond_resched();
-	mt_cache_shrink();
-	/* Unreasonably large not starting at zero*/
-	for (i = big_start; i < big_start + 10; i++) {
-		mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
-		check_dup_gaps(mt, i, false, 5);
-		mtree_destroy(mt);
-		rcu_barrier();
-		cond_resched();
-		mt_cache_shrink();
-	}
-
-	/* Check non-allocation tree not starting at zero */
-	for (i = 1500; i < 3000; i++) {
-		mt_init_flags(mt, 0);
-		check_dup_gaps(mt, i, false, 5);
-		mtree_destroy(mt);
-		rcu_barrier();
-		cond_resched();
-		if (i % 2 == 0)
-			mt_cache_shrink();
-	}
-
-	mt_cache_shrink();
-	/* Check non-allocation tree starting at zero */
-	for (i = 200; i < 1000; i++) {
-		mt_init_flags(mt, 0);
-		check_dup_gaps(mt, i, true, 5);
-		mtree_destroy(mt);
-		rcu_barrier();
-		cond_resched();
-	}
-
-	mt_cache_shrink();
-	/* Unreasonably large */
-	for (i = big_start + 5; i < big_start + 10; i++) {
-		mt_init_flags(mt, 0);
-		check_dup_gaps(mt, i, true, 5);
-		mtree_destroy(mt);
-		rcu_barrier();
-		mt_cache_shrink();
-		cond_resched();
-	}
-}
-
 static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
 {
 	int i = 50;
@@ -3860,10 +3727,6 @@ static int __init maple_tree_seed(void)
 	check_fuzzer(&tree);
 	mtree_destroy(&tree);
 
-	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	check_dup(&tree);
-	mtree_destroy(&tree);
-
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 	check_bnode_min_spanning(&tree);
 	mtree_destroy(&tree);
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 8af8efdf9a5e..e3fafe60696c 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -58,11 +58,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 {
 	void *p;
 
-	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
-		if (!cachep->non_kernel)
-			return NULL;
+	if ((gfp & (GFP_ATOMIC | __GFP_NOFAIL)) != (GFP_ATOMIC | __GFP_NOFAIL)) {
+		if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+			if (!cachep->non_kernel)
+				return NULL;
 
-		cachep->non_kernel--;
+			cachep->non_kernel--;
+		}
 	}
 
 	pthread_mutex_lock(&cachep->lock);
@@ -216,8 +218,18 @@ int kmem_cache_setup_percpu_array(struct kmem_cache *s, unsigned int count)
 
 int kmem_cache_prefill_percpu_array(struct kmem_cache *s, unsigned int count, gfp_t gfp)
 {
-	if (count > s->non_kernel)
-		kmem_cache_set_non_kernel(s, count);
+	if (count > s->non_kernel) {
+		if ((gfp & (GFP_ATOMIC | __GFP_NOFAIL)) ==
+		    (GFP_ATOMIC | __GFP_NOFAIL))
+			return 0;
+
+		if (gfp & __GFP_DIRECT_RECLAIM) {
+			kmem_cache_set_non_kernel(s, count);
+			return 0;
+		}
+
+		return -ENOMEM;
+	}
 
 	return 0;
 }
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index f1caf4bcf937..b4ed64e9a15e 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -62,407 +62,6 @@ struct rcu_reader_struct {
 	struct rcu_test_struct2 *test;
 };
 
-static int get_alloc_node_count(struct ma_state *mas)
-{
-	int count = 1;
-	struct maple_alloc *node = mas->alloc;
-
-	if (!node || ((unsigned long)node & 0x1))
-		return 0;
-	while (node->node_count) {
-		count += node->node_count;
-		node = node->slot[0];
-	}
-	return count;
-}
-
-static void check_mas_alloc_node_count(struct ma_state *mas)
-{
-	mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 1, GFP_KERNEL);
-	mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 3, GFP_KERNEL);
-	MT_BUG_ON(mas->tree, get_alloc_node_count(mas) != mas->alloc->total);
-	mas_destroy(mas);
-}
-
-/*
- * check_new_node() - Check the creation of new nodes and error path
- * verification.
- */
-static noinline void __init check_new_node(struct maple_tree *mt)
-{
-
-	struct maple_node *mn, *mn2, *mn3;
-	struct maple_alloc *smn;
-	struct maple_node *nodes[100];
-	int i, j, total;
-
-	MA_STATE(mas, mt, 0, 0);
-
-	check_mas_alloc_node_count(&mas);
-
-	/* Try allocating 3 nodes */
-	mtree_lock(mt);
-	mt_set_non_kernel(0);
-	/* request 3 nodes to be allocated. */
-	mas_node_count(&mas, 3);
-	/* Allocation request of 3. */
-	MT_BUG_ON(mt, mas_alloc_req(&mas) != 3);
-	/* Allocate failed. */
-	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
-	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-
-	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, not_empty(mn));
-	MT_BUG_ON(mt, mn == NULL);
-	MT_BUG_ON(mt, mas.alloc == NULL);
-	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
-	mas_push_node(&mas, mn);
-	mas_reset(&mas);
-	mas_nomem(&mas, GFP_KERNEL); /* free */
-	mtree_unlock(mt);
-
-
-	/* Try allocating 1 node, then 2 more */
-	mtree_lock(mt);
-	/* Set allocation request to 1. */
-	mas_set_alloc_req(&mas, 1);
-	/* Check Allocation request of 1. */
-	MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
-	mas_set_err(&mas, -ENOMEM);
-	/* Validate allocation request. */
-	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-	/* Eat the requested node. */
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, not_empty(mn));
-	MT_BUG_ON(mt, mn == NULL);
-	MT_BUG_ON(mt, mn->slot[0] != NULL);
-	MT_BUG_ON(mt, mn->slot[1] != NULL);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-	mn->parent = ma_parent_ptr(mn);
-	ma_free_rcu(mn);
-	mas.status = ma_start;
-	mas_nomem(&mas, GFP_KERNEL);
-	/* Allocate 3 nodes, will fail. */
-	mas_node_count(&mas, 3);
-	/* Drop the lock and allocate 3 nodes. */
-	mas_nomem(&mas, GFP_KERNEL);
-	/* Ensure 3 are allocated. */
-	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
-	/* Allocation request of 0. */
-	MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
-
-	MT_BUG_ON(mt, mas.alloc == NULL);
-	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
-	MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
-	/* Ensure we counted 3. */
-	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
-	/* Free. */
-	mas_reset(&mas);
-	mas_nomem(&mas, GFP_KERNEL);
-
-	/* Set allocation request to 1. */
-	mas_set_alloc_req(&mas, 1);
-	MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
-	mas_set_err(&mas, -ENOMEM);
-	/* Validate allocation request. */
-	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-	MT_BUG_ON(mt, mas_allocated(&mas) != 1);
-	/* Check the node is only one node. */
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, not_empty(mn));
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-	MT_BUG_ON(mt, mn == NULL);
-	MT_BUG_ON(mt, mn->slot[0] != NULL);
-	MT_BUG_ON(mt, mn->slot[1] != NULL);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-	mas_push_node(&mas, mn);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 1);
-	MT_BUG_ON(mt, mas.alloc->node_count);
-
-	mas_set_alloc_req(&mas, 2); /* request 2 more. */
-	MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
-	mas_set_err(&mas, -ENOMEM);
-	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
-	MT_BUG_ON(mt, mas.alloc == NULL);
-	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
-	MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
-	for (i = 2; i >= 0; i--) {
-		mn = mas_pop_node(&mas);
-		MT_BUG_ON(mt, mas_allocated(&mas) != i);
-		MT_BUG_ON(mt, !mn);
-		MT_BUG_ON(mt, not_empty(mn));
-		mn->parent = ma_parent_ptr(mn);
-		ma_free_rcu(mn);
-	}
-
-	total = 64;
-	mas_set_alloc_req(&mas, total); /* request 2 more. */
-	MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
-	mas_set_err(&mas, -ENOMEM);
-	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-	for (i = total; i > 0; i--) {
-		unsigned int e = 0; /* expected node_count */
-
-		if (!MAPLE_32BIT) {
-			if (i >= 35)
-				e = i - 34;
-			else if (i >= 5)
-				e = i - 4;
-			else if (i >= 2)
-				e = i - 1;
-		} else {
-			if (i >= 4)
-				e = i - 3;
-			else if (i >= 1)
-				e = i - 1;
-			else
-				e = 0;
-		}
-
-		MT_BUG_ON(mt, mas.alloc->node_count != e);
-		mn = mas_pop_node(&mas);
-		MT_BUG_ON(mt, not_empty(mn));
-		MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
-		MT_BUG_ON(mt, !mn);
-		mn->parent = ma_parent_ptr(mn);
-		ma_free_rcu(mn);
-	}
-
-	total = 100;
-	for (i = 1; i < total; i++) {
-		mas_set_alloc_req(&mas, i);
-		mas_set_err(&mas, -ENOMEM);
-		MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-		for (j = i; j > 0; j--) {
-			mn = mas_pop_node(&mas);
-			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
-			MT_BUG_ON(mt, !mn);
-			MT_BUG_ON(mt, not_empty(mn));
-			mas_push_node(&mas, mn);
-			MT_BUG_ON(mt, mas_allocated(&mas) != j);
-			mn = mas_pop_node(&mas);
-			MT_BUG_ON(mt, not_empty(mn));
-			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
-			mn->parent = ma_parent_ptr(mn);
-			ma_free_rcu(mn);
-		}
-		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-		mas_set_alloc_req(&mas, i);
-		mas_set_err(&mas, -ENOMEM);
-		MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-		for (j = 0; j <= i/2; j++) {
-			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
-			nodes[j] = mas_pop_node(&mas);
-			MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
-		}
-
-		while (j) {
-			j--;
-			mas_push_node(&mas, nodes[j]);
-			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
-		}
-		MT_BUG_ON(mt, mas_allocated(&mas) != i);
-		for (j = 0; j <= i/2; j++) {
-			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
-			mn = mas_pop_node(&mas);
-			MT_BUG_ON(mt, not_empty(mn));
-			mn->parent = ma_parent_ptr(mn);
-			ma_free_rcu(mn);
-			MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
-		}
-		mas_reset(&mas);
-		MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
-
-	}
-
-	/* Set allocation request. */
-	total = 500;
-	mas_node_count(&mas, total);
-	/* Drop the lock and allocate the nodes. */
-	mas_nomem(&mas, GFP_KERNEL);
-	MT_BUG_ON(mt, !mas.alloc);
-	i = 1;
-	smn = mas.alloc;
-	while (i < total) {
-		for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) {
-			i++;
-			MT_BUG_ON(mt, !smn->slot[j]);
-			if (i == total)
-				break;
-		}
-		smn = smn->slot[0]; /* next. */
-	}
-	MT_BUG_ON(mt, mas_allocated(&mas) != total);
-	mas_reset(&mas);
-	mas_nomem(&mas, GFP_KERNEL); /* Free. */
-
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-	for (i = 1; i < 128; i++) {
-		mas_node_count(&mas, i); /* Request */
-		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-		MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
-		for (j = i; j > 0; j--) { /*Free the requests */
-			mn = mas_pop_node(&mas); /* get the next node. */
-			MT_BUG_ON(mt, mn == NULL);
-			MT_BUG_ON(mt, not_empty(mn));
-			mn->parent = ma_parent_ptr(mn);
-			ma_free_rcu(mn);
-		}
-		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-	}
-
-	for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
-		MA_STATE(mas2, mt, 0, 0);
-		mas_node_count(&mas, i); /* Request */
-		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-		MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
-		for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */
-			mn = mas_pop_node(&mas); /* get the next node. */
-			MT_BUG_ON(mt, mn == NULL);
-			MT_BUG_ON(mt, not_empty(mn));
-			mas_push_node(&mas2, mn);
-			MT_BUG_ON(mt, mas_allocated(&mas2) != j);
-		}
-		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-		MT_BUG_ON(mt, mas_allocated(&mas2) != i);
-
-		for (j = i; j > 0; j--) { /*Free the requests */
-			MT_BUG_ON(mt, mas_allocated(&mas2) != j);
-			mn = mas_pop_node(&mas2); /* get the next node. */
-			MT_BUG_ON(mt, mn == NULL);
-			MT_BUG_ON(mt, not_empty(mn));
-			mn->parent = ma_parent_ptr(mn);
-			ma_free_rcu(mn);
-		}
-		MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
-	}
-
-
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-	mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
-	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
-	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
-	mn = mas_pop_node(&mas); /* get the next node. */
-	MT_BUG_ON(mt, mn == NULL);
-	MT_BUG_ON(mt, not_empty(mn));
-	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
-
-	mas_push_node(&mas, mn);
-	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
-	/* Check the limit of pop/push/pop */
-	mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
-	MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
-	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
-	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-	MT_BUG_ON(mt, mas_alloc_req(&mas));
-	MT_BUG_ON(mt, mas.alloc->node_count != 1);
-	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, not_empty(mn));
-	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-	mas_push_node(&mas, mn);
-	MT_BUG_ON(mt, mas.alloc->node_count != 1);
-	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, not_empty(mn));
-	mn->parent = ma_parent_ptr(mn);
-	ma_free_rcu(mn);
-	for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
-		mn = mas_pop_node(&mas);
-		MT_BUG_ON(mt, not_empty(mn));
-		mn->parent = ma_parent_ptr(mn);
-		ma_free_rcu(mn);
-	}
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-
-	for (i = 3; i < MAPLE_NODE_MASK * 3; i++) {
-		mas.node = MA_ERROR(-ENOMEM);
-		mas_node_count(&mas, i); /* Request */
-		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-		mn = mas_pop_node(&mas); /* get the next node. */
-		mas_push_node(&mas, mn); /* put it back */
-		mas_destroy(&mas);
-
-		mas.node = MA_ERROR(-ENOMEM);
-		mas_node_count(&mas, i); /* Request */
-		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-		mn = mas_pop_node(&mas); /* get the next node. */
-		mn2 = mas_pop_node(&mas); /* get the next node. */
-		mas_push_node(&mas, mn); /* put them back */
-		mas_push_node(&mas, mn2);
-		mas_destroy(&mas);
-
-		mas.node = MA_ERROR(-ENOMEM);
-		mas_node_count(&mas, i); /* Request */
-		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-		mn = mas_pop_node(&mas); /* get the next node. */
-		mn2 = mas_pop_node(&mas); /* get the next node. */
-		mn3 = mas_pop_node(&mas); /* get the next node. */
-		mas_push_node(&mas, mn); /* put them back */
-		mas_push_node(&mas, mn2);
-		mas_push_node(&mas, mn3);
-		mas_destroy(&mas);
-
-		mas.node = MA_ERROR(-ENOMEM);
-		mas_node_count(&mas, i); /* Request */
-		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-		mn = mas_pop_node(&mas); /* get the next node. */
-		mn->parent = ma_parent_ptr(mn);
-		ma_free_rcu(mn);
-		mas_destroy(&mas);
-
-		mas.node = MA_ERROR(-ENOMEM);
-		mas_node_count(&mas, i); /* Request */
-		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-		mn = mas_pop_node(&mas); /* get the next node. */
-		mn->parent = ma_parent_ptr(mn);
-		ma_free_rcu(mn);
-		mn = mas_pop_node(&mas); /* get the next node. */
-		mn->parent = ma_parent_ptr(mn);
-		ma_free_rcu(mn);
-		mn = mas_pop_node(&mas); /* get the next node. */
-		mn->parent = ma_parent_ptr(mn);
-		ma_free_rcu(mn);
-		mas_destroy(&mas);
-	}
-
-	mas.node = MA_ERROR(-ENOMEM);
-	mas_node_count(&mas, 5); /* Request */
-	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-	MT_BUG_ON(mt, mas_allocated(&mas) != 5);
-	mas.node = MA_ERROR(-ENOMEM);
-	mas_node_count(&mas, 10); /* Request */
-	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-	mas.status = ma_start;
-	MT_BUG_ON(mt, mas_allocated(&mas) != 10);
-	mas_destroy(&mas);
-
-	mas.node = MA_ERROR(-ENOMEM);
-	mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */
-	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1);
-	mas.node = MA_ERROR(-ENOMEM);
-	mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */
-	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
-	mas.status = ma_start;
-	MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1);
-	mas_destroy(&mas);
-
-	mtree_unlock(mt);
-}
-
 /*
  * Check erasing including RCU.
  */
@@ -35431,15 +35030,6 @@ static void check_dfs_preorder(struct maple_tree *mt)
 	mtree_destroy(mt);
 
 	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
-	mas_reset(&mas);
-	mt_zero_nr_tallocated();
-	mt_set_non_kernel(200);
-	mas_expected_entries(&mas, max);
-	for (count = 0; count <= max; count++) {
-		mas.index = mas.last = count;
-		mas_store(&mas, xa_mk_value(count));
-		MT_BUG_ON(mt, mas_is_err(&mas));
-	}
 	mas_destroy(&mas);
 	rcu_barrier();
 	/*
@@ -35451,139 +35041,6 @@ static void check_dfs_preorder(struct maple_tree *mt)
 }
 /* End of depth first search tests */
 
-/* Preallocation testing */
-static noinline void __init check_prealloc(struct maple_tree *mt)
-{
-	unsigned long i, max = 100;
-	unsigned long allocated;
-	unsigned char height;
-	struct maple_node *mn;
-	void *ptr = check_prealloc;
-	MA_STATE(mas, mt, 10, 20);
-
-	mt_set_non_kernel(1000);
-	for (i = 0; i <= max; i++)
-		mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
-
-	/* Spanning store */
-	mas_set_range(&mas, 470, 500);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated == 0);
-	MT_BUG_ON(mt, allocated != 1 + height * 3);
-	mas_destroy(&mas);
-	allocated = mas_allocated(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated == 0);
-	MT_BUG_ON(mt, allocated != 1 + height * 3);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	mas_destroy(&mas);
-	allocated = mas_allocated(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-
-
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 1 + height * 3);
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
-	mn->parent = ma_parent_ptr(mn);
-	ma_free_rcu(mn);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	mas_destroy(&mas);
-	allocated = mas_allocated(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 1 + height * 3);
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	mas_destroy(&mas);
-	allocated = mas_allocated(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-	mn->parent = ma_parent_ptr(mn);
-	ma_free_rcu(mn);
-
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 1 + height * 3);
-	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
-	mas_push_node(&mas, mn);
-	MT_BUG_ON(mt, mas_allocated(&mas) != allocated);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	mas_destroy(&mas);
-	allocated = mas_allocated(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 1 + height * 3);
-	mas_store_prealloc(&mas, ptr);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-	/* Slot store does not need allocations */
-	mas_set_range(&mas, 6, 9);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-	mas_store_prealloc(&mas, ptr);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-	mas_set_range(&mas, 6, 10);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-	mas_store_prealloc(&mas, ptr);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-	/* Split */
-	mas_set_range(&mas, 54, 54);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 1 + height * 2);
-	mas_store_prealloc(&mas, ptr);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-	mt_set_non_kernel(1);
-	/* Spanning store */
-	mas_set_range(&mas, 1, 100);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-	mas_destroy(&mas);
-
-
-	/* Spanning store */
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated == 0);
-	MT_BUG_ON(mt, allocated != 1 + height * 3);
-	mas_store_prealloc(&mas, ptr);
-	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-	mas_set_range(&mas, 0, 200);
-	mt_set_non_kernel(1);
-	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
-	allocated = mas_allocated(&mas);
-	height = mas_mt_height(&mas);
-	MT_BUG_ON(mt, allocated != 0);
-}
-/* End of preallocation testing */
-
 /* Spanning writes, writes that span nodes and layers of the tree */
 static noinline void __init check_spanning_write(struct maple_tree *mt)
 {
@@ -36256,10 +35713,6 @@ void farmer_tests(void)
 	check_dfs_preorder(&tree);
 	mtree_destroy(&tree);
 
-	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	check_prealloc(&tree);
-	mtree_destroy(&tree);
-
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 	check_spanning_write(&tree);
 	mtree_destroy(&tree);
@@ -36277,10 +35730,6 @@ void farmer_tests(void)
 	check_erase_testset(&tree);
 	mtree_destroy(&tree);
 
-	mt_init_flags(&tree, 0);
-	check_new_node(&tree);
-	mtree_destroy(&tree);
-
 	if (!MAPLE_32BIT) {
 		mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 		check_rcu_simulated(&tree);
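
For readers following the change, here is a minimal sketch of the node-sourcing scheme the patch moves to. It assumes kmem_cache_prefill_percpu_array() guarantees that @count objects can subsequently be taken from the per-cpu array without sleeping, which is what the mas_node_count_gfp() rewrite above relies on; sketch_write() and @needed are illustrative names, not symbols from the tree.

/*
 * Editor's sketch, not part of the patch: reserve into the per-cpu
 * array up front, then allocate at the point of use.
 */
static int sketch_write(struct ma_state *mas, int needed)
{
	struct maple_node *node;
	int ret;

	/* Reserve: replaces stashing pre-allocated nodes in mas->alloc. */
	ret = kmem_cache_prefill_percpu_array(maple_node_cache, needed,
					      mas->gfp);
	if (ret)
		return ret;

	/*
	 * Consume: mt_alloc_one() now passes __GFP_ZERO, which covers the
	 * memset() that mas_pop_node() used to perform on popped nodes.
	 */
	node = mt_alloc_one(mas->gfp);
	if (!node)
		return -ENOMEM;

	/*
	 * ... link @node into the tree under the tree lock; a node that
	 * ends up unused goes back with mt_free_one(), which replaces
	 * mas_push_node() ...
	 */
	return 0;
}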
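
The caller-side convention is the other half of the change: entry points stash the gfp mask in the maple state, the fast path tries GFP_NOWAIT | __GFP_NOWARN, and -ENOMEM is recovered in mas_nomem(), which refills the per-cpu array (dropping the tree lock when the flags allow blocking) and resets the state so the walk is redone. A hedged sketch of that retry loop, modelled on mas_store_gfp() above; retry_store() is an illustrative name:

/*
 * Editor's sketch, not part of the patch.  Called with the tree locked;
 * mas_nomem() may drop and retake the lock while refilling the array.
 */
static void *retry_store(struct ma_state *mas, void *entry)
{
	void *old;

retry:
	/* mas_store() defaults mas->gfp to GFP_NOWAIT | __GFP_NOWARN. */
	old = mas_store(mas, entry);
	/* On -ENOMEM, prefill with GFP_KERNEL and redo the walk. */
	if (unlikely(mas_nomem(mas, GFP_KERNEL)))
		goto retry;

	return old;
}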