From: Liam R. Howlett
Date: Sat, 17 Oct 2020 00:06:05 +0000 (-0400)
Subject: maple_tree: Switch to kmem_cache_alloc_bulk() instead of single node allocations
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=1d193e3e8833416c153e6392e8f87a99343e8efc;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Switch to kmem_cache_alloc_bulk() instead of single node allocations

Signed-off-by: Liam R. Howlett
---

diff --git a/Thoughts b/Thoughts
index 9df22cb0886b..792d48588321 100644
--- a/Thoughts
+++ b/Thoughts
@@ -115,23 +115,28 @@ is not a node (eg the root). If bit 1 is set, the rest of the bits are a
 negative errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6
 indicate the node type. This encoding is chosen so that in the optimised
 iteration loop the inline function can simply test (unsigned
-long)state->node & 127 and bail to the out-of-line functions if it's
-not 0. The maple_dense enum must remain at 0 for this to work.
-
-state->alloc uses the low 7 bits for storing a number. If state->node
-indicates -ENOMEM, the number is the number of nodes which need to be
-allocated. If state->node is a pointer to a node, this space is reused to keep
-track of a slot number. If more than one node is allocated, then the nodes are
-placed in state->node slots. Once those slots are full, the slots in the next
-allocated node is filled. This pattern is continued in sequence such that the
-location of a node could be defined as follows:
-
-state->node->slot[X / MAPLE_NODE_SLOTS]->slot[X % MAPLE_NODE_SLOTS]
-
-Where X is the allocation count -1 (for the initial state->node).
-
-The high bits of state->alloc are either NULL or a pointer to the first node
-allocated.
+long)state->node & MAPLE_NODE_MASK and bail to the out-of-line functions if
+it's not 0. The maple_dense enum must remain at 0 for this to work.
+
+state->alloc either holds a requested number of nodes or an allocated node.
+If state->alloc holds a requested number of nodes, the first bit will be set
+(0x1) and the remaining bits are the value. If state->alloc is a node, then
+the node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1
+slots for storing more allocated nodes, a total, and the node_count of this
+node. total is the number of nodes allocated. node_count is the number of
+allocated nodes in this node. Scaling beyond MAPLE_NODE_SLOTS - 1 nodes is
+handled by storing further nodes in the node at state->alloc->slot[0]. Nodes
+are taken from state->alloc by removing them from its slots until node_count
+is 0, at which point state->alloc itself is returned and state->alloc->slot[0]
+is promoted to state->alloc. Nodes are pushed onto state->alloc by filling its
+next free slot or, when full, by placing the current state->alloc into the
+pushed node's slot[0] and making the pushed node the new state->alloc.
+
+The state also contains the implied min/max of the state->node, the depth of
+this search, and the offset. The implied min/max are either from the parent
+node or are 0-oo for the root node. The depth is incremented or decremented
+every time a node is walked down or up. The offset is the slot/pivot of
+interest in the node - either for reading or writing.
 
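As a rough standalone illustration of the tagged-pointer encoding described
above (a sketch only: the struct layout is simplified, MAPLE_NODE_SLOTS is
assumed to be 16, and the helper names are invented here rather than kernel
API):

	/* sketch.c - userspace model, not kernel code */
	#include <assert.h>
	#include <stdint.h>

	#define MAPLE_NODE_SLOTS 16	/* assumed value for this sketch */

	struct sketch_alloc {
		unsigned long total;		/* nodes allocated in the chain */
		unsigned char node_count;	/* nodes held in this node */
		unsigned int request_count;	/* outstanding request, if any */
		struct sketch_alloc *slot[MAPLE_NODE_SLOTS - 1];
	};

	/* A request is encoded as (count << 1) | 1; bit 0 tags it as a count
	 * and can never be set in a suitably aligned node pointer, so a
	 * single field holds both cases unambiguously. */
	static int is_request(void *alloc)
	{
		return ((uintptr_t)alloc & 1) == 1;
	}

	static void *encode_request(unsigned long count)
	{
		return (void *)((count << 1) | 1);
	}

	static unsigned long decode_request(void *alloc)
	{
		return (uintptr_t)alloc >> 1;
	}

	int main(void)
	{
		void *alloc = encode_request(3);	/* "3 nodes needed" */

		assert(is_request(alloc));
		assert(decode_request(alloc) == 3);
		return 0;
	}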
 Tree Operations
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 6ff6dd6b1d95..4675e59c4ab7 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -96,6 +96,13 @@ struct maple_arange_64 {
 	unsigned long gap[MAPLE_ARANGE64_SLOTS];
 };
 
+struct maple_alloc {
+	unsigned long total;
+	unsigned char node_count;
+	unsigned int request_count;
+	struct maple_alloc *slot[MAPLE_NODE_SLOTS - 1];
+};
+
 struct maple_topiary {
 	struct maple_pnode *parent;
 	struct maple_enode *next; /* Overlaps the pivot */
@@ -152,6 +159,7 @@ struct maple_node {
 		};
 		struct maple_range_64 mr64;
 		struct maple_arange_64 ma64;
+		struct maple_alloc alloc;
 	};
 };
 
@@ -193,8 +201,9 @@ struct ma_state {
 	struct maple_enode *node;	/* The node containing this entry */
 	unsigned long min;		/* The minimum index of this node - implied pivot min */
 	unsigned long max;		/* The maximum index of this node - implied pivot max */
-	struct maple_node *alloc;	/* Allocated nodes for this operation */
+	struct maple_alloc *alloc;	/* Allocated nodes for this operation */
 	unsigned char depth;		/* depth of tree descent during write */
+	unsigned char offset;
 };
 
 #define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 1b363e0a84f1..5ac7e7519bf3 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -85,11 +85,23 @@ struct maple_subtree_state {
 };
 
 // Functions
-static struct maple_node *mt_alloc_one(gfp_t gfp)
+static inline struct maple_node *mt_alloc_one(gfp_t gfp)
 {
 	return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
 }
 
+static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
+{
+	return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
+				     nodes);
+}
+
+static inline void mt_free_bulk(size_t size, void **nodes)
+{
+	kmem_cache_free_bulk(maple_node_cache, size, nodes);
+}
+
 static void mt_free_rcu(struct rcu_head *head)
 {
 	struct maple_node *node = container_of(head, struct maple_node, rcu);
@@ -371,45 +383,7 @@ static inline bool mte_dead_node(const struct maple_enode *enode)
 }
 
 /*
- * mas_get_alloc() - Get the first allocated node.
- * @mas: The maple state.
- *
- * The first allocated node may be used for accounting of many other nodes.
- * Please see mas_alloc_count() and mas_next_alloc() for complete use.
- *
- * Returns: The first allocated node.
- *
- */
-static inline struct maple_node *mas_get_alloc(const struct ma_state *mas)
-{
-	return (struct maple_node *)
-		((unsigned long)mas->alloc & ~MAPLE_NODE_MASK);
-}
-
-/*
- * ma_node_alloc_count() - Get the number of allocations stored in this node.
- * @node: The maple node
- *
- * Used to calculate the total allocated nodes in a maple state. See
- * mas_alloc_count().
- *
- * Returns: The count of the allocated nodes stored in this nodes slots.
- *
- */
-static inline int ma_node_alloc_count(const struct maple_node *node)
-{
-	int slot = 0;
-
-	while (slot < MAPLE_NODE_SLOTS) {
-		if (!node->slot[slot])
-			break;
-		slot++;
-	}
-	return slot;
-}
-
-/*
- * mas_alloc_count() - Get the number of nodes allocated in a maple state.
+ * mas_allocated() - Get the number of nodes allocated in a maple state.
  * @mas: The maple state
  *
  * Walks through the allocated nodes and returns the number allocated.
@@ -417,42 +391,45 @@ static inline int ma_node_alloc_count(const struct maple_node *node)
  * Returns: The total number of nodes allocated
  *
  */
-static inline int mas_alloc_count(const struct ma_state *mas)
+static inline unsigned int mas_allocated(const struct ma_state *mas)
 {
-	struct maple_node *node = mas_get_alloc(mas);
-	int ret = 1;
-	int slot = 0;
-
-	if (!node)
+	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
 		return 0;
 
-	slot = ma_node_alloc_count(node);
-	ret += slot;
-	while (--slot >= 0) {
-		if (ma_mnode_ptr(node->slot[slot])->slot[0])
-			ret += ma_node_alloc_count(node->slot[slot]);
-	}
-	return ret;
+	return mas->alloc->total;
 }
 
 /*
  * mas_set_alloc_req() - Set the requested number of allocations.
- * @mas - the maple state
- * @count - the number of allocations.
+ * @mas: the maple state
+ * @count: the number of allocations.
  */
-static inline void mas_set_alloc_req(struct ma_state *mas, int count)
+static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
 {
-	mas->alloc = (struct maple_node *)
-		(((unsigned long)mas->alloc & ~MAPLE_NODE_MASK) | count);
+	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
+		if (!count)
+			mas->alloc = NULL;
+		else
+			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
+		return;
+	}
+
+	mas->alloc->request_count += count;
 }
 
 /*
  * mas_alloc_req() - get the requested number of allocations.
+ * @mas: The maple state.
+ *
  * Returns: The allocation request count.
  */
-static inline int mas_alloc_req(const struct ma_state *mas)
+static inline unsigned int mas_alloc_req(const struct ma_state *mas)
 {
-	return (int)(((unsigned long)mas->alloc & MAPLE_NODE_MASK));
+	if ((unsigned long)mas->alloc & 0x1)
+		return (unsigned long)(mas->alloc) >> 1;
+	else if (mas->alloc)
+		return mas->alloc->request_count;
+	return 0;
 }
 
 /*
@@ -461,9 +438,9 @@ static inline int mas_alloc_req(const struct ma_state *mas)
  *
  * Returns: The offset into the @mas node of interest.
  */
-static inline int mas_offset(const struct ma_state *mas)
+static inline unsigned char mas_offset(const struct ma_state *mas)
 {
-	return mas_alloc_req(mas);
+	return mas->offset;
 }
 
 /*
@@ -472,9 +449,9 @@ static inline int mas_offset(const struct ma_state *mas)
  * @offset: The offset
  *
  */
-static inline void mas_set_offset(struct ma_state *mas, int offset)
+static inline void mas_set_offset(struct ma_state *mas, unsigned char offset)
 {
-	mas_set_alloc_req(mas, offset);
+	mas->offset = offset;
 }
 
 /*
@@ -569,6 +546,7 @@ mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char piv)
 	return pivots[piv - 1] + 1;
 }
 
+
 // Check what the maximum value the pivot could represent.
 static inline unsigned long
 mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
@@ -757,7 +735,7 @@ static inline void mas_dup_state(struct ma_state *dst, struct ma_state *src)
 	dst->node = src->node;
 	dst->max = src->max;
 	dst->min = src->min;
-	mas_set_offset(dst, mas_offset(src));
+	dst->offset = src->offset;
 }
 
 /*
@@ -848,126 +826,160 @@ no_parent:
 	mas->node = p_enode;
 }
 
-static inline struct maple_node *mas_next_alloc(struct ma_state *ms)
+static inline struct maple_node *mas_pop_node(struct ma_state *mas)
 {
-	int count;
-	struct maple_node *mn, *smn;
+	struct maple_alloc *ret, *node = mas->alloc;
+	unsigned int total = mas_allocated(mas);
 
-	if (!ms->alloc)
+	if (!total) // nothing or a request pending.
 		return NULL;
 
-	count = mas_alloc_count(ms);
-	mn = mas_get_alloc(ms);
-	if (count == 1) {
-		ms->alloc = NULL;
-	} else if (count <= MAPLE_NODE_SLOTS + 1) {
-		count -= 2;
-		smn = mn->slot[count];
-		mn->slot[count] = NULL;
-		mn = smn;
-	} else if (count > MAPLE_NODE_SLOTS + 1) {
-		count -= 2;
-		smn = mn->slot[(count / MAPLE_NODE_SLOTS) - 1];
-		mn = smn->slot[(count % MAPLE_NODE_SLOTS)];
-		smn->slot[count % MAPLE_NODE_SLOTS] = NULL;
+	if (total == 1) { // single allocation in this ma_state
+		mas->alloc = NULL;
+		ret = node;
+		goto single_node;
 	}
 
-	return mn;
-}
+	if (!node->node_count) { // Single allocation in this node.
+		BUG_ON(!node->slot[0]);
+		mas->alloc = node->slot[0];
+		mas->alloc->total = node->total - 1;
+		ret = node;
+		goto new_head;
+	}
+	node->total--;
+	ret = node->slot[node->node_count];
+	node->slot[node->node_count--] = NULL;
+
+single_node:
+new_head:
+	ret->total = 0;
+	ret->node_count = 0;
+	if (ret->request_count)
+		mas_set_alloc_req(mas, ret->request_count + 1);
+	ret->request_count = 0;
+	return (struct maple_node *)ret;
+}
 
 static inline void mas_push_node(struct ma_state *mas, struct maple_enode *used)
 {
-	struct maple_node *reuse = mte_to_node(used);
-	struct maple_node *node = mas_get_alloc(mas);
-	int count;
+	struct maple_alloc *reuse = (struct maple_alloc *)mte_to_node(used);
+	struct maple_alloc *head = mas->alloc;
+	unsigned int count;
+	unsigned int requested = mas_alloc_req(mas);
 
 	memset(reuse, 0, sizeof(*reuse));
-	count = mas_alloc_count(mas);
-	if (count == 0) {
-		mas->alloc = reuse;
-	} else if (count <= MAPLE_NODE_SLOTS) {
-		count--;
-		node->slot[count] = reuse;
-	} else {
-		struct maple_node *smn;
+	count = mas_allocated(mas);
 
-		count--;
-		smn = node->slot[(count/MAPLE_NODE_SLOTS) - 1];
-		smn->slot[count % MAPLE_NODE_SLOTS] = reuse;
-	}
-	count = mas_alloc_count(mas);
+	if (count && (head->node_count < MAPLE_NODE_SLOTS)) {
+		if (head->slot[0])
+			head->node_count++;
+		head->slot[head->node_count] = reuse;
 
-	BUG_ON(!mas_alloc_count(mas));
-}
+		head->total++;
+		goto done;
+	}
 
-static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
-{
-	if (mt_in_rcu(mas->tree))
-		mte_free(used);
-	else {
-		mas_push_node(mas, used);
+	reuse->total = 1;
+	if (head && !((unsigned long)head & 0x1)) {
+		reuse->slot[0] = head;
+		reuse->total += head->total;
 	}
+
+	reuse->node_count = 0;
+	mas->alloc = reuse;
+done:
+	if (requested)
+		mas_set_alloc_req(mas, requested - 1);
 }
 
-static inline void mas_node_node(struct ma_state *ms, gfp_t gfp)
+static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
 {
-	struct maple_node *mn, *smn;
-	int req = mas_alloc_req(ms);
-	int allocated = mas_alloc_count(ms);
-	int slot;
+	struct maple_alloc *node;
+	struct maple_alloc **nodep = &mas->alloc;
+	unsigned int requested = mas_alloc_req(mas);
+	unsigned int count, success = 0;
 
-	if (!req)
+	if (!requested)
 		return;
 
-	mn = mas_get_alloc(ms);
-	if (!mn) {
-		mn = mt_alloc_one(gfp);
-		if (!mn)
-			goto list_failed;
-		req--;
-		allocated++;
-	}
-
-	ms->alloc = mn;
-	slot = (allocated - 1);
-	if (allocated - 1 >= MAPLE_NODE_SLOTS) {
-		slot /= MAPLE_NODE_SLOTS;
-		mn = mn->slot[slot - 1];
-	}
-
-	while (req > 0) {
-		smn = mt_alloc_one(gfp);
-		if (!smn)
-			goto slot_failed;
-		smn->parent = NULL;
-		mn->slot[slot] = smn;
-		req--;
-		allocated++;
-		slot++;
-		if (slot >= MAPLE_NODE_SLOTS) {
-			slot = (allocated - 1) / MAPLE_NODE_SLOTS;
-			mn = ms->alloc->slot[slot - 1];
-			slot = 0;
+	mas_set_alloc_req(mas, 0);
+	if (!mas_allocated(mas) ||
+	    mas->alloc->node_count == MAPLE_NODE_SLOTS - 1) {
+		node = (struct maple_alloc *)mt_alloc_one(gfp);
+		if (!node)
+			goto nomem;
+
+		if (mas_allocated(mas)) {
+			node->total = mas->alloc->total;
+			node->slot[0] = mas->alloc;
+		}
+		node->total++;
+		mas->alloc = node;
+		requested--;
 	}
 
-slot_failed:
-	mas_set_alloc_req(ms, req);
+	node = mas->alloc;
+	success = node->total;
+	while (requested) {
+		void **slots = (void **)&node->slot;
+		unsigned int max = MAPLE_NODE_SLOTS - 1;
+
+		if (node->slot[0]) {
+			slots = (void **)&node->slot[node->node_count + 1];
+			max -= node->node_count;
+		}
+
+		count = min(requested, max);
+		count = mt_alloc_bulk(gfp, count, slots);
+		if (!count)
+			goto nomem;
+
+		if (slots == (void **)&node->slot)
+			node->node_count = count - 1; // zero indexed.
+		else
+			node->node_count += count;
+
+		success += count;
+		nodep = &node->slot[0];
+		node = *nodep;
+		requested -= count; // decrement what is still needed.
+	}
+	mas->alloc->total = success;
+	return;
+
+nomem:
+	mas_set_alloc_req(mas, requested);
+	if (mas->alloc && !((unsigned long)mas->alloc & 0x1))
+		mas->alloc->total = success;
+	mas_set_err(mas, -ENOMEM);
 
-list_failed:
-	if (req > 0)
-		mas_set_err(ms, -ENOMEM);
+}
+
+static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
+{
+	if (mt_in_rcu(mas->tree))
+		mte_free(used);
+	else {
+		mas_push_node(mas, used);
+	}
 }
 
 // Free the allocations.
 void mas_empty_alloc(struct ma_state *mas)
 {
-	struct maple_node *node;
+	struct maple_alloc *node;
 
-	while (mas_get_alloc(mas)) {
-		node = mas_next_alloc(mas);
+	while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) {
+		node = mas->alloc;
+		mas->alloc = mas->alloc->slot[0];
+		if (node->node_count > 0)
+			mt_free_bulk(node->node_count, (void **)&node->slot[1]);
 		kmem_cache_free(maple_node_cache, node);
 	}
+	mas->alloc = NULL;
 }
 
 /*
@@ -984,27 +996,27 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
 
 	if (gfpflags_allow_blocking(gfp)) {
 		mtree_unlock(mas->tree);
-		mas_node_node(mas, gfp);
+		mas_alloc_nodes(mas, gfp);
 		mtree_lock(mas->tree);
 	} else {
-		mas_node_node(mas, gfp);
+		mas_alloc_nodes(mas, gfp);
 	}
-	if (!mas_get_alloc(mas))
+
+	if (!mas_allocated(mas))
 		return false;
+
 	mas->node = MAS_START;
 	return true;
 }
 
-static inline struct maple_node *mas_node_count(struct ma_state *mas, int count)
+static void mas_node_count(struct ma_state *mas, int count)
 {
-	int allocated = mas_alloc_count(mas);
+	unsigned int allocated = mas_allocated(mas);
 
-	//BUG_ON(count > 127);
 	if (allocated < count) {
 		mas_set_alloc_req(mas, count - allocated);
-		mas_node_node(mas, GFP_NOWAIT | __GFP_NOWARN);
+		mas_alloc_nodes(mas, GFP_NOWAIT | __GFP_NOWARN);
 	}
-	return mas->alloc;
 }
 
 int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
@@ -1389,7 +1401,7 @@ static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
 	struct maple_enode *entry;
 	void **slots = ma_slots(mte_to_node(mas->node), mt);
 
-	for (offset= mas_offset(mas); offset < mt_slots[mt]; offset++) {
+	for (offset = mas_offset(mas); offset < mt_slots[mt]; offset++) {
 		entry = slots[offset];
 		if (!entry) // end of node data.
 			break;
 
@@ -1980,7 +1992,7 @@ mast_ascend_free(struct maple_subtree_state *mast)
 static inline struct maple_enode *mas_new_ma_node(struct ma_state *mas,
 		struct maple_big_node *b_node)
 {
-	return mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), b_node->type);
+	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
 }
 
 /*
@@ -2351,7 +2363,7 @@ static inline int mas_spanning_rebalance(struct ma_state *mas,
 			if (!count)
 				count++;
 	}
-	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
 				mte_node_type(mast->orig_l->node));
 	mast->orig_l->depth++;
 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas);
@@ -2708,7 +2720,7 @@ static inline int mas_commit_b_node(struct ma_state *mas,
 	if (mas_is_err(mas))
 		return 0;
 
-	new_node = mt_mk_node(mas_next_alloc(mas), mte_node_type(mas->node));
+	new_node = mt_mk_node(mas_pop_node(mas), mte_node_type(mas->node));
 	mte_to_node(new_node)->parent = mas_mn(mas)->parent;
 	mas->node = new_node;
 
@@ -2733,7 +2745,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
 	if (mas_is_err(mas))
 		return 0;
 
-	node = mas_next_alloc(mas);
+	node = mas_pop_node(mas);
 	pivots = ma_pivots(node, type);
 	slots = ma_slots(node, type);
 	mas->node = mt_mk_node(node, type);
@@ -3106,7 +3118,7 @@ static inline bool mas_node_store(struct ma_state *mas, void *entry,
 		if (mas_is_err(mas))
 			return false;
 
-		newnode = mas_next_alloc(mas);
+		newnode = mas_pop_node(mas);
 	} else {
 		memset(&reuse, 0, sizeof(struct maple_node));
 		newnode = &reuse;
@@ -4551,7 +4563,7 @@ void mt_destroy_walk(struct rcu_head *head)
 		enum maple_type type;
 
 		end = mas_dead_leaves(&mas, slots);
-		kmem_cache_free_bulk(maple_node_cache, end, slots);
+		mt_free_bulk(end, slots);
 		if (mas.node == start)
 			break;
 
@@ -4568,7 +4580,7 @@
 	}
 
 free_leaf:
-	kmem_cache_free(maple_node_cache, node);
+	ma_free_rcu(node);
 }
 
 void mte_destroy_walk(struct maple_enode *enode, struct maple_tree *mt)
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 7586b52c7a5b..d3d2b41d4c5f 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -193,7 +193,8 @@ static noinline void check_nomem(struct maple_tree *mt)
 
 static noinline void check_new_node(struct maple_tree *mt)
 {
-	struct maple_node *mn, *smn;
+	struct maple_node *mn;
+	struct maple_alloc *smn;
 	int i, j, total, full_slots, count = 0;
 
 	MA_STATE(mas, mt, 0, 0);
@@ -208,11 +209,12 @@ static noinline void check_new_node(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
 	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-	MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
-	mn = mas_get_alloc(&mas);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+	mn = mas_pop_node(&mas);
 	MT_BUG_ON(mt, mn == NULL);
-	MT_BUG_ON(mt, mn->slot[0] == NULL);
-	MT_BUG_ON(mt, mn->slot[1] == NULL);
+	MT_BUG_ON(mt, mas.alloc == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+
+	mas_push_node(&mas, (struct maple_enode *)mn);
 	mas_nomem(&mas, GFP_KERNEL); // free;
 	mtree_unlock(mt);
 
@@ -227,12 +229,12 @@ static noinline void check_new_node(struct maple_tree *mt)
 
 	// Validate allocation request.
 	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
 	// Eat the requested node.
-	mn = mas_next_alloc(&mas);
+	mn = mas_pop_node(&mas);
 	MT_BUG_ON(mt, mn == NULL);
 	MT_BUG_ON(mt, mn->slot[0] != NULL);
 	MT_BUG_ON(mt, mn->slot[1] != NULL);
 
-	MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
 	ma_free_rcu(mn);
 	mas.node = MAS_START;
@@ -242,76 +244,72 @@ static noinline void check_new_node(struct maple_tree *mt)
 	// Drop the lock and allocate 3 nodes.
 	mas_nomem(&mas, GFP_KERNEL);
 	// Ensure 3 are allocated.
-	MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
 	// Allocation request of 0.
 	MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
-	mn = mas.alloc;
-	MT_BUG_ON(mt, mn == NULL);
-	MT_BUG_ON(mt, mn->slot[0] == NULL);
-	MT_BUG_ON(mt, mn->slot[1] == NULL);
+	MT_BUG_ON(mt, mas.alloc == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
 	// Ensure we counted 3.
-	MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
 	// Free.
 	mas_nomem(&mas, GFP_KERNEL);
 
 	// Set allocation request to 127.
 	total = 127;
-	full_slots = (total - MAPLE_NODE_SLOTS) / MAPLE_NODE_SLOTS;
 	mas_node_count(&mas, total);
 	// Drop the lock and allocate 127 nodes.
 	mas_nomem(&mas, GFP_KERNEL);
-	mn = mas_get_alloc(&mas);
-	MT_BUG_ON(mt, mn == NULL);
-	count++;
-	for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
-		j = 0;
-		smn = mn->slot[i];
-		MT_BUG_ON(mt, smn == NULL);
-		count++;
-		while ((i < full_slots) && (j < MAPLE_NODE_SLOTS)) {
-			MT_BUG_ON(mt, smn->slot[j] == NULL);
-			count++;
-			j++;
+	MT_BUG_ON(mt, !mas.alloc);
+	i = 1;
+	smn = mas.alloc;
+	while (i < total) {
+		for (j = 0; j < MAPLE_NODE_SLOTS - 1; j++) {
+			i++;
+			MT_BUG_ON(mt, !smn->slot[j]);
+			if (i == total)
+				break;
 		}
+		smn = smn->slot[0]; // next.
 	}
-	MT_BUG_ON(mt, mas_alloc_count(&mas) != 127);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 127);
 	mas_nomem(&mas, GFP_KERNEL); // Free.
-	MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
 
 	for (i = 1; i < 128; i++) {
 		mas_node_count(&mas, i); // Request
 		mas_nomem(&mas, GFP_KERNEL); // Fill request
-		MT_BUG_ON(mt, mas_alloc_count(&mas) != i); // check request filled
+		MT_BUG_ON(mt, mas_allocated(&mas) != i); // check request filled
 		for (j = i; j > 0; j--) { //Free the requests
-			mn = mas_next_alloc(&mas); // get the next node.
+			mn = mas_pop_node(&mas); // get the next node.
 			MT_BUG_ON(mt, mn == NULL);
 			ma_free_rcu(mn);
 		}
-		MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
+		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
 	}
 
 	for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
 		MA_STATE(mas2, mt, 0, 0);
 		mas_node_count(&mas, i); // Request
 		mas_nomem(&mas, GFP_KERNEL); // Fill request
-		MT_BUG_ON(mt, mas_alloc_count(&mas) != i); // check request filled
+		MT_BUG_ON(mt, mas_allocated(&mas) != i); // check request filled
 		for (j = 1; j <= i; j++) { // Move the allocations to mas2
-			mn = mas_next_alloc(&mas); // get the next node.
+			mn = mas_pop_node(&mas); // get the next node.
 			MT_BUG_ON(mt, mn == NULL);
 			mas_push_node(&mas2, (struct maple_enode *)mn);
-			MT_BUG_ON(mt, mas_alloc_count(&mas2) != j);
+			MT_BUG_ON(mt, mas_allocated(&mas2) != j);
 		}
-		MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
-		MT_BUG_ON(mt, mas_alloc_count(&mas2) != i);
+		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+		MT_BUG_ON(mt, mas_allocated(&mas2) != i);
 
 		for (j = i; j > 0; j--) { //Free the requests
-			MT_BUG_ON(mt, mas_alloc_count(&mas2) != j);
-			mn = mas_next_alloc(&mas2); // get the next node.
+			MT_BUG_ON(mt, mas_allocated(&mas2) != j);
+			mn = mas_pop_node(&mas2); // get the next node.
MT_BUG_ON(mt, mn == NULL); ma_free_rcu(mn); } - MT_BUG_ON(mt, mas_alloc_count(&mas2) != 0); + MT_BUG_ON(mt, mas_allocated(&mas2) != 0); } mtree_unlock(mt);
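The check_new_node() tests above exercise the request/fill/pop cycle that
mas_alloc_nodes() implements: set a request, satisfy it in bulk, then drain
with mas_pop_node(). As a rough userspace sketch of that bulk-fill retry loop
(bulk_alloc() is a stand-in invented for this sketch; it only shares the
patch-relevant contract with kmem_cache_alloc_bulk(), namely returning how
many objects it filled, with 0 meaning failure):

	/* bulk_fill.c - userspace model of the mas_alloc_nodes() fill loop */
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in bulk allocator: fills nodes[0..size-1], returns the count. */
	static size_t bulk_alloc(size_t size, void **nodes)
	{
		size_t i;

		for (i = 0; i < size; i++) {
			nodes[i] = calloc(1, 256);
			if (!nodes[i])
				break;
		}
		return i;
	}

	int main(void)
	{
		void *slots[128];
		size_t requested = 100, success = 0, count;

		while (requested) {
			/* Cap each batch, as the real loop caps at the free
			 * slots of the current maple_alloc node. */
			size_t max = 15;

			count = bulk_alloc(requested < max ? requested : max,
					   &slots[success]);
			if (!count)
				break;	/* -ENOMEM path: record progress, bail. */
			success += count;
			requested -= count;
		}
		printf("allocated %zu nodes\n", success);
		while (success)
			free(slots[--success]);
		return 0;
	}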