From: Liam R. Howlett
Date: Fri, 24 Sep 2021 18:22:10 +0000 (-0400)
Subject: maple_tree: Clean up mas_store_b_node
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=refs%2Fheads%2Fmaple_next_rcu;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Clean up mas_store_b_node

Pass the end offset and the existing slot contents into
mas_store_b_node() so that the function does not have to look up the
slot contents or rescan the pivots for the end of the overwritten
range.  Detect the append case early and copy any trailing data with a
single mas_mab_cp() call instead of the pivot-walking loop.  Also drop
unnecessary braces around single-statement blocks in
mas_commit_b_node().

Signed-off-by: Liam R. Howlett
---

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 6083438538b2..897c21f39eac 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1966,29 +1966,31 @@ static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
  */
 static inline unsigned char mas_store_b_node(struct ma_state *mas,
 				struct maple_big_node *b_node,
-				void *entry, unsigned char end)
+				void *entry, unsigned char end,
+				unsigned char offset_end,
+				void *content)
 {
 	unsigned char slot = mas->offset;
-	void *contents;
-	unsigned char b_end = 0;
+	unsigned char b_end;
 	/* Possible underflow of piv will wrap back to 0 before use. */
-	unsigned long piv = mas->min - 1;
-	struct maple_node *node = mas_mn(mas);
-	enum maple_type mt = mte_node_type(mas->node);
-	unsigned long *pivots = ma_pivots(node, mt);
+	unsigned long piv;
+	struct maple_node *node;
+	enum maple_type mt;
+	unsigned long *pivots;
 
+	b_end = 0;
 	if (slot) {
 		/* Copy start data up to insert. */
 		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
 		b_end = b_node->b_end;
 		piv = b_node->pivot[b_end - 1];
-	}
+	} else
+		piv = mas->min - 1;
 
-	contents = mas_slot_locked(mas, ma_slots(node, mt), slot);
 	if (piv + 1 < mas->index) {
 		/* Handle range starting after old range */
-		b_node->slot[b_end] = contents;
-		if (!contents)
+		b_node->slot[b_end] = content;
+		if (!content)
 			b_node->gap[b_end] = mas->index - 1 - piv;
 		b_node->pivot[b_end++] = mas->index - 1;
 	}
@@ -1998,39 +2000,38 @@ static inline unsigned char mas_store_b_node(struct ma_state *mas,
 	b_node->slot[b_end] = entry;
 	b_node->pivot[b_end] = mas->last;
 
+	/* Appended. */
+	if (mas->last >= mas->max)
+		return b_end;
+
 	/* Handle new range ending before old range ends */
-	piv = _mas_safe_pivot(mas, pivots, slot, mt);
+	node = mas_mn(mas);
+	mt = mte_node_type(mas->node);
+	pivots = ma_pivots(node, mt);
+	piv = mas_logical_pivot(mas, pivots, offset_end, mt);
 	if (piv > mas->last) {
 		if (piv == ULONG_MAX)
 			mas_bulk_rebalance(mas, b_node->b_end, mt);
 
-		b_node->slot[++b_end] = contents;
-		if (!contents)
+		if (offset_end != slot) {
+			void **slots = ma_slots(node, mt);
+
+			content = slots[offset_end];
+		}
+
+		b_node->slot[++b_end] = content;
+		if (!content)
 			b_node->gap[b_end] = piv - mas->last + 1;
 		b_node->pivot[b_end] = piv;
-	} else
-		piv = mas->last;
+	}
 
-	/* Appended. */
-	if (piv >= mas->max)
+	slot = offset_end + 1;
+	if (slot > end)
 		return b_end;
 
-	do {
-		/* Handle range overwrites */
-		piv = _mas_safe_pivot(mas, pivots, ++slot, mt);
-	} while ((piv <= mas->last) && (slot <= end));
-
-	if (piv > mas->last) {
-		/* Copy end data to the end of the node. */
-		if (slot > end) {
-			b_node->slot[++b_end] = NULL;
-			b_node->pivot[b_end] = piv;
-		} else {
-			mas_mab_cp(mas, slot, end + 1, b_node, ++b_end);
-			b_end = b_node->b_end - 1;
-		}
-	}
-
+	/* Copy end data to the end of the node. */
+	mas_mab_cp(mas, slot, end + 1, b_node, ++b_end);
+	b_end = b_node->b_end - 1;
 	return b_end;
 }
 
@@ -3303,13 +3304,11 @@ static inline int mas_commit_b_node(struct ma_state *mas,
 	}
 
-	if (b_end >= mt_slots[b_type]) {
+	if (b_end >= mt_slots[b_type])
 		return mas_split(mas, b_node);
-	}
 
-	if (mas_reuse_node(mas, b_node, end)) {
+	if (mas_reuse_node(mas, b_node, end))
 		goto reuse_node;
-	}
 
 	mas_node_count(mas, 1);
 	if (mas_is_err(mas))
@@ -3726,6 +3725,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
 	struct maple_subtree_state mast;
 	unsigned char height = mas_mt_height(mas);
 	int node_count = 1 + height * 3;
+	void *content;
 
 	/* Holds new left and right sub-tree */
 	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
@@ -3765,6 +3765,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
 	l_mas.depth = mas->depth;
 	l_mas.offset = 0;
 	__mas_walk(&l_mas, &range_min, &range_max);
+	content = mas_get_slot(&l_mas, l_mas.offset);
 
 	if (!entry) {
 		mas_extend_null(&l_mas, &r_mas);
@@ -3776,7 +3777,8 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
 	/* Copy l_mas and store the value in b_node. */
 	b_node.b_end = mas_store_b_node(&l_mas, &b_node, entry,
-				mas_data_end(&l_mas));
+				mas_data_end(&l_mas), mas_data_end(&l_mas),
+				content);
 
 	/* Copy r_mas into b_node. */
 	mas_mab_cp(&r_mas, r_mas.offset, mt_slot_count(r_mas.node),
 		   &b_node, b_node.b_end + 1);
@@ -4139,7 +4141,7 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 
 slow_path:
 	b_node.type = mte_node_type(mas->node);
-	b_node.b_end = mas_store_b_node(mas, &b_node, entry, end);
+	b_node.b_end = mas_store_b_node(mas, &b_node, entry, end, offset_end, content);
 	b_node.min = mas->min;
 
 	zero = MAPLE_BIG_NODE_SLOTS - b_node.b_end - 1;
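
For illustration only, here is a simplified user-space sketch of the
copy-below / insert / copy-above pattern that mas_store_b_node()
implements when flattening a node plus a new range into a big node.
This is not kernel code: toy_node, toy_store, and the flat pivot/slot
arrays are hypothetical stand-ins for the maple node and maple_big_node
structures, and slot locking, gap tracking, and bulk rebalancing are
left out.  It assumes [index, last] lies within the node's range.

#include <stdio.h>

#define SLOTS 16

/* Simplified stand-in for a maple node: range i is [prev pivot + 1, pivot[i]]. */
struct toy_node {
	unsigned long min;		/* lowest index covered by the node */
	unsigned long pivot[SLOTS];	/* inclusive end of each range */
	const char *slot[SLOTS];	/* entry stored for each range */
	unsigned char end;		/* offset of the last used slot */
};

/*
 * Overwrite [index, last] with @entry, writing the merged layout to @dst.
 * Mirrors the phases of mas_store_b_node(): copy the data below the
 * insert point, write the new range (keeping the head and/or tail of a
 * partially overwritten old range), then copy the trailing data.
 */
static void toy_store(const struct toy_node *src, struct toy_node *dst,
		      unsigned long index, unsigned long last,
		      const char *entry)
{
	/* Possible underflow wraps and is corrected before use, as in the kernel. */
	unsigned long piv = src->min - 1;
	unsigned char s = 0, d = 0;

	dst->min = src->min;

	/* Copy ranges that end before the new range starts. */
	while (s <= src->end && src->pivot[s] < index) {
		piv = dst->pivot[d] = src->pivot[s];
		dst->slot[d++] = src->slot[s++];
	}

	/* Handle the new range starting after the old range started. */
	if (piv + 1 < index) {
		dst->pivot[d] = index - 1;
		dst->slot[d++] = src->slot[s];
	}

	/* Store the new entry. */
	dst->pivot[d] = last;
	dst->slot[d++] = entry;

	/* Skip old ranges fully covered by [index, last]. */
	while (s <= src->end && src->pivot[s] <= last)
		s++;

	/* Tail of a split range and untouched trailing ranges keep their entries. */
	while (s <= src->end) {
		dst->pivot[d] = src->pivot[s];
		dst->slot[d++] = src->slot[s++];
	}

	dst->end = d - 1;
}

int main(void)
{
	struct toy_node src = {
		.min = 1,
		.pivot = { 10, 20, 30 },
		.slot = { "A", "B", "C" },
		.end = 2,
	};
	struct toy_node dst = { 0 };
	unsigned char i;

	/* Overwrite [15, 25]: splits "B" on the left and "C" on the right. */
	toy_store(&src, &dst, 15, 25, "NEW");

	for (i = 0; i <= dst.end; i++)
		printf("[%lu, %lu] -> %s\n",
		       i ? dst.pivot[i - 1] + 1 : dst.min,
		       dst.pivot[i], dst.slot[i]);

	return 0;
}

Storing "NEW" over [15, 25] in a node holding [1,10]->A, [11,20]->B,
[21,30]->C prints [1,10]->A, [11,14]->B, [15,25]->NEW, [26,30]->C: the
head of "B" and the tail of "C" keep their old entries, analogous to
the split handling done around mas->index and mas_logical_pivot() in
the patch above.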