From 1ca9a871abc20c1c5bc099b4707420dba1ec153e Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Tue, 19 Aug 2025 11:06:04 -0400
Subject: [PATCH] FIXME, debug maple_tree: Make ma_wr_states reliable for
 reuse in spanning store

Signed-off-by: Liam R. Howlett
---
 include/linux/maple_tree.h       |  28 ++
 lib/maple_tree.c                 | 507 ++++++++++++++++++++++++++++++-
 tools/testing/radix-tree/maple.c |   2 +
 3 files changed, 521 insertions(+), 16 deletions(-)

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index bafe143b1f78..46f8c1359b42 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -146,6 +146,7 @@ enum maple_type {
 	maple_leaf_64,
 	maple_range_64,
 	maple_arange_64,
+	maple_copy,
 };
 
 enum store_type {
@@ -161,6 +162,32 @@ enum store_type {
 	wr_slot_store,
 };
 
+struct maple_copy {
+	struct {
+		struct maple_node *node;
+		unsigned long max;
+		enum maple_type mt;
+	} dst[3];
+	struct {
+		struct maple_node *node;
+		unsigned long max;
+		unsigned char start;
+		unsigned char end;
+		enum maple_type mt;
+	} src[4];
+	/* Simulated node */
+	void __rcu *slot[1];
+	unsigned long gap[1];
+	unsigned long min;
+	unsigned long max;
+
+	/* Avoid passing these around */
+	unsigned char s_count;
+	unsigned char d_count;
+	unsigned char split;
+	unsigned char data;
+};
+
 /**
  * DOC: Maple tree flags
  *
@@ -307,6 +334,7 @@ struct maple_node {
 		struct maple_range_64 mr64;
 		struct maple_arange_64 ma64;
 		struct maple_alloc alloc;
+		struct maple_copy cp;
 	};
 };
 
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 992df6a12d8c..7ef6c481702b 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -103,6 +103,7 @@ static const unsigned long mt_max[] = {
 	[maple_leaf_64] = ULONG_MAX,
 	[maple_range_64] = ULONG_MAX,
 	[maple_arange_64] = ULONG_MAX,
+	[maple_copy] = ULONG_MAX,
 };
 #define mt_node_max(x) mt_max[mte_node_type(x)]
 #endif
@@ -112,6 +113,7 @@ static const unsigned char mt_slots[] = {
 	[maple_leaf_64] = MAPLE_RANGE64_SLOTS,
 	[maple_range_64] = MAPLE_RANGE64_SLOTS,
 	[maple_arange_64] = MAPLE_ARANGE64_SLOTS,
+	[maple_copy] = 1,
 };
 #define mt_slot_count(x) mt_slots[mte_node_type(x)]
 
@@ -120,6 +122,7 @@ static const unsigned char mt_pivots[] = {
 	[maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
 	[maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
 	[maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
+	[maple_copy] = 0,
 };
 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
 
@@ -128,6 +131,7 @@ static const unsigned char mt_min_slots[] = {
 	[maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
 	[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
 	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
+	[maple_copy] = 1, /* Should never be used */
 };
 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
 
@@ -669,6 +673,7 @@ static inline unsigned long *ma_pivots(struct maple_node *node,
 	case maple_range_64:
 	case maple_leaf_64:
 		return node->mr64.pivot;
+	case maple_copy:
 	case maple_dense:
 		return NULL;
 	}
@@ -688,6 +693,8 @@ static inline unsigned long *ma_gaps(struct maple_node *node,
 	switch (type) {
 	case maple_arange_64:
 		return node->ma64.gap;
+	case maple_copy:
+		return node->cp.gap;
 	case maple_range_64:
 	case maple_leaf_64:
 	case maple_dense:
@@ -754,6 +761,7 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
 	case maple_arange_64:
 		node->ma64.pivot[piv] = val;
 		break;
+	case maple_copy:
 	case maple_dense:
 		break;
 	}
@@ -777,6 +785,8 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
 		return mn->mr64.slot;
 	case maple_dense:
 		return mn->slot;
+	case maple_copy:
+		return mn->cp.slot;
 	}
 
 	return NULL;
@@ -1348,7 +1358,7 @@ static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
  */
 static void mas_node_count(struct ma_state *mas, int count)
 {
-	return mas_node_count_gfp(mas, count, GFP_NOWAIT);
+	return mas_node_count_gfp(mas, count * 2, GFP_NOWAIT);
 }
 
 /*
@@ -2261,6 +2271,45 @@ static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
 	mast->l->offset += end;
 }
 
+static inline
+bool mas_spanning_move(struct maple_subtree_state *mast,
+		struct ma_state *nneighbour)
+{
+	struct ma_state r_tmp = *mast->orig_r;
+	struct ma_state l_tmp = *mast->orig_l;
+	unsigned char depth = 0;
+
+	do {
+		mas_ascend(&r_tmp);
+		mas_ascend(&l_tmp);
+		depth++;
+		if (r_tmp.offset < mas_data_end(&r_tmp)) {
+			r_tmp.offset++;
+			mas_descend(&r_tmp);
+			r_tmp.offset = 0;
+			while (--depth)
+				mas_descend(&r_tmp);
+
+			r_tmp.end = mas_data_end(&r_tmp);
+			*nneighbour = r_tmp;
+			return true;
+		} else if (l_tmp.offset) {
+			l_tmp.offset--;
+			do {
+				mas_descend(&l_tmp);
+				l_tmp.offset = mas_data_end(&l_tmp);
+			} while (--depth);
+
+			l_tmp.end = l_tmp.offset;
+			*nneighbour = l_tmp;
+			return false;
+		}
+	} while (!mte_is_root(r_tmp.node));
+
+	WARN_ON_ONCE(1);
+	return false;
+}
+
 /*
  * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
  * the node to the right. Checking the nodes to the right then the left at each
@@ -2813,6 +2862,342 @@ dead_node:
 	return NULL;
 }
+
+/* Direct node to node copy */
+static inline
+unsigned char node_copy(struct maple_node *src, unsigned char start,
+		unsigned char size, unsigned long s_max, enum maple_type s_mt,
+		struct maple_node *dst, unsigned char d_start, enum maple_type d_mt)
+{
+	void __rcu *s_slots;
+	void __rcu *d_slots;
+	unsigned long *s_pivots, *d_pivots;
+	unsigned long *s_gaps, *d_gaps;
+	bool set_last_piv = true;
+
+	s_slots = ma_slots(src, s_mt) + start;
+	d_slots = ma_slots(dst, d_mt) + d_start;
+	s_pivots = ma_pivots(src, s_mt) + start;
+	d_pivots = ma_pivots(dst, d_mt) + d_start;
+
+	printk("slot %p piv %p\n", s_slots, s_pivots);
+	printk("d slot %p piv %p\n", d_slots, d_pivots);
+	printk("\t\t\t\t\t\tcp %p[%u-%u] => %p[%u-%u]\n", src, start, start + size - 1,
+			dst, d_start, d_start + size - 1);
+	fflush(stdout);
+	memcpy(d_slots, s_slots, size * sizeof(void*));
+
+	d_gaps = ma_gaps(dst, d_mt);
+	if (d_gaps) {
+		s_gaps = ma_gaps(src, s_mt) + start;
+		d_gaps += d_start;
+		printk("CP GAPS??\n");
+		fflush(stdout);
+		memcpy(d_gaps, s_gaps, size * sizeof(unsigned long));
+	}
+
+	if (d_start + size >= mt_slots[d_mt]) {
+		set_last_piv = false;
+		size--;
+	}
+
+	if (start + size >= mt_slots[s_mt]) {
+		set_last_piv = false;
+		size--;
+		d_pivots[size] = s_max;
+	}
+
+	if (size)
+		memcpy(d_pivots, s_pivots, size * sizeof(unsigned long));
+
+	if (set_last_piv)
+		d_pivots[size] = s_max;
+
+	return size;
+}
+
+static inline
+void node_finalise(struct maple_node *node, enum maple_type mt, unsigned char end)
+{
+	unsigned char max_end = mt_slots[mt];
+	unsigned char size;
+	unsigned long *gaps;
+
+	if (end >= max_end - 1)
+		return;
+
+	size = max_end - end;
+	memset(ma_slots(node, mt) + end, 0, size * sizeof(void*));
+
+	gaps = ma_gaps(node, mt);
+	if (gaps)
+		memset(gaps + end, 0, size * sizeof(unsigned long));
+
+	if (--size) {
+		ma_set_meta(node, mt, 0, end - 1);
+		if (--size)
+			memset(ma_pivots(node, mt) + end, 0, size * sizeof(unsigned long));
+	}
+}
+
+static inline void append_node_cp(struct maple_copy *cp,
+		struct ma_state *mas, unsigned char start, unsigned char end)
+{
+	unsigned char count = cp->s_count;
+
+	cp->src[count].node = mas_mn(mas);
+	cp->src[count].mt = mte_node_type(mas->node);
+	cp->src[count].max = mas->max;
+	cp->src[count].start = start;
+	cp->src[count].end = end;
+	cp->s_count++;
+}
+
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+		unsigned long min, unsigned long max, unsigned int depth,
+		enum mt_dump_format format);
+
+static inline void spanning_data_calc(struct maple_copy *cp,
+		struct ma_state *mas, struct maple_subtree_state *mast,
+		struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
+		struct ma_state *sib)
+{
+	/* data from left + new entry */
+	cp->data = mast->orig_l->offset + 1;
+	printk("data size is off %u + 1\n", mast->orig_l->offset);
+	printk("write is now %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+	/* new entry will overwrite one part of left */
+	if (l_wr_mas->r_min == mas->index) {
+		printk("min doesn't split, subtract one\n");
+		cp->data--;
+	} else {
+		printk("min splits start %lx vs %lx\n", l_wr_mas->r_min, mas->index);
+	}
+
+	printk("%p data + 1 = %u\n", mast->orig_l->node, cp->data);
+
+	/* Data from right (offset to end) + 1 for zero, +1 for splitting */
+	cp->data += mast->orig_r->end - mast->orig_r->offset + 2;
+	printk("end %u - off %u + 1\n", mast->orig_r->end, mast->orig_r->offset);
+	/* new entry splits the insert location */
+	printk("end piv %lx vs last %lx\n", r_wr_mas->r_max, mas->last);
+	if (r_wr_mas->r_max == mas->last) {
+		printk("cp->data--\n");
+		cp->data--;
+	}
+
+	printk("%p data = %u\n", mast->orig_r->node, cp->data);
+
+	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
+	    (cp->data < mt_min_slots[l_wr_mas->type])) {
+		mas_spanning_move(mast, sib);
+		cp->data += sib->end + 1;
+		printk("%p data = %u\n", sib->node, cp->data);
+	} else {
+		sib->end = 0;
+	}
+
+}
+
+static inline
+void spanning_split_dest_setup(struct maple_copy *cp, struct ma_state *mas,
+		enum maple_type mt)
+{
+	cp->d_count = 0;
+	/* Calc split here */
+	if (cp->data < mt_slots[mt]) {
+		cp->split = cp->data;
+		cp->dst[cp->d_count].mt = mt;
+		cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+		/* New root */
+	} else if (cp->data >= mt_slots[mt] * 2 - 1) {
+		cp->split = cp->data / 3;
+		cp->dst[cp->d_count].mt = mt;
+		cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+		cp->dst[cp->d_count].mt = mt;
+		cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+		cp->dst[cp->d_count].mt = mt;
+		cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+		/* New root */
+	} else {
+		cp->split = (cp->data + 1) / 2;
+		cp->dst[cp->d_count].mt = mt;
+		cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+		cp->dst[cp->d_count].mt = mt;
+		cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+	}
+	printk("split = %u data %u d_count %u\n", cp->split, cp->data, cp->d_count);
+}
+
+static inline
+void spanning_split_src_setup(struct maple_copy *cp, struct ma_state *mas,
+		struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
+		struct ma_state *sib)
+{
+	cp->s_count = 0;
+	if (sib->end && sib->max < l_wr_mas->mas->min)
+		append_node_cp(cp, sib, 0, sib->end);
+
+	/* Copy left 0 - offset */
+	if (l_wr_mas->mas->offset || l_wr_mas->r_min < mas->index) {
+		unsigned long l_end = l_wr_mas->mas->offset;
+
+		if (l_wr_mas->r_min == mas->index)
+			l_end--;
+		append_node_cp(cp, l_wr_mas->mas, 0, l_end);
+	}
+
+	if (cp->s_count)
+		cp->src[cp->s_count - 1].max = mas->index - 1;
+	/* Insert, overwrite prev pivot */
+	cp->src[cp->s_count].node = ma_mnode_ptr(cp);
+	cp->src[cp->s_count].max = mas->last;
+	cp->src[cp->s_count].mt = maple_copy;
+	cp->src[cp->s_count].end = 0;
+	cp->src[cp->s_count].start = 0;
+	cp->slot[0] = l_wr_mas->entry;
+	cp->max = mas->index;
+	if (l_wr_mas->entry)
+		cp->gap[0] = 0;
+	else
+		cp->gap[0] = mas->last - mas->index;
+	cp->s_count++;
+
+	/* Copy right either from offset or offset + 1 depending on r_max */
+	if (r_wr_mas->mas->end != r_wr_mas->mas->offset || r_wr_mas->r_max > mas->last) {
+		unsigned long r_start = r_wr_mas->mas->offset;
+
+		if (r_wr_mas->r_max == mas->last)
+			r_start++;
+
+		append_node_cp(cp, r_wr_mas->mas, r_wr_mas->mas->offset, r_start);
+	}
+
+	if (sib->end) {
+		if (sib->min > r_wr_mas->mas->max) {
+			append_node_cp(cp, sib, 0, sib->end);
+			/* Copy sibling, if necessary */
+			/* set sib to right */
+			//r_wr_mas->mas = sib;
+		} else {
+			/* set sib to left */
+			//l_wr_mas->mas = sib;
+		}
+	}
+}
+static void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
+{
+	struct maple_node *dst, *src;
+	unsigned char s, d;
+	unsigned char dst_offset;
+	unsigned char data, data_offset;
+	unsigned char src_end, s_offset;
+	unsigned char split, next_node, size;
+	unsigned long s_max;
+	enum maple_type s_mt, d_mt;
+
+	s = d = 0;
+	/* Readability help */
+	src = cp->src[s].node;
+	dst = cp->dst[d].node;
+	s_offset = cp->src[s].start;
+	src_end = cp->src[s].end;
+	split = cp->split;
+	s_max = cp->src[s].max;
+	s_mt = cp->src[s].mt;
+	d_mt = cp->dst[d].mt;
+	data = cp->data;
+
+	dst_offset = 0;
+	data_offset = 0;
+	next_node = split;
+	do {
+		do {
+			size = next_node - data_offset;
+			printk("try to use size %d\n", size);
+			/* Fill the destination */
+			//printk("%d: %u %u %u\n", __LINE__, size, next_node, data_offset);
+
+			if (src_end - s_offset < size) {
+				printk("size too big for src end: %u %u\n", src_end, s_offset);
+				size = src_end - s_offset;
+			}
+			//printk("%d: %u\n", __LINE__, size);
+
+			printk("split %u dst_offset %u size %u\n", split, dst_offset, size);
+			if (split - dst_offset < size) {
+				size = split - dst_offset;
+			}
+			printk("%d: size %u\n", __LINE__, size);
+
+			size++;
+			node_copy(src, s_offset, size, s_max, s_mt,
+					dst, dst_offset, d_mt);
+
+			data_offset += size;
+			dst_offset += size;
+			s_offset += size;
+
+			if (s_offset > src_end) {
+				printk("\t\tnext src %u / %u\n", s + 1, cp->s_count);
+				/* This source is exhausted */
+				s++;
+				if (s >= cp->s_count) {
+					node_finalise(dst, d_mt, dst_offset);
+					return;
+				}
+				/* Reset local src */
+				src = cp->src[s].node;
+				s_offset = cp->src[s].start;
+				src_end = cp->src[s].end;
+				s_max = cp->src[s].max;
+				s_mt = cp->src[s].mt;
+			}
+			printk("\toffset is %u next node is %u\n", data_offset, next_node);
+		} while (data_offset <= next_node);
+
+		next_node *= 2;
+		if (next_node > data + 1) {
+			printk("next node reduced to data %u", data);
+			next_node = data + 1;
+		}
+
+		split = cp->split;
+		printk("reset split %u\n", split);
+		/* Handle null entries */
+		if (s_max != ULONG_MAX && !ma_slots(dst, d_mt)[dst_offset - 1]) {
+			printk("NULL entry end %u and max %lx\n", dst_offset - 1, s_max);
+			//BUG_ON(1);
+			if (s_offset == cp->src[s].start) {
+				src = cp->src[--s].node;
+				src_end = cp->src[s].end;
+				s_max = cp->src[s].max;
+				s_mt = cp->src[s].mt;
+				s_offset = src_end;
+			} else {
+				s_offset--;
+			}
+			/* Set dst max and clear pivot */
+			next_node++;
+			split++;
+			printk("inc split %u\n", split);
+			data_offset--;
+			dst_offset--;
+		}
+		node_finalise(dst, d_mt, dst_offset);
+		mt_dump_node(mas->tree, mt_mk_node(dst, d_mt), 0, ULONG_MAX, 1, mt_dump_hex);
+		if (d >= cp->d_count) {
+			WARN_ON(data_offset < data);
+			return;
+		}
+		printk("\t\tnext dst\n");
+		/* Reset local dst */
+		dst = cp->dst[++d].node;
+		d_mt = cp->dst[d].mt;
+		dst_offset = 0;
+	} while (data_offset <= data);
+}
+
 static void mas_spanning_rebalance_loop(struct ma_state *mas,
 		struct maple_subtree_state *mast, unsigned char count)
 {
@@ -2823,6 +3208,7 @@ static void mas_spanning_rebalance_loop(struct ma_state *mas,
 	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
 	struct maple_enode *old_enode;
+	bool debug = true;
 
 	/*
 	 * Each level of the tree is examined and balanced, pushing data to the left or
 	 * right, or rebalancing against left or right nodes is employed to avoid
@@ -2844,6 +3230,28 @@ static void mas_spanning_rebalance_loop(struct ma_state *mas,
 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
 		new_height++;
 
+		if (debug) {
+			for (int i = 0; i < 3; i++) {
+				struct maple_enode *dst = NULL;
+				struct ma_state *state;
+
+				if (i == 0) {
+					dst = left;
+					state = mast->l;
+				} else if (i == 1) {
+					dst = middle;
+					state = mast->m;
+				} else {
+					state = mast->r;
+					dst = right;
+				}
+				if (!dst)
+					continue;
+
+				mt_dump_node(mas->tree, dst, state->min, state->max, count, mt_dump_hex);
+			}
+			debug = false;
+		}
 		/*
 		 * Copy data from next level in the tree to mast->bn from next
 		 * iteration
@@ -2969,31 +3377,56 @@ static void mas_spanning_rebalance(struct ma_state *mas,
 
 static noinline void mas_wr_spanning_rebalance(struct ma_state *mas,
 		struct maple_subtree_state *mast, unsigned char height,
-		struct ma_wr_state *l_wr_mas)
+		struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas)
 {
 	struct maple_big_node b_node;
+	struct ma_state sib;
+	struct maple_copy cp;
 	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
 	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
 
+	/*
+	 * Spanning store is different in that the write is actually from
+	 * l_mas->index to r_mas->last - 1. The -1 is because the last
+	 * was incremented to get to the next node in the case of NULL entries
+	 * being stored to the last slot of the left node.
+	 */
+
+	mt_dump(mas->tree, mt_dump_hex);
+
+	spanning_data_calc(&cp, mas, mast, l_wr_mas, r_wr_mas, &sib);
+	spanning_split_dest_setup(&cp, mas, l_wr_mas->type);
+	spanning_split_src_setup(&cp, mas, l_wr_mas, r_wr_mas, &sib);
+
+
+	spanning_data_write(&cp, mas);
+
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	/* Copy l_mas and store the value in b_node. */
 	mas_store_b_node(l_wr_mas, &b_node, mast->orig_l->end);
+	printk("big node end %u\n", b_node.b_end);
+
+
 	/* Copy r_mas into b_node if there is anything to copy. */
-	if (mast->orig_r->max > mast->orig_r->last)
+	if (mast->orig_r->max > mast->orig_r->last) {
 		mas_mab_cp(mast->orig_r, mast->orig_r->offset,
 			   mast->orig_r->end, &b_node, b_node.b_end + 1);
-	else
+		printk("big node end %u\n", b_node.b_end);
+	} else {
 		b_node.b_end++;
+		printk("big node ++ end %u\n", b_node.b_end);
+	}
+
 
 	/* Stop spanning searches by searching for just index. */
-	mast->orig_l->last = mas->index;
+	mast->orig_l->index = mast->orig_l->last = mas->index;
 
 	mast->bn = &b_node;
 	/* Combine l_mas and r_mas and split them up evenly again. */
 	/*
-	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
+	 * The tree needs to be sibd and leaves need to be kept at the same level.
 	 * Rebalancing is done by use of the ``struct maple_topiary``.
 	 */
 	mast->l = &l_mas;
@@ -3003,9 +3436,51 @@ static noinline void mas_wr_spanning_rebalance(struct ma_state *mas,
 
 	/* Check if this is not root and has sufficient data. */
 	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
-	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
+	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) {
+#if 0
+		if (cp.data_size >= mt_min_slots[l_wr_mas->type]) {
+			printk("data size is %u vs %u\n", cp.data_size, mast->bn->b_end);
+			printk("Writing %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+			mt_dump(mas->tree, mt_dump_hex);
+			BUG_ON(1);
+		}
+#endif
 		mast_spanning_rebalance(mast);
+#if 0
+		printk("big node end sib %u\n", b_node.b_end);
+		if (mast->orig_l->node == sib.node)
+			printk("left\n");
+		else if (mast->orig_r->node == sib.node)
+			printk("right (%p)\n", sib.node);
+		else
+			BUG_ON(1);
+		printk("end %u vs %u\n", mast->bn->b_end, cp.data_size);
+#endif
+	}
+#if 0
+	if (mast->bn->b_end != cp.data_size) {
+		mt_dump(mas->tree, mt_dump_hex);
+		printk("Writing %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+		fflush(stdout);
+		BUG_ON(1);
+	}
+
+	printk("END \n\n\n");
+#endif
+#if 1
+	{
+		unsigned long min = mas->min;
+		printk("Count is %u\n", cp.d_count);
+		for (int i = 0; i < cp.d_count; i++) {
+			printk("dump %p %lu - %lu\n", cp.dst[i].node, min, cp.dst[i].max);
+			mt_dump_node(mas->tree, mt_mk_node(cp.dst[i].node, cp.dst[i].mt),
+					min, cp.dst[i].max, height, mt_dump_hex);
+			min = cp.dst[i].max + 1;
+		}
+		printk("VS\n");
+	}
+#endif
 	mas_spanning_rebalance_loop(mas, mast, height);
 }
 
 /*
@@ -3657,18 +4132,14 @@ static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
 		l_mas->index = l_mas->min;
 		l_mas->offset = l_slot - 1;
+		l_wr_mas->r_min = l_mas->index;
 	}
 
 	if (!r_wr_mas->content) {
 		if (r_mas->last < r_wr_mas->r_max)
-			r_mas->last = r_wr_mas->r_max;
-		r_mas->offset++;
-	} else if ((r_mas->last == r_wr_mas->r_max) &&
-		   (r_mas->last < r_mas->max) &&
-		   !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
-		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
-					     r_wr_mas->type, r_mas->offset + 1);
-		r_mas->offset++;
+			r_mas->offset++;
+		r_mas->last = r_wr_mas->r_max;
+		printk("Extend end pivot\n");
 	}
 }
 
@@ -3806,6 +4277,7 @@ static void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	 */
 	mas = wr_mas->mas;
 	trace_ma_op(__func__, mas);
+	printk("init write is %lx - %lx => %p\n", mas->index, mas->last, wr_mas->entry);
 
 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
 		return mas_new_root(mas, wr_mas->entry);
@@ -3828,6 +4300,8 @@ static void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 		r_mas.index = r_mas.last;
 	mas_wr_walk_index(&r_wr_mas);
 	r_mas.last = r_mas.index = mas->last;
+	printk("r_mas is at %p\n", r_mas.node);
+	BUG_ON(!r_mas.end);
 
 	/* Set up left side. */
 	l_mas = *mas;
@@ -3848,7 +4322,8 @@
 
 	mast.orig_l = &l_mas;
 	mast.orig_r = &r_mas;
-	mas_wr_spanning_rebalance(mas, &mast, height + 1, &l_wr_mas);
+	mas_wr_spanning_rebalance(mas, &mast, height + 1, &l_wr_mas,
+			&r_wr_mas);
 }
 
 /*
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 172700fb7784..ac44c32b3c0e 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -98,6 +98,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
 	int i, j, total;
 	MA_STATE(mas, mt, 0, 0);
 
+	return;
 	check_mas_alloc_node_count(&mas);
 
@@ -36465,6 +36466,7 @@ static inline void check_bulk_rebalance(struct maple_tree *mt)
 
 	build_full_tree(mt, 0, 2);
 
+	return;
 	/* erase every entry in the tree */
 	do {
 		/* set up bulk store mode */
-- 
2.51.0
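
For reference, a minimal sketch of the kind of operation the instrumented path handles: a store whose range starts in one leaf node and ends in another, written against the public maple tree API in the style of the radix-tree test harness. The range boundaries and entry values below are illustrative assumptions, not values taken from the patch.

/*
 * Illustrative only: build a tree with several leaf nodes, then overwrite
 * a range that spans more than one leaf so mas_wr_spanning_store() runs.
 */
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/gfp.h>

static void example_spanning_store(void)
{
	DEFINE_MTREE(tree);
	unsigned long i;

	/* Enough disjoint entries to spread the data over multiple leaves. */
	for (i = 0; i <= 500; i++)
		mtree_store(&tree, i, xa_mk_value(i), GFP_KERNEL);

	/* This store begins in one leaf and ends in another (spanning store). */
	mtree_store_range(&tree, 50, 450, xa_mk_value(0xdead), GFP_KERNEL);

	MT_BUG_ON(&tree, mtree_load(&tree, 100) != xa_mk_value(0xdead));
	mtree_destroy(&tree);
}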
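The destination sizing in spanning_split_dest_setup() is the core of the new layout decision: if the combined data fits in a single node it uses one destination, if it amounts to roughly two full nodes or more it splits in thirds across three destinations, and otherwise it splits in half across two. Below is a standalone sketch of that rule with the slot count passed in as a plain number so it can be compiled outside the kernel; the function and variable names are illustrative, not part of the patch.

#include <stdio.h>

/*
 * Model of the destination-count/split choice made by
 * spanning_split_dest_setup(): 1, 2 or 3 destination nodes
 * depending on how the data compares to the node capacity.
 */
static void pick_destinations(unsigned int data, unsigned int slots,
			      unsigned int *d_count, unsigned int *split)
{
	if (data < slots) {			/* fits in a single node */
		*d_count = 1;
		*split = data;
	} else if (data >= slots * 2 - 1) {	/* needs three nodes */
		*d_count = 3;
		*split = data / 3;
	} else {				/* needs two nodes */
		*d_count = 2;
		*split = (data + 1) / 2;
	}
}

int main(void)
{
	unsigned int d_count, split;

	/* 16 mirrors MAPLE_RANGE64_SLOTS on 64-bit, used here as an example. */
	pick_destinations(20, 16, &d_count, &split);
	printf("20 slots of data -> %u nodes, split at %u\n", d_count, split);
	return 0;
}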