From 1f9484862dbaad4cc465a8dd5c5660bfab87f384 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Sun, 2 Aug 2020 20:30:01 -0400 Subject: [PATCH] maple_tree: Fix overflow on store and 3 way splits Signed-off-by: Liam R. Howlett --- lib/maple_tree.c | 61 ++++++++++++++++++++++++++--------- lib/test_maple_tree.c | 75 ------------------------------------------- 2 files changed, 45 insertions(+), 91 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index d12ad07ea628..fb28b36cd79e 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -312,7 +312,7 @@ static inline enum maple_type mas_parent_enum(struct ma_state *mas, return mte_parent_range_enum(parent); } -/* Private +/* * mte_set_parent() - Set the parent node and encode the slot. * * Type is encoded in the node->parent @@ -653,7 +653,7 @@ void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree) mte_free(mn); } -/* Private +/* * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes. * * Add the %dead_enode to the linked list in %mat. @@ -1521,7 +1521,7 @@ static inline int mab_calc_split(struct maple_big_node *b_node, */ while (((b_node->pivot[split] - b_node->min) < slot_cnt - 1) && (split < slot_cnt - 1) && - (b_node->b_end - split > mt_min_slots[b_node->type] - 1)) + (b_node->b_end - split > mt_min_slots[b_node->type])) split++; } @@ -1766,7 +1766,7 @@ static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) return ma_enode_ptr(MAS_NONE); } -/* Private +/* * mast_topiary() - Add the portions of the tree to the removal list; either to * be freed or discarded (destroy walk). * @@ -2011,7 +2011,7 @@ static inline void mab_set_b_end(struct maple_big_node *b_node, b_node->pivot[b_node->b_end++] = mas->max; } -/* Private +/* * mas_set_split_parent() - combine_then_separate helper function. Sets the parent * of @mas->node to either @left or @right, depending on @slot and @split * @@ -2075,14 +2075,28 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast, unsigned char mid_split) { unsigned char slot; + struct maple_enode *l = left; + struct maple_enode *r = right; if (mas_is_none(mast->l)) return; + if (middle) + r = middle; + slot = mas_get_slot(mast->l); - mas_set_split_parent(mast->l, left, right, &slot, split); - mas_set_split_parent(mast->m, left, right, &slot, split); - mas_set_split_parent(mast->r, left, right, &slot, split); + + mte_mid_split_check(&l, &r, right, slot, &split, mid_split); + // Set left parent. + mas_set_split_parent(mast->l, l, r, &slot, split); + + mte_mid_split_check(&l, &r, right, slot, &split, mid_split); + // Set middle parent. + mas_set_split_parent(mast->m, l, r, &slot, split); + + mte_mid_split_check(&l, &r, right, slot, &split, mid_split); + // Set right parent. + mas_set_split_parent(mast->r, l, r, &slot, split); } static inline void mas_wmb_replace(struct ma_state *mas, struct ma_topiary *free, @@ -2208,10 +2222,9 @@ static inline void mast_setup_bnode_for_split(struct maple_subtree_state *mast) mast->bn->type = mte_node_type(mast->orig_l->node); } -/* Private +/* * - * mas_combine_then_separate() - Combine two nodes and then separate them as - * needed. + * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. * @mas: The starting maple state * @mast: The maple_subtree_state, keeps track of 4 maple states. * @count: The estimated count of iterations needed. 
@@ -2228,7 +2241,7 @@ static inline void mast_setup_bnode_for_split(struct maple_subtree_state *mast) * * Returns the number of elements in b_node during the last loop. */ -static inline int mas_combine_then_separate(struct ma_state *mas, +static inline int mas_spanning_rebalance(struct ma_state *mas, struct maple_subtree_state *mast, unsigned char count) { @@ -2249,6 +2262,12 @@ static inline int mas_combine_then_separate(struct ma_state *mas, mast->destroy = &destroy; l_mas.node = r_mas.node = m_mas.node = MAS_NONE; + if (mast->orig_l->depth != mast->orig_r->depth) { + printk("Error: %d != %d\n", + mast->orig_l->depth, mast->orig_r->depth); + } + MT_BUG_ON(mas->tree, mast->orig_l->depth != mast->orig_r->depth); + mast->orig_l->depth = 0; mast_topiary(mast); while (count--) { mast_setup_bnode_for_split(mast); @@ -2322,7 +2341,7 @@ new_root: return mast->bn->b_end; } -/* Private +/* * mas_rebalance() - Rebalance a given node. * * @mas: The maple state @@ -2369,7 +2388,7 @@ static inline int mas_rebalance(struct ma_state *mas, l_mas.index = l_mas.last = l_mas.min; } - return mas_combine_then_separate(mas, &mast, empty_cnt); + return mas_spanning_rebalance(mas, &mast, empty_cnt); } static inline bool mas_split_final_node(struct maple_subtree_state *mast, @@ -2882,6 +2901,17 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry) mast.orig_l = &l_mas; mast.orig_r = &r_mas; + // FIXME: Is this needed? +#if 0 + mas_dup_state(&l_mas, mas); + mas->last = mas->index; + mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max); + mas->index = mas->last = l_mas.last; + mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max); + mas_dup_state(mas, &l_mas); +#endif + + // Set up right side. mas_dup_state(&r_mas, mas); r_mas.depth = mas->depth; @@ -2921,8 +2951,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry) count = mas_cnt_positive(mas) + mas->tree->ma_height - mas->depth + 1; // Combine l_mas and r_mas and split them up evenly again. 
- l_mas.depth = 0; - return mas_combine_then_separate(mas, &mast, count); + return mas_spanning_rebalance(mas, &mast, count); } static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite) diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index c8cb13d29f52..389da4399f2b 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -31736,81 +31736,6 @@ static noinline void check_ranges(struct maple_tree *mt) check_store_range(mt, 1792, 1799, NULL, 0); mt_validate(mt); mtree_destroy(mt); - - mtree_init(mt, MAPLE_ALLOC_RANGE); - for (i = 0; i <= 200; i++) { - val = i*10; - val2 = (i+1)*10; - check_store_range(mt, val, val2, xa_mk_value(val), 0); - } - - for (i = 10; i <= 19; i++) { - val = i*100 + 5; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val++; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val += 10; - check_store_range(mt, val, val, xa_mk_value(val), 0); - - val += 39; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val++; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val += 10; - check_store_range(mt, val, val, xa_mk_value(val), 0); - } - - for (i = 13; i <= 14; i++) { - val = i*100 + 75; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val++; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val += 9; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val++; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val += 9; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val++; - check_store_range(mt, val, val, xa_mk_value(val), 0); - } - for (i = 0; i <= 3; i++) { - val = 1200 + i*10; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val++; - check_store_range(mt, val, val, xa_mk_value(val), 0); - } - for (i = 0; i <= 5; i++) { - val = 1270 + i; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val+= 10; - check_store_range(mt, val, val, xa_mk_value(val), 0); - } - for (i = 0; i <= 6; i++) { - val = 1400 + i*10; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val++; - check_store_range(mt, val, val, xa_mk_value(val), 0); - } - - for (i = 0; i <= 5; i++) { - val = 1370 + i; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val+= 10; - check_store_range(mt, val, val, xa_mk_value(val), 0); - val+= 10; - check_store_range(mt, val, val, xa_mk_value(val), 0); - } - for (i = 0; i < ARRAY_SIZE(overflow); i++) { - val = overflow[i]; - check_store_range(mt, val, val, xa_mk_value(val), 0); - } - - - // Cause a 3 child split all the way up the tree. - check_store_range(mt, 1349, 1350, NULL, 0); - mt_validate(mt); - mtree_destroy(mt); } static noinline void check_next_entry(struct maple_tree *mt) -- 2.50.1
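Two notes on the maple_tree.c hunks above.

The mab_calc_split() change tightens the third loop bound from mt_min_slots[b_node->type] - 1 to mt_min_slots[b_node->type], which appears to keep the split point from sliding so far right that the right-hand node drops below its minimum occupancy. Ignoring the other two loop conditions, with b_end = 10 and a minimum of 3 the old bound allowed split to advance to 8, leaving 10 - 8 = 2 entries on the right, while the new bound stops it at 7, leaving 3.

The mast_set_split_parents() change handles the 3-way split case: rather than always passing the (left, right) pair to mas_set_split_parent(), it keeps a sliding (l, r) candidate window that starts at (left, middle) when a middle node exists and is advanced by mte_mid_split_check() to (middle, right) once the working slot moves past the first split point, so all three subtree states get wired to the correct new parent. What follows is a minimal, self-contained userspace sketch of that sliding-window idea; the demo_* names, the threshold test and the sample numbers are illustrative stand-ins, not the kernel's mte_mid_split_check() implementation.

#include <stdio.h>

/* Toy stand-in for a maple node; only the name matters for the demo. */
struct demo_node {
	const char *name;
};

/*
 * Advance the (l, r) parent-candidate window once 'slot' has moved past
 * the current split point: (left, middle) becomes (middle, right) and the
 * split point becomes the second (mid) split.  Hypothetical logic, not
 * the kernel helper.
 */
static void demo_mid_split_check(struct demo_node **l, struct demo_node **r,
				 struct demo_node *right, unsigned int slot,
				 unsigned int *split, unsigned int mid_split)
{
	if (*r == right)	/* already on the last window */
		return;
	if (slot <= *split)	/* still inside the first new node */
		return;
	*l = *r;		/* slide: window is now (middle, right) */
	*r = right;
	*split = mid_split;
}

int main(void)
{
	struct demo_node left = { "left" }, middle = { "middle" }, right = { "right" };
	/* A middle node exists, so the window starts as (left, middle). */
	struct demo_node *l = &left, *r = &middle;
	unsigned int split = 4, mid_split = 9;	/* example split points */

	for (unsigned int slot = 0; slot <= 12; slot++) {
		demo_mid_split_check(&l, &r, &right, slot, &split, mid_split);
		/* A child at 'slot' is parented to l if slot <= split, else to r. */
		printf("slot %2u -> parent %s\n", slot,
		       slot <= split ? l->name : r->name);
	}
	return 0;
}

Running it prints slots 0-4 parented to "left", 5-9 to "middle" and 10-12 to "right", which mirrors the distribution the three mas_set_split_parent() calls achieve in the patch: each call still makes only a two-way choice, and the window advance is what lets the same helper serve the 3-way case.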