From 05541a42fdab86bb2c1e90db013ba2dd57772bc0 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Thu, 17 Sep 2020 14:14:05 -0400
Subject: [PATCH] maple_tree: Restore maple tree node fullness

Restore the node fullness accounting: mas_wr_walk() now counts a node
as full at one empty slot (mt_slots[type] - 1) and as low on data below
mt_min_slots[type], and the rebalance paths reserve three new nodes per
level instead of two.

Rework mas_is_span_wr() to take the node type so leaf and internal
nodes are handled separately, and rework mas_node_walk() to use a fast
path for dense nodes in place of the switch statement.

Add a duplicate NULL check: mt_validate_nulls() walks the leaves and
flags sequential NULL entries, and mt_validate() now calls it.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 lib/maple_tree.c      | 144 +++++++++++++++++++++++++-----------------
 lib/test_maple_tree.c |  25 ++++----
 2 files changed, 98 insertions(+), 71 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 150b2baff5c1..d3342ec10e3a 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -53,7 +53,7 @@ unsigned char mt_min_slots[] = {
 	[maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
 	[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
 #if defined(NODE256)
-	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 2,
+	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
 #else
 	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2),
 #endif
@@ -1030,7 +1030,7 @@ static inline unsigned char mas_data_end(const struct ma_state *mas)
 
 	while (offset < mt_slots[type]) {
 		piv = _mas_safe_pivot(mas, pivots, offset, type);
-		if (piv >= mas->max)
+		if ((!piv && offset) || piv >= mas->max)
 			break;
 		offset++;
 	}
@@ -2309,7 +2309,7 @@ static inline int mas_rebalance(struct ma_state *mas,
 
 	trace_mas_rebalance(mas);
 
-	mas_node_cnt(mas, 1 + empty_cnt * 2);
+	mas_node_cnt(mas, 1 + empty_cnt * 3);
 	if (mas_is_err(mas))
 		return 0;
 
@@ -2596,18 +2596,13 @@ static inline int mas_commit_b_node(struct ma_state *mas,
 {
 	struct maple_enode *new_node;
 
-	if ((b_node->b_end < mt_min_slot_cnt(mas->node)) &&
-	    (!mte_is_root(mas->node)) &&
-	    (mas_mt_height(mas) > 1))
+	if ((b_node->b_end < mt_min_slots[b_node->type]) &&
+	    (!mte_is_root(mas->node)) && (mas_mt_height(mas) > 1))
 		return mas_rebalance(mas, b_node);
 
-	if (b_node->b_end >= mt_slot_count(mas->node)) {
-		if (mas_is_err(mas))
-			return 0;
-
+	if (b_node->b_end >= mt_slots[b_node->type])
 		return mas_split(mas, b_node);
-	}
 
 	if (mas_reuse_node(mas, b_node, end))
 		goto reused_node;
@@ -2701,7 +2696,7 @@ exists:
  *
  */
 static inline bool mas_is_span_wr(struct ma_state *mas, unsigned long piv,
-				  void *entry)
+				  enum maple_type type, void *entry)
 {
 	if (mas->span_enode) // Already a spanning store.
 		return true;
@@ -2712,24 +2707,20 @@ static inline bool mas_is_span_wr(struct ma_state *mas, unsigned long piv,
 	/* Writing ULONG_MAX is not a spanning write regardless of the value
 	 * being written as long as the range fits in the node.
 	 */
-	if (mas->last == ULONG_MAX &&
-	    mas->min <= mas->index &&
-	    mas->last == mas->max)
+	if ((mas->last == ULONG_MAX) && (mas->last == mas->max))
 		return false;
 
-	if (!mte_is_leaf(mas->node)) { // Internal nodes.
-		if (mas->last < piv) // Fits in the slot.
-			return false;
-
-		if (entry && piv == mas->last) // Writes a value to the end of the child node
-			return false;
-	} else { // A leaf node.
+	if (ma_is_leaf(type)) {
+		trace_mas_is_span_wr(mas, piv, entry);
 		if (mas->last < mas->max) // Fits in the node, but may span slots.
 			return false;
-
-		if (entry && mas->last == mas->max) // Writes to the end of the node but not null.
+		if (entry && (mas->last == mas->max)) // Writes to the end of the node but not null.
+			return false;
+	} else {
+		if (entry && piv == mas->last)
 			return false;
 	}
 
+	trace_mas_is_span_wr(mas, piv, entry);
 	mas->span_enode = mas->node;
 	return true;
@@ -2738,40 +2729,38 @@ static inline bool mas_node_walk(struct ma_state *mas, enum maple_type type,
 		unsigned long *range_min, unsigned long *range_max)
 {
-	unsigned char i;
-	unsigned long min = mas->min, pivot = 0;
 	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+	unsigned long min = mas->min, pivot = 0;
+	unsigned char i;
 	bool ret = true;
 
+	if (ma_is_dense(type)) {
+		// Linear node.
+		// What if mas->index != mas->last?
+		pivot = min = mas->index;
+		i = mas->index = mas->min;
+		goto dense;
+	}
 
-	switch (type) {
-	default:
-		for (i = mas_offset(mas); i < mt_slots[type]; i++) {
-			pivot = _mas_safe_pivot(mas, pivots, i, type);
+	for (i = mas_offset(mas); i < mt_slots[type]; i++) {
+		pivot = _mas_safe_pivot(mas, pivots, i, type);
 
-			if (!pivot && i) {
-				if (mas->max < mas->index) {
-					i = MAPLE_NODE_SLOTS;
-					ret = false;
-				}
-				pivot = mas->max;
-				break;
+		if (!pivot && i) {
+			if (mas->max < mas->index) {
+				i = MAPLE_NODE_SLOTS;
+				ret = false;
 			}
-
-			if (mas->index <= pivot)
-				break;
-			min = pivot + 1;
+			pivot = mas->max;
+			break;
 		}
-		break;
-	case maple_dense:
-		// Linear node.
-		// What if mas->index != mas->last?
-		pivot = min = mas->index;
-		i = mas->index = mas->min;
-		break;
+
+		if (mas->index <= pivot)
+			break;
+
+		min = pivot + 1;
 	}
 
+dense:
 	if (ret) {
 		*range_min = min;
 		*range_max = pivot;
@@ -2809,7 +2798,6 @@ static inline bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
 			       unsigned long *range_max, void *entry)
 {
 	enum maple_type type;
-	struct maple_enode *next;
 	unsigned char end;
 	bool ret = false;
 
@@ -2825,28 +2813,24 @@ static inline bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
 		if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
 			return false;
 
-		if (mas_is_span_wr(mas, *range_max, entry))
+		if (mas_is_span_wr(mas, *range_max, type, entry))
 			return ma_is_leaf(type);
 
 		if (ma_is_leaf(type))
 			return true;
 
 		end = mas_data_end(mas);
-		if (end <= mt_min_slots[type])
+		if (end < mt_min_slots[type])
 			mas_cnt_empty(mas);
-		else if (end >= mt_slots[type] - 2)
+		else if (end >= mt_slots[type] - 1)
 			mas_cnt_full(mas);
 		else
 			mas->full_cnt = 0;
 
-		next = mas_get_slot(mas, mas_offset(mas));
 		// Traverse.
 		mas->max = *range_max;
 		mas->min = *range_min;
-		if (unlikely(!next))
-			return false;
-
-		mas->node = next;
+		mas->node = mas_get_slot(mas, mas_offset(mas));
 		mas_set_offset(mas, 0);
 	}
 	return ret;
@@ -2968,7 +2952,7 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
-	/* Node rebalancing may occur due to this store, so there may be two new
-	 * entries per level plus a new root.
+	/* Node rebalancing may occur due to this store, so there may be three
+	 * new entries per level plus a new root.
 	 */
-	node_cnt += 1 + mas_mt_height(mas) * 2;
+	node_cnt += 1 + mas_mt_height(mas) * 3;
 	mas_node_cnt(mas, node_cnt);
 	if (mas_is_err(mas))
 		return 0;
 
@@ -4597,6 +4581,7 @@ void *mtree_load(struct maple_tree *mt, unsigned long index)
 	void *entry;
 
 	MA_STATE(mas, mt, index, index);
+	trace_mtree_load(&mas);
 	rcu_read_lock();
 	entry = mas_load(&mas);
 	rcu_read_unlock();
@@ -4612,6 +4597,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
 {
 	MA_STATE(mas, mt, index, last);
 
+	trace_mtree_store_range(&mas, entry);
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;
 
@@ -4742,6 +4728,7 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index)
 	void *entry = NULL;
 
 	MA_STATE(mas, mt, index, index);
+	trace_mtree_erase(&mas);
 
 	mtree_lock(mt);
 	entry = mas_erase(&mas);
@@ -5167,6 +5154,44 @@ static inline void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
 	mas->min = p_min;
 }
 
+void mt_validate_nulls(struct maple_tree *mt)
+{
+	void *entry, *last = (void *)1;
+	unsigned char end, offset = 0;
+	void **slots;
+	MA_STATE(mas, mt, 0, 0);
+
+	mas_start(&mas);
+	if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
+		return;
+
+	while (!mte_is_leaf(mas.node))
+		mas_descend(&mas);
+
+	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
+	end = mas_data_end(&mas);
+	do {
+		entry = slots[offset];
+		if (!last && !entry)
+			printk("Sequential nulls end at %p[%u]\n",
+			       mas_mn(&mas), offset);
+
+		MT_BUG_ON(mt, !last && !entry);
+		last = entry;
+		if (offset == end) {
+			mas_next_node(&mas, ULONG_MAX);
+			if (mas_is_none(&mas))
+				return;
+			offset = 0;
+			end = mas_data_end(&mas);
+			slots = ma_slots(mte_to_node(mas.node),
+					 mte_node_type(mas.node));
+		} else {
+			offset++;
+		}
+	} while (!mas_is_none(&mas));
+}
+
 /*
  * validate a maple tree by checking:
  * 1. The limits (pivots are within mas->min to mas->max)
@@ -5180,7 +5205,7 @@ void mt_validate(struct maple_tree *mt)
 	rcu_read_lock();
 	mas_start(&mas);
 	mas_first_entry(&mas, ULONG_MAX);
-	while (mas.node != MAS_NONE) {
+	while (!mas_is_none(&mas)) {
 		if (!mte_is_root(mas.node)) {
 			end = mas_data_end(&mas);
 			if ((end < mt_min_slot_cnt(mas.node)) &&
@@ -5198,6 +5223,7 @@
 		mas_validate_gaps(&mas);
 		mas_dfs_postorder(&mas, ULONG_MAX);
 	}
+	mt_validate_nulls(mt);
 	rcu_read_unlock();
 }
 
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 74d56e8c4100..41005574cd86 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -32101,33 +32101,35 @@ static noinline void check_ranges(struct maple_tree *mt)
 	MT_BUG_ON(mt, !mt_height(mt));
 	mtree_destroy(mt);
 
-	// FIXME: After changing mas_wr_walk to actually count full nodes, this
-	// testcase and the one below should cause a height increment.
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
-//	for (i = 0; i <= 1590; i++) {
-	for (i = 0; i <= 1420; i++) {
+	for (i = 0; i <= 1590; i++) {
+//	for (i = 0; i <= 1420; i++) {
 		val = i*10;
 		val2 = (i+1)*10;
 		check_store_range(mt, val, val2, xa_mk_value(val), 0);
 		MT_BUG_ON(mt, mt_height(mt) >= 4);
 	}
 	// Cause a 3 child split all the way up the tree.
-	check_store_range(mt, 9755, 9759, NULL, 0);
-	MT_BUG_ON(mt, mt_height(mt) >= 4);
-	//MT_BUG_ON(mt, mt_height(mt) < 4);
+	check_store_range(mt, 15519, 15519, NULL, 0);
+
+//	check_store_range(mt, 9755, 9759, NULL, 0);
+//	MT_BUG_ON(mt, mt_height(mt) >= 4);
+	MT_BUG_ON(mt, mt_height(mt) < 4);
 	mtree_destroy(mt);
 
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
-	for (i = 0; i <= 1420; i++) {
+	for (i = 0; i <= 1590; i++) {
+//	for (i = 0; i <= 1420; i++) {
 		val = i*10;
 		val2 = (i+1)*10;
 		check_store_range(mt, val, val2, xa_mk_value(val), 0);
 		MT_BUG_ON(mt, mt_height(mt) >= 4);
 	}
 	// triple split across multiple levels.
-	check_store_range(mt, 9595, 9599, NULL, 0);
-	MT_BUG_ON(mt, mt_height(mt) >= 4);
-	//MT_BUG_ON(mt, mt_height(mt) < 4);
+//	check_store_range(mt, 9595, 9599, NULL, 0);
+	check_store_range(mt, 9115, 9121, NULL, 0);
+//	MT_BUG_ON(mt, mt_height(mt) >= 4);
+	MT_BUG_ON(mt, mt_height(mt) != 4);
 }
 
 static noinline void check_next_entry(struct maple_tree *mt)
@@ -32623,7 +32625,6 @@ static int maple_tree_seed(void)
 	check_rev_seq(&tree, 1000, true);
 	mtree_destroy(&tree);
 
-	check_lower_bound_split(&tree);
 	check_upper_bound_split(&tree);
 	check_mid_split(&tree);
 
-- 
2.50.1
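
P.S. A minimal sketch of the invariant the new mt_validate_nulls()
checks, for reference. This is illustrative only and not part of the
patch; it uses the public mtree_*() API in the style of
lib/test_maple_tree.c:

	/* Erasing two adjacent ranges must coalesce the freed ranges
	 * with any neighbouring NULL, so no leaf ends up holding NULL
	 * entries in consecutive slots.
	 */
	DEFINE_MTREE(tree);

	mtree_store_range(&tree, 10, 19, xa_mk_value(10), GFP_KERNEL);
	mtree_store_range(&tree, 20, 29, xa_mk_value(20), GFP_KERNEL);
	mtree_erase(&tree, 10);	/* 10-19 becomes NULL, next to NULL 0-9 */
	mtree_erase(&tree, 20);	/* 20-29 becomes NULL, next to NULL 10-19 */
	mt_validate(&tree);	/* now also runs mt_validate_nulls() */
	mtree_destroy(&tree);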