From e9f16d926bd7b41da3d7f7ad5965357881681b22 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Fri, 13 Dec 2019 15:07:04 -0500
Subject: [PATCH] maple_tree: Continue fixing fallout from mas_data_end changes.

Rewrite mas_push_node and mas_next_alloc and add test cases to the
stress test.

Fix mas_split calculations to use the correct pivots.

Reuse a null at the end of a node to avoid splitting.

Fix mas_next_slot running off the end of the node when at the root.

Fix _mas_awalk stepping over the end of a node when it contains deleted
entries.

Set slot to 0 when descending in __mas_awalk.

Use 4 for the node calculations in mas_replace_tree.

Fix the test case inserting 0 to use xa_mk_value to avoid coalescing
NULL entries.

Add set4, taken from KVM testing, to check_erase2_sets.

Signed-off-by: Liam R. Howlett
---
 include/linux/maple_tree.h |  6 +--
 lib/maple_tree.c           | 90 ++++++++++++++++++++------------------
 lib/test_maple_tree.c      | 82 ++++++++++++++++++++++++++++++++--
 3 files changed, 127 insertions(+), 51 deletions(-)

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 522819689e38..0ac4092f8c3d 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -403,11 +403,7 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
 static unsigned int tests_run;
 static unsigned int tests_passed;
 
-#ifndef MAPLE_TREE_DEBUG
-# ifdef __KERNEL__
-void mt_dump(const struct maple_tree *mt) { }
-# endif
-#undef MT_BUG_ON
+#ifdef CONFIG_DEBUG_MAPLE_TREE
 #define MT_BUG_ON(tree, x) do { \
 	tests_run++; \
 	if (x) { \
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 67b23aa78745..a0b62b76082c 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -846,22 +846,16 @@ static inline struct maple_node *mas_next_alloc(struct ma_state *ms)
 	mn = mas_get_alloc(ms);
 	if (cnt == 1) {
 		ms->alloc = NULL;
-	} else {
-		unsigned char slot = cnt - 2;
-		struct maple_node *zero = mn;
-
-		if (cnt - 1 >= MAPLE_NODE_SLOTS) {
-			slot /= MAPLE_NODE_SLOTS;
-			slot--;
-		}
-		smn = ma_mnode_ptr(mn->slot[slot]);
-		if (cnt - 1 >= MAPLE_NODE_SLOTS) {
-			slot = ((cnt - 1) % MAPLE_NODE_SLOTS);
-			zero = smn;
-			smn = ma_mnode_ptr(smn->slot[slot]);
-		}
-		zero->slot[slot] = NULL;
-		mn = smn;
+	} else if (cnt <= 16) {
+		cnt -= 2;
+		smn = mn->slot[cnt];
+		mn->slot[cnt] = NULL;
+		mn = smn;
+	} else {
+		cnt -= 2;
+		smn = mn->slot[(cnt / 15) - 1];
+		mn = smn->slot[cnt % 15];
+		smn->slot[cnt % 15] = NULL;
 	}
 
 	return mn;
@@ -876,18 +870,16 @@ static inline void mas_push_node(struct ma_state *mas, struct maple_enode *used)
 	cnt = mas_get_alloc_cnt(mas);
 	if (cnt == 0) {
 		mas->alloc = reuse;
-	} else if (cnt <= 16) {
+	} else if (cnt <= 15) {
 		cnt--;
 		node->slot[cnt] = reuse;
 	} else {
 		struct maple_node *smn;
 		cnt--;
-		smn = node->slot[cnt/15];
+		smn = node->slot[(cnt / 15) - 1];
 		smn->slot[cnt % 15] = reuse;
 	}
-
-	if (cnt != mas_get_alloc_cnt(mas) + 1)
-		BUG_ON(0);
 }
 
 static inline void mas_node_node(struct ma_state *ms, gfp_t gfp)
 {
@@ -1769,10 +1761,11 @@ static inline int mas_split(struct ma_state *mas, unsigned char slot,
 	if (p_end - coalesce >= mt_slots[ptype] - 1) {
 		/* Must split the parent */
 		split = mas_split(mas, p_slot, active);
-		if (mas_is_err(mas))
+		if (mas_is_err(mas)) {
 			return 0;
+		}
 		if (split < p_slot)
-			p_slot -= split + 1;
+			p_slot -= split;
 		// Split will return the parent.
 		old_parent = mas->node;
 		mas_set_slot(mas, p_slot);
@@ -1785,9 +1778,8 @@ static inline int mas_split(struct ma_state *mas, unsigned char slot,
 	}
 
 	mas_node_cnt(mas, 3);
-	if (mas_is_err(mas)) {
+	if (mas_is_err(mas))
 		return 0;
-	}
 
 	// Allocations.
 	new_parent = mt_mk_node(mas_next_alloc(mas), ptype);
@@ -1821,11 +1813,11 @@ static inline int mas_split(struct ma_state *mas, unsigned char slot,
 		r_max = p_max;
 	// the node type for the children types.
 	// Node types must be set to copy data into them.
-	mas_split_data(mas, left, right, split, r_max);
+	mas_split_data(mas, left, right, split - 1, r_max);
 	if (right) {
-		pivot = mte_get_pivot(left, split);
+		pivot = mte_get_pivot(left, split - 1);
 		if (!pivot) // dense node
-			pivot = mas->min + split - 1;
+			pivot = mas->min + split - 2;
 	} else {
 		pivot = mt_node_max(left);
 		if (!p_slot || mte_node_type(left) == maple_dense)
@@ -2002,6 +1994,7 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
 	bool append = false;
 	bool split = false;
 	bool null_entry = false;
+	bool reuse_null_end = false;
 	int old_end = mas_data_end(mas, this_type, &last_piv, &coalesce);
 	int new_end = old_end;
 	int ret = 0;
@@ -2067,12 +2060,12 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
 	// Possible pivot before the new range.
 	data_slot = slot;
 	if (mas->index > min) {
-		if (!slot) { // storing to slot 0 means we need the null.
+		if (!slot) // storing to slot 0 means we need the null.
 			ret++;
-		} else if (append) { // Appending will need a null.
+		else if (append) // Appending will need a null.
 			ret++;
-		} else { // a starting null is only needed if there isn't one
-			// there.
+		else { // a starting null is only needed if one isn't
+			// already there.
 			struct maple_enode *slot_val;
 
 			slot_val = mte_get_rcu_slot(mas->node, slot);
@@ -2093,9 +2086,14 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
 			ret++;
 	}
 
+	// If there is a null at the end already, then it can be reused.
+	if (last_piv > mas->last && !mte_get_rcu_slot(mas->node, old_end))
+		reuse_null_end = true;
+
 	/* Check for splitting */
 	new_end += ret - coalesce + 1;
-	if (new_end == slot_cnt && mas->max == mt_node_max(mas->node))
+	if (new_end == slot_cnt && mas->max == mt_node_max(mas->node) &&
+	    !reuse_null_end)
 		split = true; // Need a NULL for the maximum range.
 	else if (new_end > slot_cnt)
 		split = true; // There is not enough space.
@@ -2109,7 +2107,6 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
 		if (mas_is_err(mas))
 			return 0;
 
-		split++;
 		if (split <= slot)
 			slot = slot - split;
 
@@ -2293,7 +2290,7 @@ static inline int mas_safe_slot(struct ma_state *mas, unsigned char *slot,
 static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
 
 static inline void mas_next_slot(struct ma_state *mas, unsigned long max)
-	__must_hold(ms->tree->lock)
+	__must_hold(mas->tree->lock)
 {
 	unsigned char slot;
 
@@ -2301,6 +2298,9 @@ static inline void mas_next_slot(struct ma_state *mas, unsigned long max)
 	while (1) {
 		slot = mte_parent_slot(mas->node);
walk_again:
+		if (mte_is_root(mas->node))
+			goto no_entry;
+
 		mas_encoded_parent(mas);
 		if (mas->max > max)
 			goto no_entry;
@@ -2330,6 +2330,8 @@ walk_down:
 		} else if (slot < mt_slot_count(mas->node)) {
 			slot++;
 			goto walk_down;
+		} else if (mte_is_root(mas->node)) {
+			goto no_entry;
 		} else {
 			goto walk_again;
 		}
@@ -2757,15 +2759,13 @@ static inline void *_mas_next(struct ma_state *mas, unsigned long max,
 		unsigned long *range_start)
 {
 	void *entry = NULL;
+	unsigned long range_max;
 
 	if (mas->node && !mas_searchable(mas))
 		return NULL;
 
-	if (!mas->node || mas_is_start(mas)) { // First run.
-		unsigned long range_max;
-
+	if (!mas->node || mas_is_start(mas)) // First run.
 		entry = mas_range_load(mas, range_start, &range_max);
-	}
 
 	if (entry)
 		return entry;
@@ -3488,8 +3488,11 @@ static inline bool _mas_awalk(struct ma_state *mas, unsigned long size)
 			pivot = _mas_get_safe_pivot(mas, i, type);
 
 			/* End of data in this leaf */
-			if (i && !pivot)
-				break;
+			if (i && !pivot) {
+				if (min > mas->max)
+					break;
+				pivot = mas->max;
+			}
 
 			/* Not within lower bounds */
 			if (mas->index > pivot)
@@ -3630,6 +3633,7 @@ skip_entry:
 				max = pivot;
 				break;
 			}
+			min = pivot + 1;
 		}
 
@@ -3657,6 +3661,7 @@ skip_entry:
 			goto done;
 
 		mas->node = next;
+		mas_set_slot(mas, 0);
 	}
 done:
 	mas_set_slot(mas, i);
@@ -3899,9 +3904,10 @@ static inline int mas_replace_tree(struct ma_state *mas, void *new_entry)
 	node_cnt = 1; // Root node.
 	while (nodes) {
 		node_cnt += nodes;
-		nodes /= 8;
+		nodes /= 4;
 	}
 
+	// FIXME: When splitting & reusing we need an extra node.
 	mas_node_cnt(mas, node_cnt + 1);
 	if (mas_is_err(mas))
 		return 0;
@@ -3948,7 +3954,7 @@ static inline int mas_replace_tree(struct ma_state *mas, void *new_entry)
 		mas_set_slot(mas, mte_parent_slot(mas->node));
 		mas_next_node(mas, mas->index);
 		slot = 0;
-	} while (mas->node != MAS_NONE && mas->min < mas->index);
+	} while (!mas_is_none(mas) && mas->min < mas->index);
 
 	// Insert the new value.
 	new_mas.index = mas->index;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index fd0447753f37..592624439b58 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -280,6 +280,42 @@ static noinline void check_new_node(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 127);
 	mas_nomem(&mas, GFP_KERNEL); // Free.
 	MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
+	for (int i = 1; i < 128; i++) {
+		int j;
+		mas_node_cnt(&mas, i); // Request
+		mas_nomem(&mas, GFP_KERNEL); // Fill request
+		MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // Check request filled
+		for (j = i; j > 0; j--) { // Free the requests
+			mn = mas_next_alloc(&mas); // Get the next node.
+			MT_BUG_ON(mt, mn == NULL);
+			ma_free(mn);
+		}
+		MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
+	}
+	for (int i = 1; i < 128; i++) {
+		int j;
+		MA_STATE(mas2, mt, 0, 0);
+
+		mas_node_cnt(&mas, i); // Request
+		mas_nomem(&mas, GFP_KERNEL); // Fill request
+		MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // Check request filled
+		for (j = 1; j <= i; j++) { // Move the allocations to mas2
+			mn = mas_next_alloc(&mas); // Get the next node.
+			MT_BUG_ON(mt, mn == NULL);
+			mas_push_node(&mas2, (struct maple_enode *)mn);
+			MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != j);
+		}
+		MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
+		MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != i);
+
+		for (j = i; j > 0; j--) { // Free the requests
+			MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != j);
+			mn = mas_next_alloc(&mas2); // Get the next node.
+			MT_BUG_ON(mt, mn == NULL);
+			ma_free(mn);
+		}
+		MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != 0);
+	}
 	mtree_unlock(mt);
 	mtree_destroy(mt);
 
@@ -354,7 +390,7 @@ static noinline void check_mid_split(struct maple_tree *mt)
 	unsigned long huge = 8000UL * 1000 * 1000;
 
 	check_insert(mt, huge, (void *) huge);
-	check_insert(mt, 0, (void *) 0);
+	check_insert(mt, 0, xa_mk_value(0));
 	check_lb_not_empty(mt);
 }
 
@@ -672,6 +708,7 @@ static noinline void check_erase_testset(struct maple_tree *mt)
 	check_load(mt, 5018, NULL);
 	erase_check_load(mt, 3);
 
+	mt_set_non_kernel(1);
 	erase_check_erase(mt, 2); // erase 5017 to check append
 	erase_check_erase(mt, 0); // erase 5015 to check append
 	erase_check_insert(mt, 4); // 1000 < Should not split.
@@ -844,13 +881,15 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
 		switch(set[i]) {
 		case STORE:
 			if (!mtree_test_insert_range(mt, set[i + 1],
-						set[i + 2] - 1, &set))
+						set[i + 2] - 1,
+						xa_mk_value(set[i + 1])))
 				entry_cnt++;
 			else
-				erase_check_store_range(mt, set, i + 1, &set);
+				erase_check_store_range(mt, set, i + 1,
+						xa_mk_value(set[i + 1]));
 			break;
 		case ERASE:
-			check_erase(mt, set[i+1], &set);
+			check_erase(mt, set[i + 1], xa_mk_value(set[i + 1]));
 			entry_cnt--;
 			break;
 		}
@@ -871,6 +910,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
 // These tests were pulled from kvm tests.
 static noinline void check_erase2_sets(struct maple_tree *mt)
 {
+	void *entry;
 	unsigned long start = 0;
 	unsigned long set[] = {
 STORE, 140737488347136, 140737488351231,
@@ -955,9 +995,32 @@ STORE, 47135835840512, 47135835893759,
 ERASE, 47135835840512, 47135835893759,
 STORE, 47135835840512, 47135835885567,
 STORE, 47135835885568, 47135835893759,
+	};
+
+	unsigned long set4[] = {
+STORE, 140737488347136, 140737488351232,
+STORE, 140728251703296, 140737488351232,
+ERASE, 140728251703296, 140737488351232,
+STORE, 140728251703296, 140728251707392,
+STORE, 94668429205504, 94668429377536,
+ERASE, 94668429205504, 94668429377536,
+STORE, 94668429205504, 94668429221888,
+STORE, 94668429221888, 94668429377536,
+ERASE, 94668429221888, 94668429377536,
+STORE, 94668429221888, 94668429324288,
+STORE, 94668429324288, 94668429365248,
+STORE, 94668429365248, 94668429377536,
+STORE, 47646523273216, 47646523445248,
+ERASE, 47646523273216, 47646523445248,
+STORE, 47646523273216, 47646523277312,
+STORE, 47646523277312, 47646523445248,
+ERASE, 47646523277312, 47646523445248,
+STORE, 47646523277312, 47646523400192,
 	};
+
+	mt_set_non_kernel(3);
 	check_erase2_testset(mt, set, ARRAY_SIZE(set));
 	mtree_destroy(mt);
 
 	mtree_init(mt, 0);
@@ -970,7 +1033,18 @@ STORE, 47135835885568, 47135835893759,
 	mt_set_non_kernel(2);
 	mtree_init(mt, 0);
 	check_erase2_testset(mt, set3, ARRAY_SIZE(set3));
+	mtree_destroy(mt);
+	mtree_init(mt, 0);
+	check_erase2_testset(mt, set4, ARRAY_SIZE(set4));
+	MA_STATE(mas, mt, 0, 0);
+	rcu_read_lock();
+	mas_for_each(&mas, entry, ULONG_MAX) {
+		if (mas_retry(&mas, entry))
+			continue;
+	}
+	rcu_read_unlock();
+	mtree_destroy(mt);
 }
 
 static noinline void check_alloc_rev_range(struct maple_tree *mt)
 {
-- 
2.50.1
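
A note on the allocation scheme in the mas_next_alloc()/mas_push_node()
hunks above: spare nodes are kept in a small pool rooted at mas->alloc.
The first node's slots hold the next 15 spares directly; past a count of
16, first-level slots instead point at nodes whose own slots hold further
spares, which is where the (cnt / 15) - 1 and cnt % 15 arithmetic comes
from. The standalone sketch below models only that index arithmetic; the
struct, the slot count, and the use of malloc()/free() are illustrative
assumptions, not the kernel's types.

/*
 * Toy model of the two-level allocation pool.  pool_push() mirrors
 * mas_push_node() and pool_pop() mirrors mas_next_alloc(); the asserts
 * check that the two mappings from count to slot agree at both levels.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOTS 15	/* spare-node pointers per pool node (assumed) */

struct pnode {
	struct pnode *slot[SLOTS + 1];
};

static struct pnode *head;	/* plays the role of mas->alloc */
static int cnt;			/* plays the role of mas_get_alloc_cnt() */

/* Return one spare node to the pool. */
static void pool_push(struct pnode *reuse)
{
	if (cnt == 0)
		head = reuse;
	else if (cnt <= SLOTS)
		head->slot[cnt - 1] = reuse;
	else
		head->slot[((cnt - 1) / SLOTS) - 1]->slot[(cnt - 1) % SLOTS] = reuse;
	cnt++;
}

/* Take one spare node out of the pool. */
static struct pnode *pool_pop(void)
{
	struct pnode *mn = head, *smn;

	if (cnt == 1) {
		head = NULL;
	} else if (cnt <= SLOTS + 1) {
		smn = mn->slot[cnt - 2];
		mn->slot[cnt - 2] = NULL;
		mn = smn;
	} else {
		smn = mn->slot[((cnt - 2) / SLOTS) - 1];
		mn = smn->slot[(cnt - 2) % SLOTS];
		smn->slot[(cnt - 2) % SLOTS] = NULL;
	}
	cnt--;
	return mn;
}

int main(void)
{
	/* Fill the pool past the first level, then drain it. */
	for (int i = 0; i < 40; i++)
		pool_push(calloc(1, sizeof(struct pnode)));
	for (int i = 40; i > 0; i--) {
		assert(cnt == i);
		free(pool_pop());
	}
	assert(cnt == 0 && head == NULL);
	printf("push/pop slot arithmetic agrees\n");
	return 0;
}

The push and pop paths must agree on the count-to-slot mapping, which is
exactly what the off-by-one fixes in this patch repair; the new stress
test in check_new_node() exercises the same round trip in-kernel.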
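
The reuse_null_end change in _mas_add() is a slot-accounting tweak: a
store of [index, last] consumes one slot for the entry, possibly a NULL
before it (when index > min) and a NULL after it (when last < max), and
the node must split when that no longer fits -- unless a NULL already
occupies the end of the node and can be reused. The toy model below
captures only that bookkeeping and deliberately ignores coalescing and
the append path; every name in it is hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct store_plan {
	int slots_needed;	/* slots the store will consume */
	bool split;		/* node must split before storing */
};

static struct store_plan plan_store(unsigned long min, unsigned long max,
				    unsigned long index, unsigned long last,
				    int used, int slot_cnt, bool null_at_end)
{
	struct store_plan p = { .slots_needed = 1, .split = false };

	if (index > min)	/* keep a NULL for the leading gap */
		p.slots_needed++;
	if (last < max && !null_at_end)	/* trailing gap needs a NULL, */
		p.slots_needed++;	/* unless one is already there */
	if (used + p.slots_needed > slot_cnt)
		p.split = true;
	return p;
}

int main(void)
{
	/* 14 of 16 slots used, storing [20, 30] into a [0, 100] leaf: */
	struct store_plan p = plan_store(0, 100, 20, 30, 14, 16, false);
	printf("no trailing NULL: slots=%d split=%d\n", p.slots_needed, p.split);

	/* The same store when the node already ends in a NULL: */
	p = plan_store(0, 100, 20, 30, 14, 16, true);
	printf("reusable NULL:    slots=%d split=%d\n", p.slots_needed, p.split);
	return 0;
}

The second call is the case this patch adds: reusing the existing
trailing NULL keeps the store within the node and avoids the split.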
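
The nodes /= 4 change in mas_replace_tree() makes the preallocation
estimate more conservative: starting from the leaf count, each ancestor
level is assumed to hold only four children, so more nodes are reserved
than with the previous divisor of 8. A minimal sketch of that accounting
loop, with an assumed leaf count as input:

#include <stdio.h>

static unsigned long estimate_nodes(unsigned long leaves)
{
	unsigned long node_cnt = 1;	/* root node */
	unsigned long nodes = leaves;

	while (nodes) {
		node_cnt += nodes;
		nodes /= 4;	/* assumed worst-case fanout per level */
	}
	return node_cnt;
}

int main(void)
{
	/* 64 leaves: 64 + 16 + 4 + 1 interior + 1 root = 86 nodes. */
	printf("%lu\n", estimate_nodes(64));
	return 0;
}

Overestimating here costs only some preallocated nodes, while
underestimating would risk failing partway through the rebuild, which is
presumably why the more conservative divisor was chosen; the FIXME in the
same hunk notes that split-and-reuse still needs one extra node on top.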