From dc8d167242e97f6a7e3059e9bf4c4ae8b518b352 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Sat, 8 Dec 2018 06:14:49 -0500
Subject: [PATCH] maple_tree: Fix a bug in split

A test which sequentially adds entries with indexes 0-15 shows a bug
where we assumed the left hand side of the tree was NULL.  Fix that
assumption, and change how the tree grows so that any NULL range on
the left sinks towards the base of the tree.

Signed-off-by: Matthew Wilcox
---
 lib/maple_tree.c      | 19 +++++++++----------
 lib/test_maple_tree.c | 22 ++++++++++++++++++++++
 2 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 019d55558173..48da513c5ae5 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -380,8 +380,8 @@ static void maple_split_data_64(struct maple_state *ms,
 	struct maple_node_64 *target = &left->map64;
 	int i, j;
 
-	/* Copy 0-4 to left_mn 1-5
-	 * Copy 4-8 to right_mn 0-4
+	/* Copy 0-3 to left_mn 0-3
+	 * Copy 4-7 to right_mn 0-3
 	 */
 	for (i = 0, j = off; j < MAPLE_NODE64_MAX_SLOT; i++, j++) {
 		if (i == MAPLE_NODE64_MAX_SLOT / 2) {
@@ -394,7 +394,6 @@ static void maple_split_data_64(struct maple_state *ms,
 		target->pivot[i] = ms->max;
 		target->slot[i] = full64->slot[j];
 	}
-
 }
 
 /*
@@ -461,13 +460,13 @@ void maple_link_node(struct maple_state *ms,
 	 * The pivot will be the maximum pivot of each.
 	 * pivot 0 will be the lowest pivot of left.
 	 */
-	new_root->map64.pivot[0] = full->map64.pivot[0];
 	/* Left child */
-	new_root->map64.pivot[1] = left64->pivot[l_end];
-	RCU_INIT_POINTER(new_root->map64.slot[1], ma_mk_node(left));
+	new_root->map64.pivot[0] = left64->pivot[l_end];
+	RCU_INIT_POINTER(new_root->map64.slot[0], ma_mk_node(left));
 	/* Right child */
-	new_root->map64.pivot[2] = right->map64.pivot[r_end];
-	RCU_INIT_POINTER(new_root->map64.slot[2], ma_mk_node(right));
+	new_root->map64.pivot[1] = right->map64.pivot[r_end];
+	RCU_INIT_POINTER(new_root->map64.slot[1], ma_mk_node(right));
+
 	/* Swap root of tree */
 	rcu_assign_pointer(ms->tree->root, ma_mk_node(new_root));
 	ms->node = new_root;
@@ -516,7 +515,7 @@
 
 	if (maple_is_root(ms, ms->node)) {
 		alloc_cnt++;
-		offset++;
+//		offset++;
 	}
 
 	/*
@@ -533,7 +532,7 @@
 	lmn = ma_next_alloc(ms);
 	rmn = ma_next_alloc(ms);
 	/* copy 1/2 data from the end to the new node.
 	 */
-	maple_split_data_64(ms, lmn, rmn,offset);
+	maple_split_data_64(ms, lmn, rmn, offset);
 	maple_link_node(ms, lmn, rmn);
 }
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index e4bf5efbc06e..c2cfd60f5097 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -87,6 +87,12 @@ static noinline void check_load(struct maple_tree *mt, unsigned long index,
 	MT_BUG_ON(mt, ret != ptr);
 }
 
+static noinline
+void check_index_load(struct maple_tree *mt, unsigned long index)
+{
+	return check_load(mt, index, xa_mk_value(index & LONG_MAX));
+}
+
 static noinline void check_nomem(struct maple_tree *mt)
 {
 	MAP_STATE(ms, mt, 1, 1);
@@ -123,6 +129,21 @@ static noinline void check_nomem(struct maple_tree *mt)
 	mtree_destroy(mt);
 }
 
+static noinline void check_seq(struct maple_tree *mt)
+{
+	int i, j;
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+
+	for (i = 0; i < 15; i++) {
+		MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+		for (j = 0; j <= i; j++)
+			check_index_load(mt, j);
+		check_load(mt, i + 1, NULL);
+	}
+	mtree_destroy(mt);
+}
+
 static DEFINE_MAPLE_TREE(tree);
 
 static int maple_tree_seed(void)
@@ -243,6 +264,7 @@ static int maple_tree_seed(void)
 	mtree_destroy(&tree);
 
 	check_nomem(&tree);
+	check_seq(&tree);
 
 	printk("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);
 	return (tests_run == tests_passed) ? 0 : -EINVAL;
-- 
2.50.1
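
A quick illustration of the maple_link_node() change, separate from the
patch itself: before this change the new root kept full->map64.pivot[0]
in pivot 0 and linked the left and right children at slots 1 and 2;
afterwards the left child sits in slot 0 (with its maximum index as
pivot 0) and the right child in slot 1.  The stand-alone C sketch below
only models that resulting layout; the toy_node struct and the 0-3 / 4-7
index split are illustrative stand-ins, not the real maple node types in
lib/maple_tree.c.

	#include <stdio.h>

	#define SLOTS 3

	struct toy_node {
		unsigned long pivot[SLOTS];	/* pivot[i]: max index covered by slot[i] */
		void *slot[SLOTS];		/* child pointers */
	};

	int main(void)
	{
		/* pivot[0] here simply stands in for each child's maximum index */
		struct toy_node left  = { .pivot = { 3 } };	/* covers indexes 0-3 */
		struct toy_node right = { .pivot = { 7 } };	/* covers indexes 4-7 */
		struct toy_node root  = { 0 };

		/* Left child now goes in slot 0, keyed by its maximum index. */
		root.pivot[0] = left.pivot[0];
		root.slot[0] = &left;

		/* Right child follows in slot 1. */
		root.pivot[1] = right.pivot[0];
		root.slot[1] = &right;

		printf("root: [0..%lu] -> left, [%lu..%lu] -> right\n",
		       root.pivot[0], root.pivot[0] + 1, root.pivot[1]);
		return 0;
	}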