From 8384f50d91b33d233d2c98c47409defe93ed8cf0 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Mon, 5 Aug 2019 11:33:27 -0400 Subject: [PATCH] maple_tree: Restore slot in mas_coalesce and add erase testcases Signed-off-by: Liam R. Howlett --- lib/maple_tree.c | 52 ++++++++++++++++++--------- lib/test_maple_tree.c | 83 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 107 insertions(+), 28 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 6cdc42f198e2..dcfb59bec052 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1850,10 +1850,14 @@ static inline int _ma_add(struct ma_state *mas, void *entry, bool overwrite, spans_range = true; if (!overwrite) { - if (spans_range || ma_get_rcu_slot(mas->node, slot)) { - mas_set_err(mas, -EBUSY); - return 0; - } + void *entry; + + if (spans_range) + goto busy; + + entry = ma_get_rcu_slot(mas->node, slot); + if (entry && !xa_is_retry(entry)) + goto busy; } if (last_piv == mas->max) @@ -2007,6 +2011,10 @@ update_gap: ma_update_gap(mas); return ret; + +busy: + mas_set_err(mas, -EBUSY); + return 0; } static inline int _ma_insert(struct ma_state *mas, void *entry, unsigned char slot) @@ -2293,22 +2301,27 @@ retry: * * */ -static inline int mas_coalesce(struct ma_state *mas, unsigned char s_slot) +static inline int mas_coalesce(struct ma_state *mas) { int ret = 0; unsigned char slot_cnt; unsigned long pivot, last = 0; + struct maple_enode *m_en = mas->node; + unsigned char s_slot = ma_get_slot(mas); + unsigned char slot; slot_cnt = mt_slot_count(mas->node); - for (; s_slot < slot_cnt; s_slot++) { - pivot = ma_get_safe_pivot(mas, s_slot); - if (s_slot && !pivot) + for (slot = 0; slot < slot_cnt; slot++) { + pivot = ma_get_safe_pivot(mas, slot); + if (slot && !pivot) break; - if (s_slot && last == pivot) { + if (slot && last == pivot) { mas_partial_copy(mas, mt_slot_count(mas->node)); - if (mas_is_err(mas)) + if (mas_is_err(mas)) { + mas->node = m_en; goto mas_error; + } ret = 1; goto done; @@ 
-2321,6 +2334,7 @@ static inline int mas_coalesce(struct ma_state *mas, unsigned char s_slot) } done: + // Restore slot. if (ret) mt_replace(mas); @@ -2328,6 +2342,7 @@ mas_error: // Regardless of allocation, update gaps. if (mt_is_alloc(mas->tree)) ma_update_gap(mas); + ma_set_slot(mas, s_slot); return ret; } @@ -3072,15 +3087,19 @@ retry: leaf = _mas_walk(mas); slot = ma_get_slot(mas); // Clean this node - if (mas_coalesce(mas, 0)) { + if (mas_coalesce(mas)) { if (mas_is_err(mas)) return 0; goto retry; } if (leaf == true && slot != MAPLE_NODE_SLOTS) { - if (!overwrite && ma_get_rcu_slot(mas->node, slot)) - goto exists; + if (!overwrite) { + void *entry = ma_get_rcu_slot(mas->node, slot); + + if (entry && !xa_is_retry(entry)) + goto exists; + } } /* Do the add */ @@ -3350,7 +3369,7 @@ static inline int ma_erase(struct ma_state *mas) } /* The error on allocation failure can be ignored */ - mas_coalesce(mas, ++slot); + mas_coalesce(mas); return ret; } @@ -3378,8 +3397,9 @@ void *mtree_load(struct maple_tree *mt, unsigned long index) rcu_read_lock(); entry = mas_walk(&mas); rcu_read_unlock(); - if (xa_is_zero(entry)) - entry = NULL; + + if (xa_is_zero(entry) || xa_is_retry(entry)) + return NULL; return entry; } EXPORT_SYMBOL(mtree_load); diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index f2c2b41ba187..0adb50fd0dd2 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -464,6 +464,70 @@ static noinline void check_find(struct maple_tree *mt) } +static noinline void check_erase_testset(struct maple_tree *mt) +{ + unsigned long set[] = {5015, 5014, 5017, 25, 1000, + 1001, 1002, 1003, 1005, 0, + 5003, 5002}; + void *ptr = &set; + void *root_node; + + check_insert(mt, set[0], ptr); // 5015 + check_insert(mt, set[1], mt); // 5014 + check_insert(mt, set[2], ptr); // 5017 + check_insert(mt, set[3], mt); // 25 + check_load(mt, set[0], ptr); + check_load(mt, set[1], mt); + check_load(mt, set[2], ptr); + check_load(mt, set[3], mt); + 
mt_set_non_kernel(1); + check_erase(mt, set[1]); + check_load(mt, set[0], ptr); + check_load(mt, set[1], NULL); + check_load(mt, set[2], ptr); + check_load(mt, set[3], mt); + + check_insert(mt, set[1], mt); + // Check erase and load without an allocation. + check_erase(mt, set[1]); + check_load(mt, set[0], ptr); + check_load(mt, set[1], NULL); + check_load(mt, set[2], ptr); + check_load(mt, set[3], mt); + + // Set the newly erased node. This will produce a different allocated + // node to avoid busy slots. + root_node = mt->ma_root; + check_insert(mt, set[1], mt); + + // The root node should be replaced to avoid writing a busy slot. + MT_BUG_ON(mt, root_node == mt->ma_root); + + check_load(mt, set[0], ptr); + check_load(mt, 5016, NULL); + check_load(mt, set[1], mt); + check_load(mt, 5013, NULL); + check_load(mt, set[2], ptr); + check_load(mt, 5018, NULL); + check_load(mt, set[3], mt); + + check_erase(mt, set[2]); // erase 5017 to check append + check_load(mt, set[0], ptr); + check_load(mt, 5016, NULL); + check_load(mt, set[1], mt); + check_load(mt, 5013, NULL); + check_load(mt, set[2], NULL); + check_load(mt, 5018, NULL); + check_load(mt, set[3], mt); + + root_node = mt->ma_root; + check_insert(mt, set[2], ptr); + // The root node should be replaced to avoid writing a busy slot. 
+ MT_BUG_ON(mt, root_node == mt->ma_root); + + mtree_destroy(mt); +} + static noinline void check_alloc_rev_range(struct maple_tree *mt) { /* Generated by: @@ -831,29 +895,24 @@ static int maple_tree_seed(void) /* Clear out the tree */ mtree_destroy(&tree); + mtree_init(&tree, 0); + check_erase_testset(&tree); mtree_init(&tree, 0); /* - * set[] = {15, 14, 17, 25, 1000, + * set[] = {5015, 5014, 5017, 25, 1000, * 1001, 1002, 1003, 1005, 0, - * 3}; + * 5003, 5002}; */ - check_insert(&tree, set[0], ptr); // 15 - check_insert(&tree, set[1], &tree); // 14 - check_insert(&tree, set[2], ptr); // 17 + check_insert(&tree, set[0], ptr); // 5015 + check_insert(&tree, set[1], &tree); // 5014 + check_insert(&tree, set[2], ptr); // 5017 check_insert(&tree, set[3], &tree); // 25 check_load(&tree, set[0], ptr); check_load(&tree, set[1], &tree); check_load(&tree, set[2], ptr); check_load(&tree, set[3], &tree); - mt_set_non_kernel(1); - check_erase(&tree, set[1]); - check_load(&tree, set[0], ptr); - check_load(&tree, set[1], NULL); - check_load(&tree, set[2], ptr); - check_load(&tree, set[3], &tree); - check_insert(&tree, set[1], &tree); check_insert(&tree, set[4], ptr); // 1000 < Should split. check_load(&tree, set[0], ptr); check_load(&tree, set[1], &tree); -- 2.50.1