From 681b98be25cbe1ab04d56333e6d7c84f634d8e58 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Mon, 17 Aug 2020 15:26:15 -0400
Subject: [PATCH] maple_tree: Fix append to work with allocation tree.

Append wasn't happening with the allocation tree due to the ZERO entry
being added.  Stop adding the zero entry; it should be in the mm code
anyway.

Also add gap support when appending, and fix the parent pointer
assignment on push_left operations.

Signed-off-by: Liam R. Howlett
---
 lib/maple_tree.c                 |  94 ++++++++++++++++++----
 lib/test_maple_tree.c            | 134 ++++++++++++++++++++++++++++++-
 tools/testing/radix-tree/linux.c |   2 +
 tools/testing/radix-tree/test.h  |   1 +
 4 files changed, 214 insertions(+), 17 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 9616b02e24a8..6895076e6212 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1681,6 +1681,8 @@ static inline unsigned char mas_store_b_node(struct ma_state *mas,
 
 	if (mas->index && piv < mas->index - 1) {
 		b_node->slot[b_end] = contents;
+		if (!contents)
+			b_node->gap[b_end] = mas->index - 1 - piv;
 		b_node->pivot[b_end++] = mas->index - 1;
 	}
 
@@ -1690,6 +1692,8 @@ static inline unsigned char mas_store_b_node(struct ma_state *mas,
 		piv = mas_get_safe_pivot(mas, slot);
 		if (piv > mas->last) {
 			b_node->slot[++b_end] = contents;
+			if (!contents)
+				b_node->gap[b_end] = piv - mas->last + 1;
 			b_node->pivot[b_end] = piv;
 		} else
 			piv = mas->last;
@@ -2531,6 +2535,8 @@ static inline bool mas_push_left(struct ma_state *mas,
 	mas_dup_state(mast->l, &tmp_mas);
 	split = mab_no_null_split(mast->bn, mt_slots[mast->bn->type] - 1,
 				  mt_slots[mast->bn->type]);
+	// Update the parent slot for the split calculation.
+	mas_set_slot(mast->orig_l, mas_get_slot(mast->orig_l) + end + 1);
 	mast_split_data(mast, mas, split);
 	mast_split_fill_bnode(mast, mas, 2);
 	mas_split_final_node(mast, mas, mas->full_cnt + 1);
@@ -2626,8 +2632,6 @@ static inline int mas_commit_b_node(struct ma_state *mas,
 
 }
 
-// FIXME: When task_size / page_size -1 works, check to ensure we are
-// not inserting above this.
 static inline int mas_root_expand(struct ma_state *mas, void *entry)
 {
 	void *contents = rcu_dereference_protected(mas->tree->ma_root,
@@ -2654,7 +2658,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
 
 	mte_set_rcu_slot(mas->node, slot, entry);
 	mte_set_pivot(mas->node, slot++, mas->last);
-
+#if 0
 	if (mt_is_alloc(mas->tree)) {
 		//FIXME: arch_get_mmap_end? mas->index = TASK_SIZE / PAGE_SIZE - 1;
 		unsigned long mmap_end = 0x2000000000000UL;
@@ -2664,7 +2668,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
 		mte_set_rcu_slot(mas->node, slot, XA_ZERO_ENTRY);
 		mte_set_pivot(mas->node, slot++, mt_max[type]);
 	}
-
+#endif
 	/* swap the new root into the tree */
 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
 	mas->tree->ma_height = 1;
@@ -3034,6 +3038,28 @@ static inline int mas_spanning_store(struct ma_state *mas, void *entry)
 	return mas_spanning_rebalance(mas, &mast, count);
 }
 
+static inline bool mas_can_append(struct ma_state *mas,
+				  struct maple_big_node *bn,
+				  unsigned char slot_cnt,
+				  unsigned char end)
+{
+	if (bn->b_end >= slot_cnt)
+		return false; // no room.
+
+	if (bn->b_end <= end)
+		return false; // truncated data.
+
+	if (!mas->last)
+		return false; // zero cannot be an append.
+
+	if (bn->pivot[bn->b_end] == mas->last)
+		return true; // last entry is the insert.
+
+	if ((bn->pivot[bn->b_end - 1] == mas->last) && !bn->slot[bn->b_end])
+		return true; // inserting second last entry and last is NULL.
+
+	return false;
+}
 static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)
 {
 	unsigned long r_max, r_min;
@@ -3090,12 +3116,14 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 
 	// Check if this is an append operation.
 	end = mas_data_end(mas);
-	if ((b_node.b_end < slot_cnt) && ((slot > end) || !end)) {
-		if (r_min < mas->index)
-			mte_set_pivot(mas->node, slot++, mas->index - 1);
-
-		mte_set_rcu_slot(mas->node, slot, entry);
-		mte_set_pivot(mas->node, slot, mas->last);
+	if (mas_can_append(mas, &b_node, slot_cnt, end)) {
+		slot = b_node.b_end;
+		do {
+			mte_set_rcu_slot(mas->node, slot, b_node.slot[slot]);
+			if (slot < slot_cnt - 1)
+				mte_set_pivot(mas->node, slot, b_node.pivot[slot]);
+		} while (slot && slot-- >= end);
+		mas_update_gap(mas);
 		goto append;
 	}
 
@@ -3105,11 +3133,7 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 	else if (b_node.b_end < mt_min_slot_cnt(mas->node))
 		mas_cnt_empty(mas);
 
-	mas_commit_b_node(mas, &b_node);
-	if (mas_is_err(mas))
-		ret = 3;
-
-	if (mas_is_err(mas))
+	if (!mas_commit_b_node(mas, &b_node))
 		return NULL;
 
 	if (ret > 2)
@@ -4264,7 +4288,47 @@ retry:
 
 	return entry;
 }
 
+static inline void mas_dfs_preorder(struct ma_state *mas)
+{
+
+	struct maple_enode *prev;
+	unsigned char slot = 0;
+
+	if (mas_is_start(mas)) {
+		mas_start(mas);
+		return;
+	}
+
+	if (mte_is_leaf(mas->node) && mte_is_root(mas->node)) {
+		mas->node = MAS_NONE;
+		return;
+	}
+
+walk_up:
+	if (mte_is_leaf(mas->node) ||
+	    (slot >= mt_slot_count(mas->node))) {
+		slot = mte_parent_slot(mas->node) + 1;
+		mas->node = mt_mk_node(mte_parent(mas->node),
+				       mas_parent_enum(mas, mas->node));
+		goto walk_up;
+	}
+	prev = mas->node;
+	mas->node = mas_get_rcu_slot(mas, slot);
+	if (!mas->node) {
+		mas->node = MAS_NONE;
+		if (mte_is_root(prev))
+			return;
+
+		mas->node = prev;
+		slot = mte_parent_slot(mas->node) + 1;
+		mas->node = mt_mk_node(mte_parent(mas->node),
+				       mas_parent_enum(mas, mas->node));
+		goto walk_up;
+	}
+
+	return;
+}
 
 /* Interface */
 void __init maple_tree_init(void)
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 20ae441e7c50..f568bbae3402 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -318,6 +318,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max,
 
 	MT_BUG_ON(mt, !mtree_empty(mt));
 
+	nr_tallocated = 0;
 	for (i = 0; i <= max; i++) {
 		MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
 		for (j = 0; j <= i; j++)
@@ -328,8 +329,9 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max,
 
 	if (verbose) {
 		rcu_barrier();
 		mt_dump(mt);
-		pr_info(" seq test of 0-%lu used %luK in %d allocations\n",
-			max, mt_get_alloc_size()/1024, nr_allocated);
+		pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
+			max, mt_get_alloc_size()/1024, nr_allocated,
+			nr_tallocated);
 	}
 }
@@ -31269,6 +31271,10 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
 
 	MA_STATE(mas, mt, 0, 0);
 
+	mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+			GFP_KERNEL);
+	mt_dump(mt);
+
 #define DEBUG_REV_RANGE 0
 	for (i = 0; i < range_cnt; i += 2) {
 		/* Inclusive, Inclusive (with the -1) */
@@ -31429,6 +31435,8 @@ static noinline void check_alloc_range(struct maple_tree *mt)
 	int req_range_cnt = ARRAY_SIZE(req_range);
 	unsigned long min = 0x565234af2000;
 
+	mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+			GFP_KERNEL);
 	for (i = 0; i < range_cnt; i += 2) {
 #define DEBUG_ALLOC_RANGE 0
 #if DEBUG_ALLOC_RANGE
@@ -31727,6 +31735,97 @@ static noinline void check_ranges(struct maple_tree *mt)
 	check_store_range(mt, 1792, 1799, NULL, 0);
 	mt_validate(mt);
 	mtree_destroy(mt);
+
+	mtree_init(mt, MAPLE_ALLOC_RANGE);
+	for (i = 0; i <= 200; i++) {
+		val = i*10;
+		val2 = (i+1)*10;
+		check_store_range(mt, val, val2, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+
+	for (i = 10; i <= 19; i++) {
+		val = i*100 + 5;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+		val++;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+		val += 10;
+		if (val == 1116)
+			mt_dump(mt);
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		if (val == 1116)
+			mt_dump(mt);
+		mt_validate(mt);
+
+		val += 39;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+		val++;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+		val += 10;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+
+	for (i = 13; i <= 14; i++) {
+		val = i*100 + 75;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val++;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val += 9;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val++;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val += 9;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val++;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+
+	for (i = 0; i <= 3; i++) {
+		val = 1200 + i*10;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val++;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+	for (i = 0; i <= 5; i++) {
+		val = 1270 + i;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val += 10;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+	for (i = 0; i <= 6; i++) {
+		val = 1400 + i*10;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val++;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+	for (i = 0; i <= 5; i++) {
+		val = 1370 + i;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val += 10;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		val += 10;
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+	for (i = 0; i < ARRAY_SIZE(overflow); i++) {
+		val = overflow[i];
+		check_store_range(mt, val, val, xa_mk_value(val), 0);
+		mt_validate(mt);
+	}
+
+	// Cause a 3 child split all the way up the tree.
+	check_store_range(mt, 1349, 1350, NULL, 0);
+	mt_validate(mt);
+	mtree_destroy(mt);
 }
 
 static noinline void check_next_entry(struct maple_tree *mt)
@@ -31965,6 +32064,33 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 	mtree_destroy(mt);
 }
 
+static void check_dfs_preorder(struct maple_tree *mt)
+{
+	int count = 0;
+
+	MA_STATE(mas, mt, 0, 0);
+
+	check_seq(mt, 1000, false);
+
+	do {
+		count++;
+		mas_dfs_preorder(&mas);
+	} while (!mas_is_none(&mas));
+	// 68 + MAS_START = 69
+	MT_BUG_ON(mt, count != 69);
+	mtree_destroy(mt);
+
+	mtree_init(mt, MAPLE_ALLOC_RANGE);
+	mas_reset(&mas);
+	count = 0;
+	check_seq(mt, 1000, false);
+	do {
+		count++;
+		mas_dfs_preorder(&mas);
+	} while (!mas_is_none(&mas));
+	// 71 + MAS_START = 72
+	MT_BUG_ON(mt, count != 72);
+}
 
 static DEFINE_MTREE(tree);
 
@@ -31980,6 +32106,10 @@ static int maple_tree_seed(void)
 	check_new_node(&tree);
 	mtree_destroy(&tree);
 
+	mtree_init(&tree, 0);
+	check_dfs_preorder(&tree);
+	mtree_destroy(&tree);
+
 	/* Test ranges (store and insert) */
 	mtree_init(&tree, 0);
 	check_ranges(&tree);
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 93f7de81fbe8..f8a3fe3cf6c0 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -13,6 +13,7 @@
 #include
 
 int nr_allocated;
+int nr_tallocated;
 int preempt_count;
 int kmalloc_verbose;
 int test_verbose;
@@ -67,6 +68,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
 	}
 
 	uatomic_inc(&nr_allocated);
+	uatomic_inc(&nr_tallocated);
 	if (kmalloc_verbose)
 		printf("Allocating %p from slab\n", p);
 	return p;
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 7ef7067e942c..22153004077b 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -48,6 +48,7 @@ void tree_verify_min_height(struct radix_tree_root *root, int maxindex);
 void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag);
 
 extern int nr_allocated;
+extern int nr_tallocated;
 
 /* Normally private parts of lib/radix-tree.c */
 struct radix_tree_node *entry_to_node(void *ptr);
-- 
2.50.1