From 63c9c522c194000d220880dd1779c62e75245f74 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Wed, 21 Oct 2020 15:23:50 -0400
Subject: [PATCH] maple_tree: Add better allocation testing and relax
 push_left to avoid jitters

Size pushes and splits with -2 instead of -1 so that the destination
node keeps a spare slot; otherwise the next store into that node can
split it again immediately (a "jitter").  Apply the same slack to the
worst-case node estimate in mas_entry_count(), and expand the
allocation tests to cover single requests, pop/push accounting, and
larger bulk requests.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 lib/maple_tree.c      |  17 +++--
 lib/test_maple_tree.c | 142 +++++++++++++++++++++++++++++++++++-------
 2 files changed, 130 insertions(+), 29 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index ca625e142332..602b4588dfc6 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1019,15 +1019,19 @@ static void mas_node_count(struct ma_state *mas, int count)
 
 int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
 {
-	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 1;
+	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
 	struct maple_enode *enode = mas->node;
 	int nr_nodes;
 	int ret;
 
+	nr_entries++; // For the trailing null.
+
 	if (!mt_is_alloc(mas->tree))
-		nonleaf_cap = MAPLE_RANGE64_SLOTS - 1;
+		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
 
-	nr_nodes = DIV_ROUND_UP(nr_entries, MAPLE_RANGE64_SLOTS); // leaves
+	// Leaves.
+	nr_nodes = DIV_ROUND_UP(nr_entries, MAPLE_RANGE64_SLOTS - 1);
+	// Internal nodes.
 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
 	mas_node_count(mas, nr_nodes);
 
@@ -2551,7 +2555,7 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
 		end = mas_data_end(&tmp_mas);
 		slot_total += end;
-	space = 2 * mt_slot_count(mas->node) - 1;
+	space = 2 * mt_slot_count(mas->node) - 2;
 	/* -2 instead of -1 to ensure there isn't a triple split */
 	if (ma_is_leaf(mast->bn->type))
 		space--;
@@ -2573,7 +2577,7 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
 	}
 
 	/* Configure mast for splitting of mast->bn */
-	split = mt_slots[mast->bn->type] - 1;
+	split = mt_slots[mast->bn->type] - 2;
 	if (left) {
 		/* Switch mas to prev node */
 		mat_add(mast->free, mas->node);
@@ -2590,7 +2594,8 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
 	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
 	// Update parent slot for split calculation.
 	if (left)
-		mas_set_offset(mast->orig_l, mas_offset(mast->orig_l) + end + 1);
+		mas_set_offset(mast->orig_l,
+			       mas_offset(mast->orig_l) + end + 1);
 
 	mast_split_data(mast, mas, split);
 	mast_fill_bnode(mast, mas, 2);
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 7b169fbcc35e..d3cfe8876009 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -195,6 +195,7 @@ static noinline void check_new_node(struct maple_tree *mt)
 
 	struct maple_node *mn;
 	struct maple_alloc *smn;
+	struct maple_node *nodes[100];
 	int i, j, total;
 
 	MA_STATE(mas, mt, 0, 0);
@@ -230,7 +231,6 @@ static noinline void check_new_node(struct maple_tree *mt)
 	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
 	// Eat the requested node.
 	mn = mas_pop_node(&mas);
-	MT_BUG_ON(mt, mn == NULL);
 	MT_BUG_ON(mt, mn->slot[0] != NULL);
 	MT_BUG_ON(mt, mn->slot[1] != NULL);
@@ -256,10 +256,99 @@
 	// Free.
 	mas_nomem(&mas, GFP_KERNEL);
 
-	// Set allocation request to 127.
-	total = 127;
+	// Set allocation request to 1.
+	mas_set_alloc_req(&mas, 1);
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+	mas_set_err(&mas, -ENOMEM);
+	// Validate allocation request.
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+	// Check that the popped allocation is a single node.
+	mn = mas_pop_node(&mas);
+	MT_BUG_ON(mt, mas_allocated(&mas));
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, mn->slot[0] != NULL);
+	MT_BUG_ON(mt, mn->slot[1] != NULL);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+	mas_push_node(&mas, (struct maple_enode *)mn);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+	MT_BUG_ON(mt, mas.alloc->node_count);
+
+	mas_set_alloc_req(&mas, 2); // Request 2 more.
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
+	mas_set_err(&mas, -ENOMEM);
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+	MT_BUG_ON(mt, mas.alloc == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
+	for (i = 2; i >= 0; i--) {
+		mn = mas_pop_node(&mas);
+		MT_BUG_ON(mt, mas_allocated(&mas) != i);
+		MT_BUG_ON(mt, !mn);
+		ma_free_rcu(mn);
+	}
+
+	total = 64;
+	mas_set_alloc_req(&mas, total); // Request 64 more.
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
+	mas_set_err(&mas, -ENOMEM);
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	for (i = total; i > 0; i--) {
+		unsigned int e = 0; // Expected node_count.
+
+		if (i >= 35)
+			e = i - 35;
+		else if (i >= 5)
+			e = i - 5;
+		else if (i >= 2)
+			e = i - 2;
+		MT_BUG_ON(mt, mas.alloc->node_count != e);
+		mn = mas_pop_node(&mas);
+		MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
+		MT_BUG_ON(mt, !mn);
+		ma_free_rcu(mn);
+	}
+
+	total = 100;
+	for (i = 1; i < total; i++) {
+		mas_set_alloc_req(&mas, i);
+		mas_set_err(&mas, -ENOMEM);
+		MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+		for (j = i; j > 0; j--) {
+			mn = mas_pop_node(&mas);
+			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+			MT_BUG_ON(mt, !mn);
+			mas_push_node(&mas, (struct maple_enode *)mn);
+			MT_BUG_ON(mt, mas_allocated(&mas) != j);
+			mn = mas_pop_node(&mas);
+			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+			ma_free_rcu(mn);
+		}
+		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+		mas_set_alloc_req(&mas, i);
+		mas_set_err(&mas, -ENOMEM);
+		MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+		for (j = 0; j <= i / 2; j++) {
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+			nodes[j] = mas_pop_node(&mas);
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+		}
+
+		while (j) {
+			j--;
+			mas_push_node(&mas, (struct maple_enode *)nodes[j]);
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+		}
+		MT_BUG_ON(mt, mas_allocated(&mas) != i);
+		MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
+	}
+
+	// Set allocation request.
+	total = 500;
 	mas_node_count(&mas, total);
-	// Drop the lock and allocate 127 nodes.
+	// Drop the lock and allocate the nodes.
 	mas_nomem(&mas, GFP_KERNEL);
 	MT_BUG_ON(mt, !mas.alloc);
 	i = 1;
@@ -273,7 +362,7 @@
 		}
 		smn = smn->slot[0]; // next.
 	}
-	MT_BUG_ON(mt, mas_allocated(&mas) != 127);
+	MT_BUG_ON(mt, mas_allocated(&mas) != total);
 	mas_nomem(&mas, GFP_KERNEL); // Free.
 
 	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
@@ -34818,33 +34907,42 @@ static noinline void check_ranges(struct maple_tree *mt)
 	mtree_destroy(mt);
 
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
-	for (i = 0; i <= 1590; i++) {
-//	for (i = 0; i <= 1420; i++) {
+	for (i = 0; i <= 1300; i++) {
 		val = i*10;
 		val2 = (i+1)*10;
 		check_store_range(mt, val, val2, xa_mk_value(val), 0);
 		MT_BUG_ON(mt, mt_height(mt) >= 4);
 	}
 	// Cause a 3 child split all the way up the tree.
-	check_store_range(mt, 15519, 15519, NULL, 0);
-
-//	check_store_range(mt, 9755, 9759, NULL, 0);
-//	MT_BUG_ON(mt, mt_height(mt) >= 4);
+	for (i = 5; i < 215; i += 10)
+		check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0);
+	for (i = 5; i < 65; i += 10)
+		check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0);
+
+	MT_BUG_ON(mt, mt_height(mt) >= 4);
+	for (i = 5; i < 45; i += 10)
+		check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0);
 	MT_BUG_ON(mt, mt_height(mt) < 4);
 	mtree_destroy(mt);
+
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
-	for (i = 0; i <= 1590; i++) {
-//	for (i = 0; i <= 1420; i++) {
+	for (i = 0; i <= 1200; i++) {
 		val = i*10;
 		val2 = (i+1)*10;
 		check_store_range(mt, val, val2, xa_mk_value(val), 0);
 		MT_BUG_ON(mt, mt_height(mt) >= 4);
 	}
+	// Fill parents and leaves before split.
+	for (i = 5; i < 455; i += 10)
+		check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0);
+	mt_dump(mt);
+	for (i = 1; i < 16; i++)
+		check_store_range(mt, 8185 + i, 8185 + i + 1,
+				  xa_mk_value(8185 + i), 0);
+	MT_BUG_ON(mt, mt_height(mt) >= 4);
 	// triple split across multiple levels.
-//	check_store_range(mt, 9595, 9599, NULL, 0);
-	check_store_range(mt, 9115, 9121, NULL, 0);
-//	MT_BUG_ON(mt, mt_height(mt) >= 4);
+	check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0);
 	MT_BUG_ON(mt, mt_height(mt) != 4);
 }
@@ -35109,9 +35207,9 @@ static void check_dfs_preorder(struct maple_tree *mt)
 		count++;
 		mas_dfs_preorder(&mas);
 	} while(!mas_is_none(&mas));
-	// 68 + MAS_START = 69
+	// 68 + MAS_START = 69 + 1 for no jitter
 	//printk("count %lu\n", count);
-	MT_BUG_ON(mt, count != 69);
+	MT_BUG_ON(mt, count != 70);
 	mtree_destroy(mt);
 
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
@@ -35123,7 +35221,7 @@
 		mas_dfs_preorder(&mas);
 	} while(!mas_is_none(&mas));
 	//printk("count %lu\n", count);
-	// 71 + MAS_START = 72
+	// 71 + MAS_START = 72; unchanged by the no-jitter split
 	MT_BUG_ON(mt, count != 72);
 	mtree_destroy(mt);
 
@@ -35137,14 +35235,13 @@
 	} while(!mas_is_none(&mas));
-	// 71 + MAS_START = 72
+	// 76 + MAS_START = 77
 	//printk("count %lu\n", count);
-
-	MT_BUG_ON(mt, count != 72);
+	MT_BUG_ON(mt, count != 77);
 	mtree_destroy(mt);
 
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
 	mas_reset(&mas);
 	nr_tallocated = 0;
-	mt_set_non_kernel(100);
+	mt_set_non_kernel(200);
 	mas_entry_count(&mas, max);
 	for(count = 0; count <= max; count++) {
 		mas.index = mas.last = count;
+
 	}
 	mas_empty_alloc(&mas);
 	rcu_barrier();
-	//mt_dump(mt);
 	//pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n",
 	//	max, mt_get_alloc_size()/1024, nr_allocated,
 	//	nr_tallocated);
-- 
2.50.1
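
A quick way to review the new mas_entry_count() arithmetic is to model
it in userspace. The sketch below is a review aid, not kernel code: the
slot constants are assumed to be the 64-bit values (MAPLE_RANGE64_SLOTS
== 16, MAPLE_ARANGE64_SLOTS == 10) and DIV_ROUND_UP is re-defined
locally, so substitute the values from your tree if they differ.

#include <stdio.h>

#define MAPLE_RANGE64_SLOTS	16	/* assumed 64-bit value */
#define MAPLE_ARANGE64_SLOTS	10	/* assumed 64-bit value */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the patched estimate: one spare slot per node (-2, not -1). */
static int model_entry_count(unsigned long nr_entries, int is_alloc)
{
	int nonleaf_cap = is_alloc ? MAPLE_ARANGE64_SLOTS - 2
				   : MAPLE_RANGE64_SLOTS - 2;
	int nr_nodes;

	nr_entries++;	/* For the trailing null. */
	/* Leaves, holding at most slots - 1 entries each. */
	nr_nodes = DIV_ROUND_UP(nr_entries, MAPLE_RANGE64_SLOTS - 1);
	/* One level of internal nodes above the leaves, as in the patch. */
	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
	return nr_nodes;
}

int main(void)
{
	/* 500 entries in an allocation tree: DIV_ROUND_UP(501, 15) = 34
	 * leaves plus DIV_ROUND_UP(34, 8) = 5 internal nodes, i.e. 39. */
	printf("%d\n", model_entry_count(500, 1));
	return 0;
}

Reserving two slots rather than one makes this estimate, and the push
and split targets above, deliberately more pessimistic: a node that
leaves a push or split with a spare slot cannot be forced straight back
into another split by the next store, which is the "jitter" the subject
line refers to.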