From 098da3f40675ff3f41efb0ccc296eb0129f5d9a8 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Thu, 29 Oct 2020 21:45:01 -0400
Subject: [PATCH] maple_tree: Rework MA_STATE_BULK and MA_STATE_REBALANCE

Set MA_STATE_BULK when setting the expected number of store events.
Only rebalance the end node when it is deficient.

Signed-off-by: Liam R. Howlett
---
 lib/maple_tree.c      | 22 +++++++++++++++-------
 lib/test_maple_tree.c | 34 +++++++++++++---------------------
 2 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index f98d78e22277..64d4de3c4bbf 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1411,10 +1411,11 @@ static inline int mab_calc_split(struct ma_state *mas,
 	unsigned char min = mt_min_slots[b_node->type] - 1;
 
 	*mid_split = 0;
-	if ((mas->mas_flags & MA_STATE_REBALANCE) &&
+	if ((mas->mas_flags & MA_STATE_BULK) &&
 	    ma_is_leaf(b_node->type)) {
 		min = 2;
 		split = mt_slots[b_node->type] - min;
+		mas->mas_flags |= MA_STATE_REBALANCE;
 	}
 	/* Avoid having a range less than the slot count unless it
 	 * causes one node to be deficient.
@@ -1564,7 +1565,8 @@ static inline void mas_descend_adopt(struct ma_state *mas)
 	}
 }
 
-static inline void mas_advanced_may_rebalance(struct ma_state *mas)
+static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
+				      enum maple_type mt)
 {
 	if (!(mas->mas_flags & MA_STATE_BULK))
 		return;
@@ -1572,7 +1574,10 @@
 	if (mte_is_root(mas->node))
 		return;
 
-	mas->mas_flags |= MA_STATE_REBALANCE;
+	if (end > mt_min_slots[mt]) {
+		mas->mas_flags &= ~MA_STATE_REBALANCE;
+		return;
+	}
 }
 /*
  * mas_store_b_node() - Store an @entry into the b_node while also copying the
@@ -1618,7 +1623,7 @@ static inline unsigned char mas_store_b_node(struct ma_state *mas,
 	piv = _mas_safe_pivot(mas, pivots, slot, b_node->type);
 	if (piv > mas->last) {
 		if (piv == ULONG_MAX)
-			mas_advanced_may_rebalance(mas);
+			mas_bulk_rebalance(mas, b_node->b_end, b_node->type);
 
 		b_node->slot[++b_end] = contents;
 		if (!contents)
@@ -3120,7 +3125,7 @@ static inline bool mas_node_store(struct ma_state *mas, void *entry,
 		offset_end++;
 	} else if (mas->last < max) { // new range ends in this range.
 		if (max == ULONG_MAX)
-			mas_advanced_may_rebalance(mas);
+			mas_bulk_rebalance(mas, end, mt);
 
 		new_end++;
 		offset_end = offset;
@@ -3204,7 +3209,6 @@ done:
 		mas_update_gap(mas);
 
 	return true;
-
 }
 
 static inline bool mas_slot_store(struct ma_state *mas, void *entry,
@@ -3286,6 +3290,7 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 			mas_set_err(mas, -EEXIST);
 			return NULL; // spanning writes always overwrite something.
 		}
+
 		ret = mas_spanning_store(mas, entry);
 		goto spanning_store;
 	}
@@ -4843,7 +4848,6 @@ EXPORT_SYMBOL(mtree_destroy);
  */
void *mas_store(struct ma_state *mas, void *entry)
 {
-	mas->mas_flags |= MA_STATE_BULK;
 	if (mas->index <= mas->last)
 		return _mas_store(mas, entry, true);
 
@@ -4870,6 +4874,9 @@ int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
 	int nr_nodes;
 	int ret;
 
+	// Optimize splitting for bulk insert in-order.
+	mas->mas_flags |= MA_STATE_BULK;
+
 	// Avoid overflow, assume a gap between each entry and a trailing null
 	// If this is wrong, it just means allocation can happen during
 	// insertion of entries.
@@ -4919,6 +4926,7 @@ void mas_destroy(struct ma_state *mas)
 		}
 		mas->mas_flags &= ~MA_STATE_REBALANCE;
 	}
+	mas->mas_flags &= ~MA_STATE_BULK;
 
 	while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
 		node = mas->alloc;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index dd541adbecb4..fbee24ba3dc3 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -35311,13 +35311,14 @@ static noinline void check_forking(struct maple_tree *mt)
 {
 
 	struct maple_tree newmt;
-	int i, max = 3000;
+	int i, nr_entries = 134;
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
 
-	for (i = 0; i < max; i+=10)
-		mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+	for (i = 0; i <= nr_entries; i++)
+		mtree_store_range(mt, i*10, i*10 + 5,
+				  xa_mk_value(i), GFP_KERNEL);
 
 	mt_set_non_kernel(99999);
 	mtree_init(&newmt, MAPLE_ALLOC_RANGE);
@@ -35326,11 +35327,10 @@ static noinline void check_forking(struct maple_tree *mt)
 	mas_reset(&mas);
 	mas.index = 0;
 	mas.last = 0;
-	if (mas_entry_count(&newmas, max/10)) {
+	if (mas_entry_count(&newmas, nr_entries)) {
 		printk("OOM!");
 		BUG_ON(1);
 	}
-
 	mas_for_each(&mas, val, ULONG_MAX) {
 		newmas.index = mas.index;
 		newmas.last = mas.last;
@@ -35338,14 +35338,6 @@ static noinline void check_forking(struct maple_tree *mt)
 	}
 	mas_destroy(&newmas);
 	mt_validate(&newmt);
-	mas_reset(&mas);
-	mas_reset(&newmas);
-	mas_for_each(&mas, val, ULONG_MAX) {
-		void *val2 = mas_find(&newmas, mas.last);
-		MT_BUG_ON(&newmt, mas.index != newmas.index);
-		MT_BUG_ON(&newmt, mas.last != newmas.last);
-		MT_BUG_ON(&newmt, val != val2);
-	}
 	mt_set_non_kernel(0);
 	mtree_destroy(&newmt);
 }
@@ -35354,16 +35346,16 @@ static noinline void bench_forking(struct maple_tree *mt)
 {
 
 	struct maple_tree newmt;
-	int i, max = 300000, count = 100;
+	int i, nr_entries = 134, nr_fork = 60000;
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
 
-//	for (i = max; i > 0; i-=10)
-	for (i = 0; i < max; i+=10)
-		mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+	for (i = 0; i <= nr_entries; i++)
+		mtree_store_range(mt, i*10, i*10 + 5,
+				  xa_mk_value(i), GFP_KERNEL);
 
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < nr_fork; i++) {
 		mt_set_non_kernel(99999);
 		mtree_init(&newmt, MAPLE_ALLOC_RANGE);
 		newmas.tree = &newmt;
@@ -35371,9 +35363,9 @@ static noinline void bench_forking(struct maple_tree *mt)
 		mas_reset(&mas);
 		mas.index = 0;
 		mas.last = 0;
-		if (mas_entry_count(&newmas, max/10)) {
+		if (mas_entry_count(&newmas, nr_entries)) {
 			printk("OOM!");
-			break;
+			BUG_ON(1);
 		}
 		mas_for_each(&mas, val, ULONG_MAX) {
 			newmas.index = mas.index;
@@ -35397,7 +35389,7 @@ static int maple_tree_seed(void)
 
 	pr_info("\nTEST STARTING\n\n");
 
-#if 1
+#if 0
 	mtree_init(&tree, MAPLE_ALLOC_RANGE);
 	bench_slot_store(&tree);
 	mtree_destroy(&tree);
-- 
2.50.1
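
Not part of the patch above: the following is a minimal usage sketch of the bulk-store sequence this rework targets, modelled on check_forking() in lib/test_maple_tree.c. The helper name copy_tree_sketch(), its parameters, and its error handling are illustrative assumptions for this note, not code from the series, and no locking is shown.

#include <linux/maple_tree.h>

/* Hypothetical helper, modelled on check_forking() above. */
static int copy_tree_sketch(struct maple_tree *mt_old, struct maple_tree *mt_new,
			    unsigned long nr_entries)
{
	void *val;
	int ret;
	MA_STATE(mas, mt_old, 0, 0);
	MA_STATE(newmas, mt_new, 0, 0);

	/*
	 * Declare the expected number of stores up front.  With this patch,
	 * mas_entry_count() sets MA_STATE_BULK so mab_calc_split() packs
	 * leaf nodes tightly for in-order insertion.
	 */
	ret = mas_entry_count(&newmas, nr_entries);
	if (ret)
		return ret;

	/* Store every range from the old tree into the new one, in order. */
	mas_for_each(&mas, val, ULONG_MAX) {
		newmas.index = mas.index;
		newmas.last = mas.last;
		mas_store(&newmas, val);
	}

	/*
	 * mas_destroy() rebalances a deficient end node if needed, clears
	 * MA_STATE_REBALANCE and MA_STATE_BULK, and frees any unused
	 * preallocated nodes.
	 */
	mas_destroy(&newmas);
	return 0;
}

This is the same sequence that bench_forking() now times once per simulated fork.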