From 68a4a65cbf9d88e1db20adca8aafadabc1809cd4 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Thu, 20 Feb 2020 16:36:16 -0500
Subject: [PATCH] maple_tree: Fix off-by-one on append_entry

When appending an entry, ensure that the previous pivot is one less
than the start of the desired range.

Don't subtract coalesce from the slot calculation as that is now taken
into account within _mas_add_slot_cnt() itself.

Fix the test code to kick out if mas_for_each() is stuck in a retry
loop.

Signed-off-by: Liam R. Howlett
---
 lib/maple_tree.c      | 14 +++++++++-----
 lib/test_maple_tree.c | 17 ++++++++++++++---
 2 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 31f1608adf0c..f540f6fd0a66 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1238,7 +1238,7 @@ static inline unsigned char mas_append_entry(struct ma_state *mas, void *entry)
 	} else if (dst_slot)
 		wr_pivot = mas_get_safe_pivot(mas, dst_slot - 1);
 
-	if (dst_slot && mas->index < wr_pivot) {
+	if (dst_slot && mas->index <= wr_pivot) {
 		mas_set_safe_pivot(mas, dst_slot - 1, mas->index - 1);
 	} else if (entry && mas->index && (mas->index - 1 != wr_pivot)) {
 		if (dst_slot && !mte_get_rcu_slot(mas->node, dst_slot - 1))
@@ -2226,11 +2226,11 @@ skip_slot:
 
 static inline int _mas_add_slot_cnt(struct ma_state *mas,
 		const unsigned char slot, const unsigned long min,
-		const unsigned long max)
+		const unsigned long max, void *entry)
 {
 	int slot_cnt;
 	unsigned char slot_max = mt_slot_count(mas->node);
-	bool prev_null = !!mte_get_rcu_slot(mas->node, slot);
+	bool prev_null = false;
 	unsigned long prev_piv = (mas->min ? mas->min - 1 : mas->min);
 
 	slot_cnt = __mas_add_slot_cnt(mas, prev_piv, 0, slot, false, true);
@@ -2239,10 +2239,14 @@ static inline int _mas_add_slot_cnt(struct ma_state *mas,
 		slot_cnt++; // (2?)
 
 	if (max > mas->last) { // ends before this_slot.
+		void *prev_val = mte_get_rcu_slot(mas->node, slot);
 		slot_cnt++; // (2 or 3?)
 		prev_piv = max;
+		if (!prev_val || mt_will_coalesce(prev_val))
+			prev_null = true;
 	} else {
-		prev_null = false;
+		if (!entry)
+			prev_null = true;
 		prev_piv = mas->last;
 	}
 
@@ -2571,7 +2575,7 @@ static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
 		ret = 1;
 		goto complete;
 	}
-	new_end = _mas_add_slot_cnt(mas, slot, min, max) - coalesce;
+	new_end = _mas_add_slot_cnt(mas, slot, min, max, entry);
 	if (!spans_node && new_end > slot_cnt + 1) {
 		mas_split(mas, slot, active, old_end, entry);
 		if (mas_is_err(mas))
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index ac1e8c1eb390..b3d9e5f7380f 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -960,8 +960,10 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
 			break;
 		}
 		mt_validate(mt);
-#if check_erase2_debug
+#if check_erase2_debug > 1
 		mt_dump(mt);
+#endif
+#if check_erase2_debug
 		pr_err("Done\n");
 #endif
 
@@ -983,14 +985,23 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
 	MT_BUG_ON(mt, check != entry_cnt);
 
 	check = 0;
+	addr = 0;
 	mas_reset(&mas);
 	mas.index = 0;
 	rcu_read_lock();
 	mas_for_each(&mas, foo, ULONG_MAX) {
-		if (mas_retry(&mas, foo))
+		if (mas_retry(&mas, foo)) {
+			if (addr == mas.index) {
+				mt_dump(mas.tree);
+				printk("retry failed %lu - %lu\n",
+						mas.index, mas.last);
+				MT_BUG_ON(mt, 1);
+			}
+			addr = mas.index;
 			continue;
+		}
 #if check_erase2_debug > 1
-		pr_err("mas: %lu -> %p\n", addr+1, foo);
+		pr_err("mas: %lu -> %p\n", mas.index, foo);
 #endif
 		check++;
 		if (check > entry_cnt)
-- 
2.50.1
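
For reviewers unfamiliar with the pivot layout, here is a minimal
standalone sketch of the off-by-one fixed in mas_append_entry().  This
is not maple_tree code: toy_node, toy_append and the pivots array are
made-up names that only model the idea that pivots[i] is the inclusive
end of the range stored in slot i.  The point is that when an entry for
[index, last] lands in slot dst, the previous slot's pivot must be
index - 1; comparing with "<" instead of "<=" skips that adjustment
when index equals the existing pivot, leaving a one-unit overlap.

/* Standalone illustration only; build with: cc -o toy toy.c */
#include <stdio.h>
#include <stdbool.h>

#define SLOTS 4

/* Toy node: pivots[i] is the inclusive end of the range held in slot i. */
struct toy_node {
	unsigned long pivots[SLOTS];
	const char *slots[SLOTS];
};

/* Append "entry" covering [index, last] at slot dst. */
static void toy_append(struct toy_node *node, unsigned char dst,
		       unsigned long index, unsigned long last,
		       const char *entry, bool fixed)
{
	unsigned long wr_pivot = dst ? node->pivots[dst - 1] : 0;

	/* Old check used "index < wr_pivot"; the fix uses "<=". */
	if (dst && (fixed ? index <= wr_pivot : index < wr_pivot))
		node->pivots[dst - 1] = index - 1;

	node->pivots[dst] = last;
	node->slots[dst] = entry;
}

int main(void)
{
	struct toy_node node = {
		.pivots = { 10, 0, 0, 0 },
		.slots  = { "A", NULL, NULL, NULL },
	};

	/* Slot 0 holds "A" ending at 10; append "B" for [10, 20] at slot 1. */
	toy_append(&node, 1, 10, 20, "B", false);
	printf("old check: slot 0 ends at %lu (overlaps new index 10)\n",
	       node.pivots[0]);

	node.pivots[0] = 10;	/* reset for the second run */
	toy_append(&node, 1, 10, 20, "B", true);
	printf("new check: slot 0 ends at %lu (one below new index 10)\n",
	       node.pivots[0]);
	return 0;
}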