From 275b01469696c869c84dd7b15fd4c019ff92da5d Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Wed, 4 Mar 2020 12:00:33 -0500
Subject: [PATCH] maple_tree: Zero pivots of gaps that have been moved.

This doesn't seem totally correct and should really use a new node.  A
solution for low mem is needed too.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 lib/maple_tree.c      | 39 ++++++++++++++++++++++++++-------------
 lib/test_maple_tree.c |  4 +++-
 2 files changed, 29 insertions(+), 14 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index faef5eb79bb3..714c7dbb180b 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -934,11 +934,8 @@ static inline void mas_shift_pivot(struct ma_state *curr,
 				mte_set_pivot(left.node, slot, piv);
 			} while (++slot < r_p_slot);
 		}
-		break;
+		return;
 	}
-
-	mas_set_slot(&left, l_p_slot);
-	mas_set_slot(&right, r_p_slot);
 	} while (left.node != right.node);
 }
 /* mas_get_prev_pivot() - Return the previous pivot.
@@ -2208,6 +2205,7 @@ static inline void mas_may_move_gap(struct ma_state *mas)
 	unsigned long last_piv;
 	unsigned char coalesce;
 	unsigned char end;
+	unsigned char new_end;
 	unsigned char slot = mas_get_slot(mas);
 	void *entry = NULL;
 	void *next_entry = NULL;
@@ -2252,24 +2250,32 @@ static inline void mas_may_move_gap(struct ma_state *mas)
 	 * start, see if there are more to shift.
 	 */
-	while (end && (!entry || mt_will_coalesce(entry)))
-		entry = mas_get_rcu_slot(&curr, --end);
+	new_end = end;
+	while (new_end && (!entry || mt_will_coalesce(entry)))
+		entry = mas_get_rcu_slot(&curr, --new_end);
 
 	// Set the slot to 0 in case of deleted or retry.
 	if (next_entry)
 		mte_set_rcu_slot(next.node, 0, NULL);
 
-	last_piv = mas_get_safe_pivot(&curr, end);
-	if (end < mt_pivot_count(curr.node) - 1)
-		mte_set_pivot(curr.node, end + 1, 0);
+
+	last_piv = mas_get_safe_pivot(&curr, new_end);
+	while (new_end++ != end) {
+		if (new_end < mt_pivot_count(curr.node))
+			mte_set_pivot(curr.node, new_end, 0);
+		// The location storing these values has moved.
+		// FIXME: We need to allocate a new node.
+		// FIXME: What if allocation fails?
+		mte_set_rcu_slot(curr.node, new_end, NULL);
+	}
 
 	if (curr.node == mas->node)
 		mas->max = last_piv;
 	else
 		mas->min = last_piv + 1;
 
-	mas_shift_pivot(&curr, &next, last_piv);
 	next.min = last_piv + 1;
 	curr.max = last_piv;
+	mas_shift_pivot(&curr, &next, last_piv);
 
 	if (mt_is_alloc(mas->tree)) {
 		mas_update_gap(&curr, false);
@@ -5293,7 +5299,7 @@ void mas_validate_gaps(struct ma_state *mas)
 		goto counted;
 	}
 
-	for(i = 0; i < mt_slot_count(mte); i++) {
+	for (i = 0; i < mt_slot_count(mte); i++) {
 		p_end = mas_get_safe_pivot(mas, i);
 		if (!p_end && i)
 			p_end = mas->max;
@@ -5316,6 +5322,7 @@ void mas_validate_gaps(struct ma_state *mas)
 				mas_mn(mas), i,
 				mas_get_rcu_slot(mas, i), gap,
 				p_end, p_start);
+			mt_dump(mas->tree);
 			MT_BUG_ON(mas->tree,
 				gap != p_end - p_start + 1);
 
@@ -5325,6 +5332,7 @@ void mas_validate_gaps(struct ma_state *mas)
 			pr_err(MA_PTR"[%u] %lu >= %lu - %lu + 1 (%lu)\n",
 				mas_mn(mas), i, gap, p_end, p_start,
 				p_end - p_start + 1);
+			mt_dump(mas->tree);
 			MT_BUG_ON(mas->tree,
 				gap > p_end - p_start + 1);
 		}
@@ -5336,7 +5344,8 @@ void mas_validate_gaps(struct ma_state *mas)
 not_empty:
 		p_start = p_end + 1;
 		if (p_end >= mas->max)
-			break; }
+			break;
+	}
 
 counted:
 	if (mte_is_root(mte))
@@ -5345,8 +5354,10 @@ counted:
 	p_slot = mte_parent_slot(mas->node);
 	p_mn = mte_parent(mte);
 	MT_BUG_ON(mas->tree, max_gap > mas->max);
-	if (ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) != max_gap)
+	if (ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) != max_gap) {
 		pr_err("gap "MA_PTR"[%u] != %lu\n", p_mn, p_slot, max_gap);
+		mt_dump(mas->tree);
+	}
 
 	MT_BUG_ON(mas->tree,
 		ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) !=
@@ -5418,12 +5429,14 @@ void mas_validate_limits(struct ma_state *mas)
 			mt_dump(mas->tree);
 			pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i,
 				piv, mas->min);
+			mt_dump(mas->tree);
 			MT_BUG_ON(mas->tree, piv < mas->min);
 		}
 	}
 	if (piv > mas->max) {
 		pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i,
 			piv, mas->max);
+		mt_dump(mas->tree);
 		MT_BUG_ON(mas->tree, piv > mas->max);
 	}
 	prev_piv = piv;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index ebab8f995c57..e00bcc9db8bf 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -10762,7 +10762,6 @@ STORE, 140590376591360, 140590376595455,
 */
 	};
 
-	int cnt = 0;
 
 	MA_STATE(mas, mt, 0, 0);
 
@@ -11399,6 +11398,7 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 	rcu_read_lock();
 	MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 76, 81, 2));
 	MT_BUG_ON(mt, mas.index != 79);
+	mt_validate(mt);
 	rcu_read_unlock();
 
 	// Test retry entry in the start of a gap.
@@ -11409,6 +11409,7 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 76, 85, 4));
 	rcu_read_unlock();
 	MT_BUG_ON(mt, mas.index != 78);
+	mt_validate(mt);
 	mtree_destroy(mt);
 
 	mtree_init(mt, MAPLE_ALLOC_RANGE);
@@ -11421,6 +11422,7 @@ static noinline void check_gap_combining(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 1700, 1800, 2));
 	MT_BUG_ON(mt, mas.index != 1791);
 	rcu_read_unlock();
+	mt_validate(mt);
 	mtree_destroy(mt);
 }
 
-- 
2.50.1
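
A note on the FIXMEs in mas_may_move_gap() above: zeroing the moved pivots
and slots in place means concurrent RCU readers can observe the node
mid-update, which is why the comments say a new node is really needed.
Below is a minimal, editorial sketch of the copy-then-publish shape that
implies.  mas_new_node() and mas_replace_node() are hypothetical helpers,
and struct maple_enode is assumed as the node type; only mas_get_rcu_slot(),
mas_get_safe_pivot(), mte_set_rcu_slot(), mte_set_pivot() and
mt_pivot_count() are taken from the patch itself.

/*
 * Editorial sketch only.  mas_new_node() and mas_replace_node() are
 * hypothetical stand-ins for "build a replacement node and publish it
 * via RCU"; they are not part of this patch.
 */
static int mas_zero_moved_slots(struct ma_state *curr,
				unsigned char new_end, unsigned char end)
{
	struct maple_enode *fresh;
	unsigned char slot;

	fresh = mas_new_node(curr, GFP_NOWAIT);		/* hypothetical */
	if (!fresh)
		return -ENOMEM;		/* the unsolved "low mem" case */

	/* Copy the entries and pivots that are staying put. */
	for (slot = 0; slot <= new_end; slot++) {
		mte_set_rcu_slot(fresh, slot, mas_get_rcu_slot(curr, slot));
		if (slot < mt_pivot_count(curr->node))
			mte_set_pivot(fresh, slot,
				      mas_get_safe_pivot(curr, slot));
	}

	/* Zero the pivots and slots whose contents moved to the sibling. */
	for (slot = new_end + 1; slot <= end; slot++) {
		if (slot < mt_pivot_count(curr->node))
			mte_set_pivot(fresh, slot, 0);
		mte_set_rcu_slot(fresh, slot, NULL);
	}

	/* Readers see either the old node or the new one, never a mix. */
	mas_replace_node(curr, fresh);			/* hypothetical */
	return 0;
}

Whether GFP_NOWAIT plus a retry path is acceptable here, or whether nodes
have to be preallocated before the tree is touched, is exactly the "low
mem" question the commit message leaves open.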