From 5ae06bb570a2b41ae3b64b6aaccfb6849beea0ef Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Fri, 2 Aug 2019 11:21:12 -0400
Subject: [PATCH] maple_tree: Store XA_RETRY_ENTRY on erase

Avoid using slots again until an RCU freeing cycle is complete by
storing XA_RETRY_ENTRY in the erased slot.

Signed-off-by: Liam R. Howlett
---
 lib/maple_tree.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index e8c055d67249..07be6c796b08 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3307,9 +3307,11 @@ static inline bool mas_skip_node(struct ma_state *mas)
 	return true;
 }
 /* Private
- * Find the range in which index resides and erase the entire range
+ * ma_erase() - Find the range in which index resides and erase the entire
+ * range.
+ *
  * Any previous pivots with no value will be set to the same pivot value.
- * returns the number of slots that have been erased.
+ * Return: the number of concurrent slots that are NULL or XA_RETRY_ENTRY.
  */
 static inline int ma_erase(struct ma_state *mas)
 {
@@ -3321,7 +3323,7 @@ static inline int ma_erase(struct ma_state *mas)
 
 	slot = ma_get_slot(mas);
 
-	ma_update_rcu_slot(mas->node, slot, NULL);
+	ma_update_rcu_slot(mas->node, slot, XA_RETRY_ENTRY);
 
 	if ((slot >= slot_cnt - 1))
 		return ret;
@@ -3347,14 +3349,18 @@ static inline int ma_erase(struct ma_state *mas)
 	}
 
 	/* Walk down and set all the previous pivots with NULLs to piv_val */
-	while (--slot >= 0 && ma_get_rcu_slot(mas->node, slot) == NULL) {
+	while (--slot >= 0) {
+		void *entry = ma_get_rcu_slot(mas->node, slot);
+
+		if (entry && entry != XA_RETRY_ENTRY)
+			break;
+
 		ma_set_pivot(mas->node, slot, piv_val);
 		ret++;
 	}
 
 	/* The error on allocation failure can be ignored */
 	mas_coalesce(mas, ++slot);
-
 	return ret;
 }
 
-- 
2.50.1
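
[Editor's note] For readers following along: the reason the erase path
parks XA_RETRY_ENTRY in the slot rather than NULL is that a lockless
reader racing with the erase must not trust the slot's old contents,
which may be freed once the current RCU grace period ends. Seeing the
retry marker tells the reader to restart its walk. The sketch below is
illustrative only and not part of the patch: ma_state_walk() is a
hypothetical stand-in for the tree's walk primitive, while
rcu_read_lock()/rcu_read_unlock() and the xa_is_retry() predicate for
XA_RETRY_ENTRY are existing kernel APIs.

	#include <linux/rcupdate.h>
	#include <linux/xarray.h>

	/*
	 * Sketch only: ma_state_walk() is hypothetical; the retry loop
	 * is the point.  An erased slot now holds XA_RETRY_ENTRY rather
	 * than NULL, so a reader that races with ma_erase() sees the
	 * marker and walks again instead of consuming a slot whose old
	 * contents may be freed after the grace period.
	 */
	static void *ma_load_sketch(struct ma_state *mas)
	{
		void *entry;

		rcu_read_lock();
		do {
			/* Hypothetical: walk from the root to mas->index. */
			entry = ma_state_walk(mas);
		} while (xa_is_retry(entry));
		rcu_read_unlock();

		return entry;
	}

Note also how the patched pivot-collapsing loop in ma_erase() treats
NULL and XA_RETRY_ENTRY identically: both count as "empty" for the
purposes of coalescing, which is exactly what the updated Return:
comment describes.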