From: Liam R. Howlett <Liam.Howlett@oracle.com>
Date: Tue, 23 Nov 2021 16:39:32 +0000 (-0500)
Subject: maple_tree: Just use ma_wr_state for spanning write function arguments
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=17e70855fee1a0ae547b7939751462ce41adeac8;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Just use ma_wr_state for spanning write function arguments

The entry being stored is already carried in the maple write state
(wr_mas->entry), so passing it to mas_spanning_store() as a separate
argument is redundant.  Drop the extra parameter and use wr_mas->entry
directly.  Also use the cached node type (wr_mas->type) in the
mas_wr_modify() slow path instead of re-deriving it with
mte_node_type(), and clean up nearby comments and whitespace.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 6594ed6068ca..84a9cbaded04 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3572,8 +3572,8 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
 
 /*
  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
- * @l_mas: The left maple state
- * @r_mas: The right maple state
+ * @l_wr_mas: The left maple write state
+ * @r_wr_mas: The right maple write state
  */
 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
 					    struct ma_wr_state *r_wr_mas)
@@ -3582,7 +3582,6 @@ static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
 	struct ma_state *l_mas = l_wr_mas->mas;
 	unsigned char l_slot;
 
-
 	l_slot = l_mas->offset;
 	if (!l_wr_mas->content)
 		l_mas->index = l_wr_mas->r_min;
@@ -3806,12 +3805,11 @@ done:
  * and new nodes where necessary, then place the sub-tree in the actual tree.
  * Note that mas is expected to point to the node which caused the store to
  * span.
- * @mas: The maple state
- * @entry: The entry to store.
+ * @wr_mas: The maple write state
  *
  * Return: 0 on error, positive on success.
  */
-static inline int mas_spanning_store(struct ma_wr_state *wr_mas, void *entry)
+static inline int mas_spanning_store(struct ma_wr_state *wr_mas)
 {
 	struct ma_wr_state r_wr_mas, l_wr_mas;
 	struct maple_subtree_state mast;
@@ -3827,7 +3825,7 @@ static inline int mas_spanning_store(struct ma_wr_state *wr_mas, void *entry)
 	trace_ma_op(__func__, mas);
 
 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
-		return mas_new_root(mas, entry);
+		return mas_new_root(mas, wr_mas->entry);
 	/*
 	 * Node rebalancing may occur due to this store, so there may be two new
 	 * entries per level plus a new root.
@@ -3839,7 +3837,7 @@ static inline int mas_spanning_store(struct ma_wr_state *wr_mas, void *entry)
 
 	/* Set up right side. */
 	r_mas = *mas;
-	/* Avoid overflow. */
+	/* Avoid overflow, walk to next slot in the tree. */
 	if (r_mas.last + 1)
 		r_mas.last++;
 
@@ -3851,10 +3849,11 @@ static inline int mas_spanning_store(struct ma_wr_state *wr_mas, void *entry)
 	/* Set up left side. */
 	l_mas = *mas;
 	l_wr_mas.mas = &l_mas;
+	/* Stop detection of spanning store on write walk */
 	l_mas.last = l_mas.index;
 	mas_wr_walk(&l_wr_mas);
 
-	if (!entry) {
+	if (!wr_mas->entry) {
 		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
 		mas->offset = l_mas.offset;
 		mas->index = l_mas.index;
@@ -3864,7 +3863,7 @@ static inline int mas_spanning_store(struct ma_wr_state *wr_mas, void *entry)
 
 	b_node.type = wr_mas->type;
 	/* Copy l_mas and store the value in b_node. */
-	b_node.b_end = mas_store_b_node(&l_mas, &b_node, entry,
+	b_node.b_end = mas_store_b_node(&l_mas, &b_node, wr_mas->entry,
 					l_wr_mas.node_end, l_wr_mas.node_end,
 					l_wr_mas.content);
 	/* Copy r_mas into b_node. */
@@ -4354,7 +4353,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
 		return;
 
 slow_path:
-	b_node.type = mte_node_type(mas->node);
+	b_node.type = wr_mas->type;
 	b_node.b_end = mas_store_b_node(mas, &b_node, wr_mas->entry, wr_mas->node_end,
 					wr_mas->offset_end, wr_mas->content);
 	b_node.min = mas->min;
@@ -4388,7 +4387,7 @@ static inline void *mas_store_entry(struct ma_state *mas, void *entry)
 	wr_mas.mas = mas;
 	wr_mas.entry = entry;
 	if (!mas_wr_walk(&wr_mas)) {
-		mas_spanning_store(&wr_mas, entry);
+		mas_spanning_store(&wr_mas);
 		return wr_mas.content;
 	}