From 8ea1fc66a3635c76410f1d69a847ffb5c3decf15 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Wed, 24 Sep 2025 22:10:01 -0400
Subject: [PATCH] maple_tree: Remove new_end argument from append and node
 store

Not all writes need the new_end value, so only calculate it when
necessary.  mas_wr_node_store() already looks at the necessary
information, so inline the calculation with the other logic.

Signed-off-by: Liam R. Howlett
---
 lib/maple_tree.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 08d1e597d9bd..741ec1e020b5 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3452,8 +3452,7 @@ static void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
  *
  * Attempts to reuse the node, but may allocate.
  */
-static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
-				     unsigned char new_end)
+static inline void mas_wr_node_store(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
 	void __rcu **dst_slots;
@@ -3463,11 +3462,16 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
 	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
 	bool in_rcu = mt_in_rcu(mas->tree);
 	unsigned char height = mas_mt_height(mas);
+	unsigned char new_end;
 
-	if (mas->last == wr_mas->end_piv)
+	/* Assume last adds an entry */
+	new_end = mas->end + 1 - wr_mas->offset_end + mas->offset;
+	if (mas->last == wr_mas->end_piv) {
 		offset_end++; /* don't copy this offset */
-	else if (unlikely(wr_mas->r_max == ULONG_MAX))
+		new_end--;
+	} else if (unlikely(wr_mas->r_max == ULONG_MAX)) {
 		mas_bulk_rebalance(mas, mas->end, wr_mas->type);
+	}
 
 	/* set up node. */
 	if (in_rcu) {
@@ -3488,6 +3492,7 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
 	if (wr_mas->r_min < mas->index) {
 		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
 		dst_pivots[mas->offset++] = mas->index - 1;
+		new_end++;
 	}
 
 	/* Store the new entry and range end. */
@@ -3644,18 +3649,17 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
 /*
  * mas_wr_append: Attempt to append
  * @wr_mas: the maple write state
- * @new_end: The end of the node after the modification
  *
  * This is currently unsafe in rcu mode since the end of the node may be cached
  * by readers while the node contents may be updated which could result in
  * inaccurate information.
  */
-static inline void mas_wr_append(struct ma_wr_state *wr_mas,
-		unsigned char new_end)
+static inline void mas_wr_append(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
 	void __rcu **slots;
 	unsigned char end = mas->end;
+	unsigned char new_end = mas_wr_new_end(wr_mas);
 
 	if (new_end < mt_pivots[wr_mas->type]) {
 		wr_mas->pivots[new_end] = wr_mas->pivots[end];
@@ -3834,7 +3838,6 @@ static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
 static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
-	unsigned char new_end = mas_wr_new_end(wr_mas);
 
 	switch (mas->store_type) {
 	case wr_exact_fit:
@@ -3843,13 +3846,13 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
 		mas_update_gap(mas);
 		break;
 	case wr_append:
-		mas_wr_append(wr_mas, new_end);
+		mas_wr_append(wr_mas);
 		break;
 	case wr_slot_store:
 		mas_wr_slot_store(wr_mas);
 		break;
 	case wr_node_store:
-		mas_wr_node_store(wr_mas, new_end);
+		mas_wr_node_store(wr_mas);
 		break;
 	case wr_spanning_store:
 		mas_wr_spanning_store(wr_mas);
-- 
2.51.0