From: Liam R. Howlett
Date: Fri, 26 Sep 2025 20:44:36 +0000 (-0400)
Subject: trace cleanup and drop inline from mas_wr_store_entry()
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=refs%2Fheads%2Fsplit_w_structs_v2;p=users%2Fjedix%2Flinux-maple.git

trace cleanup and drop inline from mas_wr_store_entry()

Emit a single trace_ma_write() event from mas_wr_store_entry(), the
common dispatch point for all stores, instead of scattering
trace_ma_write() calls across the individual write helpers.  The
helpers (mas_wr_spanning_store(), mas_wr_node_store(),
mas_wr_slot_store(), mas_wr_append(), mas_wr_split(),
mas_wr_rebalance()) as well as mas_store_root(), mas_new_root(), and
mas_erase() now emit trace_ma_op() on entry instead, and the duplicate
trace_ma_write() calls in mas_store(), mas_store_prealloc(), and
mtree_store_range() are dropped.

While there, drop the inline hint from mas_wr_store_entry() and let
the compiler decide.

Signed-off-by: Liam R. Howlett
---

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index e7b943d570ba..f7c2806f0f07 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3124,6 +3124,7 @@ static inline void mas_root_expand(struct ma_state *mas, void *entry)
  */
 static inline void mas_store_root(struct ma_state *mas, void *entry)
 {
+	trace_ma_op(__func__, mas);
 	if (!entry) {
 		if (!mas->index)
 			rcu_assign_pointer(mas->tree->ma_root, NULL);
@@ -3173,7 +3174,6 @@ static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
 			return false;
 	}
 
-	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
 	return true;
 }
 
@@ -3352,6 +3352,7 @@ static inline void mas_new_root(struct ma_state *mas, void *entry)
 	void __rcu **slots;
 	unsigned long *pivots;
 
+	trace_ma_op(__func__, mas);
 	WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX);
 	if (!entry) {
@@ -3408,9 +3409,9 @@ static void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	 * a rebalance is required for the operation to complete and an overflow
 	 * of data may happen.
 	 */
-	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
 
 	mas = wr_mas->mas;
+	trace_ma_op(__func__, mas);
 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
 		return mas_new_root(mas, wr_mas->entry);
 	/*
@@ -3485,6 +3486,7 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas)
 
 	mas = wr_mas->mas;
 
+	trace_ma_op(__func__, mas);
 	in_rcu = mt_in_rcu(mas->tree);
 	offset_end = wr_mas->offset_end;
 	node_pivots = mt_pivots[wr_mas->type];
@@ -3554,7 +3556,6 @@ done:
 	} else {
 		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
 	}
-	trace_ma_write(__func__, mas, 0, wr_mas->entry);
 	mas_update_gap(mas);
 	mas->end = new_end;
 	return;
@@ -3571,6 +3572,7 @@ static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
 	void __rcu **slots = wr_mas->slots;
 	bool gap = false;
 
+	trace_ma_op(__func__, mas);
 	gap |= !mt_slot_locked(mas->tree, slots, offset);
 	gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
 
@@ -3598,7 +3600,6 @@ static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
 		mas->offset++; /* Keep mas accurate. */
 	}
 
-	trace_ma_write(__func__, mas, 0, wr_mas->entry);
 	/*
 	 * Only update gap when the new entry is empty or there is an empty
 	 * entry in the original two ranges.
@@ -3687,6 +3688,7 @@ static inline void mas_wr_append(struct ma_wr_state *wr_mas)
 	unsigned char end = mas->end;
 	unsigned char new_end = mas_wr_new_end(wr_mas);
 
+	trace_ma_op(__func__, mas);
 	if (new_end < mt_pivots[wr_mas->type]) {
 		wr_mas->pivots[new_end] = wr_mas->pivots[end];
 		ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
@@ -3718,7 +3720,6 @@ static inline void mas_wr_append(struct ma_wr_state *wr_mas)
 
 	mas_update_gap(mas);
 	mas->end = new_end;
-	trace_ma_write(__func__, mas, new_end, wr_mas->entry);
 	return;
 }
 
@@ -3804,9 +3805,8 @@ static void mas_wr_split(struct ma_wr_state *wr_mas)
 	struct maple_copy cp;
 	struct ma_state sib;
 
-	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
-
 	mas = wr_mas->mas;
+	trace_ma_op(__func__, mas);
 	cp_leaf_init(&cp, mas, wr_mas, wr_mas);
 	do {
 		split_data(&cp, wr_mas, &sib);
@@ -3833,8 +3833,6 @@ static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
 	struct maple_copy cp;
 	struct ma_state sib;
 
-	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
-
 	/*
 	 * Rebalancing occurs if a node is insufficient. Data is rebalanced
 	 * against the node to the right if it exists, otherwise the node to the
@@ -3846,6 +3844,7 @@ static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
 	 */
 
 	mas = wr_mas->mas;
+	trace_ma_op(__func__, mas);
 	cp_leaf_init(&cp, mas, wr_mas, wr_mas);
 	do {
 		rebalance_data(&cp, wr_mas, &sib);
@@ -3861,10 +3860,11 @@ static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
  * mas_wr_store_entry() - Internal call to store a value
  * @wr_mas: The maple write state
  */
-static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
+static void mas_wr_store_entry(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
 
+	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
 	switch (mas->store_type) {
 	case wr_exact_fit:
 		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
@@ -5184,7 +5184,6 @@ void *mas_store(struct ma_state *mas, void *entry)
 	int request;
 	MA_WR_STATE(wr_mas, mas, entry);
 
-	trace_ma_write(__func__, mas, 0, entry);
 #ifdef CONFIG_DEBUG_MAPLE_TREE
 	if (MAS_WARN_ON(mas, mas->index > mas->last))
 		pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last,
@@ -5285,7 +5284,6 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
 	}
 
 store:
-	trace_ma_write(__func__, mas, 0, entry);
 	mas_wr_store_entry(&wr_mas);
 	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
 	mas_destroy(mas);
@@ -6024,6 +6022,7 @@ void *mas_erase(struct ma_state *mas)
 	unsigned long index = mas->index;
 	MA_WR_STATE(wr_mas, mas, NULL);
 
+	trace_ma_op(__func__, mas);
 	if (!mas_is_active(mas) || !mas_is_start(mas))
 		mas->status = ma_start;
 
@@ -6141,7 +6140,6 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
 	MA_STATE(mas, mt, index, last);
 	int ret = 0;
 
-	trace_ma_write(__func__, &mas, 0, entry);
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;
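
For readers tracing through the change: the standalone sketch below
(plain userspace C, not kernel code; every name in it is a hypothetical
stand-in for the maple tree internals) illustrates the pattern this
commit settles on, where the common dispatch function emits the single
"write" event and every helper emits its own "op" event on entry, so a
store is logged exactly once at each level.

#include <stdio.h>

/* Hypothetical stand-ins for the trace_ma_op()/trace_ma_write() tracepoints. */
static void trace_op(const char *fn)    { printf("ma_op:    %s\n", fn); }
static void trace_write(const char *fn) { printf("ma_write: %s\n", fn); }

/* Write helpers trace themselves on entry, as the patched helpers now do. */
static void wr_append(void)     { trace_op(__func__); /* ... append ... */ }
static void wr_node_store(void) { trace_op(__func__); /* ... rewrite ... */ }

/*
 * Common dispatch point, in the spirit of mas_wr_store_entry(): the one
 * trace_write() fires here, so a store is never logged twice no matter
 * which helper completes it.
 */
static void wr_store_entry(int store_type)
{
	trace_write(__func__);
	if (store_type == 0)
		wr_append();
	else
		wr_node_store();
}

int main(void)
{
	wr_store_entry(0);	/* ma_write: wr_store_entry, then ma_op: wr_append */
	wr_store_entry(1);	/* ma_write: wr_store_entry, then ma_op: wr_node_store */
	return 0;
}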