*/
static inline bool mas_store_root(struct ma_state *mas, void *entry)
{
+ trace_ma_op(__func__, mas);
if (!entry) {
if (!mas->index)
rcu_assign_pointer(mas->tree->ma_root, NULL);
return false;
}
- trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
return true;
}
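For reference, a hedged sketch (not part of this patch; the example_* names and the constants are invented, and later sketches assume the same headers) of what the reshuffled tracepoints mean for a plain store through the public API.

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/gfp.h>

/* One ma_write event is expected from mas_wr_store_entry(), plus one
 * ma_op event from whichever internal write path services the store.
 */
static int example_store(struct maple_tree *mt)
{
	/* Store a value entry over the range [10, 20]. */
	return mtree_store_range(mt, 10, 20, xa_mk_value(42), GFP_KERNEL);
}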
void __rcu **slots;
unsigned long *pivots;
+ trace_ma_op(__func__, mas);
WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX);
if (!entry) {
* a rebalance is required for the operation to complete and an overflow
* of data may happen.
*/
- trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);
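A hedged illustration of the path above (invented function name): a store whose range covers several existing entries goes through the spanning-store code, while a store over the whole index space is turned into a new root by the check shown.

static int example_spanning_store(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 5, 500);	/* [5, 500] may cover many existing slots */
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, xa_mk_value(1), GFP_KERNEL);
	mas_unlock(&mas);
	return ret;
}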
/*
mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
in_rcu = mt_in_rcu(mas->tree);
offset_end = wr_mas->offset_end;
node_pivots = mt_pivots[wr_mas->type];
} else {
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
- trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
mas->end = new_end;
return;
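A hedged reader-side sketch (invented name): lockless readers are the reason the writer above assembles a replacement node and only falls back to the in-place memcpy() when the tree is not in RCU mode.

static void *example_rcu_read(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	rcu_read_lock();
	entry = mas_walk(&mas);	/* sees the old or the new node, never a mix */
	rcu_read_unlock();
	return entry;
}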
void __rcu **slots = wr_mas->slots;
bool gap = false;
+ trace_ma_op(__func__, mas);
gap |= !mt_slot_locked(mas->tree, slots, offset);
gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
mas->offset++; /* Keep mas accurate. */
}
- trace_ma_write(__func__, mas, 0, wr_mas->entry);
/*
* Only update gap when the new entry is empty or there is an empty
* entry in the original two ranges.
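A hedged sketch of why the gap bookkeeping matters (invented name; assumes a tree created with MT_FLAGS_ALLOC_RANGE, since gaps are only tracked for allocation trees): the recorded gaps are what an empty-area search walks instead of scanning every slot.

static int example_find_gap(struct maple_tree *mt, unsigned long *startp)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	mas_lock(&mas);
	ret = mas_empty_area(&mas, 0, ULONG_MAX, 16);	/* free range of 16 indices */
	if (!ret)
		*startp = mas.index;
	mas_unlock(&mas);
	return ret;
}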
unsigned char end = mas->end;
unsigned char new_end = mas_wr_new_end(wr_mas);
+ trace_ma_op(__func__, mas);
if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
mas_update_gap(mas);
mas->end = new_end;
- trace_ma_write(__func__, mas, new_end, wr_mas->entry);
return;
}
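A hedged sketch of when the append case above tends to be hit (invented name and sizes): ascending stores through one ma_state keep landing at the end of the current leaf, so they can usually be appended without copying the node.

static int example_fill(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	unsigned long i;
	int ret = 0;

	mas_lock(&mas);
	for (i = 0; i < 64 && !ret; i++) {
		mas_set_range(&mas, i * 10, i * 10 + 9);
		ret = mas_store_gfp(&mas, xa_mk_value(i + 1), GFP_KERNEL);
	}
	mas_unlock(&mas);
	return ret;
}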
struct maple_copy cp;
struct ma_state sib;
- trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
-
mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
cp_leaf_init(&cp, mas, wr_mas, wr_mas);
do {
split_data(&cp, wr_mas, &sib);
struct maple_copy cp;
struct ma_state sib;
- trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
-
/*
* Rebalancing occurs if a node is insufficient. Data is rebalanced
* against the node to the right if it exists, otherwise the node to the
*/
mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
cp_leaf_init(&cp, mas, wr_mas, wr_mas);
do {
rebalance_data(&cp, wr_mas, &sib);
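A hedged sketch tying into the rebalance comment above (invented name): clearing a span by storing NULL can leave the touched leaf with too few entries, which is the insufficient-node case the loop above resolves by pulling data from a sibling.

static int example_wipe(struct maple_tree *mt, unsigned long first,
			unsigned long last)
{
	MA_STATE(mas, mt, first, last);
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, NULL, GFP_KERNEL);	/* clears [first, last] */
	mas_unlock(&mas);
	return ret;
}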
* mas_wr_store_entry() - Internal call to store a value
* @wr_mas: The maple write state
*/
-static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
+static void mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
switch (mas->store_type) {
case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
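A hedged sketch of the wr_exact_fit case (invented name): overwriting an existing entry with a range of exactly the same span reduces to the single slot assignment shown above.

static void example_overwrite(struct maple_tree *mt)
{
	mtree_store(mt, 100, xa_mk_value(1), GFP_KERNEL);
	mtree_store(mt, 100, xa_mk_value(2), GFP_KERNEL);	/* same span: exact fit */
}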
int request;
MA_WR_STATE(wr_mas, mas, entry);
- trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MAS_WARN_ON(mas, mas->index > mas->last))
pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last,
}
store:
- trace_ma_write(__func__, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
mas_destroy(mas);
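A hedged caller-side sketch (invented name): mas_store_gfp() takes care of node allocation itself, so the caller only checks the final return value, and mas_destroy(), as above, releases anything preallocated but unused.

static int example_store_checked(struct maple_tree *mt, unsigned long first,
				 unsigned long last, void *item)
{
	MA_STATE(mas, mt, first, last);
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, item, GFP_KERNEL);
	mas_unlock(&mas);
	if (ret)
		pr_warn("maple tree store failed: %d\n", ret);
	return ret;
}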
unsigned long index = mas->index;
MA_WR_STATE(wr_mas, mas, NULL);
+ trace_ma_op(__func__, mas);
if (!mas_is_active(mas) || !mas_is_start(mas))
mas->status = ma_start;
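A hedged sketch around the erase path (invented name): mas_erase() hands back the entry it removed, or NULL if the index was already empty, so the caller can free the old object after dropping the lock.

static void *example_erase(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *old;

	mas_lock(&mas);
	old = mas_erase(&mas);
	mas_unlock(&mas);
	return old;
}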
MA_STATE(mas, mt, index, last);
int ret = 0;
- trace_ma_write(__func__, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
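A hedged sketch of the entry check above (invented name): only ordinary pointers and xa_mk_value() entries belong in the tree; reserved internal XArray entries are refused with -EINVAL, so integer payloads get wrapped first.

static int example_store_value(struct maple_tree *mt)
{
	/* Wrap the integer so it is stored as a value entry, not a raw pointer. */
	return mtree_store_range(mt, 0, 9, xa_mk_value(0xdead), GFP_KERNEL);
}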