}
+/*
+ * ma_get_rcu_slot() - Read slot @slot of node @mn, for node type @type.
+ *
+ * Each arm now dereferences the slot with rcu_dereference_check() instead of
+ * rcu_dereference(), so the read is accepted by lockdep either inside an RCU
+ * read-side critical section or while the tree lock is held
+ * (lockdep_is_held(mtree->ma_lock)); @mtree is threaded in for that check.
+ * NOTE(review): lockdep_is_held() is passed mtree->ma_lock without '&' --
+ * assumes ma_lock is (or resolves to) a lock pointer; confirm against the
+ * struct maple_tree definition.
+ */
static inline struct maple_enode *ma_get_rcu_slot(
const struct maple_node *mn, unsigned char slot,
- enum maple_type type)
+ enum maple_type type, struct maple_tree *mtree)
{
switch (type) {
case maple_range_64:
case maple_leaf_64:
- return rcu_dereference(mn->mr64.slot[slot]);
+ return rcu_dereference_check(mn->mr64.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
+ /* default deliberately shares the dense layout's plain slot array. */
default:
case maple_dense:
- return rcu_dereference(mn->slot[slot]);
+ return rcu_dereference_check(mn->slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_arange_64:
- return rcu_dereference(mn->ma64.slot[slot]);
+ return rcu_dereference_check(mn->ma64.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_sparse_6:
- return rcu_dereference(mn->ms6.slot[slot]);
+ return rcu_dereference_check(mn->ms6.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_sparse_9:
- return rcu_dereference(mn->ms9.slot[slot]);
+ return rcu_dereference_check(mn->ms9.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_sparse_16:
- return rcu_dereference(mn->ms16.slot[slot]);
+ return rcu_dereference_check(mn->ms16.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_sparse_21:
- return rcu_dereference(mn->ms21.slot[slot]);
+ return rcu_dereference_check(mn->ms21.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_sparse_32:
- return rcu_dereference(mn->ms32.slot[slot]);
+ return rcu_dereference_check(mn->ms32.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_sparse_64:
- return rcu_dereference(mn->ms64.slot[slot]);
+ return rcu_dereference_check(mn->ms64.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_range_16:
case maple_leaf_16:
- return rcu_dereference(mn->mr16.slot[slot]);
+ return rcu_dereference_check(mn->mr16.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
case maple_range_32:
case maple_leaf_32:
- return rcu_dereference(mn->mr32.slot[slot]);
+ return rcu_dereference_check(mn->mr32.slot[slot],
+ lockdep_is_held(mtree->ma_lock));
}
}
+/*
+ * _mte_get_rcu_slot() - As ma_get_rcu_slot(), but for an encoded node.
+ * Unwraps @mn with mte_to_node() and forwards @mtree so the underlying
+ * rcu_dereference_check() can consult the tree lock.
+ */
static inline struct maple_enode *_mte_get_rcu_slot(
const struct maple_enode *mn, unsigned char slot,
- enum maple_type type)
+ enum maple_type type, struct maple_tree *mtree)
{
- return ma_get_rcu_slot(mte_to_node(mn), slot, type);
+ return ma_get_rcu_slot(mte_to_node(mn), slot, type, mtree);
}
+/*
+ * mte_get_rcu_slot() - Read slot @slot of encoded node @mn, deriving the
+ * node type from the encoded pointer via mte_node_type().  @mtree is passed
+ * through for the lockdep-aware dereference.
+ */
static inline struct maple_enode *mte_get_rcu_slot(const struct maple_enode *mn,
- unsigned char slot)
+ unsigned char slot, struct maple_tree *mtree)
{
- return _mte_get_rcu_slot(mn, slot, mte_node_type(mn));
+ return _mte_get_rcu_slot(mn, slot, mte_node_type(mn), mtree);
}
-static inline struct maple_enode *mte_get_rcu_sanitized(
- const struct maple_enode *mn, unsigned char slot)
+
+/*
+ * mas_get_rcu_slot() - Read slot @slot of the node tracked by @mas.
+ * Convenience wrapper: the maple state supplies both the node and the
+ * tree needed for the lock-checked dereference.
+ */
+static inline struct maple_enode *mas_get_rcu_slot(const struct ma_state *mas,
+ unsigned char slot)
{
- void *entry = mte_get_rcu_slot(mn, slot);
+ return mte_get_rcu_slot(mas->node, slot, mas->tree);
+}
+/*
+ * mas_get_rcu_sanitized() - As mas_get_rcu_slot(), but entries flagged for
+ * coalescing (mt_will_coalesce()) are reported as NULL.  Replaces the old
+ * mte_get_rcu_sanitized(), which took a bare encoded node and so could not
+ * reach the tree for the lockdep check.
+ */
+static inline struct maple_enode *mas_get_rcu_sanitized(
+ struct ma_state *mas, unsigned char slot)
+{
+ void *entry = mte_get_rcu_slot(mas->node, slot, mas->tree);
if (mt_will_coalesce(entry))
return NULL;
return entry;
}
+
+/*
+ * ma_set_rcu_slot() - Publish @val into slot @slot of node @mn (type @type).
+ *
+ * RCU_INIT_POINTER() is replaced with rcu_assign_pointer() in every arm:
+ * the store now carries release ordering, so a concurrent RCU reader that
+ * observes the new pointer also observes the pointee's initialized
+ * contents.  RCU_INIT_POINTER() is only valid when no readers can race
+ * with the update.
+ */
static inline void ma_set_rcu_slot(struct maple_node *mn,
unsigned char slot, enum maple_type type, void *val)
{
switch (type) {
+ /* default deliberately shares the dense layout's plain slot array. */
default:
case maple_dense:
- RCU_INIT_POINTER(mn->slot[slot], val);
+ rcu_assign_pointer(mn->slot[slot], val);
break;
case maple_sparse_6:
- RCU_INIT_POINTER(mn->ms6.slot[slot], val);
+ rcu_assign_pointer(mn->ms6.slot[slot], val);
break;
case maple_sparse_9:
- RCU_INIT_POINTER(mn->ms9.slot[slot], val);
+ rcu_assign_pointer(mn->ms9.slot[slot], val);
break;
case maple_sparse_16:
- RCU_INIT_POINTER(mn->ms16.slot[slot], val);
+ rcu_assign_pointer(mn->ms16.slot[slot], val);
break;
case maple_sparse_21:
- RCU_INIT_POINTER(mn->ms21.slot[slot], val);
+ rcu_assign_pointer(mn->ms21.slot[slot], val);
break;
case maple_sparse_32:
- RCU_INIT_POINTER(mn->ms32.slot[slot], val);
+ rcu_assign_pointer(mn->ms32.slot[slot], val);
break;
case maple_sparse_64:
- RCU_INIT_POINTER(mn->ms64.slot[slot], val);
+ rcu_assign_pointer(mn->ms64.slot[slot], val);
break;
case maple_range_16:
case maple_leaf_16:
- RCU_INIT_POINTER(mn->mr16.slot[slot], val);
+ rcu_assign_pointer(mn->mr16.slot[slot], val);
break;
case maple_range_32:
case maple_leaf_32:
- RCU_INIT_POINTER(mn->mr32.slot[slot], val);
+ rcu_assign_pointer(mn->mr32.slot[slot], val);
break;
case maple_range_64:
case maple_leaf_64:
+ /* mr64 carries 8 slots; trap out-of-bounds writes loudly. */
BUG_ON(slot >= 8);
- RCU_INIT_POINTER(mn->mr64.slot[slot], val);
+ rcu_assign_pointer(mn->mr64.slot[slot], val);
break;
case maple_arange_64:
+ /* ma64 carries 5 slots (gap metadata uses the rest of the node). */
BUG_ON(slot >= 5);
- RCU_INIT_POINTER(mn->ma64.slot[slot], val);
+ rcu_assign_pointer(mn->ma64.slot[slot], val);
break;
}
}
if (slot)
mas->min = mas_get_safe_pivot(mas, slot - 1) + 1;
mas->max = mas_get_safe_pivot(mas, slot);
- mas->node = mte_get_rcu_slot(mas->node, mas_get_slot(mas));
-}
-/** Private
- * mte_cp_rcu_slot() = Copy from one node to anther. Upon seeing a retry,
- * copies NULL.
- */
-static inline void mte_cp_rcu_slot(struct maple_enode *dst,
- unsigned char dloc, struct maple_enode *src, unsigned long sloc)
-{
- void *entry = mte_get_rcu_slot(src, sloc);
-
- if (mt_is_empty(entry) || xa_is_retry(entry))
- entry = NULL;
-
- mte_set_rcu_slot(dst, dloc, entry);
+ mas->node = mas_get_rcu_slot(mas, mas_get_slot(mas));
}
static inline void mte_update_rcu_slot(const struct maple_enode *mn,
break;
}
- entry = _mte_get_rcu_slot(mn, slot, type);
+ entry = _mte_get_rcu_slot(mn, slot, type, mas->tree);
if (mt_will_coalesce(entry)) {
if (piv == prev_piv || !slot)
(*coalesce)++;
unsigned long wr_pivot = mas->min ? mas->min - 1 : 0;
unsigned char coalesce, dst_slot = mas_get_slot(mas);
- if (!mte_get_rcu_slot(mas->node, 0) && !mte_get_pivot(mas->node, 0))
+ if (!mas_get_rcu_slot(mas, 0) && !mte_get_pivot(mas->node, 0))
dst_slot = 0; // empty node.
else if (dst_slot > mt_slot_count(mas->node)) { // Should not happen.
dst_slot = mas_data_end(mas, mte_node_type(mas->node),
if (dst_slot && mas->index <= wr_pivot) {
mas_set_safe_pivot(mas, dst_slot - 1, mas->index - 1);
} else if (entry && mas->index && (mas->index - 1 != wr_pivot)) {
- if (dst_slot && !mte_get_rcu_slot(mas->node, dst_slot - 1))
+ if (dst_slot && !mas_get_rcu_slot(mas, dst_slot - 1))
dst_slot--;
mte_set_rcu_slot(mas->node, dst_slot, NULL);
mas_set_safe_pivot(mas, dst_slot++, mas->index - 1);
} else if (!entry) { // appending NULL value.
- if (mte_get_rcu_slot(mas->node, dst_slot)) {
+ if (mas_get_rcu_slot(mas, dst_slot)) {
mas_set_safe_pivot(mas, dst_slot, mas->index - 1);
dst_slot++;
}
this_piv = ma_get_pivot(smn, src_end, stype);
}
- src_data = mte_get_rcu_slot(mas->node, dst_slot);
+ src_data = mas_get_rcu_slot(mas, dst_slot);
if (!src_data) {
if (!this_piv)
break;
}
// Append data from src.
- src_data = ma_get_rcu_slot(smn, src_start, stype);
+ src_data = ma_get_rcu_slot(smn, src_start, stype, mas->tree);
for (src_slot = src_start; src_slot <= src_end; src_slot++) {
bool next_dst = true;
if (dst_slot >= mt_slot_count(mas->node))
return dst_slot;
- src_data = ma_get_rcu_slot(smn, src_slot, stype);
+ src_data = ma_get_rcu_slot(smn, src_slot, stype, mas->tree);
if (src_slot >= mt_pivots[stype])
src_piv = src_max;
else
unsigned char split, unsigned char start, unsigned char end,
unsigned char slot, void *entry)
{
- void *existing_entry = mte_get_rcu_sanitized(src->node, slot);
+ void *existing_entry = mas_get_rcu_sanitized(src, slot);
struct ma_state *dst = left;
unsigned char dst_slot = slot;
unsigned long slot_min, slot_max;
goto done;
mas_get_range(src, slot, &slot_min, &slot_max);
- existing_entry = mte_get_rcu_sanitized(src->node, slot);
+ existing_entry = mas_get_rcu_sanitized(src, slot);
if (slot_min <= src->last && slot_max > src->last) {
mte_set_rcu_slot(dst->node, dst_slot, existing_entry);
if (ma_is_dense(mt)) {
for (i = 0; i < mt_slot_count(mas->node); i++) {
- entry = mte_get_rcu_slot(mas->node, i);
+ entry = mas_get_rcu_slot(mas, i);
if (!mt_is_empty(entry) || xa_is_retry(entry)) {
if (gap > max_gap)
max_gap = gap;
pend = mas->max;
gap = pend - pstart + 1;
- entry = mte_get_rcu_slot(mas->node, i);
+ entry = mas_get_rcu_slot(mas, i);
if (!mt_is_empty(entry) || xa_is_retry(entry)) {
prev_gap = 0;
if (pivot > limit)
goto no_entry;
- mn = mte_get_rcu_slot(mas->node, slot);
+ mn = mas_get_rcu_slot(mas, slot);
if (mt_is_empty(mn)) {
min = pivot + 1;
/* Private
* mte_destroy_walk: Free the sub-tree from @mn and below.
*/
-void mte_destroy_walk(struct maple_enode *mn)
+void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree)
{
struct maple_enode *node;
unsigned int type = mte_node_type(mn);
case maple_range_64:
case maple_arange_64:
for (i = 0; i < slot_cnt; i++) {
- node = mte_get_rcu_slot(mn, i);
+ node = mte_get_rcu_slot(mn, i, mtree);
if (!mt_is_empty(node) && !xa_is_retry(node))
- mte_destroy_walk(node);
+ mte_destroy_walk(node, mtree);
}
break;
default:
}
-static inline void mte_adopt_children(struct maple_enode *parent)
+static inline void mas_adopt_children(struct ma_state *mas,
+ struct maple_enode *parent)
{
enum maple_type type = mte_node_type(parent);
_mte_get_pivot(parent, slot, type) == 0)
break;
- child = _mte_get_rcu_slot(parent, slot, type);
+ child = _mte_get_rcu_slot(parent, slot, type, mas->tree);
if (!mt_is_empty(child))
mte_set_parent(child, parent, slot);
}
parent = mt_mk_node(mte_parent(mas->node), ptype);
slot = mte_parent_slot(mas->node);
- prev = mte_get_rcu_slot(parent, slot);
+ prev = mte_get_rcu_slot(parent, slot, mas->tree);
}
if (mte_to_node(prev) == mn)
return;
if (!mte_is_leaf(mas->node))
- mte_adopt_children(mas->node);
+ mas_adopt_children(mas, mas->node);
if (mte_is_root(mas->node)) {
mn->parent = ma_parent_ptr(
mte_set_gap(parent, slot, gap);
mas->max = max;
}
-static inline void mte_link(struct maple_enode *new, struct maple_enode *parent,
- unsigned char slot, unsigned long pivot, enum maple_type type)
+static inline void mas_link(struct ma_state *mas, struct maple_enode *new,
+ struct maple_enode *parent, unsigned char slot,
+ unsigned long pivot, enum maple_type type)
{
unsigned char pivot_cnt = mt_pivots[type];
mte_set_rcu_slot(parent, slot, new);
if (!mte_is_leaf(new))
- mte_adopt_children(new);
+ mas_adopt_children(mas, new);
}
static inline enum maple_type mas_ptype_leaf(struct ma_state *mas)
right.max = mas->max;
// left will be placed in link, not p_slot as coalescing may occur.
- mte_link(left.node, new_p_mas.node, link, left.max, ptype);
+ mas_link(mas, left.node, new_p_mas.node, link, left.max, ptype);
// right (if it exists, will be placed in link + 1;
if (right.node)
- mte_link(right.node, new_p_mas.node, link + 1,
+ mas_link(mas, right.node, new_p_mas.node, link + 1,
right.max, ptype);
// Append data from p_slot + 1 to the end.
mas_append(&new_p_mas, &parent, p_slot + 1, p_end);
// Update encoded slots in children
- mte_adopt_children(new_p_mas.node);
+ mas_adopt_children(&new_p_mas, new_p_mas.node);
mas_dup_state(mas, &new_p_mas);
// Replace the parent node & free the old parent.
sibling_slot -= 1;
else
sibling_slot += 1;
- sibling = mte_get_rcu_slot(mas->node, sibling_slot);
+ sibling = mas_get_rcu_slot(mas, sibling_slot);
if (!sibling)
return mt;
// FIXME: Check entire range, not what we would insert this time.
if (!overwrite) {
do {
- if (_mte_get_rcu_slot(mas->node, min++, this_type))
+ if (_mte_get_rcu_slot(mas->node, min++, this_type,
+ mas->tree))
return 0;
} while (min < max);
}
if (this_piv < prev_piv)
goto skip_slot;
- data = mte_get_rcu_slot(mas->node, this_slot);
+ data = mas_get_rcu_slot(mas, this_slot);
if (!data || mt_will_coalesce(data)) {
if (prev_null)
goto skip_slot;
slot_cnt++; // (2?)
if (max > mas->last) { // ends before this_slot.
- void *prev_val = mte_get_rcu_slot(mas->node, slot);
+ void *prev_val = mas_get_rcu_slot(mas, slot);
slot_cnt++; // (2 or 3?)
prev_piv = max;
if (!prev_val || mt_will_coalesce(prev_val))
mas_get_range(mas, slot, &prev_piv, &piv);
- existing_entry = mte_get_rcu_sanitized(mas->node, slot);
+ existing_entry = mas_get_rcu_sanitized(mas, slot);
if (prev_piv <= mas->last && piv > mas->last) {
mte_set_rcu_slot(cp.node, end_slot, existing_entry);
mas_set_safe_pivot(&cp, end_slot++, piv);
max = mas->max;
if (slot <= old_end)
- contents = mte_get_rcu_slot(mas->node, slot);
+ contents = mas_get_rcu_slot(mas, slot);
// Check early failures.
if (slot)
mas->min = mas_get_safe_pivot(mas, slot - 1) + 1;
mas->max = mas_get_safe_pivot(mas, slot);
- entry = mte_get_rcu_slot(mas->node, slot);
+ entry = mas_get_rcu_slot(mas, slot);
if (xa_is_skip(entry)) {
if (mas->max >= max) {
goto no_entry;
if (slot)
mas->min = mas_get_safe_pivot(mas, slot - 1);
mas->max = mas_get_safe_pivot(mas, slot);
- mas->node = mte_get_rcu_slot(mas->node, slot);
+ mas->node = mas_get_rcu_slot(mas, slot);
if (mt_is_empty(mas->node))
goto done;
if (slot != 0 && pivot == 0)
break;
- mn = mte_get_rcu_slot(mas->node, slot);
+ mn = mas_get_rcu_slot(mas, slot);
if (mt_is_empty(mn) || xa_is_retry(mn))
continue;
if (slot != 0 && pivot == 0)
break;
- mn = mte_get_rcu_slot(mas->node, slot);
+ mn = mas_get_rcu_slot(mas, slot);
if (mt_is_empty(mn) || xa_is_retry(mn)) {
prev_piv = pivot;
continue;
if (pivot < limit)
goto no_entry;
- entry = mte_get_rcu_slot(mas->node, slot);
+ entry = mas_get_rcu_slot(mas, slot);
if (!mt_is_empty(entry))
goto found;
} while (slot--);
if (r_start > max)
goto no_entry;
- entry = mte_get_rcu_slot(mas->node, slot);
+ entry = mas_get_rcu_slot(mas, slot);
if (!mt_is_empty(entry))
goto found;
while (range_start < limit) {
mas_set_slot(mas, slot);
if (!mas_next_nentry(mas, limit, &range_start)) {
- void *entry = mte_get_rcu_slot(mas->node, slot - 1);
+ void *entry = mas_get_rcu_slot(mas, slot - 1);
if (mte_is_leaf(mas->node)) {
mas->index = range_start - 1;
mas->index = mte_get_pivot(mas->node, slot - 1);
if (mas_is_none(mas))
return NULL;
- entry = mte_get_rcu_slot(mas->node, mas_get_slot(mas));
+ entry = mas_get_rcu_slot(mas, mas_get_slot(mas));
if (mas_dead_node(mas, index))
goto retry;
else
mas->index = mas->min;
- return mte_get_rcu_slot(mas->node, mas_get_slot(mas));
+ return mas_get_rcu_slot(mas, mas_get_slot(mas));
}
/*
*/
if (!hard_data ||
(end + 1 == coalesce) ||
- (end == 1 && !mte_get_rcu_slot(this_enode, 1))) {
+ (end == 1 && !mte_get_rcu_slot(this_enode, 1, mas->tree))) {
unsigned long piv;
min = mas->min;
if (mte_is_leaf(this_enode)) {
if (!piv) {
void *entry = mte_get_rcu_slot(this_enode,
- mas_get_slot(mas));
+ mas_get_slot(mas), mas->tree);
rcu_assign_pointer(mas->tree->ma_root,
entry);
mte_free(this_enode);
mas_append(mas, r_mas, 0, r_end_slot);
if (!mte_is_leaf(mas->node))
- mte_adopt_children(mas->node);
+ mas_adopt_children(mas, mas->node);
mte_set_pivot(p_mas->node, mte_parent_slot(mas->node), r_mas->max);
mte_set_rcu_slot(p_mas->node, mte_parent_slot(r_mas->node),
done:
if (!mte_is_leaf(mas->node))
- mte_adopt_children(mas->node);
+ mas_adopt_children(mas, mas->node);
if (free)
mas_replace(mas);
}
/* check if this slot is full */
- entry = _mte_get_rcu_slot(mas->node, i, type);
+ entry = mas_get_rcu_slot(mas, i);
if (entry && !xa_is_deleted(entry)) {
this_gap = 0;
goto next_slot;
struct maple_enode *next;
unsigned char coalesce;
- next = _mte_get_rcu_slot(mas->node, i, type);
+ next = mas_get_rcu_slot(mas, i);
mas->min = min;
mas->max = max;
if (!mt_is_empty(next)) {
if (mas->index > pivot)
goto next;
- entry = _mte_get_rcu_slot(mas->node, i, type);
+ entry = mas_get_rcu_slot(mas, i);
if (unlikely(xa_is_skip(entry)))
goto next;
if (!ma_is_leaf(type)) { //descend
struct maple_enode *next;
- next = _mte_get_rcu_slot(mas->node, i, type);
+ next = mas_get_rcu_slot(mas, i);
mas->min = min;
mas->max = max;
if (!mt_is_empty(next)) {
goto done;
}
- next = _mte_get_rcu_slot(mas->node, i, type);
+ next = mas_get_rcu_slot(mas, i);
if (unlikely(xa_is_skip(next))) {
if (unlikely(i == mt_slots[type] - 1)) {
i = MAPLE_NODE_SLOTS;
if (!mas_get_safe_pivot(mas, (*slot) + delta))
return false;
- entry = mte_get_rcu_slot(mas->node, (*slot) + delta);
+ entry = mas_get_rcu_slot(mas, (*slot) + delta);
if (!mt_is_empty(entry) && !xa_is_retry(entry))
return true;
*slot += delta;
leaf = _mas_range_walk(&mas, &range_start, &range_end);
slot = mas_get_slot(&mas);
if (leaf == true && slot != MAPLE_NODE_SLOTS)
- entry = mte_get_rcu_slot(mas.node, slot);
+ entry = mas_get_rcu_slot(&mas, slot);
mas.last = range_end;
if (mt_is_empty(entry) || xa_is_zero(entry) || xa_is_retry(entry))
if (mte_is_leaf(mas->node)) {
- entry = mte_get_rcu_slot(mas->node, mas_get_slot(mas));
+ entry = mas_get_rcu_slot(mas, mas_get_slot(mas));
if (!mt_is_empty(entry))
{
new_mas.index = r_index;
mas_update_gap(mas, false);
mas->node = MAS_START;
mas->alloc = new_mas.alloc;
- mte_destroy_walk(last);
+ mte_destroy_walk(last, mas->tree);
return node_cnt;
error:
if (new_mas.tree)
- mte_destroy_walk(new_mas.tree->ma_root);
+ mte_destroy_walk(new_mas.tree->ma_root, new_mas.tree);
return 0;
}
if (mas->index == 0 && !overwrite)
goto exists;
} else if (!overwrite) {
- void *entry = mte_get_rcu_slot(mas->node, slot);
+ void *entry = mas_get_rcu_slot(mas, slot);
if (!mt_is_empty(entry))
goto exists;
if (slot >= MAPLE_NODE_SLOTS)
return NULL;
- entry = mte_get_rcu_slot(mas->node, slot);
+ entry = mas_get_rcu_slot(mas, slot);
if (mte_dead_node(mas->node))
goto retry;
}
if (slot == MAPLE_NODE_SLOTS)
return NULL;
- entry = mte_get_rcu_slot(mas->node, slot);
+ entry = mas_get_rcu_slot(mas, slot);
mte_update_rcu_slot(mas->node, slot, XA_DELETED_ENTRY);
// dense nodes only need to set a single value.
mtree_lock(mt);
destroyed = mt->ma_root;
if (xa_is_node(destroyed))
- mte_destroy_walk(destroyed);
+ mte_destroy_walk(destroyed, mt);
mt->ma_flags = 0;
rcu_assign_pointer(mt->ma_root, NULL);
if (mte_is_dense(mte)) {
for (i = 0; i < mt_slot_count(mte); i++) {
- if (!mt_is_empty(mte_get_rcu_slot(mas->node, i))) {
+ if (!mt_is_empty(mas_get_rcu_slot(mas, i))) {
if (gap > max_gap)
max_gap = gap;
gap = 0;
p_end = mas->max;
if (mte_is_leaf(mte)) {
- if (!mt_is_empty(mte_get_rcu_slot(mas->node, i))) {
+ if (!mt_is_empty(mas_get_rcu_slot(mas, i))) {
gap = 0;
goto not_empty;
}
gap += p_end - p_start + 1;
} else {
- void *entry = mte_get_rcu_slot(mas->node, i);
+ void *entry = mas_get_rcu_slot(mas, i);
gap = mte_get_gap(mte, i);
if (mt_is_empty(entry) || xa_is_retry(entry)) {
if (gap != p_end - p_start + 1) {
pr_err(MA_PTR"[%u] -> "MA_PTR" %lu != %lu - %lu + 1\n",
mas_mn(mas), i,
- mte_get_rcu_slot(mas->node, i),
- gap, p_end, p_start);
+ mas_get_rcu_slot(mas, i), gap,
+ p_end, p_start);
MT_BUG_ON(mas->tree,
gap != p_end - p_start + 1);
not_empty:
p_start = p_end + 1;
if (p_end >= mas->max)
- break;
- }
+ break; }
counted:
if (mte_is_root(mte))
// Check prev/next parent slot for duplicate node entry
for (i = 0; i < mt_slots[p_type]; i++) {
- if (i == p_slot)
+ if (i == p_slot) {
MT_BUG_ON(mas->tree,
- ma_get_rcu_slot(parent, i, p_type) != mas->node);
- else if (ma_get_rcu_slot(parent, i, p_type) == mas->node) {
+ ma_get_rcu_slot(parent, i, p_type, mas->tree) !=
+ mas->node);
+ } else if (ma_get_rcu_slot(parent, i, p_type, mas->tree) ==
+ mas->node) {
pr_err("parent contains invalid child at "MA_PTR"[%u] "
MA_PTR"\n", parent, i, mas_mn(mas));
MT_BUG_ON(mas->tree,
- ma_get_rcu_slot(parent, i, p_type) == mas->node);
+ ma_get_rcu_slot(parent, i, p_type, mas->tree) ==
+ mas->node);
}
}
}
break;
if (prev_piv > piv) {
- void *entry = mte_get_rcu_slot(mas->node, i);
+ void *entry = mas_get_rcu_slot(mas, i);
if (!mt_will_coalesce(entry)) {
pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i,
piv, prev_piv);
}
if (piv < mas->min) {
- void *entry = mte_get_rcu_slot(mas->node, i);
+ void *entry = mas_get_rcu_slot(mas, i);
if (!mt_will_coalesce(entry)) {
if (piv < mas->min)