if (!xa_is_node(entry))
mt_dump_entry(entry, 0, 0, 0, format);
else if (entry)
- mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0, format);
+ mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0,
+ format);
}
EXPORT_SYMBOL_GPL(mt_dump);
-/*
- * Calculate the maximum gap in a node and check if that's what is reported in
- * the parent (unless root).
- */
-static void mas_validate_gaps(struct ma_state *mas)
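+/*
+ * Validate a single node: pivots are within the node's limits and ascending,
+ * gaps match what the parent recorded, children point back to this node and
+ * slot, and there are no sequential NULL entries.
+ */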
+static void mas_validate_node(struct ma_state *mas,
+ struct maple_enode **leaf_prev)
{
- struct maple_enode *mte = mas->node;
- struct maple_node *p_mn;
- unsigned long gap = 0, max_gap = 0;
- unsigned long p_end, p_start = mas->min;
- unsigned char p_slot;
- unsigned long *gaps = NULL;
- unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
- int i;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned long *pivots, *gaps;
+ unsigned long piv, prev_piv, gap, max_gap;
+ struct maple_enode *child, *prev;
+ struct maple_node *node;
+ unsigned char i;
- if (ma_is_dense(mte_node_type(mte))) {
- for (i = 0; i < mt_slot_count(mte); i++) {
- if (mas_get_slot(mas, i)) {
- if (gap > max_gap)
- max_gap = gap;
- gap = 0;
- continue;
- }
- gap++;
+ max_gap = 0;
+ prev_piv = mas->min;
+ prev = *leaf_prev;
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ slots = ma_slots(node, type);
+ pivots = ma_pivots(node, type);
+ gaps = ma_gaps(node, type);
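+
+ /* Walk each occupied slot, validating pivots, gaps and children */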
+ for (i = 0; i < mt_slots[type]; i++) {
+ gap = 0;
+ piv = mas_logical_pivot(mas, pivots, i, type);
+ if (!piv && (i != 0))
+ break;
+
+ /* Validate all pivots are within mas->min and mas->max. */
+ if (piv < prev_piv) {
+ pr_err("%p[%u] piv %lu < prev_piv %lu\n",
+ mas_mn(mas), i, piv, prev_piv);
+ MT_BUG_ON(mas->tree, piv < prev_piv);
}
- goto counted;
- }
- gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
- for (i = 0; i < mt_slot_count(mte); i++) {
- p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
+ if (piv < mas->min) {
+ pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
+ piv, mas->min);
+ MT_BUG_ON(mas->tree, piv < mas->min);
+ }
- if (!gaps) {
- if (mas_get_slot(mas, i)) {
- gap = 0;
- goto not_empty;
- }
+ if (piv > mas->max) {
+ pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
+ piv, mas->max);
+ MT_BUG_ON(mas->tree, piv > mas->max);
+ }
- gap += p_end - p_start + 1;
- } else {
- void *entry = mas_get_slot(mas, i);
-
- gap = gaps[i];
- if (!entry) {
- if (gap != p_end - p_start + 1) {
- pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
- mas_mn(mas), i,
- mas_get_slot(mas, i), gap,
- p_end, p_start);
- mt_dump(mas->tree, mt_dump_hex);
-
- MT_BUG_ON(mas->tree,
- gap != p_end - p_start + 1);
- }
+ child = mas_slot(mas, slots, i);
+
+ /* Calculate gap for this slot */
+ if (mt_is_alloc(mas->tree)) {
+ if (!gaps) {
+ if (!child)
+ gap = piv - prev_piv + 1;
} else {
- if (gap > p_end - p_start + 1) {
+ unsigned long limits = piv - prev_piv + 1;
+ gap = gaps[i];
+
+ if (child && (gap > limits)) {
pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
- mas_mn(mas), i, gap, p_end, p_start,
- p_end - p_start + 1);
- MT_BUG_ON(mas->tree,
- gap > p_end - p_start + 1);
+ mas_mn(mas), i, gap, piv, prev_piv,
+ limits);
+ MT_BUG_ON(mas->tree, gap > limits);
+ } else {
+ MT_BUG_ON(mas->tree, !child);
}
}
}
- if (gap > max_gap)
- max_gap = gap;
-not_empty:
- p_start = p_end + 1;
- if (p_end >= mas->max)
- break;
- }
+ /* Validate no sequential NULLs */
+ if (!child && !prev)
+ pr_err("Sequential null at %p[%u]\n", mas_mn(mas), i);
+ MT_BUG_ON(mas->tree, !prev && !child);
-counted:
- if (mte_is_root(mte))
- return;
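+ /* Leaf slots hold entries rather than child nodes; skip the child checks */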
+ if (mte_is_leaf(mas->node))
+ goto next_in_leaf;
- p_slot = mte_parent_slot(mas->node);
- p_mn = mte_parent(mte);
- MT_BUG_ON(mas->tree, max_gap > mas->max);
- if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
- pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
- mt_dump(mas->tree, mt_dump_hex);
- }
-
- MT_BUG_ON(mas->tree,
- ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
-}
-
-static void mas_validate_parent_slot(struct ma_state *mas)
-{
- struct maple_node *parent;
- struct maple_enode *node;
- enum maple_type p_type;
- unsigned char p_slot;
- void __rcu **slots;
- int i;
-
- if (mte_is_root(mas->node))
- return;
+ if (!child)
+ pr_err("%p[%u] cannot be null\n", mas_mn(mas), i);
+ MT_BUG_ON(mas->tree, !child);
- p_slot = mte_parent_slot(mas->node);
- p_type = mas_parent_enum(mas, mas->node);
- parent = mte_parent(mas->node);
- slots = ma_slots(parent, p_type);
- MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
-
- /* Check prev/next parent slot for duplicate node entry */
-
- for (i = 0; i < mt_slots[p_type]; i++) {
- node = mas_slot(mas, slots, i);
- if (i == p_slot) {
- if (node != mas->node)
- pr_err("parent %p[%u] does not have %p\n",
- parent, i, mas_mn(mas));
- MT_BUG_ON(mas->tree, node != mas->node);
- } else if (node == mas->node) {
- pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
- mas_mn(mas), parent, i, p_slot);
- MT_BUG_ON(mas->tree, node == mas->node);
+ /* Validate no duplicate child in adjacent slots */
+ if (mte_to_node(prev) == mte_to_node(child)) {
+ pr_err("Duplicate child at %p[%u] and prev\n",
+ mas_mn(mas), i);
+ MT_BUG_ON(mas->tree, 1);
}
- }
-}
-
-static void mas_validate_child_slot(struct ma_state *mas)
-{
- enum maple_type type = mte_node_type(mas->node);
- void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
- unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
- struct maple_enode *child;
- unsigned char i;
-
- if (mte_is_leaf(mas->node))
- return;
-
- for (i = 0; i < mt_slots[type]; i++) {
- child = mas_slot(mas, slots, i);
- if (!pivots[i] || pivots[i] == mas->max)
- break;
-
- if (!child)
- break;
+ /* Validate slot encodings */
if (mte_parent_slot(child) != i) {
pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
			       mas_mn(mas), i, mte_to_node(child),
			       mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
}
+ /* Validate the child's parent */
if (mte_parent(child) != mte_to_node(mas->node)) {
pr_err("child %p has parent %p not %p\n",
mte_to_node(child), mte_parent(child),
mte_to_node(mas->node));
MT_BUG_ON(mas->tree, 1);
}
- }
-}
-/*
- * Validate all pivots are within mas->min and mas->max.
- */
-static void mas_validate_limits(struct ma_state *mas)
-{
- int i;
- unsigned long prev_piv = 0;
- enum maple_type type = mte_node_type(mas->node);
- void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
- unsigned long *pivots = ma_pivots(mas_mn(mas), type);
-
- /* all limits are fine here. */
- if (mte_is_root(mas->node))
- return;
- for (i = 0; i < mt_slots[type]; i++) {
- unsigned long piv;
-
- piv = mas_safe_pivot(mas, pivots, i, type);
+next_in_leaf:
+ /* Set max_gap if necessary */
+ if (gap > max_gap)
+ max_gap = gap;
- if (!piv && (i != 0))
+ prev_piv = piv + 1;
+ prev = child;
+ if (piv == mas->max)
break;
+ }
- if (!mte_is_leaf(mas->node)) {
- void *entry = mas_slot(mas, slots, i);
-
- if (!entry)
- pr_err("%p[%u] cannot be null\n",
- mas_mn(mas), i);
+ /* Check the max gap against what the parent recorded */
+ if (mt_is_alloc(mas->tree) && !mte_is_root(mas->node)) {
+ unsigned long *parent_gaps;
+ unsigned char p_slot;
- MT_BUG_ON(mas->tree, !entry);
- }
+ p_slot = mte_parent_slot(mas->node);
+ parent_gaps = ma_gaps(mte_parent(mas->node),
+ mas_parent_enum(mas, mas->node));
- if (prev_piv > piv) {
- pr_err("%p[%u] piv %lu < prev_piv %lu\n",
- mas_mn(mas), i, piv, prev_piv);
- MT_BUG_ON(mas->tree, piv < prev_piv);
- }
+ if (parent_gaps) {
+ if (max_gap > mas->max)
+ pr_err("%p max gap %lu > node max %lu\n",
+ mas_mn(mas), max_gap, mas->max);
+ MT_BUG_ON(mas->tree, max_gap > mas->max);
+ if (parent_gaps[p_slot] != max_gap)
+ pr_err("gap %p[%u] (%lu) != %lu\n",
+ mte_parent(mas->node), p_slot,
+ parent_gaps[p_slot], max_gap);
- if (piv < mas->min) {
- pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
- piv, mas->min);
- MT_BUG_ON(mas->tree, piv < mas->min);
- }
- if (piv > mas->max) {
- pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
- piv, mas->max);
- MT_BUG_ON(mas->tree, piv > mas->max);
+ MT_BUG_ON(mas->tree, parent_gaps[p_slot] != max_gap);
}
- prev_piv = piv;
- if (piv == mas->max)
- break;
}
+
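+ /* Remember the last leaf entry so NULL runs spanning nodes are caught */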
+ if (mte_is_leaf(mas->node))
+ *leaf_prev = prev;
+
+ /* Check for stale data beyond end of node */
for (i += 1; i < mt_slots[type]; i++) {
- void *entry = mas_slot(mas, slots, i);
+ child = mas_slot(mas, slots, i);
- if (entry && (i != mt_slots[type] - 1)) {
- pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
- i, entry);
- MT_BUG_ON(mas->tree, entry != NULL);
+ if (child && (i != mt_slots[type] - 1)) {
+ pr_err("%p[%u] should not have child %p\n", mas_mn(mas),
+ i, child);
+ MT_BUG_ON(mas->tree, child != NULL);
}
if (i < mt_pivots[type]) {
}
	}
}
-static void mt_validate_nulls(struct maple_tree *mt)
-{
- void *entry, *last = (void *)1;
- unsigned char offset = 0;
- void __rcu **slots;
- MA_STATE(mas, mt, 0, 0);
-
- mas_start(&mas);
- if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
- return;
-
- while (!mte_is_leaf(mas.node))
- mas_descend(&mas);
-
- slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
- do {
- entry = mas_slot(&mas, slots, offset);
- if (!last && !entry) {
- pr_err("Sequential nulls end at %p[%u]\n",
- mas_mn(&mas), offset);
- }
- MT_BUG_ON(mt, !last && !entry);
- last = entry;
- if (offset == mas_data_end(&mas)) {
- mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
- if (mas_is_none(&mas))
- return;
- offset = 0;
- slots = ma_slots(mte_to_node(mas.node),
- mte_node_type(mas.node));
- } else {
- offset++;
- }
-
- } while (!mas_is_none(&mas));
-}
-
/*
* validate a maple tree by checking:
* 1. The limits (pivots are within mas->min to mas->max)
void mt_validate(struct maple_tree *mt)
{
unsigned char end;
+ struct maple_enode *last;
MA_STATE(mas, mt, 0, 0);
rcu_read_lock();
if (!mas_searchable(&mas))
goto done;
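+
+ /* Track the previous leaf's last entry for cross-node NULL validation */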
+ last = mas.node;
mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
while (!mas_is_none(&mas)) {
MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
if ((end < mt_min_slot_count(mas.node)) &&
(mas.max != ULONG_MAX)) {
pr_err("Invalid size %u of %p\n", end,
- mas_mn(&mas));
+ mas_mn(&mas));
MT_BUG_ON(mas.tree, 1);
}
}
- mas_validate_parent_slot(&mas);
- mas_validate_child_slot(&mas);
- mas_validate_limits(&mas);
- if (mt_is_alloc(mt))
- mas_validate_gaps(&mas);
+
+ mas_validate_node(&mas, &last);
mas_dfs_postorder(&mas, ULONG_MAX);
}
- mt_validate_nulls(mt);
done:
rcu_read_unlock();