From: Liam R. Howlett <Liam.Howlett@oracle.com>
Date: Wed, 22 Sep 2021 17:15:07 +0000 (-0400)
Subject: maple_tree: Add leaf and non-alloc metadata for end of data
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=21d21c43e9e4af1eb7bc4423505294443846d89f;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Add leaf and non-alloc metadata for end of data

Replace the bit-packed unsigned char metadata in maple_arange_64 with a
struct maple_metadata holding separate "end" and "gap" fields, and
overlay the same metadata on the last slot of maple_range_64 via a
union so that leaf and non-alloc nodes can also record their end of
data.  Teach mas_data_end() to consult this metadata and convert
callers (mas_adopt_children(), mas_new_child(), mas_next_nentry(),
mas_dfs_preorder(), ...) to iterate only to the recorded end instead of
scanning for a NULL slot.  Set the metadata at the points where node
contents are written: mab_mas_cp(), mas_node_store(), the append fast
path in _mas_store(), and mas_root_expand().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 2be19d5fd7cb..319974be5ea1 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -82,10 +82,22 @@ typedef struct maple_pnode *maple_pnode; /* parent node */
  *
  *
  */
+struct maple_metadata {
+	unsigned char end;
+	unsigned char gap;
+
+};
+
 struct maple_range_64 {
 	struct maple_pnode *parent;
-	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
-	void __rcu *slot[MAPLE_RANGE64_SLOTS];
+	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+	union {
+		void __rcu *slot[MAPLE_RANGE64_SLOTS];
+		struct {
+			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+			struct maple_metadata meta;
+		};
+	};
 };
 
 struct maple_arange_64 {
@@ -93,7 +105,7 @@ struct maple_arange_64 {
 	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
 	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
 	unsigned long gap[MAPLE_ARANGE64_SLOTS];
-	unsigned char meta;
+	struct maple_metadata meta;
 };
 
 struct maple_alloc {
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 564d2aed62ea..7e85984b6ccc 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -826,8 +826,17 @@ static inline void *mas_root_locked(struct ma_state *mas)
 	return mt_root_locked(mas->tree);
 }
 
-#define MA_META_END_MASK	0b1111
-#define MA_META_GAP_SHIFT	4
+static inline struct maple_metadata *ma_meta(struct maple_node *mn,
+					     enum maple_type mt)
+{
+	switch (mt) {
+	case maple_arange_64:
+		return &mn->ma64.meta;
+	default:
+		return &mn->mr64.meta;
+	}
+}
+
 /*
  * ma_set_meta() - Set the metadata information of a node.
  * @mn: The maple node
@@ -838,8 +847,10 @@ static inline void *mas_root_locked(struct ma_state *mas)
 static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
 			       unsigned char offset, unsigned char end)
 {
+	struct maple_metadata *meta = ma_meta(mn, mt);
 
-	mn->ma64.meta = (offset << MA_META_GAP_SHIFT) | end;
+	meta->gap = offset;
+	meta->end = end;
 }
 
 /*
@@ -850,8 +861,9 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
 static inline unsigned char ma_meta_end(struct maple_node *mn,
 					enum maple_type mt)
 {
+	struct maple_metadata *meta = ma_meta(mn, mt);
 
-	return mn->ma64.meta & MA_META_END_MASK;
+	return meta->end;
 }
 
 /*
@@ -863,7 +875,7 @@ static inline unsigned char ma_meta_gap(struct maple_node *mn,
 					enum maple_type mt)
 {
 
-	return mn->ma64.meta >> MA_META_GAP_SHIFT;
+	return mn->ma64.meta.gap;
 }
 
 /*
@@ -876,8 +888,9 @@ static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
 				   unsigned char offset)
 {
 
-	mn->ma64.meta = (offset << MA_META_GAP_SHIFT) |
-		(mn->ma64.meta & MA_META_END_MASK);
+	struct maple_metadata *meta = ma_meta(mn, mt);
+
+	meta->gap = offset;
 }
 
 /*
@@ -1306,8 +1319,15 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
 	if (type == maple_arange_64)
 		return ma_meta_end(mte_to_node(mas->node), type);
 
-	offset = mt_min_slots[type];
 	pivots = ma_pivots(mas_mn(mas), type);
+	offset = mt_pivots[type] - 1;
+	if (likely(!pivots[offset]) || pivots[offset] == mas->max) {
+		int ret = ma_meta_end(mas_mn(mas), type);
+		if (ret)
+			return ret;
+	}
+
+	offset = mt_min_slots[type];
 	if (unlikely(!pivots[offset]))
 		goto decrement;
 
@@ -1320,6 +1340,7 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
 		/* Totally full. */
 		if (pivots[offset] != mas->max)
 			return offset + 1;
+
 		return offset;
 	}
 
@@ -1327,7 +1348,8 @@ decrement:
 	while (--offset) {
 		if (likely(pivots[offset]))
 			break;
-	};
+	}
+
 	if (likely(pivots[offset] < mas->max))
 		offset++;
 
@@ -1575,11 +1597,10 @@ static inline void mas_adopt_children(struct ma_state *mas,
 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
 	struct maple_enode *child;
 	unsigned char offset;
+	unsigned char count = mas_data_end(mas);
 
-	for (offset = 0; offset < mt_slots[type]; offset++) {
+	for (offset = 0; offset <= count; offset++) {
 		child = mas_slot_locked(mas, slots, offset);
-		if (unlikely(!child))
-			break;
 		mte_set_parent(child, parent, offset);
 	}
 }
@@ -1642,13 +1663,9 @@ static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
 	mt = mte_node_type(mas->node);
 	node = mas_mn(mas);
 	slots = ma_slots(node, mt);
-	count = mt_slots[mt];
-	for (offset = mas->offset; offset < count; offset++) {
+	count = mas_data_end(mas);
+	for (offset = mas->offset; offset <= count; offset++) {
 		entry = mas_slot_locked(mas, slots, offset);
-		/* end of node data. */
-		if (unlikely(!entry))
-			break;
-
 		if (mte_parent(entry) == node) {
 			*child = *mas;
 			mas->offset = offset + 1;
@@ -1846,7 +1863,7 @@ complete:
  */
 static inline void mab_mas_cp(struct maple_big_node *b_node,
 			      unsigned char mab_start, unsigned char mab_end,
-			      struct ma_state *mas)
+			      struct ma_state *mas, bool new_max)
 {
 	int i, j = 0;
 	enum maple_type mt = mte_node_type(mas->node);
@@ -1854,10 +1871,15 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
 	void __rcu **slots = ma_slots(node, mt);
 	unsigned long *pivots = ma_pivots(node, mt);
 	unsigned long *gaps = NULL;
+	unsigned char end;
 
 	if (mab_end - mab_start > mt_pivots[mt])
 		mab_end--;
 
+	if (!pivots[mt_pivots[mt] - 1]) {
+		slots[mt_pivots[mt]] = NULL;
+	}
+
 	i = mab_start;
 	pivots[j++] = b_node->pivot[i++];
 	do {
@@ -1867,11 +1889,12 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
 	memcpy(slots, b_node->slot + mab_start,
 	       sizeof(void *) * (i - mab_start));
 
-	mas->max = b_node->pivot[i - 1];
+	if (new_max)
+		mas->max = b_node->pivot[i - 1];
+	end = j - 1;
 	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
 		unsigned long max_gap = 0;
 		unsigned char offset = 15;
-		unsigned char end = j - 1;
 
 		gaps = ma_gaps(node, mt);
 		do {
@@ -1881,7 +1904,13 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
 				max_gap = gaps[j];
 			}
 		} while (j);
+
 		ma_set_meta(node, mt, offset, end);
+	} else if (end <= mt_pivots[mt] - 1) {
+		if (pivots[end] != mas->max)
+			end++;
+
+		ma_set_meta(node, mt, 0, end);
 	}
 }
 
@@ -2542,11 +2571,11 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
 
 	mast->l->min = mast->orig_l->min;
 	mast->l->max = mast->bn->pivot[split];
-	mab_mas_cp(mast->bn, 0, split, mast->l);
+	mab_mas_cp(mast->bn, 0, split, mast->l, true);
 	mast->r->max = mast->l->max;
 
 	if (middle) {
-		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m);
+		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
 		mast->m->min = mast->bn->pivot[split] + 1;
 		mast->m->max = mast->bn->pivot[mid_split];
 		if (!save->node &&
@@ -2560,7 +2589,7 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
 	}
 
 	if (right) {
-		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r);
+		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, true);
 		mast->r->min = mast->bn->pivot[split] + 1;
 		mast->r->max = mast->bn->pivot[mast->bn->b_end];
 		if (!save->node && (save->offset > split)) {
@@ -2753,7 +2782,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
 	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
 				mte_node_type(mast->orig_l->node));
 	mast->orig_l->depth++;
-	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas);
+	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
 	mte_set_parent(left, l_mas.node, slot);
 	if (middle)
 		mte_set_parent(middle, l_mas.node, ++slot);
@@ -2985,7 +3014,7 @@ static inline bool _mas_split_final_node(struct maple_subtree_state *mast,
 	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
 
 	mast->l->node = ancestor;
-	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l);
+	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
 	mas->offset = mast->bn->b_end - 1;
 	return true;
 }
@@ -3069,9 +3098,9 @@ static inline void mast_split_data(struct maple_subtree_state *mast,
 {
 	unsigned char p_slot;
 
-	mab_mas_cp(mast->bn, 0, split, mast->l);
+	mab_mas_cp(mast->bn, 0, split, mast->l, true);
 	mte_set_pivot(mast->r->node, 0, mast->r->max);
-	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r);
+	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, true);
 	mast->l->offset = mte_parent_slot(mas->node);
 	mast->l->max = mast->bn->pivot[split];
 	mast->r->min = mast->l->max + 1;
@@ -3261,15 +3290,11 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
 static inline bool mas_reuse_node(struct ma_state *mas,
 			  struct maple_big_node *bn, unsigned char end)
 {
-	unsigned long max;
-
 	/* Need to be rcu safe. */
 	if (mt_in_rcu(mas->tree))
 		return false;
 
-	max = mas->max;
-	mab_mas_cp(bn, 0, bn->b_end, mas);
-	mas->max = max;
+	mab_mas_cp(bn, 0, bn->b_end, mas, false);
 
 	if (end > bn->b_end) {
 		/* Zero end of node. */
@@ -3277,13 +3302,17 @@ static inline bool mas_reuse_node(struct ma_state *mas,
 		struct maple_node *mn = mas_mn(mas);
 		unsigned long *pivots = ma_pivots(mn, mt);
 		void __rcu **slots = ma_slots(mn, mt);
-		char zero = mt_slots[mt] - bn->b_end - 1;
+		char clear = mt_slots[mt] - bn->b_end - 2;
 
-		memset(slots + bn->b_end + 1, 0, sizeof(void *) * zero--);
-		memset(pivots + bn->b_end + 1, 0, sizeof(unsigned long *) * zero);
+		if (clear > 0) {
+			memset(slots + bn->b_end + 1, 0,
+			       sizeof(void *) * clear);
+			memset(pivots + bn->b_end + 1, 0,
+			       sizeof(unsigned long *) * clear);
+		}
 	}
-	return true;
 
+	return true;
 }
 
 /*
@@ -3300,15 +3329,18 @@ static inline int mas_commit_b_node(struct ma_state *mas,
 	enum maple_type b_type = b_node->type;
 
 	if ((b_end < mt_min_slots[b_type]) &&
-	    (!mte_is_root(mas->node)) && (mas_mt_height(mas) > 1))
+	    (!mte_is_root(mas->node)) && (mas_mt_height(mas) > 1)) {
 		return mas_rebalance(mas, b_node);
+	}
 
 
-	if (b_end >= mt_slots[b_type])
+	if (b_end >= mt_slots[b_type]) {
 		return mas_split(mas, b_node);
+	}
 
-	if (mas_reuse_node(mas, b_node, end))
+	if (mas_reuse_node(mas, b_node, end)) {
 		goto reuse_node;
+	}
 
 	mas_node_count(mas, 1);
 	if (mas_is_err(mas))
@@ -3317,7 +3349,10 @@ static inline int mas_commit_b_node(struct ma_state *mas,
 	new_node = mt_mk_node(mas_pop_node(mas), mte_node_type(mas->node));
 	mte_to_node(new_node)->parent = mas_mn(mas)->parent;
 	mas->node = new_node;
-	mab_mas_cp(b_node, 0, b_end, mas);
+	mab_mas_cp(b_node, 0, b_end, mas, true);
+	if (b_end < mt_pivots[b_type] - 1)
+		ma_set_meta(mas_mn(mas), maple_leaf_64, 0, b_end);
+
 	mas_replace(mas, false);
 reuse_node:
 	mas_update_gap(mas);
@@ -3360,12 +3395,15 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
 
 	rcu_assign_pointer(slots[slot], entry);
 	mas->offset = slot;
-	pivots[slot++] = mas->last;
+	pivots[slot] = mas->last;
+	if (mas->last != ULONG_MAX)
+		slot++;
 	mas->depth = 1;
 	mas_set_height(mas);
 
 	/* swap the new root into the tree */
 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+	ma_set_meta(node, maple_leaf_64, 0, slot);
 	return slot;
 }
 
@@ -3672,6 +3710,14 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
 	void __rcu **slots;
 	unsigned long *pivots;
 
+	if (!entry && !mas->index && mas->last == ULONG_MAX) {
+		mas->depth = 0;
+		mas_set_height(mas);
+		rcu_assign_pointer(mas->tree->ma_root, entry);
+		mas->node = MAS_START;
+		goto done;
+	}
+
 	mas_node_count(mas, 1);
 	if (mas_is_err(mas))
 		return 0;
@@ -3687,6 +3733,8 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
 	mas->depth = 1;
 	mas_set_height(mas);
 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+
+done:
 	if (xa_is_node(root))
 		mte_destroy_walk(root, mas->tree);
 
@@ -3878,17 +3926,25 @@ static inline bool mas_node_store(struct ma_state *mas, void *entry,
 		memcpy(dst_pivots + dst_offset, pivots + offset_end,
 		       sizeof(unsigned long) * copy_size);
 	}
+
 done:
 	if ((end == mt_slots[mt] - 1) && (new_end < mt_slots[mt] - 1))
 		dst_pivots[new_end] = mas->max;
 
+	if (!dst_pivots[mt_pivots[mt] - 1] || dst_pivots[mt_pivots[mt] - 1] == mas->max) {
+		if (dst_pivots[new_end] && dst_pivots[new_end] < mas->max)
+			new_end++;
+		//printk("Set new end %u pivot is %lu max is %lu\n", new_end, dst_pivots[new_end],
+		//      mas->max);
+		ma_set_meta(newnode, maple_leaf_64, 0, new_end);
+	}
+
 	if (!mt_in_rcu(mas->tree)) {
 		memcpy(mas_mn(mas), newnode, sizeof(struct maple_node));
 	} else {
 		mas->node = mt_mk_node(newnode, mt);
 		mas_replace(mas, false);
 	}
-
 	trace_ma_write(__func__, mas, 0, entry);
 	mas_update_gap(mas);
 	return true;
@@ -4063,6 +4119,10 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 		}
 	}
 
+	if (!mas->index && mas->last == ULONG_MAX) {
+		mas_new_root(mas, entry);
+		return content;
+	}
 	if (r_min == mas->index && r_max == mas->last) {
 		rcu_assign_pointer(slots[mas->offset], entry);
 		if (!!entry ^ !!content)
@@ -4071,21 +4131,29 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 	}
 
 	/* Appending can skip a lot. */
-	if ((end < mt_slots[mt] - 1) && (mas->offset == end)) {
+	if (entry && (end < mt_slots[mt] - 1) && (mas->offset == end)) {
 		if ((mas->index != r_min) && (mas->last == r_max)) {
 			if (end + 1 < mt_pivots[mt])
 				pivots[end + 1] = pivots[end];
 
+			if (end + 1 < mt_pivots[mt]) {
+				ma_set_meta(mas_mn(mas), maple_leaf_64, 0, end + 1);
+			}
 			rcu_assign_pointer(slots[end + 1], entry);
 			pivots[end] = mas->index - 1;
+
 			if (!content || !entry)
 				mas_update_gap(mas);
+
 			return content;
 		} else if ((mas->index == r_min) && (mas->last < r_max)) {
 			if (end + 1 < mt_pivots[mt])
 				pivots[end + 1] = pivots[end];
 
 			rcu_assign_pointer(slots[end + 1], content);
+			if (end + 1 < mt_pivots[mt]) {
+				ma_set_meta(mas_mn(mas), maple_leaf_64, 0, end + 1);
+			}
 			pivots[end] = mas->last;
 			rcu_assign_pointer(slots[end], entry);
 			if (!content || !entry)
@@ -4315,20 +4383,27 @@ static inline void *mas_next_nentry(struct ma_state *mas, unsigned long max,
 {
 	enum maple_type type;
 	struct maple_node *node;
-	unsigned long pivot;
+	unsigned long pivot = 0;
 	unsigned long r_start;
-	unsigned char count, offset;
+	unsigned char count;
+	unsigned char offset;
 	unsigned long *pivots;
 	void __rcu **slots;
 	void *entry = NULL;
 
+	if (mas->last == mas->max) {
+		*range_start = mas->max;
+		return NULL;
+	}
+
 	offset = mas->offset;
 	node = mas_mn(mas);
 	type = mte_node_type(mas->node);
 	pivots = ma_pivots(node, type);
 	r_start = mas_safe_min(mas, pivots, offset);
-	count = mt_pivots[type];
+	count = mas_data_end(mas);
 	slots = ma_slots(node, type);
+
 	if (mte_dead_node(mas->node))
 		return NULL;
 
@@ -4337,6 +4412,7 @@ static inline void *mas_next_nentry(struct ma_state *mas, unsigned long max,
 		goto no_entry;
 	}
 
+
 	while (offset < count) {
 		pivot = pivots[offset];
 		entry = mas_slot(mas, slots, offset);
@@ -4349,6 +4425,9 @@ static inline void *mas_next_nentry(struct ma_state *mas, unsigned long max,
 		if (entry)
 			goto found;
 
+		if (pivot == mas->max)
+			goto no_entry;
+
 		r_start = pivot + 1;
 		if (r_start > max) {
 			mas->index = max;
@@ -4357,6 +4436,10 @@ static inline void *mas_next_nentry(struct ma_state *mas, unsigned long max,
 		offset++;
 	}
 
+
+	if (r_start > mas->max) {
+		goto no_entry;
+	}
 	pivot = _mas_safe_pivot(mas, pivots, offset, type);
 	entry = mas_slot(mas, slots, offset);
 	if (mte_dead_node(mas->node))
@@ -4559,18 +4642,20 @@ retry:
 
 	while (!mas_is_none(mas)) {
 
-		if (likely(ma_is_leaf(mt)))
+		if (likely(ma_is_leaf(mt))) {
 			entry = mas_next_nentry(mas, limit, &r_start);
-		else
+		} else {
 			entry = mas_first_entry(mas, limit, &r_start);
+		}
 
 		if (unlikely(mte_dead_node(mas->node))) {
 			mas_rewalk(mas, last);
 			goto retry;
 		}
 
-		if (unlikely((r_start > limit)))
+		if (unlikely((r_start > limit))) {
 			break;
+		}
 
 
 		if (likely(entry)) {
@@ -4675,7 +4760,8 @@ retry:
 			goto retry;
 		}
 
-		mas->offset = mt_slot_count(mas->node);
+		if (!mas_is_none(mas))
+			mas->offset = mas_data_end(mas) + 1;
 	}
 
 	mas->index = mas->last = limit;
@@ -5363,9 +5449,9 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
 	for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
 		void *entry = mas_slot_locked(mas, slots, offset);
 
-		if (!entry)
-			break;
 		node = mte_to_node(entry);
+		if (!node)
+			break;
 		mte_set_node_dead(entry);
 		smp_wmb();
 		node->type = mte_node_type(entry);
@@ -5546,6 +5632,8 @@ void *mas_store(struct ma_state *mas, void *entry)
 
 	trace_ma_write(__func__, mas, 0, entry);
 #ifdef CONFIG_DEBUG_MAPLE_TREE
+	if (mas->index > mas->last)
+		printk("Error %lu > %lu %p\n", mas->index, mas->last, entry);
 	MT_BUG_ON(mas->tree, mas->index > mas->last);
 	if (mas->index > mas->last) {
 		mas_set_err(mas, -EINVAL);
@@ -6222,7 +6310,7 @@ static void mas_dfs_preorder(struct ma_state *mas)
 {
 
 	struct maple_enode *prev;
-	unsigned char slot = 0;
+	unsigned char end, slot = 0;
 
 	if (mas_is_start(mas)) {
 		mas_start(mas);
@@ -6233,27 +6321,26 @@ static void mas_dfs_preorder(struct ma_state *mas)
 		goto done;
 
 walk_up:
+	end = mas_data_end(mas);
 	if (mte_is_leaf(mas->node) ||
-	    (slot >= mt_slot_count(mas->node))) {
+	    (slot > end)) {
 		if (mte_is_root(mas->node))
 			goto done;
 
 		slot = mte_parent_slot(mas->node) + 1;
-		mas->node = mt_mk_node(mte_parent(mas->node),
-				       mas_parent_enum(mas, mas->node));
+		mas_ascend(mas);
 		goto walk_up;
 	}
 
 	prev = mas->node;
 	mas->node = mas_get_slot(mas, slot);
-	if (!mas->node) {
+	if (!mas->node || slot > end) {
 		if (mte_is_root(prev))
 			goto done;
 
 		mas->node = prev;
 		slot = mte_parent_slot(mas->node) + 1;
-		mas->node = mt_mk_node(mte_parent(mas->node),
-				       mas_parent_enum(mas, mas->node));
+		mas_ascend(mas);
 		goto walk_up;
 	}
 
@@ -6390,7 +6477,7 @@ void mt_dump_arange64(const struct maple_tree *mt, void *entry,
 	pr_cont(" contents: ");
 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
 		pr_cont("%lu ", node->gap[i]);
-	pr_cont("| %02X | ", node->meta);
+	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
 	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
 		pr_cont(MA_PTR" %lu ", node->slot[i], node->pivot[i]);
 	pr_cont(MA_PTR"\n", node->slot[i]);
@@ -6598,6 +6685,7 @@ void mas_validate_child_slot(struct ma_state *mas)
 {
 	enum maple_type type = mte_node_type(mas->node);
 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
 	struct maple_enode *child;
 	unsigned char i;
 
@@ -6606,6 +6694,9 @@ void mas_validate_child_slot(struct ma_state *mas)
 
 	for (i = 0; i < mt_slots[type]; i++) {
 		child = mas_slot(mas, slots, i);
+		if (!pivots[i] || pivots[i] == mas->max)
+			break;
+
 		if (!child)
 			break;
 
@@ -6657,22 +6748,17 @@ void mas_validate_limits(struct ma_state *mas)
 		if (prev_piv > piv) {
 			pr_err(MA_PTR"[%u] piv %lu < prev_piv %lu\n",
 				mas_mn(mas), i, piv, prev_piv);
-			mt_dump(mas->tree);
 			MT_BUG_ON(mas->tree, piv < prev_piv);
 		}
 
 		if (piv < mas->min) {
-			if (piv < mas->min)
-				mt_dump(mas->tree);
 			pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i,
 				piv, mas->min);
-			mt_dump(mas->tree);
 			MT_BUG_ON(mas->tree, piv < mas->min);
 		}
 		if (piv > mas->max) {
 			pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i,
 				piv, mas->max);
-			mt_dump(mas->tree);
 			MT_BUG_ON(mas->tree, piv > mas->max);
 		}
 		prev_piv = piv;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index af52183c03bf..05a917037f66 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1334,10 +1334,10 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
 		check = 0;
 		addr = 0;
 		mt_for_each(mt, foo, addr, ULONG_MAX) {
+			check++;
 #if check_erase2_debug > 2
-			pr_err("mt: %lu -> %p\n", addr+1, foo);
+			pr_err("mt: %lu -> %p (%d)\n", addr+1, foo, check);
 #endif
-			check++;
 			if (check > entry_count)
 				break;
 		}
@@ -35375,7 +35375,6 @@ static void check_dfs_preorder(struct maple_tree *mt)
 		count++;
 		mas_dfs_preorder(&mas);
 	} while (!mas_is_none(&mas));
-	/*printk("count %lu\n", count); */
 	MT_BUG_ON(mt, count != 74);
 	mtree_destroy(mt);
 
@@ -35633,7 +35632,6 @@ static noinline void next_prev_test(struct maple_tree *mt)
 	 * 690 - 695 = 0x61a00001930c
 	 * Check simple next/prev
 	 */
-	mas_reset(&mas);
 	mas_set(&mas, 686);
 	val = mas_walk(&mas);
 	MT_BUG_ON(mt, val != NULL);
@@ -35659,7 +35657,6 @@ static noinline void next_prev_test(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas.last != 705);
 
 	/* Check across node boundaries of the tree */
-	mas_reset(&mas);
 	mas_set(&mas, 70);
 	val = mas_walk(&mas);
 	MT_BUG_ON(mt, val != xa_mk_value(70/ 10));
@@ -36977,7 +36974,6 @@ static int maple_tree_seed(void)
 #if defined(BENCH)
 skip:
 #endif
-
 	rcu_barrier();
 	pr_info("maple_tree: %u of %u tests passed\n",
 			atomic_read(&maple_tree_tests_passed),