From: Liam R. Howlett <Liam.Howlett@Oracle.com>
Date: Fri, 4 Sep 2020 19:50:50 +0000 (-0400)
Subject: maple_tree: Drop extra node types and mt_is_empty()
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=ccf56d43d090ed3547dae3ccce538ea36f2c57e8;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Drop extra node types and mt_is_empty()

Drop the extra node types: the sparse variants (maple_sparse_6 through
maple_sparse_64) and the 16/32-bit leaf and range variants.  Only
maple_dense, maple_leaf_64, maple_range_64 and maple_arange_64 remain.
Remove the matching structs from the maple_node union, their slot-count
defines, and their entries in the mt_max, mt_slots, mt_pivots and
mt_min_slots tables.  Update the ma_is_dense()/ma_is_leaf() bounds for
the smaller enum and collapse mte_parent_range_enum() and
mte_parent_alloc_enum() to a simple non-zero test.

With a single pivot width left, rename the slot accessors from
*_rcu_slot() to *_slot(); they still use rcu_dereference_check() and
rcu_assign_pointer() internally.  Drop mte_is_dense(), open-coding the
remaining caller, and drop mt_is_empty(), which was only a NULL check,
and test the entry pointer directly at the call sites.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
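Not part of the commit message; a quick before/after sketch of the
caller-visible change for reviewers.  do_something() is a hypothetical
caller; the accessor names are the ones renamed by this patch:

	/* Before: lookups used the _rcu_ spelling and emptiness went
	 * through the trivial mt_is_empty() wrapper.
	 */
	entry = mas_get_rcu_slot(mas, slot);
	if (!mt_is_empty(entry))
		do_something(entry);

	/* After: same behaviour; mas_get_slot() still performs the RCU
	 * dereference internally, and emptiness is a plain NULL test.
	 */
	entry = mas_get_slot(mas, slot);
	if (entry)
		do_something(entry);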

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 560a5de8fe77..812e4be382ba 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -29,26 +29,10 @@
 #define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
 #define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
 #define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
-#define MAPLE_RANGE32_SLOTS	21	/* 256 bytes */
-#define MAPLE_RANGE16_SLOTS	25	/* 256 bytes */
-#define MAPLE_SPARSE64_SLOTS	15	/* 248 bytes */
-#define MAPLE_SPARSE32_SLOTS	20	/* 248 bytes */
-#define MAPLE_SPARSE21_SLOTS	23	/* 256 bytes */
-#define MAPLE_SPARSE16_SLOTS	24	/* 248 bytes */
-#define MAPLE_SPARSE9_SLOTS	27	/* 256 bytes */
-#define MAPLE_SPARSE6_SLOTS	30	/* 256 bytes */
 #else
 #define MAPLE_NODE_SLOTS       15      /* 128 bytes including ->parent */
 #define MAPLE_RANGE64_SLOTS    8       /* 128 bytes */
 #define MAPLE_ARANGE64_SLOTS   5       /* 120 bytes */
-#define MAPLE_RANGE32_SLOTS    10      /* 124 bytes */
-#define MAPLE_RANGE16_SLOTS    12      /* 126 bytes */
-#define MAPLE_SPARSE64_SLOTS   7       /* 120 bytes */
-#define MAPLE_SPARSE32_SLOTS   10      /* 128 bytes */
-#define MAPLE_SPARSE21_SLOTS   11      /* 128 bytes */
-#define MAPLE_SPARSE16_SLOTS   12      /* 128 bytes */
-#define MAPLE_SPARSE9_SLOTS    13      /* 127 bytes */
-#define MAPLE_SPARSE6_SLOTS    14      /* 128 bytes */
 #endif // End NODE256
 
 #define MA_MAX_ALLOC		127
@@ -108,54 +92,6 @@ struct maple_arange_64 {
 	unsigned long gap[MAPLE_ARANGE64_SLOTS];
 };
 
-struct maple_range_32 {
-	struct maple_pnode *parent;
-	u32 pivot[MAPLE_RANGE32_SLOTS - 1];
-	void __rcu *slot[MAPLE_RANGE32_SLOTS];
-};
-
-struct maple_range_16 {
-	struct maple_pnode *parent;
-	u16 pivot[MAPLE_RANGE16_SLOTS - 1];
-	void __rcu *slot[MAPLE_RANGE16_SLOTS];
-};
-
-struct maple_sparse_64 {
-	struct maple_pnode *parent;
-	unsigned long pivot[MAPLE_SPARSE64_SLOTS];
-	void __rcu *slot[MAPLE_SPARSE64_SLOTS];
-};
-
-struct maple_sparse_32 {
-	struct maple_pnode *parent;
-	u32 pivot[MAPLE_SPARSE32_SLOTS];
-	void __rcu *slot[MAPLE_SPARSE32_SLOTS];
-};
-
-struct maple_sparse_21 {
-	struct maple_pnode *parent;
-	unsigned long pivot[(MAPLE_SPARSE21_SLOTS + 2) / 3];
-	void __rcu *slot[MAPLE_SPARSE21_SLOTS];
-};
-
-struct maple_sparse_16 {
-	struct maple_pnode *parent;
-	u16 pivot[MAPLE_SPARSE16_SLOTS];
-	void __rcu *slot[MAPLE_SPARSE16_SLOTS];
-};
-
-struct maple_sparse_9 {
-	struct maple_pnode *parent;
-	unsigned long pivot[(MAPLE_SPARSE9_SLOTS + 6) / 7];
-	void __rcu *slot[MAPLE_SPARSE9_SLOTS];
-};
-
-struct maple_sparse_6 {
-	struct maple_pnode *parent;
-	unsigned long pivot;	/* Use a bitmap for pivots */
-	void __rcu *slot[MAPLE_SPARSE6_SLOTS];
-};
-
 struct maple_topiary {
 	struct maple_pnode *parent;
 	struct maple_enode *next; /* Overlaps the pivot */
@@ -163,17 +99,7 @@ struct maple_topiary {
 
 enum maple_type {
 	maple_dense,
-	maple_sparse_6,
-	maple_sparse_9,
-	maple_sparse_16,
-	maple_sparse_21,
-	maple_sparse_32,
-	maple_sparse_64,
-	maple_leaf_16,
-	maple_leaf_32,
 	maple_leaf_64,
-	maple_range_16,
-	maple_range_32,
 	maple_range_64,
 	maple_arange_64,
 };
@@ -222,14 +148,6 @@ struct maple_node {
 		};
 		struct maple_range_64 mr64;
 		struct maple_arange_64 ma64;
-		struct maple_range_32 mr32;
-		struct maple_range_16 mr16;
-		struct maple_sparse_64 ms64;
-		struct maple_sparse_32 ms32;
-		struct maple_sparse_21 ms21;
-		struct maple_sparse_16 ms16;
-		struct maple_sparse_9 ms9;
-		struct maple_sparse_6 ms6;
 	};
 };
 
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b2e85da24db8..2ec96a6796f3 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -26,17 +26,7 @@ static struct kmem_cache *maple_node_cache;
 
 unsigned long mt_max[] = {
 	[maple_dense]		= MAPLE_NODE_SLOTS,
-	[maple_sparse_6]	= (1UL << 6) - 1,
-	[maple_sparse_9]	= (1UL << 9) - 1,
-	[maple_sparse_16]	= (1UL << 16) - 1,
-	[maple_sparse_21]	= (1UL << 21) - 1,
-	[maple_sparse_32]	= UINT_MAX,
-	[maple_sparse_64]	= ULONG_MAX,
-	[maple_leaf_16]		= (1UL << 16) - 1,
-	[maple_leaf_32]		= UINT_MAX,
 	[maple_leaf_64]		= ULONG_MAX,
-	[maple_range_16]	= (1UL << 16) - 1,
-	[maple_range_32]	= UINT_MAX,
 	[maple_range_64]	= ULONG_MAX,
 	[maple_arange_64]	= ULONG_MAX,
 };
@@ -44,17 +34,7 @@ unsigned long mt_max[] = {
 
 unsigned char mt_slots[] = {
 	[maple_dense]		= MAPLE_NODE_SLOTS,
-	[maple_sparse_6]	= MAPLE_SPARSE6_SLOTS,
-	[maple_sparse_9]	= MAPLE_SPARSE9_SLOTS,
-	[maple_sparse_16]	= MAPLE_SPARSE16_SLOTS,
-	[maple_sparse_21]	= MAPLE_SPARSE21_SLOTS,
-	[maple_sparse_32]	= MAPLE_SPARSE32_SLOTS,
-	[maple_sparse_64]	= MAPLE_SPARSE64_SLOTS,
-	[maple_leaf_16]		= MAPLE_RANGE16_SLOTS,
-	[maple_leaf_32]		= MAPLE_RANGE32_SLOTS,
 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
-	[maple_range_16]	= MAPLE_RANGE16_SLOTS,
-	[maple_range_32]	= MAPLE_RANGE32_SLOTS,
 	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
 };
@@ -62,17 +42,7 @@ unsigned char mt_slots[] = {
 
 unsigned char mt_pivots[] = {
 	[maple_dense]		= 0,
-	[maple_sparse_6]	= 1,
-	[maple_sparse_9]	= MAPLE_SPARSE9_SLOTS - 1,
-	[maple_sparse_16]	= MAPLE_SPARSE16_SLOTS - 1,
-	[maple_sparse_21]	= MAPLE_SPARSE21_SLOTS - 1,
-	[maple_sparse_32]	= MAPLE_SPARSE32_SLOTS - 1,
-	[maple_sparse_64]	= MAPLE_SPARSE64_SLOTS - 1,
-	[maple_leaf_16]		= MAPLE_RANGE16_SLOTS - 1,
-	[maple_leaf_32]		= MAPLE_RANGE32_SLOTS - 1,
 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
-	[maple_range_16]	= MAPLE_RANGE16_SLOTS - 1,
-	[maple_range_32]	= MAPLE_RANGE32_SLOTS - 1,
 	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
 };
@@ -80,17 +50,7 @@ unsigned char mt_pivots[] = {
 
 unsigned char mt_min_slots[] = {
 	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
-	[maple_sparse_6]	= MAPLE_SPARSE6_SLOTS / 2,
-	[maple_sparse_9]	= MAPLE_SPARSE9_SLOTS / 2,
-	[maple_sparse_16]	= MAPLE_SPARSE16_SLOTS / 2,
-	[maple_sparse_21]	= MAPLE_SPARSE21_SLOTS / 2,
-	[maple_sparse_32]	= MAPLE_SPARSE32_SLOTS / 2,
-	[maple_sparse_64]	= MAPLE_SPARSE64_SLOTS / 2,
-	[maple_leaf_16]		= MAPLE_RANGE16_SLOTS / 2,
-	[maple_leaf_32]		= MAPLE_RANGE32_SLOTS / 2,
 	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
-	[maple_range_16]	= MAPLE_RANGE16_SLOTS / 2,
-	[maple_range_32]	= MAPLE_RANGE32_SLOTS / 2,
 	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
 #if defined(NODE256)
 	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 2,
@@ -168,17 +128,12 @@ static inline enum maple_type mte_node_type(const struct maple_enode *entry)
 
 static inline bool ma_is_dense(const enum maple_type type)
 {
-	return type < maple_sparse_6;
-}
-
-static inline bool mte_is_dense(const struct maple_enode *entry)
-{
-	return ma_is_dense(mte_node_type(entry));
+	return type < maple_leaf_64;
 }
 
 static inline bool ma_is_leaf(const enum maple_type type)
 {
-	return type < maple_range_16;
+	return type < maple_range_64;
 }
 
 static inline bool mte_is_leaf(const struct maple_enode *entry)
@@ -195,11 +150,6 @@ static inline bool mt_is_reserved(const void *entry)
 	return ((unsigned long)entry < 4096) && xa_is_internal(entry);
 }
 
-static inline bool mt_is_empty(const void *entry)
-{
-	return !entry;
-}
-
 static inline void mas_set_err(struct ma_state *mas, long err)
 {
 	mas->node = MA_ERROR(err);
@@ -306,23 +256,17 @@ static inline unsigned int mte_parent_shift(unsigned long parent)
 
 static inline enum maple_type mte_parent_range_enum(unsigned long parent)
 {
-	switch (parent) {
-	case 6:
+	if (parent)
 		return maple_range_64;
-	case 4:
-		return maple_range_32;
-	case 0:
-		return maple_range_16;
-	}
+
 	return maple_dense;
 }
 
 static inline enum maple_type mte_parent_alloc_enum(unsigned long parent)
 {
-	switch (parent) {
-	case 6:
+	if (parent)
 		return maple_arange_64;
-	}
+
 	return maple_dense;
 }
 
@@ -369,13 +313,8 @@ static inline void mte_set_parent(struct maple_enode *node,
 	case maple_range_64:
 	case maple_arange_64:
 		type |= 4;
-		fallthrough;
-	case maple_range_32:
 		type |= 2;
 		break;
-	case maple_range_16:
-		slot_shift = 2;
-		break;
 	default:
 		break;
 	}
@@ -480,24 +419,6 @@ static inline unsigned long ma_get_pivot(const struct maple_node *mn,
 	case maple_range_64:
 	case maple_leaf_64:
 		return mn->mr64.pivot[piv];
-	case maple_sparse_6:
-		return mn->ms6.pivot;
-	case maple_sparse_9:
-		return mn->ms9.pivot[piv];
-	case maple_sparse_16:
-		return mn->ms16.pivot[piv];
-	case maple_sparse_21:
-		return mn->ms21.pivot[piv];
-	case maple_sparse_32:
-		return mn->ms32.pivot[piv];
-	case maple_sparse_64:
-		return mn->ms64.pivot[piv];
-	case maple_range_16:
-	case maple_leaf_16:
-		return mn->mr16.pivot[piv];
-	case maple_range_32:
-	case maple_leaf_32:
-		return mn->mr32.pivot[piv];
 	case maple_dense:
 	default:
 		return 0;
@@ -564,32 +485,6 @@ static inline void ma_set_pivot(struct maple_node *mn, unsigned char piv,
 		(&mn->ma64)->pivot[piv] = val;
 	case maple_dense:
 		break;
-	case maple_sparse_6:
-		(&mn->ms6)->pivot = val;
-		break;
-	case maple_sparse_9:
-		(&mn->ms9)->pivot[piv] = val;
-		break;
-	case maple_sparse_16:
-		(&mn->ms16)->pivot[piv] = val;
-		break;
-	case maple_sparse_21:
-		(&mn->ms21)->pivot[piv] = val;
-		break;
-	case maple_sparse_32:
-		(&mn->ms32)->pivot[piv] = val;
-		break;
-	case maple_sparse_64:
-		(&mn->ms64)->pivot[piv] = val;
-		break;
-	case maple_range_16:
-	case maple_leaf_16:
-		(&mn->mr16)->pivot[piv] = val;
-		break;
-	case maple_range_32:
-	case maple_leaf_32:
-		(&mn->mr32)->pivot[piv] = val;
-		break;
 	}
 }
 
@@ -611,135 +506,62 @@ static inline void __rcu **ma_get_slots(struct maple_node *mn,
 		return mn->mr64.slot;
 	case maple_dense:
 		return mn->slot;
-	case maple_sparse_6:
-		return mn->ms6.slot;
-	case maple_sparse_9:
-		return mn->ms9.slot;
-	case maple_sparse_16:
-		return mn->ms16.slot;
-	case maple_sparse_21:
-		return mn->ms21.slot;
-	case maple_sparse_32:
-		return mn->ms32.slot;
-	case maple_sparse_64:
-		return mn->ms64.slot;
-	case maple_range_16:
-	case maple_leaf_16:
-		return mn->mr16.slot;
-	case maple_range_32:
-	case maple_leaf_32:
-		return mn->mr32.slot;
-	}
-}
-
-static inline struct maple_enode *ma_get_rcu_slot(
+	}
+}
+
+static inline struct maple_enode *ma_get_slot(
 		const struct maple_node *mn, unsigned char slot,
 		enum maple_type type, struct maple_tree *mtree)
 {
 	switch (type) {
+	default:
+	case maple_arange_64:
+		return rcu_dereference_check(mn->ma64.slot[slot],
+				lockdep_is_held(&mtree->ma_lock));
 	case maple_range_64:
 	case maple_leaf_64:
 		return rcu_dereference_check(mn->mr64.slot[slot],
 				lockdep_is_held(&mtree->ma_lock));
-	default:
 	case maple_dense:
 		return rcu_dereference_check(mn->slot[slot],
 				lockdep_is_held(&mtree->ma_lock));
-	case maple_arange_64:
-		return rcu_dereference_check(mn->ma64.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_sparse_6:
-		return rcu_dereference_check(mn->ms6.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_sparse_9:
-		return rcu_dereference_check(mn->ms9.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_sparse_16:
-		return rcu_dereference_check(mn->ms16.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_sparse_21:
-		return rcu_dereference_check(mn->ms21.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_sparse_32:
-		return rcu_dereference_check(mn->ms32.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_sparse_64:
-		return rcu_dereference_check(mn->ms64.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_range_16:
-	case maple_leaf_16:
-		return rcu_dereference_check(mn->mr16.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
-	case maple_range_32:
-	case maple_leaf_32:
-		return rcu_dereference_check(mn->mr32.slot[slot],
-				lockdep_is_held(&mtree->ma_lock));
 	}
 }
 
-static inline struct maple_enode *_mte_get_rcu_slot(
+static inline struct maple_enode *_mte_get_slot(
 		const struct maple_enode *mn, unsigned char slot,
 		enum maple_type type, struct maple_tree *mtree)
 {
-	return ma_get_rcu_slot(mte_to_node(mn), slot, type, mtree);
+	return ma_get_slot(mte_to_node(mn), slot, type, mtree);
 }
 
-static inline struct maple_enode *mte_get_rcu_slot(const struct maple_enode *mn,
+static inline struct maple_enode *mte_get_slot(const struct maple_enode *mn,
 		 unsigned char slot, struct maple_tree *mtree)
 {
-	return _mte_get_rcu_slot(mn, slot, mte_node_type(mn), mtree);
+	return _mte_get_slot(mn, slot, mte_node_type(mn), mtree);
 }
 
-static inline struct maple_enode *mas_get_rcu_slot(const struct ma_state *mas,
+static inline struct maple_enode *mas_get_slot(const struct ma_state *mas,
 		unsigned char slot)
 {
-	return mte_get_rcu_slot(mas->node, slot, mas->tree);
+	return mte_get_slot(mas->node, slot, mas->tree);
 }
 
 /*
- * ma_set_rcu_slot() - Set a nodes rcu slot.
+ * ma_set_slot() - Set a node's rcu slot.
  *
  * @mn - the maple node for the operation
  * @slot - the slot number to set
  * @type - the maple node type
  * @val - the value to store
  */
-static inline void ma_set_rcu_slot(struct maple_node *mn,
+static inline void ma_set_slot(struct maple_node *mn,
 		unsigned char slot, enum maple_type type, void *val)
 {
 	BUG_ON(slot >= mt_slots[type]);
 
 	switch (type) {
 	default:
-	case maple_dense:
-		rcu_assign_pointer(mn->slot[slot], val);
-		break;
-	case maple_sparse_6:
-		rcu_assign_pointer(mn->ms6.slot[slot], val);
-		break;
-	case maple_sparse_9:
-		rcu_assign_pointer(mn->ms9.slot[slot], val);
-		break;
-	case maple_sparse_16:
-		rcu_assign_pointer(mn->ms16.slot[slot], val);
-		break;
-	case maple_sparse_21:
-		rcu_assign_pointer(mn->ms21.slot[slot], val);
-		break;
-	case maple_sparse_32:
-		rcu_assign_pointer(mn->ms32.slot[slot], val);
-		break;
-	case maple_sparse_64:
-		rcu_assign_pointer(mn->ms64.slot[slot], val);
-		break;
-	case maple_range_16:
-	case maple_leaf_16:
-		rcu_assign_pointer(mn->mr16.slot[slot], val);
-		break;
-	case maple_range_32:
-	case maple_leaf_32:
-		rcu_assign_pointer(mn->mr32.slot[slot], val);
-		break;
 	case maple_range_64:
 	case maple_leaf_64:
 		rcu_assign_pointer(mn->mr64.slot[slot], val);
@@ -747,15 +569,18 @@ static inline void ma_set_rcu_slot(struct maple_node *mn,
 	case maple_arange_64:
 		rcu_assign_pointer(mn->ma64.slot[slot], val);
 		break;
+	case maple_dense:
+		rcu_assign_pointer(mn->slot[slot], val);
+		break;
 	}
 }
 /*
- * mte_set_rcu_slot() - Set an encoded nodes rcu slot.
+ * mte_set_slot() - Set an encoded node's rcu slot.
  */
-static inline void mte_set_rcu_slot(const struct maple_enode *mn,
+static inline void mte_set_slot(const struct maple_enode *mn,
 				 unsigned char slot, void *val)
 {
-	ma_set_rcu_slot(mte_to_node(mn), slot, mte_node_type(mn), val);
+	ma_set_slot(mte_to_node(mn), slot, mte_node_type(mn), val);
 }
 
 /*
@@ -832,7 +657,7 @@ static inline void mas_descend(struct ma_state *mas)
 	if (slot)
 		mas->min = mas_safe_pivot(mas, slot - 1) + 1;
 	mas->max = mas_safe_pivot(mas, slot);
-	mas->node = mas_get_rcu_slot(mas, mas_offset(mas));
+	mas->node = mas_get_slot(mas, mas_offset(mas));
 }
 
 static inline unsigned long ma_get_gap(const struct maple_node *mn,
@@ -1159,8 +984,8 @@ static inline unsigned long mas_leaf_max_gap(struct ma_state *mas)
 
 	if (ma_is_dense(mt)) {
 		for (i = 0; i < mt_slot_count(mas->node); i++) {
-			entry = mas_get_rcu_slot(mas, i);
-			if (!mt_is_empty(entry)) {
+			entry = mas_get_slot(mas, i);
+			if (entry) {
 				if (gap > max_gap)
 					max_gap = gap;
 				gap = 0;
@@ -1180,9 +1005,9 @@ static inline unsigned long mas_leaf_max_gap(struct ma_state *mas)
 			pend = mas->max;
 
 		gap = pend - pstart + 1;
-		entry = mas_get_rcu_slot(mas, i);
+		entry = mas_get_slot(mas, i);
 
-		if (!mt_is_empty(entry))
+		if (entry)
 			goto next;
 
 		if (gap > max_gap)
@@ -1304,9 +1129,9 @@ static inline unsigned long mas_first_node(struct ma_state *mas,
 		if (pivot > limit)
 			goto no_entry;
 
-		mn = mas_get_rcu_slot(mas, slot);
+		mn = mas_get_slot(mas, slot);
 
-		if (mt_is_empty(mn)) {
+		if (!mn) {
 			min = pivot + 1;
 			continue;
 		}
@@ -1377,8 +1202,8 @@ static inline void mas_adopt_children(struct ma_state *mas,
 		    _mte_get_pivot(parent, slot, type) == 0)
 			break;
 
-		child = _mte_get_rcu_slot(parent, slot, type, mas->tree);
-		if (!mt_is_empty(child))
+		child = _mte_get_slot(parent, slot, type, mas->tree);
+		if (child)
 			mte_set_parent(child, parent, slot);
 	}
 }
@@ -1405,7 +1230,7 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
 
 		parent = mt_mk_node(mte_parent(mas->node), ptype);
 		slot = mte_parent_slot(mas->node);
-		prev = mte_get_rcu_slot(parent, slot, mas->tree);
+		prev = mte_get_slot(parent, slot, mas->tree);
 	}
 
 	if (mte_to_node(prev) == mn)
@@ -1420,7 +1245,7 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
 		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
 		mas_set_height(mas);
 	} else {
-		mte_set_rcu_slot(parent, slot, mas->node);
+		mte_set_slot(parent, slot, mas->node);
 	}
 
 	if (!advanced)
@@ -1439,7 +1264,7 @@ static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
 	struct maple_enode *entry;
 
 	for (slot = mas_offset(mas); slot < end; slot++) {
-		entry = mas_get_rcu_slot(mas, slot);
+		entry = mas_get_slot(mas, slot);
 		if (!entry) // end of node data.
 			break;
 
@@ -1573,7 +1398,7 @@ static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
 	int i, j;
 
 	for (i = mas_start, j = mab_start; i <= mas_end; i++, j++) {
-		b_node->slot[j] = mas_get_rcu_slot(mas, i);
+		b_node->slot[j] = mas_get_slot(mas, i);
 		if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree))
 			b_node->gap[j] = mte_get_gap(mas->node, i);
 
@@ -1609,7 +1434,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
 			break;
 
 		mas->max = b_node->pivot[i];
-		mte_set_rcu_slot(mas->node, j, b_node->slot[i]);
+		mte_set_slot(mas->node, j, b_node->slot[i]);
 
 		if (j < mt_pivot_count(mas->node))
 			mte_set_pivot(mas->node, j, b_node->pivot[i]);
@@ -1681,7 +1506,7 @@ static inline unsigned char mas_store_b_node(struct ma_state *mas,
 {
 	unsigned char slot = mas_offset(mas);
 	unsigned char end = mas_data_end(mas);
-	void *contents = mas_get_rcu_slot(mas, slot);
+	void *contents = mas_get_slot(mas, slot);
 	unsigned char b_end = 0;
 	// Possible underflow of piv will wrap back to 0 before use.
 	unsigned long piv = mas->min - 1;
@@ -1821,7 +1646,7 @@ static inline void mast_topiary(struct maple_subtree_state *mast)
 	if (mast->orig_l->node == mast->orig_r->node) {
 		for (slot = l_slot + 1; slot < r_slot; slot++)
 			mat_add(mast->destroy,
-				mas_get_rcu_slot(mast->orig_l, slot));
+				mas_get_slot(mast->orig_l, slot));
 		return;
 	}
 	/* mast->orig_r is different and consumed. */
@@ -1831,10 +1656,10 @@ static inline void mast_topiary(struct maple_subtree_state *mast)
 	/* Now destroy l_slot + 1 -> end and 0 -> r_slot - 1 */
 	end = mas_data_end(mast->orig_l);
 	for (slot = l_slot + 1; slot <= end; slot++)
-		mat_add(mast->destroy, mas_get_rcu_slot(mast->orig_l, slot));
+		mat_add(mast->destroy, mas_get_slot(mast->orig_l, slot));
 
 	for (slot = 0; slot < r_slot; slot++)
-		mat_add(mast->destroy, mas_get_rcu_slot(mast->orig_r, slot));
+		mat_add(mast->destroy, mas_get_slot(mast->orig_r, slot));
 }
 
 static inline void mast_rebalance_next(struct maple_subtree_state *mast,
@@ -2691,7 +2516,7 @@ static inline bool mas_reuse_node(struct ma_state *mas,
 	// Zero end of node.
 	if (end > bn->b_end) {
 		for (i = bn->b_end + 1; i < mt_slot_count(mas->node); i++) {
-			mte_set_rcu_slot(mas->node, i, NULL);
+			mte_set_slot(mas->node, i, NULL);
 			if (i < mt_pivot_count(mas->node))
 				mte_set_pivot(mas->node, i, 0);
 
@@ -2759,14 +2584,14 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
 		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
 
 	if (contents)
-		mte_set_rcu_slot(mas->node, slot++, contents);
+		mte_set_slot(mas->node, slot++, contents);
 
 	if (!mas->index && slot)
 		slot--;
 	else if (mas->index > 1)
 		mte_set_pivot(mas->node, slot++, mas->index - 1);
 
-	mte_set_rcu_slot(mas->node, slot, entry);
+	mte_set_slot(mas->node, slot, entry);
 	mte_set_pivot(mas->node, slot++, mas->last);
 	/* swap the new root into the tree */
 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
@@ -2948,11 +2773,11 @@ static inline bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
 		else
 			mas->full_cnt = 0;
 
-		next = mas_get_rcu_slot(mas, mas_offset(mas));
+		next = mas_get_slot(mas, mas_offset(mas));
 		// Traverse.
 		mas->max = *range_max;
 		mas->min = *range_min;
-		if (unlikely(mt_is_empty(next)))
+		if (unlikely(!next))
 			return false;
 
 		mas->node = next;
@@ -2967,7 +2792,7 @@ static inline unsigned char mas_extend_null(struct ma_state *l_mas,
 	unsigned char l_slot = mas_offset(l_mas);
 	unsigned char r_slot = mas_offset(r_mas);
 	unsigned char cp_r_slot = r_slot;
-	void *content = mas_get_rcu_slot(l_mas, l_slot);
+	void *content = mas_get_slot(l_mas, l_slot);
 	unsigned long range_max = mas_safe_pivot(r_mas, r_slot);
 	unsigned long range_min = l_mas->min;
 
@@ -2978,7 +2803,7 @@ static inline unsigned char mas_extend_null(struct ma_state *l_mas,
 		l_mas->index = range_min;
 
 	if ((l_mas->index == range_min) &&
-	    l_slot && !mas_get_rcu_slot(l_mas, l_slot - 1)) {
+	    l_slot && !mas_get_slot(l_mas, l_slot - 1)) {
 		if (l_slot > 1)
 			l_mas->index = mas_safe_pivot(l_mas, l_slot - 2) + 1;
 		else
@@ -2986,14 +2811,14 @@ static inline unsigned char mas_extend_null(struct ma_state *l_mas,
 		mas_set_offset(l_mas, l_slot - 1);
 	}
 
-	if (!mas_get_rcu_slot(r_mas, r_slot)) {
+	if (!mas_get_slot(r_mas, r_slot)) {
 		if (r_mas->last < range_max)
 			r_mas->last = range_max;
 		cp_r_slot++;
 	}
 
 	if (r_mas->last == range_max &&
-	    r_mas->last < r_mas->max && !mas_get_rcu_slot(r_mas, r_slot + 1)) {
+	    r_mas->last < r_mas->max && !mas_get_slot(r_mas, r_slot + 1)) {
 		r_mas->last = mas_safe_pivot(r_mas, r_slot + 1);
 		cp_r_slot++;
 	}
@@ -3033,9 +2858,9 @@ static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min,
 		if (ma_is_leaf(type)) // Leaf.
 			return true;
 
-		next = mas_get_rcu_slot(mas, mas_offset(mas));
+		next = mas_get_slot(mas, mas_offset(mas));
 
-		if (unlikely(mt_is_empty(next)))
+		if (unlikely(!next))
 			return false;
 
 		// Traverse.
@@ -3182,7 +3007,7 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 	/* Calculate needed space */
 	slot = mas_offset(mas);
 	slot_cnt = mt_slot_count(mas->node);
-	content = mas_get_rcu_slot(mas, slot);
+	content = mas_get_slot(mas, slot);
 	if (!overwrite && ((mas->last > r_max) || content)) {
 		mas_set_err(mas, -EEXIST);
 		goto exists;
@@ -3204,7 +3029,7 @@ static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite
 	if (mas_can_append(mas, &b_node, slot_cnt, end)) {
 		slot = b_node.b_end;
 		do {
-			mte_set_rcu_slot(mas->node, slot, b_node.slot[slot]);
+			mte_set_slot(mas->node, slot, b_node.slot[slot]);
 			if (slot < slot_cnt - 1)
 				mte_set_pivot(mas->node, slot, b_node.pivot[slot]);
 		} while(slot && slot-- >= end);
@@ -3281,8 +3106,8 @@ restart_prev_node:
 			if (slot != 0 && pivot == 0)
 				break;
 
-			mn = mas_get_rcu_slot(mas, slot);
-			if (mt_is_empty(mn))
+			mn = mas_get_slot(mas, slot);
+			if (!mn)
 				continue;
 
 			if (level == 1) {
@@ -3355,8 +3180,8 @@ restart_next_node:
 			if (slot != 0 && pivot == 0)
 				break;
 
-			mn = mas_get_rcu_slot(mas, slot);
-			if (mt_is_empty(mn)) {
+			mn = mas_get_slot(mas, slot);
+			if (!mn) {
 				prev_piv = pivot;
 				continue;
 			}
@@ -3409,8 +3234,8 @@ static inline bool mas_prev_nentry(struct ma_state *mas, unsigned long limit,
 		if (pivot < limit)
 			return false;
 
-		entry = mas_get_rcu_slot(mas, slot);
-		if (!mt_is_empty(entry))
+		entry = mas_get_slot(mas, slot);
+		if (entry)
 			break;
 	} while (slot--);
 
@@ -3452,8 +3277,8 @@ static inline bool mas_next_nentry(struct ma_state *mas, unsigned long max,
 		if (r_start > mas->max)
 			goto no_entry;
 
-		entry = mas_get_rcu_slot(mas, slot);
-		if (!mt_is_empty(entry))
+		entry = mas_get_slot(mas, slot);
+		if (entry)
 			goto found;
 
 		/* Ran over the limit, this is was the last slot to try */
@@ -3496,7 +3321,7 @@ static inline void *mas_last_entry(struct ma_state *mas,
 	while (range_start < limit) {
 		mas_set_offset(mas, slot);
 		if (!mas_next_nentry(mas, limit, &range_start)) {
-			entry = mas_get_rcu_slot(mas, slot - 1);
+			entry = mas_get_slot(mas, slot - 1);
 			if (mte_is_leaf(mas->node)) {
 				mas->index = range_start - 1;
 				mas->index = mte_get_pivot(mas->node, slot - 1);
@@ -3573,7 +3398,7 @@ next_node:
 	if (mas_is_none(mas))
 		return NULL;
 
-	entry = mas_get_rcu_slot(mas, mas_offset(mas));
+	entry = mas_get_slot(mas, mas_offset(mas));
 	if (mas_dead_node(mas, index))
 		goto retry;
 
@@ -3606,7 +3431,7 @@ static inline void *_mas_prev(struct ma_state *mas, unsigned long limit)
 	mas->last = max;
 	slot = mas_offset(mas);
 	mas->index = mas_safe_min(mas, slot);
-	return mas_get_rcu_slot(mas, mas_offset(mas));
+	return mas_get_slot(mas, mas_offset(mas));
 }
 
 /*
@@ -3667,7 +3492,7 @@ static inline bool _mas_rev_awalk(struct ma_state *mas, unsigned long size)
 			}
 
 			if (ma_is_leaf(type)) {
-				if (mas_get_rcu_slot(mas, slot))
+				if (mas_get_slot(mas, slot))
 					goto next_slot;
 
 				gap = max - min + 1;
@@ -3705,10 +3530,10 @@ next_slot:
 	if (!ma_is_leaf(type)) { //descend
 		struct maple_enode *next;
 
-		next = mas_get_rcu_slot(mas, slot);
+		next = mas_get_slot(mas, slot);
 		mas->min = min;
 		mas->max = max;
-		if (mt_is_empty(next))
+		if (!next)
 			goto ascend;
 
 		mas->node = next;
@@ -3750,7 +3575,7 @@ static inline bool _mas_awalk(struct ma_state *mas, unsigned long size)
 
 			if (ma_is_leaf(type)) {
 				gap = 0;
-				if (mt_is_empty(mas_get_rcu_slot(mas, slot)))
+				if (!mas_get_slot(mas, slot))
 					gap = min(pivot, mas->last) -
 						max(mas->index, min) + 1;
 			} else {
@@ -3763,7 +3588,7 @@ next_slot:
 					found = true;
 					break;
 				} else if (mas->index <= pivot) {
-					mas->node = mas_get_rcu_slot(mas, slot);
+					mas->node = mas_get_slot(mas, slot);
 					mas->min = min;
 					mas->max = pivot;
 					slot = 0;
@@ -4216,7 +4041,7 @@ retry:
 		if (slot >= MAPLE_NODE_SLOTS)
 			return NULL;
 
-		entry = mas_get_rcu_slot(mas, slot);
+		entry = mas_get_slot(mas, slot);
 		if (mte_dead_node(mas->node))
 			goto retry;
 	}
@@ -4280,7 +4105,7 @@ void *mas_find(struct ma_state *mas, unsigned long max)
 
 	while (mas_search_cont(mas, index, max, entry)) {
 		entry = _mas_next(mas, max, &index);
-		if (mt_is_empty(entry))
+		if (!entry)
 			entry = NULL;
 	}
 
@@ -4313,15 +4138,15 @@ void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
 	leaf = _mas_range_walk(&mas, &range_start, &range_end);
 	slot = mas_offset(&mas);
 	if (leaf == true && slot != MAPLE_NODE_SLOTS)
-		entry = mas_get_rcu_slot(&mas, slot);
+		entry = mas_get_slot(&mas, slot);
 
 	mas.last = range_end;
-	if (mt_is_empty(entry) || xa_is_zero(entry))
+	if (!entry || xa_is_zero(entry))
 		entry = NULL;
 
 	while (mas_search_cont(&mas, range_start, max, entry)) {
 		entry = _mas_next(&mas, max, &range_start);
-		if (mt_is_empty(entry) || xa_is_zero(entry))
+		if (!entry || xa_is_zero(entry))
 			entry = NULL;
 	}
 
@@ -4416,7 +4241,7 @@ walk_up:
 	}
 
 	prev = mas->node;
-	mas->node = mas_get_rcu_slot(mas, slot);
+	mas->node = mas_get_slot(mas, slot);
 	if (!mas->node) {
 		if (mte_is_root(prev))
 			goto done;
@@ -4470,13 +4295,13 @@ static inline void mas_dup_children(struct ma_state *mas, int *node_cnt)
 
 
 	for(slot = 0; slot < end; slot++) {
-		oldchild = mas_get_rcu_slot(mas, slot);
+		oldchild = mas_get_slot(mas, slot);
 		if (!oldchild)
 			return;
 
 		child = mas_next_alloc(mas);
 		echild = mt_mk_node(child, mte_node_type(oldchild));
-		mte_set_rcu_slot(mas->node, slot, echild);
+		mte_set_slot(mas->node, slot, echild);
 		memcpy(child, mte_to_node(oldchild), sizeof(struct maple_node));
 	}
 }
@@ -5058,9 +4883,9 @@ void mas_validate_gaps(struct ma_state *mas)
 	unsigned char p_slot;
 	int i;
 
-	if (mte_is_dense(mte)) {
+	if (ma_is_dense(mte_node_type(mte))) {
 		for (i = 0; i < mt_slot_count(mte); i++) {
-			if (!mt_is_empty(mas_get_rcu_slot(mas, i))) {
+			if (mas_get_slot(mas, i)) {
 				if (gap > max_gap)
 					max_gap = gap;
 				gap = 0;
@@ -5077,21 +4902,21 @@ void mas_validate_gaps(struct ma_state *mas)
 			p_end = mas->max;
 
 		if (mte_is_leaf(mte)) {
-			if (!mt_is_empty(mas_get_rcu_slot(mas, i))) {
+			if (mas_get_slot(mas, i)) {
 				gap = 0;
 				goto not_empty;
 			}
 
 			gap += p_end - p_start + 1;
 		} else {
-			void *entry = mas_get_rcu_slot(mas, i);
+			void *entry = mas_get_slot(mas, i);
 
 			gap = mte_get_gap(mte, i);
-			if (mt_is_empty(entry)) {
+			if (!entry) {
 				if (gap != p_end - p_start + 1) {
 					pr_err(MA_PTR"[%u] -> "MA_PTR" %lu != %lu - %lu + 1\n",
 						mas_mn(mas), i,
-						mas_get_rcu_slot(mas, i), gap,
+						mas_get_slot(mas, i), gap,
 						p_end, p_start);
 					mt_dump(mas->tree);
 
@@ -5152,7 +4977,7 @@ void mas_validate_parent_slot(struct ma_state *mas)
 	// Check prev/next parent slot for duplicate node entry
 
 	for (i = 0; i < mt_slots[p_type]; i++) {
-		node = ma_get_rcu_slot(parent, i, p_type, mas->tree);
+		node = ma_get_slot(parent, i, p_type, mas->tree);
 		if (i == p_slot) {
 			if (node != mas->node)
 				pr_err("parent %p[%u] does not have %p\n",
@@ -5176,7 +5001,7 @@ void mas_validate_child_slot(struct ma_state *mas)
 		return;
 
 	for (i = 0; i < mt_slots[type]; i++) {
-		child = mte_get_rcu_slot(mas->node, i, mas->tree);
+		child = mte_get_slot(mas->node, i, mas->tree);
 		if (!child)
 			break;
 
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 1da0a048e2d4..55bc8e567f12 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1011,7 +1011,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
 				if (!entry_cnt)
 					entry_cnt++;
 
-				else if (!mt_is_empty(s_entry)) {
+				else if (s_entry) {
 					if (e_max > mas_end.last)
 						entry_cnt++;