--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_maple_tree.c: Test the maple tree API
+ * Copyright (c) 2018 Liam R. Howlett
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ */
+
+#include <linux/maple_tree.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifndef MAPLE_TREE_DEBUG
+/*
+ * Without the tree's debug build, provide a no-op dumper and a local
+ * MT_BUG_ON() that logs failures and keeps module-wide pass/run counters
+ * instead of halting on the first failed check.
+ */
+# ifdef __KERNEL__
+void mt_dump(const struct maple_tree *mt) { }
+# endif
+#undef MT_BUG_ON
+#define MT_BUG_ON(tree, x) do { \
+	tests_run++; \
+	if (x) { \
+		printk("BUG at %s:%d (%u)\n", \
+		__func__, __LINE__, x); \
+		mt_dump(tree); \
+		printk("Pass: %u Run:%u\n", tests_passed, tests_run); \
+		dump_stack(); \
+	} else { \
+		tests_passed++; \
+	} \
+} while (0)
+#endif
+
+/* Store @index in the tree with its own value-encoded form as the entry. */
+static int mtree_insert_index(struct maple_tree *mt, unsigned long index,
+		gfp_t gfp)
+{
+	void *entry = xa_mk_value(index & LONG_MAX);
+
+	return mtree_insert(mt, index, entry, gfp);
+}
+
+/* Thin wrapper: insert @ptr at @index with GFP_KERNEL allocations. */
+static int mtree_test_insert(struct maple_tree *mt, unsigned long index,
+		void *ptr)
+{
+	int err = mtree_insert(mt, index, ptr, GFP_KERNEL);
+
+	return err;
+}
+
+/* Thin wrapper: insert @ptr over [@start, @end] with GFP_KERNEL. */
+static int mtree_test_insert_range(struct maple_tree *mt, unsigned long start,
+		unsigned long end, void *ptr)
+{
+	int err = mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
+
+	return err;
+}
+
+/* Thin wrapper: read back whatever entry is stored at @index. */
+static void *mtree_test_load(struct maple_tree *mt, unsigned long index)
+{
+	void *entry = mtree_load(mt, index);
+
+	return entry;
+}
+
+
+/*
+ * check_insert_range() - insert [@start, @end] => @ptr and assert success.
+ * @mt: tree under test
+ * @start: first index of the range
+ * @end: last index of the range (inclusive)
+ * @ptr: entry stored over the whole range
+ */
+static noinline void check_insert_range(struct maple_tree *mt,
+		unsigned long start, unsigned long end, void *ptr)
+{
+	/* Initialize from the call directly; the old -EINVAL was a dead store. */
+	int ret = mtree_test_insert_range(mt, start, end, ptr);
+
+	MT_BUG_ON(mt, ret != 0);
+}
+
+/*
+ * check_insert() - insert @index => @ptr and assert the insert succeeded.
+ * @mt: tree under test
+ * @index: index to store at
+ * @ptr: entry to store
+ */
+static noinline void check_insert(struct maple_tree *mt, unsigned long index,
+		void *ptr)
+{
+	/* Initialize from the call directly; the old -EINVAL was a dead store. */
+	int ret = mtree_test_insert(mt, index, ptr);
+
+	MT_BUG_ON(mt, ret != 0);
+}
+
+/*
+ * check_dup_insert() - insert at an occupied @index and assert -EEXIST.
+ * @mt: tree under test
+ * @index: index that must already hold an entry
+ * @ptr: entry whose insertion is expected to be rejected
+ */
+static noinline void check_dup_insert(struct maple_tree *mt,
+		unsigned long index, void *ptr)
+{
+	/* Initialize from the call directly; the old -EINVAL was a dead store. */
+	int ret = mtree_test_insert(mt, index, ptr);
+
+	MT_BUG_ON(mt, ret != -EEXIST);
+}
+
+
+/* Assert that loading @index returns exactly @ptr. */
+static noinline void check_load(struct maple_tree *mt, unsigned long index,
+		void *ptr)
+{
+	void *entry = mtree_test_load(mt, index);
+
+	MT_BUG_ON(mt, entry != ptr);
+}
+
+/*
+ * check_index_load() - assert @index loads back as its value-encoded self.
+ *
+ * Entries stored via mtree_insert_index() are encoded with xa_mk_value(),
+ * so the load must return the same encoding.  Note: C does not permit
+ * "return <void expression>;" in a void function (C99 6.8.6.4), so the
+ * call is made as a plain statement.
+ */
+static noinline
+void check_index_load(struct maple_tree *mt, unsigned long index)
+{
+	check_load(mt, index, xa_mk_value(index & LONG_MAX));
+}
+
+/*
+ * check_nomem() - exercise allocation-failure (-ENOMEM) recovery paths.
+ *
+ * The interesting body is compiled out (#if 0) — it relies on internal
+ * _maple_setup_insert()/_maple_insert() entry points that are not yet
+ * exported.  Until then this test only destroys the tree it was handed.
+ */
+static noinline void check_nomem(struct maple_tree *mt)
+{
+#if 0
+	MA_STATE(ms, mt, 1, 1);
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+
+	/* Storing something at 1 requires memory allocation */
+	MT_BUG_ON(mt, mtree_insert_index(mt, 1, GFP_ATOMIC) != -ENOMEM);
+	/* Storing something at 0 does not */
+	MT_BUG_ON(mt, mtree_insert_index(mt, 0, GFP_ATOMIC) != 0);
+
+	/*
+	 * Simulate two threads racing; the first one fails to allocate
+	 * memory to insert an entry at 1, then the second one succeeds
+	 * in allocating memory to insert an entry at 2.  The first one
+	 * then needs to free the node it allocated.  LeakSanitizer will
+	 * notice this, as will the 'nr_allocated' debugging aid in the
+	 * userspace test suite.
+	 */
+	mtree_lock(mt);
+	_maple_setup_insert(&ms);
+	MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM));
+	_maple_insert(&ms, xa_mk_value(1));
+	mas_nomem(&ms, GFP_KERNEL);
+	MT_BUG_ON(mt, ms.node != MAS_START);
+	mtree_unlock(mt);
+	MT_BUG_ON(mt, mtree_insert_index(mt, 2, GFP_KERNEL) != 0);
+	mtree_lock(mt);
+	_maple_setup_insert(&ms);
+	_maple_insert(&ms, xa_mk_value(1));
+	mas_nomem(&ms, GFP_KERNEL);
+	mtree_unlock(mt);
+#endif
+
+	mtree_destroy(mt);
+}
+
+/*
+ * alloc_req() - decode the pending allocation request count from the low
+ * two bits of the maple state's allocation pointer.
+ *
+ * Only referenced from the currently "#if 0"-ed check_new_node(), so it is
+ * marked __maybe_unused to keep the build free of unused-function warnings.
+ * Brace moved to its own line per kernel function-definition style.
+ */
+static noinline __maybe_unused int alloc_req(struct ma_state *ms)
+{
+	return (int)(((unsigned long)ms->alloc & 0x03));
+}
+/*
+ * check_new_node() - exercise bulk node pre-allocation via the maple state.
+ *
+ * The whole body is compiled out (#if 0) — it depends on internal helpers
+ * (maple_state_node(), ma_get_alloc*()) that are not yet available.  The
+ * function is kept as a placeholder so maple_tree_seed() can already call it.
+ */
+static noinline void check_new_node(struct maple_tree *mt)
+{
+#if 0
+
+	struct maple_node *mn;
+	MA_STATE(ms, mt, 0, 0);
+
+	/* Try allocating 3 nodes */
+	mtree_lock(mt);
+	maple_state_node(&ms, 3); // req allocate 3 node.
+	MT_BUG_ON(mt, alloc_req(&ms) != 3); // Allocation request of 3.
+
+	MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM)); // Allocate failed.
+	MT_BUG_ON(mt, !mas_nomem(&ms, GFP_KERNEL));
+
+
+	MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 3);
+	mn = ma_get_alloc(&ms);
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, mn->slot[0] == NULL);
+	MT_BUG_ON(mt, mn->slot[1] == NULL);
+	mas_nomem(&ms, GFP_KERNEL); // free;
+	mtree_unlock(mt);
+
+
+	/* Try allocating 1 node, then 2 more */
+	mtree_lock(mt);
+	maple_state_node(&ms, 1); // allocate 1 node.
+	MT_BUG_ON(mt, alloc_req(&ms) != 1); // Allocation request of 1.
+	MT_BUG_ON(mt, !mas_nomem(&ms, GFP_KERNEL)); // Validate allocation request.
+	mn = ma_get_alloc(&ms);
+
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, mn->slot[0] != NULL);
+	MT_BUG_ON(mt, mn->slot[1] != NULL);
+	MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 1);
+	maple_state_node(&ms, 3); // req allocate 3 node.
+	MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 1); // Still only 1 allocated.
+	MT_BUG_ON(mt, ma_get_alloc_req(&ms) != 2); // Allocation request of 2.
+
+	mas_nomem(&ms, GFP_KERNEL); // Validate request for 2 more.
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, mn->slot[0] == NULL);
+	MT_BUG_ON(mt, mn->slot[1] == NULL);
+	MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 3); // Ensure we counted 3.
+	mas_nomem(&ms, GFP_KERNEL); // Free.
+
+	mtree_unlock(mt);
+	mtree_destroy(mt);
+#endif
+}
+/*
+ * check_seq() - insert indices 0..15 sequentially; after every insert,
+ * re-verify each previously stored index and confirm the next slot is
+ * still empty.
+ */
+static noinline void check_seq(struct maple_tree *mt)
+{
+	int idx, probe;
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+
+	for (idx = 0; idx < 16; idx++) {
+		MT_BUG_ON(mt, mtree_insert_index(mt, idx, GFP_KERNEL));
+		for (probe = 0; probe <= idx; probe++)
+			check_index_load(mt, probe);
+		check_load(mt, idx + 1, NULL);
+	}
+	mtree_destroy(mt);
+}
+
+/*
+ * check_double_insert_leaf() - walk down from ~4e9 by halving, inserting
+ * each index; after every insert, confirm all indices stored so far load
+ * back correctly and that each adjacent index (stored + 1) is still empty.
+ */
+static noinline void check_double_insert_leaf(struct maple_tree *mt)
+{
+	unsigned long top = 4000UL * 1000 * 1000;
+	unsigned long idx, chk;
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+
+	for (idx = top; idx > 0x80; idx /= 2) {
+		check_insert(mt, idx, xa_mk_value(idx));
+		for (chk = top; chk >= idx; chk /= 2) {
+			check_load(mt, chk, xa_mk_value(chk));
+			check_load(mt, chk + 1, NULL);
+		}
+	}
+	mtree_destroy(mt);
+}
+
+static DEFINE_MTREE(tree);
+
+/*
+ * maple_tree_seed() - module init: run the maple tree smoke tests.
+ *
+ * Exercises insert/load/insert_range across several tree shapes and
+ * returns 0 only when every MT_BUG_ON() check passed.
+ */
+static int maple_tree_seed(void)
+{
+	unsigned long set[] = {15, 14, 17, 25, 1000,
+			      1001, 1002, 1003, 1005, 0,
+			      3};
+	unsigned long r[] = {10, 15, 20, 25, 22}; // For range testing
+	void *ptr = &set;
+
+
+	mtree_init(&tree);
+	check_new_node(&tree);
+	check_double_insert_leaf(&tree);
+	check_load(&tree, set[0], NULL);       // See if 15 -> NULL
+
+	check_insert(&tree, set[9], &tree);     // Insert 0
+	check_load(&tree, set[9], &tree);       // See if 0 -> &tree
+	check_load(&tree, set[0], NULL);       // See if 15 -> NULL
+
+	check_insert(&tree, set[10], ptr);      // Insert 3
+	check_load(&tree, set[9], &tree);       // See if 0 -> &tree
+
+	check_load(&tree, set[10], ptr);       // See if 3 -> ptr
+
+	/* Clear out the tree */
+	mtree_destroy(&tree);
+
+	/* Try to insert, insert a dup, and load back what was inserted. */
+	mtree_init(&tree);
+	check_insert(&tree, set[0], &tree);     // Insert 15
+	check_dup_insert(&tree, set[0], &tree); // Insert 15 again
+	check_load(&tree, set[0], &tree);       // See if 15 -> &tree
+
+	/* Second set of tests try to load a value that doesn't exist, inserts
+	 * a second value, then loads the value again
+	 */
+	check_load(&tree, set[1], NULL);        // See if 14 -> NULL
+	check_insert(&tree, set[1], ptr);       // insert 14 -> ptr
+	check_load(&tree, set[1], ptr);         // See if 14 -> ptr
+	check_load(&tree, set[0], &tree);       // See if 15 -> &tree
+
+	/* Tree currently contains:
+	 * 14 -> ptr and 15 -> &tree (all other indices are empty)
+	 */
+	check_insert(&tree, set[6], ptr);       // insert 1002 -> ptr
+	check_insert(&tree, set[7], &tree);     // insert 1003 -> &tree
+
+	check_load(&tree, set[0], &tree);       // See if 15 -> &tree
+	check_load(&tree, set[1], ptr);         // See if 14 -> ptr
+	check_load(&tree, set[6], ptr);         // See if 1002 -> ptr
+	check_load(&tree, set[7], &tree);       // 1003 = &tree ?
+
+	/* Clear out tree */
+	mtree_destroy(&tree);
+
+	mtree_init(&tree);
+	/* Test inserting into a NULL hole. */
+	check_insert(&tree, set[5], ptr);       // insert 1001 -> ptr
+	check_insert(&tree, set[7], &tree);     // insert 1003 -> &tree
+	check_insert(&tree, set[6], ptr);       // insert 1002 -> ptr
+	check_load(&tree, set[5], ptr);         // See if 1001 -> ptr
+	check_load(&tree, set[6], ptr);         // See if 1002 -> ptr
+	check_load(&tree, set[7], &tree);       // See if 1003 -> &tree
+
+	/* Clear out the tree */
+	mtree_destroy(&tree);
+
+	mtree_init(&tree);
+
+	check_insert_range(&tree, r[0], r[1], ptr); // Insert 10-15 => ptr
+	check_insert_range(&tree, r[2], r[3], ptr); // Insert 20-25 => ptr
+
+	/* Clear out the tree */
+	mtree_destroy(&tree);
+
+	mtree_init(&tree);
+/*
+ * set[] = {15, 14, 17, 25, 1000,
+ *          1001, 1002, 1003, 1005, 0,
+ *          3};
+ */
+
+	check_insert(&tree, set[0], ptr); // 15
+	check_insert(&tree, set[1], &tree); // 14
+	check_insert(&tree, set[2], ptr); // 17
+	check_insert(&tree, set[3], &tree); // 25.
+	check_insert(&tree, set[4], ptr); // 1000 < Should split.
+	check_load(&tree, set[0], ptr);
+	check_load(&tree, set[1], &tree);
+	check_load(&tree, set[2], ptr);
+	check_load(&tree, set[3], &tree); //25
+	check_load(&tree, set[4], ptr);
+	check_insert(&tree, set[5], &tree); // 1001
+	check_load(&tree, set[0], ptr);
+	check_load(&tree, set[1], &tree);
+	check_load(&tree, set[2], ptr);
+	check_load(&tree, set[3], &tree);
+	check_load(&tree, set[4], ptr);
+	check_load(&tree, set[5], &tree);
+	check_insert(&tree, set[6], ptr);
+	check_load(&tree, set[0], ptr);
+	check_load(&tree, set[1], &tree);
+	check_load(&tree, set[2], ptr);
+	check_load(&tree, set[3], &tree);
+	check_load(&tree, set[4], ptr);
+	check_load(&tree, set[5], &tree);
+	check_load(&tree, set[6], ptr);
+	check_insert(&tree, set[7], &tree);
+	check_insert(&tree, set[8], ptr);
+	check_insert(&tree, set[9], &tree);
+	check_load(&tree, set[0], ptr);
+	check_load(&tree, set[1], &tree);
+	check_load(&tree, set[2], ptr);
+	check_load(&tree, set[3], &tree);
+	check_load(&tree, set[4], ptr);
+	check_load(&tree, set[5], &tree);
+	check_load(&tree, set[6], ptr);
+	check_load(&tree, set[9], &tree);
+	mtree_destroy(&tree);
+
+	check_nomem(&tree);
+	check_seq(&tree);
+
+	printk("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);
+	return (tests_run == tests_passed) ? 0 : -EINVAL;
+}
+
+/*
+ * maple_tree_harvest() - module exit hook.
+ *
+ * Each test case destroys its own tree, so there is nothing left to
+ * clean up on unload.
+ */
+static void maple_tree_harvest(void)
+{
+
+}
+
+module_init(maple_tree_seed);
+module_exit(maple_tree_harvest);
+MODULE_AUTHOR("Liam R. Howlett <Liam.Howlett@Oracle.com>");
+MODULE_LICENSE("GPL");