mtree_destroy(mt);
}
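+
+/*
+ * alloc_req() - Return the pending allocation request count, which is
+ * encoded in the low bits of the maple state alloc pointer.
+ */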
+static noinline int alloc_req(struct maple_state *ms)
+{
+ return (int)((unsigned long)ms->alloc & 0x03);
+}
+
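+/*
+ * check_new_node() - Exercise maple state node allocation: requesting
+ * nodes, recovering from an allocation failure with mas_nomem(), and
+ * freeing the pre-allocated nodes.
+ */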
+static noinline void check_new_node(struct maple_tree *mt)
+{
+ struct maple_node *mn;
+ MAP_STATE(ms, mt, 0, 0);
+
+ /* Try allocating 3 nodes */
+ mtree_lock(mt);
+ maple_state_node(&ms, 3); // Request an allocation of 3 nodes.
+ MT_BUG_ON(mt, alloc_req(&ms) != 3); // Allocation request of 3.
+
+ MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM)); // Allocate failed.
+ MT_BUG_ON(mt, !mas_nomem(&ms, GFP_KERNEL));
+
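+ /* mas_nomem() should have performed the requested allocations. */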
+ MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 3);
+ mn = ma_get_alloc(&ms);
+ MT_BUG_ON(mt, mn == NULL);
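+ /* Extra allocated nodes should appear in the first node's slots. */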
+ MT_BUG_ON(mt, mn->slot[0] == NULL);
+ MT_BUG_ON(mt, mn->slot[1] == NULL);
+ mas_nomem(&ms, GFP_KERNEL); // Free the allocated nodes.
+ mtree_unlock(mt);
+
+ /* Try allocating 1 node, then 2 more */
+ mtree_lock(mt);
+ maple_state_node(&ms, 1); // Request an allocation of 1 node.
+ MT_BUG_ON(mt, alloc_req(&ms) != 1); // Allocation request of 1.
+ MT_BUG_ON(mt, !mas_nomem(&ms, GFP_KERNEL)); // Allocate the requested node.
+ mn = ma_get_alloc(&ms);
+
+ MT_BUG_ON(mt, mn == NULL);
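+ /* Only one node was allocated, so its slots should be empty. */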
+ MT_BUG_ON(mt, mn->slot[0] != NULL);
+ MT_BUG_ON(mt, mn->slot[1] != NULL);
+ MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 1);
+ maple_state_node(&ms, 3); // Request a total of 3 nodes.
+ MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 1); // Still only 1 allocated.
+ MT_BUG_ON(mt, alloc_req(&ms) != 2); // Allocation request of 2.
+
+ mas_nomem(&ms, GFP_KERNEL); // Allocate the 2 requested nodes.
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, mn->slot[0] == NULL);
+ MT_BUG_ON(mt, mn->slot[1] == NULL);
+ MT_BUG_ON(mt, ma_get_alloc_cnt(&ms) != 3); // Ensure we counted 3.
+ mas_nomem(&ms, GFP_KERNEL); // Free.
+
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+}
+
static noinline void check_seq(struct maple_tree *mt)
{
int i, j;
mtree_init(&tree);
+ check_new_node(&tree);
check_load(&tree, set[0], NULL); // See if 15 -> NULL
check_insert(&tree, set[9], &tree); // Insert 0