struct maple_node *mn;
struct maple_alloc *smn;
+ struct maple_node *nodes[100];
int i, j, total;
MA_STATE(mas, mt, 0, 0);
MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
// Eat the requested node.
mn = mas_pop_node(&mas);
-
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, mn->slot[0] != NULL);
MT_BUG_ON(mt, mn->slot[1] != NULL);
// Free.
mas_nomem(&mas, GFP_KERNEL);
- // Set allocation request to 127.
- total = 127;
+ // Set allocation request to 1.
+ mas_set_alloc_req(&mas, 1);
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+ mas_set_err(&mas, -ENOMEM);
+ // Validate allocation request.
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+ // Check that exactly one node was allocated.
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas));
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, mn->slot[0] != NULL);
+ MT_BUG_ON(mt, mn->slot[1] != NULL);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mas_push_node(&mas, (struct maple_enode *)mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+ MT_BUG_ON(mt, mas.alloc->node_count);
+
+ mas_set_alloc_req(&mas, 2); // request 2 more.
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+ MT_BUG_ON(mt, mas.alloc == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
+ for (i = 2; i >= 0; i--) {
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i);
+ MT_BUG_ON(mt, !mn);
+ ma_free_rcu(mn);
+ }
+
+ total = 64;
+ mas_set_alloc_req(&mas, total); // request total (64) nodes.
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ for (i = total; i > 0; i--) {
+ unsigned int e = 0; // expected node_count
+ if (i >= 35)
+ e = i - 35;
+ else if (i >= 5)
+ e = i - 5;
+ else if (i >= 2)
+ e = i - 2;
+ MT_BUG_ON(mt, mas.alloc->node_count != e);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
+ MT_BUG_ON(mt, !mn);
+ ma_free_rcu(mn);
+ }
+
+ total = 100;
+ for (i = 1; i < total; i++) {
+ mas_set_alloc_req(&mas, i);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ for (j = i; j > 0; j--) {
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+ MT_BUG_ON(mt, !mn);
+ mas_push_node(&mas, (struct maple_enode *)mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != j);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+ mas_set_alloc_req(&mas, i);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ for (j = 0; j <= i/2; j++) {
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ nodes[j] = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+ }
+
+ while (j) {
+ j--;
+ mas_push_node(&mas, (struct maple_enode *)nodes[j]);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != i);
+ MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
+
+ }
+
+ // Set allocation request.
+ total = 500;
mas_node_count(&mas, total);
- // Drop the lock and allocate 127 nodes.
+ // Drop the lock and allocate the nodes.
mas_nomem(&mas, GFP_KERNEL);
MT_BUG_ON(mt, !mas.alloc);
i = 1;
}
smn = smn->slot[0]; // next.
}
- MT_BUG_ON(mt, mas_allocated(&mas) != 127);
+ MT_BUG_ON(mt, mas_allocated(&mas) != total);
mas_nomem(&mas, GFP_KERNEL); // Free.
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
- for (i = 0; i <= 1590; i++) {
-// for (i = 0; i <= 1420; i++) {
+ for (i = 0; i <= 1300; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
}
// Cause a 3 child split all the way up the tree.
- check_store_range(mt, 15519, 15519, NULL, 0);
-
-// check_store_range(mt, 9755, 9759, NULL, 0);
-// MT_BUG_ON(mt, mt_height(mt) >= 4);
+ for (i = 5; i < 215; i += 10)
+ check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0);
+ for (i = 5; i < 65; i += 10)
+ check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0);
+
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ for (i = 5; i < 45; i += 10)
+ check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0);
MT_BUG_ON(mt, mt_height(mt) < 4);
mtree_destroy(mt);
+
mtree_init(mt, MAPLE_ALLOC_RANGE);
- for (i = 0; i <= 1590; i++) {
-// for (i = 0; i <= 1420; i++) {
+ for (i = 0; i <= 1200; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
}
+ // Fill parents and leaves before split.
+ for (i = 5; i < 455; i += 10)
+ check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0);
+ mt_dump(mt);
+ for (i = 1; i < 16; i++)
+ check_store_range(mt, 8185 + i, 8185 + i + 1,
+ xa_mk_value(8185+i), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
// triple split across multiple levels.
-// check_store_range(mt, 9595, 9599, NULL, 0);
- check_store_range(mt, 9115, 9121, NULL, 0);
-// MT_BUG_ON(mt, mt_height(mt) >= 4);
+ check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0);
MT_BUG_ON(mt, mt_height(mt) != 4);
}
count++;
mas_dfs_preorder(&mas);
} while(!mas_is_none(&mas));
- // 68 + MAS_START = 69
+ // 68 + MAS_START = 69; + 1 for no jitter = 70
//printk("count %lu\n", count);
- MT_BUG_ON(mt, count != 69);
+ MT_BUG_ON(mt, count != 70);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
mas_dfs_preorder(&mas);
} while(!mas_is_none(&mas));
//printk("count %lu\n", count);
- // 71 + MAS_START = 72
+ // 71 + MAS_START = 72 + 1 for no jitter
MT_BUG_ON(mt, count != 72);
mtree_destroy(mt);
} while(!mas_is_none(&mas));
// 71 + MAS_START = 72
//printk("count %lu\n", count);
-
- MT_BUG_ON(mt, count != 72);
+ MT_BUG_ON(mt, count != 77);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
mas_reset(&mas);
nr_tallocated = 0;
- mt_set_non_kernel(100);
+ mt_set_non_kernel(200);
mas_entry_count(&mas, max);
for(count = 0; count <= max; count++) {
mas.index = mas.last = count;
}
mas_empty_alloc(&mas);
rcu_barrier();
- //mt_dump(mt);
//pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n",
// max, mt_get_alloc_size()/1024, nr_allocated,
// nr_tallocated);