};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
-#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS* 2 + 2)
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
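+/*
+ * A big node temporarily holds the combined contents of the nodes involved
+ * in a split or rebalance, hence the doubled slot count.
+ */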
struct maple_big_node {
struct maple_pnode *parent;
static void mas_set_height(struct ma_state *mas)
{
unsigned int new_flags = mas->tree->ma_flags;
+
new_flags &= ~MAPLE_HEIGHT_MASK;
new_flags |= mas->depth << MAPLE_HEIGHT_OFFSET;
mas->tree->ma_flags = new_flags;
if (!max || min == ULONG_MAX) {
if (mas->node == a_enode) {
- printk("Failed on node %p (%p)\n", mas_mn(mas), a_enode);
+ pr_err("Failed on node %p (%p)\n", mas_mn(mas),
+ a_enode);
//FIXME: Restart and retry if the lock is held.
MT_BUG_ON(mas->tree, mas->node == a_enode);
}
node = mas->alloc;
while (requested) {
- void **slots = (void**)&node->slot;
+ void **slots = (void **)&node->slot;
unsigned int max_req = MAPLE_NODE_SLOTS - 1;
if (node->slot[0]) {
unsigned int offset = node->node_count + 1;
- slots = (void**)&node->slot[offset];
+
+ slots = (void **)&node->slot[offset];
max_req -= offset;
}
goto nomem;
node->node_count += count;
- if (slots == (void**)&node->slot)
+ if (slots == (void **)&node->slot)
node->node_count--; // zero indexed.
success += count;
max_gap = gaps[i];
*offset = i;
}
- } while(i--);
+ } while (i--);
return max_gap;
}
*/
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
- unsigned long *gaps;//, *pivots;
+ unsigned long *gaps;
unsigned char offset;
enum maple_type mt;
+
if (mte_is_leaf(mas->node))
return mas_leaf_max_gap(mas);
return;
if (offset != meta_offset) {
- if (meta_gap > new)
- return;
+ if (meta_gap > new)
+ return;
- ma_set_meta_gap(pnode, pmt, offset);
+ ma_set_meta_gap(pnode, pmt, offset);
} else if (new < meta_gap) {
meta_offset = 15;
new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
memcpy(b_node->slot + mab_start,
slots + mas_start,
- sizeof(void*) * (j - mab_start));
+ sizeof(void *) * (j - mab_start));
if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree)) {
gaps = ma_gaps(node, mt);
}
memcpy(slots, b_node->slot + mab_start,
- sizeof(void*) * (i - mab_start));
+ sizeof(void *) * (i - mab_start));
if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
unsigned long max_gap = 0;
unsigned char offset = 15, tmp;
unsigned char end = i - mab_start;
+
gaps = ma_gaps(mas_mn(mas), mt);
for (tmp = 0; tmp < end; tmp++) {
gaps[tmp] = b_node->gap[mab_start + tmp];
mas_dup_state(&next[0], mas);
- while (!mte_is_leaf(list[0].node))
- {
+ while (!mte_is_leaf(list[0].node)) {
n = 0;
for (i = 0; i < 3; i++) {
if (mas_is_none(&list[i]))
{
struct maple_enode *old_l = mast->orig_l->node;
struct maple_enode *old_r = mast->orig_r->node;
+
MA_STATE(tmp, mast->orig_r->tree, mast->orig_r->index, mast->orig_r->last);
mas_dup_state(&tmp, mast->orig_r);
goto dense;
}
- while(mas->offset < mt_slots[type]) {
+ while (mas->offset < mt_slots[type]) {
pivot = _mas_safe_pivot(mas, pivots, mas->offset, type);
if (!pivot && mas->offset) {
new_end++;
offset_end = offset;
} else if (mas->last == mas->max) { // runs right to the end of the node.
- new_end = offset;
- offset_end = end + 1; // no data beyond this range.
+ new_end = offset;
+ offset_end = end + 1; // no data beyond this range.
} else {
unsigned long piv = 0;
+
new_end++;
do {
offset_end++;
new_end--;
piv = mas_logical_pivot(mas, pivots, offset_end, mt);
- } while(piv <= mas->last);
+ } while (piv <= mas->last);
}
if (min < mas->index) // new range starts within a range.
dst_slots = ma_slots(newnode, mt);
// Copy from start to insert point
memcpy(dst_pivots, pivots, sizeof(unsigned long) * (offset + 1));
- memcpy(dst_slots, slots, sizeof(void*) * (offset + 1));
+ memcpy(dst_slots, slots, sizeof(void *) * (offset + 1));
dst_offset = offset;
// Handle insert of new range starting after old range
if (!entry) {
enum maple_type mt = mte_node_type(mas->node);
- unsigned long *pivots = ma_pivots(mas_mn(mas),mt);
+ unsigned long *pivots = ma_pivots(mas_mn(mas), mt);
void **slots = ma_slots(mas_mn(mas), mt);
unsigned char offset_end = mas->offset;
mt = mte_node_type(mas->node);
slots = ma_slots(mas_mn(mas), mt);
offset = mt_slots[mt];
- do {} while(!mas_get_slot(mas, --offset));
+ do {} while (!mas_get_slot(mas, --offset));
mas->max = mas_safe_pivot(mas, offset);
}
pivot = mas_safe_pivot(mas, offset);
}
- mas->node = mas_slot(mas, slots,offset);
+ mas->node = mas_slot(mas, slots, offset);
mas->min = prev_piv + 1;
mas->max = pivot;
return mas->max;
nr_nodes = max(nr_entries, nr_entries * 2 + 1);
if (!mt_is_alloc(mas->tree))
- nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
+ nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
// Leaves
- nr_nodes = DIV_ROUND_UP(nr_nodes , MAPLE_RANGE64_SLOTS - 1);
+ nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 1);
// Internal nodes.
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
mas_node_count(mas, nr_nodes);
__mas_walk(mas, &range_min, &range_max);
end = mas_data_end(mas) + 1;
- if (end < mt_min_slot_count(mas->node) - 1) {
+ if (end < mt_min_slot_count(mas->node) - 1)
mas_destroy_rebalance(mas, end);
- }
+
mas->mas_flags &= ~MA_STATE_REBALANCE;
}
mas->mas_flags &= ~MA_STATE_BULK;
node = mas->alloc;
mas->alloc = mas->alloc->slot[0];
if (node->node_count > 0)
- mt_free_bulk(node->node_count, (void**)&node->slot[1]);
+ mt_free_bulk(node->node_count, (void **)&node->slot[1]);
kmem_cache_free(maple_node_cache, node);
}
mas->alloc = NULL;
void mt_validate_nulls(struct maple_tree *mt)
{
- void *entry, *last = (void*)1;
+ void *entry, *last = (void *)1;
unsigned char end, offset = 0;
void **slots;
MA_STATE(mas, mt, 0, 0);
if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
return;
- while (!mte_is_leaf(mas.node)) {
+ while (!mte_is_leaf(mas.node))
mas_descend(&mas);
- }
slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
end = mas_data_end(&mas);
} else
offset++;
- } while(!mas_is_none(&mas));
+ } while (!mas_is_none(&mas));
}
/*
* validate a maple tree by checking:
void *ptr)
{
void *ret = mtree_test_load(mt, index);
+
if (ret != ptr)
- printk("Load %lu returned %p expect %p\n", index, ret, ptr);
+ pr_err("Load %lu returned %p expect %p\n", index, ret, ptr);
MT_BUG_ON(mt, ret != ptr);
}
MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
for (i = total; i > 0; i--) {
unsigned int e = 0; // expected node_count
+
if (i >= 35)
e = i - 35;
else if (i >= 5)
MT_BUG_ON(mt, !mas.alloc);
i = 1;
smn = mas.alloc;
- while(i < total) {
+ while (i < total) {
for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) {
i++;
MT_BUG_ON(mt, !smn->slot[j]);
for (j = 0; j <= i; j++)
check_index_load(mt, j);
- if(i)
+ if (i)
MT_BUG_ON(mt, !mt_height(mt));
check_load(mt, i + 1, NULL);
}
} else { // !null_entry (store)
bool esplit = e_max > mas_end->last;
bool ssplit = s_min != mas_start->index;
+
if (s_entry && e_entry) {
if (esplit && ssplit)
count--;
do {
count++;
mas_dfs_preorder(&mas);
- } while(!mas_is_none(&mas));
+ } while (!mas_is_none(&mas));
// 68 + MAS_START = 69 + 1 for no jitter
//printk("count %lu\n", count);
MT_BUG_ON(mt, count != 70);
do {
count++;
mas_dfs_preorder(&mas);
- } while(!mas_is_none(&mas));
+ } while (!mas_is_none(&mas));
//printk("count %lu\n", count);
// 71 + MAS_START = 72 + 1 for no jitter
MT_BUG_ON(mt, count != 72);
do {
count++;
mas_dfs_preorder(&mas);
- } while(!mas_is_none(&mas));
+ } while (!mas_is_none(&mas));
// 71 + MAS_START = 72
//printk("count %lu\n", count);
MT_BUG_ON(mt, count != 77);
nr_tallocated = 0;
mt_set_non_kernel(200);
mas_entry_count(&mas, max);
- for(count = 0; count <= max; count++) {
+ for (count = 0; count <= max; count++) {
mas.index = mas.last = count;
mas_store(&mas, xa_mk_value(count));
MT_BUG_ON(mt, mas_is_err(&mas));
static noinline void bench_slot_store(struct maple_tree *mt)
{
- int i, brk = 105, max = 1040, brk_start= 100, count = 20000000;
+ int i, brk = 105, max = 1040, brk_start = 100, count = 20000000;
- for (i = 0; i < max; i+=10)
+ for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
for (i = 0; i < count; i++) {
{
int i, overwrite = 76, max = 240, count = 20000000;
- for (i = 0; i < max; i+=10)
+ for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
for (i = 0; i < count; i++) {
mas.index = 0;
mas.last = 0;
if (mas_entry_count(&newmas, nr_entries)) {
- printk("OOM!");
+ pr_err("OOM!");
BUG_ON(1);
}
mas_for_each(&mas, val, ULONG_MAX) {
}
}
+//#define BENCH_SLOT_STORE
+//#define BENCH_NODE_STORE
+//#define BENCH_FORK
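+/*
+ * Uncomment one of the BENCH_* defines above to build the matching benchmark
+ * in place of the full test run (see the #if defined() blocks in
+ * maple_tree_seed()).
+ */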
static DEFINE_MTREE(tree);
static int maple_tree_seed(void)
{
pr_info("\nTEST STARTING\n\n");
-#if 0
+#if defined(BENCH_SLOT_STORE)
+#define BENCH
mtree_init(&tree, MAPLE_ALLOC_RANGE);
bench_slot_store(&tree);
mtree_destroy(&tree);
goto skip;
#endif
-#if 0
+#if defined(BENCH_NODE_STORE)
+#define BENCH
mtree_init(&tree, MAPLE_ALLOC_RANGE);
bench_node_store(&tree);
mtree_destroy(&tree);
goto skip;
#endif
-#if 0
+#if defined(BENCH_FORK)
+#define BENCH
mtree_init(&tree, MAPLE_ALLOC_RANGE);
bench_forking(&tree);
mtree_destroy(&tree);
check_node_overwrite(&tree);
mtree_destroy(&tree);
+#if defined(BENCH)
skip:
+#endif
rcu_barrier();
pr_info("maple_tree: %u of %u tests passed\n", maple_tree_tests_passed,
maple_tree_tests_run);