if (mas->index && piv < mas->index - 1) {
b_node->slot[b_end] = contents;
+ if (!contents)
+ b_node->gap[b_end] = mas->index - 1 - piv;
b_node->pivot[b_end++] = mas->index - 1;
}
piv = mas_get_safe_pivot(mas, slot);
if (piv > mas->last) {
b_node->slot[++b_end] = contents;
+ if (!contents)
+ b_node->gap[b_end] = piv - mas->last + 1;
b_node->pivot[b_end] = piv;
} else
piv = mas->last;
mas_dup_state(mast->l, &tmp_mas);
split = mab_no_null_split(mast->bn, mt_slots[mast->bn->type] - 1,
mt_slots[mast->bn->type]);
+ // Update parent slot for split calculation.
+ mas_set_slot(mast->orig_l, mas_get_slot(mast->orig_l) + end + 1);
mast_split_data(mast, mas, split);
mast_split_fill_bnode(mast, mas, 2);
mas_split_final_node(mast, mas, mas->full_cnt + 1);
}
-// FIXME: When task_size / page_size -1 works, check to ensure we are
-// not inserting above this.
static inline int mas_root_expand(struct ma_state *mas, void *entry)
{
void *contents = rcu_dereference_protected(mas->tree->ma_root,
mte_set_rcu_slot(mas->node, slot, entry);
mte_set_pivot(mas->node, slot++, mas->last);
-
+#if 0
if (mt_is_alloc(mas->tree)) {
//FIXME: arch_get_mmap_end? mas->index = TASK_SIZE / PAGE_SIZE - 1;
unsigned long mmap_end = 0x2000000000000UL;
mte_set_rcu_slot(mas->node, slot, XA_ZERO_ENTRY);
mte_set_pivot(mas->node, slot++, mt_max[type]);
}
-
+#endif
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
mas->tree->ma_height = 1;
return mas_spanning_rebalance(mas, &mast, count);
}
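+/*
+ * mas_can_append() - Check if the entry in @bn can be written by appending
+ * to the existing node instead of rebuilding it.  Appending requires free
+ * room in the node, no truncated data, and the new entry to land in the
+ * last slot (or the second last slot, when the last slot holds NULL).
+ */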
+static inline bool mas_can_append(struct ma_state *mas,
+ struct maple_big_node *bn,
+ unsigned char slot_cnt,
+ unsigned char end)
+{
+ if (bn->b_end >= slot_cnt)
+ return false; // no room.
+
+ if (bn->b_end <= end)
+ return false; // truncated data.
+
+ if (!mas->last)
+ return false; // a write ending at index 0 cannot be an append.
+
+ if (bn->pivot[bn->b_end] == mas->last)
+ return true; // last entry is the insert.
+
+ if ((bn->pivot[bn->b_end - 1] == mas->last) && !bn->slot[bn->b_end])
+ return true; // inserting second last entry and last is NULL.
+
+ return false;
+}
static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)
{
unsigned long r_max, r_min;
// Check if this is an append operation.
end = mas_data_end(mas);
- if ((b_node.b_end < slot_cnt) && ((slot > end) || !end)) {
- if (r_min < mas->index)
- mte_set_pivot(mas->node, slot++, mas->index - 1);
-
- mte_set_rcu_slot(mas->node, slot, entry);
- mte_set_pivot(mas->node, slot, mas->last);
+ if (mas_can_append(mas, &b_node, slot_cnt, end)) {
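+ /* Append in place: copy the new slots from the big node into the node. */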
+ slot = b_node.b_end;
+ do {
+ mte_set_rcu_slot(mas->node, slot, b_node.slot[slot]);
+ if (slot < slot_cnt - 1)
+ mte_set_pivot(mas->node, slot, b_node.pivot[slot]);
+ } while (slot && slot-- >= end);
+ mas_update_gap(mas);
goto append;
}
else if (b_node.b_end < mt_min_slot_cnt(mas->node))
mas_cnt_empty(mas);
- mas_commit_b_node(mas, &b_node);
- if (mas_is_err(mas))
- ret = 3;
-
- if (mas_is_err(mas))
+ if (!mas_commit_b_node(mas, &b_node))
return NULL;
if (ret > 2)
return entry;
}
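+/*
+ * mas_dfs_preorder() - Move @mas to the next node in a depth-first
+ * pre-order walk of the tree.  A MAS_START state begins the walk at the
+ * root; the node is set to MAS_NONE once every node has been visited.
+ */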
+static inline void mas_dfs_preorder(struct ma_state *mas)
+{
+ struct maple_enode *prev;
+ unsigned char slot = 0;
+
+ if (mas_is_start(mas)) {
+ mas_start(mas);
+ return;
+ }
+
+ if (mte_is_leaf(mas->node) && mte_is_root(mas->node)) {
+ mas->node = MAS_NONE;
+ return;
+ }
+
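+ /* Ascend while the current node is a leaf or its slots are exhausted. */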
+walk_up:
+ if (mte_is_leaf(mas->node) ||
+ (slot >= mt_slot_count(mas->node))) {
+ slot = mte_parent_slot(mas->node) + 1;
+ mas->node = mt_mk_node(mte_parent(mas->node),
+ mas_parent_enum(mas, mas->node));
+ goto walk_up;
+ }
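+ /* Descend into the child; a NULL slot means resume at the parent's next slot. */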
+ prev = mas->node;
+ mas->node = mas_get_rcu_slot(mas, slot);
+ if (!mas->node) {
+ mas->node = MAS_NONE;
+ if (mte_is_root(prev))
+ return;
+
+ mas->node = prev;
+ slot = mte_parent_slot(mas->node) + 1;
+ mas->node = mt_mk_node(mte_parent(mas->node),
+ mas_parent_enum(mas, mas->node));
+ goto walk_up;
+ }
+
+ return;
+}
/* Interface */
void __init maple_tree_init(void)
MT_BUG_ON(mt, !mtree_empty(mt));
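+ /* Reset the cumulative allocation counter reported as "total" below. */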
+ nr_tallocated = 0;
for (i = 0; i <= max; i++) {
MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
for (j = 0; j <= i; j++)
if (verbose) {
rcu_barrier();
mt_dump(mt);
- pr_info(" seq test of 0-%lu used %luK in %d allocations\n",
- max, mt_get_alloc_size()/1024, nr_allocated);
+ pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
+ max, mt_get_alloc_size()/1024, nr_allocated,
+ nr_tallocated);
}
}
MA_STATE(mas, mt, 0, 0);
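+ /* Occupy the top of the address space so allocations stay below MTREE_ALLOC_MAX. */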
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ mt_dump(mt);
+
#define DEBUG_REV_RANGE 0
for (i = 0; i < range_cnt; i += 2) {
/* Inclusive, Inclusive (with the -1) */
int req_range_cnt = ARRAY_SIZE(req_range);
unsigned long min = 0x565234af2000;
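+ /* As above, keep allocations below MTREE_ALLOC_MAX. */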
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
for (i = 0; i < range_cnt; i += 2) {
#define DEBUG_ALLOC_RANGE 0
#if DEBUG_ALLOC_RANGE
check_store_range(mt, 1792, 1799, NULL, 0);
mt_validate(mt);
mtree_destroy(mt);
+
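+ /*
+  * Fill the tree with adjacent small ranges, then store single-index
+  * entries to force node splits at various heights.
+  */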
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ for (i = 0; i <= 200; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+
+ for (i = 10; i <= 19; i++) {
+ val = i*100 + 5;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ val++;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ val += 10;
+ if (val == 1116)
+ mt_dump(mt);
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ if (val == 1116)
+ mt_dump(mt);
+ mt_validate(mt);
+
+ val += 39;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ val++;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ val += 10;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+
+ for (i = 13; i <= 14; i++) {
+ val = i*100 + 75;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val++;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val += 9;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val++;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val += 9;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val++;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+
+ for (i = 0; i <= 3; i++) {
+ val = 1200 + i*10;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val++;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+ for (i = 0; i <= 5; i++) {
+ val = 1270 + i;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val += 10;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+ for (i = 0; i <= 6; i++) {
+ val = 1400 + i*10;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val++;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+ for (i = 0; i <= 5; i++) {
+ val = 1370 + i;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val += 10;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ val += 10;
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+ for (i = 0; i < ARRAY_SIZE(overflow); i++) {
+ val = overflow[i];
+ check_store_range(mt, val, val, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+
+ // Cause a 3 child split all the way up the tree.
+ check_store_range(mt, 1349, 1350, NULL, 0);
+ mt_validate(mt);
+ mtree_destroy(mt);
}
static noinline void check_next_entry(struct maple_tree *mt)
mtree_destroy(mt);
}
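+/* Walk populated trees with mas_dfs_preorder() and count the nodes visited. */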
+static void check_dfs_preorder(struct maple_tree *mt)
+{
+ int count = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ check_seq(mt, 1000, false);
+
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ // 68 nodes + the initial MAS_START step = 69 iterations.
+ MT_BUG_ON(mt, count != 69);
+ mtree_destroy(mt);
+
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ mas_reset(&mas);
+ count = 0;
+ check_seq(mt, 1000, false);
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ // 71 nodes + the initial MAS_START step = 72 iterations.
+ MT_BUG_ON(mt, count != 72);
+}
static DEFINE_MTREE(tree);
check_new_node(&tree);
mtree_destroy(&tree);
+ mtree_init(&tree, 0);
+ check_dfs_preorder(&tree);
+ mtree_destroy(&tree);
+
/* Test ranges (store and insert) */
mtree_init(&tree, 0);
check_ranges(&tree);