if (_ma_is_root(a_node))
goto no_parent;
+ printk("ascend: Starting at %p\n", a_node);
+
p_enode = mt_mk_node(mte_parent(mas->node),
mas_parent_enum(mas, mas->node));
} while (b_end--);
}
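+/* mab_middle_node() - Check if a middle (third) node is needed to hold
+ * @size entries from @b_node.
+ * @b_node: the maple_big_node with the data
+ * @size: the amount of data in @b_node
+ * @slot_cnt: the slot count of the destination node type
+ *
+ * Return: true when the entry at the even split point is NULL and the data
+ * does not fit in two nodes (size >= 2 * slot_cnt - 1).
+ */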
+static inline bool mab_middle_node(struct maple_big_node *b_node, int size,
+ unsigned char slot_cnt)
+{
+ int split = (size - 1) / 2; // Assume equal split.
+
+ if (!b_node->slot[split] && (size >= (2 * slot_cnt - 1)))
+ return true;
+ return false;
+}
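+
+/* mab_calc_split() - Calculate the split point of a maple_big_node.
+ * @b_node: the maple_big_node with the data
+ * @size: the amount of data in @b_node
+ * @slot_cnt: the slot count of the destination node type
+ * @min: the minimum index covered by @b_node
+ *
+ * Starts from an even two-way split (e.g. size 10 gives split 4).  When a
+ * middle node is needed, the data is instead split three ways (e.g. size 31
+ * gives split 10).  The split is then adjusted so no node ends on a NULL
+ * entry or spans a range smaller than the slot count.
+ *
+ * Return: the slot at which @b_node should be split.
+ */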
static inline int mab_calc_split(struct maple_big_node *b_node, int size,
unsigned char slot_cnt, unsigned long min)
{
int split = (size - 1) / 2; // Assume equal split.
+ printk("orig split %u\n", split);
- if (size > 2* slot_cnt) {
+ if (mab_middle_node(b_node, size, slot_cnt)) {
split = (size + 1) / 3;
+ printk("3 split %u\n", split);
+ } else {
+		/* Avoid ending a node on a NULL entry and avoid a node
+		 * spanning a range smaller than the slot count.
+		 */
+ while (((b_node->pivot[split] - min) < slot_cnt - 1) &&
+ (split < slot_cnt))
+ split++;
}
- //printk("Guessing leaf split of %u (size is %d)\n", split, size);
- /* Avoid ending a node in NULL and avoid having a range less than the
- * slot count
- */
- while (((b_node->pivot[split] - min) < slot_cnt) &&
- split < slot_cnt) {
- //printk("Skipping ahead due to min span\n");
- split++;
- }
-
+ printk("adjusted split %u\n", split);
if (!b_node->slot[split]) {
+ printk("%u is null\n", split);
if (split < slot_cnt - 1)
split++;
else
r = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
mte_node_type(orig_l_mas->node));
}
- if (b_end > slot_cnt*2)
+ if (mab_middle_node(b_node, b_end, slot_cnt))
m = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
mte_node_type(orig_l_mas->node));
printk("Splitting %u at %u\n", b_end, split);
/* Set parents from previous run */
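+	/* Children at slots up to the split point are parented by the new
+	 * left node l; the rest are parented by the new right node r with
+	 * their slot offset rebased by the split.
+	 */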
if (l_mas.node) {
- printk("Set parent of %p to either %p or %p\n", mas_mn(&l_mas),
- l, r);
- printk("child %u split %u\n", child, split);
if (child <= split)
mte_set_parent(l_mas.node, l, child);
else
mte_set_parent(l_mas.node, r, child - split);
+ printk("Set parent of %p to %p\n", mas_mn(&l_mas),
+ mte_parent(l_mas.node));
+ printk("child %u split %u\n", child, split);
}
if (m_mas.node) {
- child *= 2;
+ child++;
if (child <= split)
mte_set_parent(m_mas.node, l, child);
else
mte_set_parent(m_mas.node, r, child - split);
- child++;
+ printk("Set parent of m %p to %p\n", mas_mn(&m_mas),
+ mte_parent(m_mas.node));
+ printk("child %u split %u\n", child, split);
}
if (r_mas.node) {
child++;
		if (child <= split)
			mte_set_parent(r_mas.node, l, child);
		else
			mte_set_parent(r_mas.node, r, child - split);
+ printk("Set parent of r %p to %p\n", mas_mn(&r_mas),
+ mte_parent(r_mas.node));
+ printk("child %u split %u\n", child, split);
}
/* Copy data from b_node to new nodes */
mas_mn(&l_mas)->parent = ma_parent_ptr(
((unsigned long)mas->tree | MA_ROOT_PARENT));
mas->depth = orig_l_mas->depth;
- orig_l_mas->node = l_mas.node;
+ mas_dup_state(orig_l_mas, &l_mas);
return 0;
}
// copy in prev.
mas_mab_cp(orig_l_mas, 0, end, b_node, 0);
b_end += end + 1;
+ child += end + 1;
if (!count)
count++;
}
(mt_is_alloc(mas->tree) ? true : false));
// copy in prev.
mas_mab_cp(orig_l_mas, 0, end, b_node, 0);
+ child += end + 1;
b_end += end + 1;
printk("b_end is %u\n", b_end);
}
mas_mn(&l_mas)->parent = mas_mn(orig_l_mas)->parent;
}
- orig_l_mas->node = l_mas.node;
+ mas_dup_state(orig_l_mas, &l_mas);
mas->depth = orig_l_mas->depth;
return b_end;
mas_set_slot(&r_mas, 0);
__mas_walk(&r_mas, &range_min, &range_max);
r_mas.last = r_mas.index = mas->last;
- mt_dump(mas->tree);
// Set up left side.
mas_set_slot(&l_mas, 0);
if (mte_is_leaf(mas->node))
return 1;
- //mt_dump(mas->tree);
+ mt_dump(mas->tree);
mas_descend_adopt(mas);
/* Expand store of NULL, if necessary */
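+	/* mas_extend_null() widens the store range to cover neighbouring
+	 * NULL slots so adjacent NULL entries remain a single range.
+	 */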
if (!entry) {
- mt_dump(mas->tree);
mas_extend_null(mas, mas);
slot = mas_get_slot(mas);
printk("store is now %lu-%lu at slot %u\n", mas->index,
void *ptr = NULL;
MA_STATE(mas, mt, 0, 0);
-// goto skip;
mt_set_non_kernel(3);
check_erase2_testset(mt, set, ARRAY_SIZE(set));
mtree_destroy(mt);
mas_get_unmapped_area_rev(&mas, 0, 140373518663680, 4096);
rcu_read_unlock();
mtree_destroy(mt);
-//skip:
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set14, ARRAY_SIZE(set14));
rcu_barrier();
mtree_destroy(mt);
check_seq(mt, 50, false);
- return;// FIXME
+ return;
mt_set_non_kernel(4);
check_store_range(mt, 5, 47, xa_mk_value(47), 0);
mtree_destroy(mt);
{
struct maple_enode *mn1, *mn2;
void *entry;
- unsigned long index = 86;
+ unsigned long index = 82;
MA_STATE(mas, mt, index, index);
check_seq(mt, 100, false); // create 100 singletons.
mt_set_non_kernel(1);
- mtree_test_erase(mt, 88);
- check_load(mt, 88, NULL);
- mtree_test_erase(mt, 87);
- check_load(mt, 87, NULL);
+ mtree_test_erase(mt, 83);
+ check_load(mt, 83, NULL);
+ mtree_test_erase(mt, 84);
+ check_load(mt, 84, NULL);
rcu_read_lock();
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != xa_mk_value(index));
mn1 = mas.node;
+ printk("node %p\n", mn1);
+ mas_next(&mas, ULONG_MAX);
entry = mas_next(&mas, ULONG_MAX);
- MT_BUG_ON(mt, entry != xa_mk_value(index + 3));
+ MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
mn2 = mas.node;
MT_BUG_ON(mt, mn1 == mn2); // test the test.
- // At this point, there is a gap of 2 in either 1 or 2 nodes. Find a
- // gap of size 2 from 100 down to 50.
+	/* At this point, there is a gap of 2 at index + 1 within the range
+	 * 50 - 100. Search for that gap.
+	 */
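+	/* Concretely: ..., 82 => value, 83 - 84 => NULL, 85 => value, ... */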
mt_set_non_kernel(1);
mas_reset(&mas);
MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 50, 100, 2));
- MT_BUG_ON(mt, mas.index != 87);
+ MT_BUG_ON(mt, mas.index != index + 1);
rcu_read_unlock();
mtree_test_erase(mt, 38);
mn1 = mas.node;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
+ mas_next(&mas, ULONG_MAX); // go to the next node.
mn2 = mas.node;
MT_BUG_ON(mt, mn1 == mn2);
- // At this point, there is a gap of 2 in either 1 or 2 nodes. Find a
- // gap of size 2 from 100 down to 50.
+	/* At this point, there is a gap of 3 at 38. Find it by searching the
+	 * range 20 - 50 for a gap of size 3.
+	 */
mas_reset(&mas);
MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 20, 50, 3));
MT_BUG_ON(mt, mas.index != 38);