struct maple_enode *new_node;
printk("Commit\n");
+ if (end < mt_min_slot_cnt(mas->node)) {
+ // FIXME: Rebalance with a sibling when the node falls below the
+ // minimum occupancy?
+ }
if (end >= mt_slot_count(mas->node))
return mas_commit_split(mas, b_node, end);
mas_node_cnt(mas, 1);
- if (mas_is_err(mas))
+ if (mas_is_err(mas)) {
+ printk("Failed\n");
return 0;
+ }
new_node = mt_mk_node(mas_next_alloc(mas), mte_node_type(mas->node));
mte_to_node(new_node)->parent = mas_mn(mas)->parent;
if (mas_is_err(mas))
return 0;
+ printk("\tExpand\n");
mas->node = mt_mk_node(mas_next_alloc(mas), mt);
mas_mn(mas)->parent = ma_parent_ptr(
((unsigned long)mas->tree | MA_ROOT_PARENT));
- if (mas->index) {
- mte_set_rcu_slot(mas->node, slot, contents);
- if (mas->index > 1)
- mte_set_pivot(mas->node, slot, mas->index - 1);
- slot++;
- }
+ if (contents)
+ mte_set_rcu_slot(mas->node, slot++, contents);
+
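+ // A store starting at index 0 overwrites the slot just used for the
+ // old contents; otherwise the old range is closed off with a pivot
+ // at index - 1.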
+ if (!mas->index)
+ slot--;
+ else if (mas->index > 1)
+ mte_set_pivot(mas->node, slot++, mas->index - 1);
+
mte_set_rcu_slot(mas->node, slot, entry);
mte_set_pivot(mas->node, slot++, mas->last);
mas_set_slot(mas, i);
return ret;
}
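+/*
+ * mas_cnt_full() - Count a consecutive run of full nodes.  full_cnt is
+ * positive for a run of full nodes and restarts at 1 when the previous
+ * node seen was not full.
+ */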
+static inline void mas_cnt_full(struct ma_state *mas)
+{
+ if (mas->full_cnt < 0)
+ mas->full_cnt = 1;
+ else
+ mas->full_cnt++;
+}
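+/*
+ * mas_cnt_empty() - Count a consecutive run of under-filled nodes; the
+ * negative mirror of mas_cnt_full().
+ */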
+static inline void mas_cnt_empty(struct ma_state *mas)
+{
+ if (mas->full_cnt > 0)
+ mas->full_cnt = -1;
+ else
+ mas->full_cnt--;
+}
/* Private
*
* mas_wr_walk(): Walk the tree for a write. Tracks extra information which
mas->last_cnt++;
- mas->full_cnt++;
end = mas_data_end(mas);
- printk("End of %p is %u type %u\n", mas_mn(mas), end, mt_slots[type] - 1);
+ printk("End of %p is %u max of %u\n", mas_mn(mas), end, mt_slots[type]);
if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
return false;
if (ma_is_leaf(type))
return true;
- if (end < mt_slots[type] - 1)
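+ /* Track how full this level is: at or below the minimum counts as
+ * empty, within one slot of capacity counts as full, and anything in
+ * between resets the streak. */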
+ if (end <= mt_min_slots[type])
+ mas_cnt_empty(mas);
+ else if (end >= mt_slots[type] - 1)
+ mas_cnt_full(mas);
+ else
mas->full_cnt = 0;
*/
static inline int mas_spanning_store(struct ma_state *mas, void *entry)
{
- unsigned long range_min, range_max;
- struct maple_enode *r_node;
unsigned char slot, r_slot, l_slot = mas_get_slot(mas);
+ unsigned long range_min, range_max;
bool store_left = (entry ? true : false);
enum maple_type type;
+ struct maple_enode *left, *right;
+#if 0
printk("Not implemented to store %lu-%lu, span starts at %p\n",
mas->index, mas->last, mte_to_node(mas->span_enode));
-
BUG_ON(1);
+#endif
// FIXME: Placeholder allocation count; work out the real worst case.
mas_node_cnt(mas, 1 + 20 * 2);
return 0;
MA_STATE(r_mas, mas->tree, mas->index, mas->last); // right
- MA_STATE(p_r_mas, mas->tree, mas->index, mas->last); // right parent
+ MA_STATE(p_r_mas, mas->tree, mas->index, mas->last); // right previous
MA_STATE(l_mas, mas->tree, mas->index, mas->last); // left
- MA_STATE(p_l_mas, mas->tree, mas->index, mas->last); // left parent
+ MA_STATE(p_l_mas, mas->tree, mas->index, mas->last); // left previous
if (!mte_is_root(mas->node)) {
mas_ascend(&p_l_mas);
}
mas_dup_state(&l_mas, mas);
+ l_mas.last = l_mas.index;
__mas_walk(&l_mas, &range_min, &range_max);
l_slot = mas_get_slot(&l_mas);
- mas_dup_state(&r_mas, mas);
+ mas_dup_state(&r_mas, &l_mas);
+ r_mas.last = mas->last;
+ r_mas.index = r_mas.last;
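+ /* Point the right state at the end of the range and step it into
+ * the node following the left node at this level before walking
+ * down. */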
+ mas_set_slot(&r_mas, mte_parent_slot(l_mas.node));
+ mas_next_node(&r_mas, ULONG_MAX);
+ mas_set_slot(&r_mas, 0);
// range_max is used below, so keep r_mas walk after l_mas.
__mas_walk(&r_mas, &range_min, &range_max);
r_slot = mas_get_slot(&r_mas);
+
+ printk("%s: l_slot %p[%u] r_slot %p[%u]\n", __func__,
+ mas_mn(&l_mas), l_slot,
+ mas_mn(&r_mas), r_slot);
+
/* No entry means storing to the right; otherwise check for full nodes
 * and swap to the right if the left is full and the right is not.
 */
}
}
+ printk("%s: store %s\n", __func__, store_left ? "left" : "right");
/* Expand store of NULL, if necessary */
if (!entry) {
/* Check if there is a null in the previous slot on the left */
- if (l_slot && (!mas_get_rcu_slot(&l_mas, l_slot - 1)))
- mas->index = mte_get_pivot(l_mas.node,
- l_slot - 1) + 1;
- else if (!l_slot)
- printk("NULL could be in the prev node?\n");
+ if (!mas_get_rcu_slot(&l_mas, l_slot)) {
+ if (l_slot > 1)
+ mas->index = mte_get_pivot(l_mas.node, l_slot - 2) + 1;
+ else
+ mas->index = mas->min;
+ }
/* Check if there is a null in the next slot on the right */
- if ((range_max != r_mas.max) &&
- (!mas_get_rcu_slot(&r_mas, r_slot + 1)))
+ if ((r_slot + 1 < mt_slot_count(r_mas.node)) &&
+ (!mas_get_rcu_slot(&r_mas, r_slot + 1))) {
mas->last = mas_get_safe_pivot(&r_mas, r_slot + 1);
- else if (range_max == r_mas.max)
+ if (!mas->last)
+ mas->last = mas->max;
+ }
+ if (range_max == r_mas.max)
printk("NULL could be in the next node?\n");
+
}
+ printk("%s: final range is %lu-%lu at slot %u\n", __func__,
+ mas->index, mas->last, mas_get_slot(mas));
/* FIXME: What about detecting a split here? */
// if the parent is root
mas_dup_state(&p_r_mas, mas);
mas_dup_state(&p_l_mas, mas);
+ type = mte_node_type(mas->node);
// Set up the right side maple state to point to the correct slot
r_mas.index = r_mas.last; // Just point to the right.
mas_node_walk(&r_mas, type, &range_min, &range_max);
+ r_slot = mas_get_slot(&r_mas);
+ l_slot = mas_get_slot(mas);
+ slot = l_slot + 1;
+ printk("%s: copy %p[%u] over right\n", __func__, mas_mn(mas), r_slot);
+ printk("%s: copy %p[0-%u] for left\n", __func__, mas_mn(mas), l_slot);
// Set up the left side maple state to point to just the left.
l_mas.last = mas->index;
// Make a new node, etc.
- type = mte_node_type(mas->node);
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
- r_mas.node = mas->node;
- slot = l_slot; // Start filling the right at this position.
+ r_mas.node = l_mas.node;
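+ // Left and right start in the same new node; lower levels get
+ // separate nodes as the copy loop descends.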
// Set the middle pivot of the ancestor.
- mte_set_pivot(l_mas.node, l_slot,
- (store_left ? mas->last : mas->index - 1));
mas_mn(&l_mas)->parent = mas_mn(mas)->parent; // Copy the parent.
mas->node = l_mas.node; // Save for later.
do {
-
+ if (mte_is_leaf(r_mas.node) && !store_left) {
+ // Store value in right side.
+ mte_set_rcu_slot(r_mas.node, slot, entry);
+ mte_set_pivot(r_mas.node, slot, r_mas.last);
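+ // A NULL stored over an existing NULL reuses the slot so the
+ // ranges merge.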
+ if (entry || mas_get_rcu_slot(&p_r_mas, r_slot))
+ slot++;
+ }
// Copy right to new node.
- for (; r_slot < mt_slot_count(r_node); r_slot++) {
+ for (; r_slot < mt_slot_count(r_mas.node); r_slot++) {
unsigned long piv =
mas_get_safe_pivot(&p_r_mas, r_slot);
if (!piv) // Node end.
break;
- mte_set_rcu_slot(r_node, ++slot,
+ mte_set_rcu_slot(r_mas.node, slot,
mas_get_rcu_slot(&p_r_mas, r_slot));
- if (r_slot < mt_pivot_count(r_node))
- mte_set_pivot(r_node, slot, piv);
+ printk("r cp %p[%u] -> %p[%u]\n",
+ mas_mn(&p_r_mas), r_slot,
+ mas_mn(&r_mas), slot);
+ if (r_slot < mt_pivot_count(r_mas.node))
+ mte_set_pivot(r_mas.node, slot, piv);
if (mt_is_alloc(mas->tree))
- mte_set_gap(r_node, slot,
+ mte_set_gap(r_mas.node, slot,
mte_get_gap(p_r_mas.node, r_slot));
+ slot++;
}
// Copy left to new node.
type = mte_node_type(l_mas.node);
for (slot = 0; slot <= l_slot; slot++) {
+ printk("l cp %p[%u] -> %p[%u]\n",
+ mas_mn(&p_l_mas), slot,
+ mas_mn(&l_mas), slot);
mte_set_rcu_slot(l_mas.node, slot,
mas_get_rcu_slot(&p_l_mas, slot));
mte_set_pivot(l_mas.node, slot,
mas_get_safe_pivot(&p_l_mas, slot));
}
+ if (ma_is_leaf(type) && store_left) {
+ // Store the value at the end of the left node. Nodes
+ // require a value at the end so terminate that range
+ // and store a new one.
+ mte_set_pivot(l_mas.node, --slot, l_mas.index - 1);
+ // Store new range.
+ mte_set_rcu_slot(l_mas.node, ++slot, entry);
+ if (slot < mt_pivot_count(l_mas.node))
+ mte_set_pivot(l_mas.node, slot, l_mas.last);
+
+ } else if (l_mas.node == mas->node) {
+ // Overwrite the pivot between the two nodes with the
+ // correct value.
+ mte_set_pivot(l_mas.node, l_slot,
+ (store_left ? mas->last : mas->index - 1));
+ }
+
// Rebalance if necessary.
// FIXME: Rebalance inactive needed, also set the slot correctly
// for index.
//
// Rebalance left + right + (possibly) node to the right of right
- mas_inactive_rebalance(mas, &l_mas, &p_l_mas, &r_mas, &p_r_mas);
+ if (l_mas.node != r_mas.node)
+ mas_inactive_rebalance(mas, &l_mas, &p_l_mas, &r_mas,
+ &p_r_mas);
l_slot = mas_get_slot(&l_mas);
r_slot = mas_get_slot(&r_mas);
break;
// Set up for a new run.
- mas_dup_state(&p_r_mas, &r_mas);
- mas_dup_state(&p_l_mas, &l_mas);
mas_descend(&l_mas);
mas_descend(&r_mas);
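+ // Descend the old-tree states in lock step so the next iteration
+ // copies from the matching source nodes.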
+ mas_set_slot(&p_r_mas, r_slot);
+ mas_set_slot(&p_l_mas, l_slot);
+ mas_descend(&p_r_mas);
+ mas_descend(&p_l_mas);
mas_node_walk(&l_mas, mte_node_type(l_mas.node), &range_min,
&range_max);
type = mte_node_type(r_mas.node);
mas_node_walk(&r_mas, type, &range_min, &range_max);
// Create next nodes.
- l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ left = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
mte_node_type(l_mas.node));
- mte_set_parent(l_mas.node, p_l_mas.node, l_slot);
- mte_set_rcu_slot(p_l_mas.node, l_slot, l_mas.node);
+ mte_set_parent(left, p_l_mas.node, l_slot);
+ mte_set_rcu_slot(p_l_mas.node, l_slot, left);
+ l_mas.node = left;
- r_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
- type);
- mte_set_parent(r_mas.node, p_r_mas.node, r_slot);
- mte_set_rcu_slot(p_r_mas.node, r_slot, r_mas.node);
+ right = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
+ mte_set_parent(right, p_r_mas.node, r_slot);
+ mte_set_rcu_slot(p_r_mas.node, r_slot, right);
+ r_mas.node = right;
l_slot = mas_get_slot(&l_mas);
r_slot = mas_get_slot(&r_mas);
slot = 0;
+ printk("New loop, copy %p[0-%u] and %p[%u-end]\n",
+ mas_mn(&p_l_mas), l_slot, mas_mn(&p_r_mas), r_slot);
} while (1);
+
// mark modified node(s?) dead.
// NOTE: Rebalanced nodes not in this sub-tree are already marked dead.
+ mas_set_node_dead(&p_l_mas);
+ mas_set_node_dead(&p_r_mas);
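+ /* Make sure readers see the dead marks before the new sub-tree is
+ * published below. */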
+ smp_wmb();
+ _mas_replace(mas, false, false);
// insert new sub-tree
//
//do
{
unsigned long r_max, r_min;
unsigned char end, new_end, slot;
+ unsigned char slot_cnt;
void *content = NULL;
struct maple_big_node b_node;
int ret = 0;
- printk("\nStart: %s %d store %lu-%lu\n", __func__, __LINE__,
- mas->index, mas->last);
+ printk("\nStart: %s %d store %lu-%lu %p\n", __func__, __LINE__,
+ mas->index, mas->last, mas_mn(mas));
+ mt_dump(mas->tree);
if (mas_start(mas) || (mas_is_none(mas) || mas->node == MAS_ROOT))
ret = ma_root_ptr(mas, entry, content, overwrite);
/* At this point, we are at the leaf node that needs to be altered. */
/* Calculate needed space */
slot = mas_get_slot(mas);
+ slot_cnt = mt_slot_count(mas->node);
content = mas_get_rcu_slot(mas, slot);
if (!overwrite && ((mas->last > r_max) || content)) {
mas_set_err(mas, -EEXIST);
/* Expand store of NULL, if necessary */
if (!entry) {
- if (!content)
+ if (!content) {
mas->index = r_min;
- if (!mas_get_rcu_slot(mas, slot - 1)) {
+ if (mas->last < r_max)
+ mas->last = r_max;
+ }
+ if (slot && !mas_get_rcu_slot(mas, slot - 1)) {
if (slot > 1)
mas->index = mte_get_pivot(mas->node, slot - 2) + 1;
else
mas->index = mas->min;
+ r_min = mas->index;
}
- if ((r_max != mas->max) && !mas_get_rcu_slot(mas, slot + 1))
+ if ((r_max != mas->max) && !mas_get_rcu_slot(mas, slot + 1)) {
+ printk("rcu slot %u is %p\n", slot+1, mas_get_rcu_slot(mas, slot+1));
mas->last = mas_get_safe_pivot(mas, slot + 1);
+ if (!mas->last)
+ mas->last = mas->max;
+ r_max = mas->last;
+ }
mas_set_slot(mas, --slot);
- mas_node_walk(mas, mte_node_type(mas->node), &r_min, &r_max);
printk("store is now %lu-%lu at slot %u\n", mas->index,
mas->last, mas_get_slot(mas));
}
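+ /* Skip any slots entirely covered by the new range; they are not
+ * copied into the big node. */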
do {
printk("Skip %u\n", slot);
r_max = mas_get_safe_pivot(mas, ++slot);
- } while ((r_max <= mas->last) && (slot < mt_slot_count(mas->node)));
+ } while ((r_max <= mas->last) && (slot < slot_cnt));
+
- new_end = mas_mab_cp(mas, slot, mt_slot_count(mas->node), &b_node,
- new_end);
+ new_end = mas_mab_cp(mas, slot, slot_cnt, &b_node, new_end);
+ // Count the node as full if it has not already been counted.
+ if (new_end >= slot_cnt && end < slot_cnt)
+ mas_cnt_full(mas);
+
+ printk("End %u new_end %u slot_cnt %u\n", end, new_end, slot_cnt);
mas_commit_b_node(mas, &b_node, new_end);
if (mas_is_err(mas))
ret = 3;
if (ret > 2)
return NULL;
+ mt_dump(mas->tree);
return content;
}
void *mas_store(struct ma_state *mas, void *entry)
printk("Start: erase %lu\n", mas->index);
entry = mas_range_load(mas, &r_min, &r_max, true);
+retry:
printk("Erase %lu-%lu\n", r_min, r_max);
- if (entry) {
- mas->index = r_min;
- mas->last = r_max;
- _mas_store(mas, NULL, true);
- }
- return entry;
+ mas->index = r_min;
+ mas->last = r_max;
+ _mas_store(mas, NULL, true);
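+ /* mas_nomem() allocates on -ENOMEM and returns true when the store
+ * should be retried. */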
+ if (mas_nomem(mas, GFP_KERNEL))
+ goto retry;
+ return entry;
}