} while (i--);
r_mas->node = MAS_NONE;
}
+
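+/* Private
+ *
+ * mab_calc_split() - Calculate the split point of a big node.
+ * @b_node - the maple_big_node holding the data to be split.
+ * @size - the number of occupied slots in @b_node.
+ * @slot_cnt - the slot count of the destination node type.
+ * @min - the minimum index of the node being split.
+ *
+ * Starts from an even split and moves the split right so the left node
+ * spans at least @slot_cnt indices, then nudges it off a NULL ending slot.
+ */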
+static inline int mab_calc_split(struct maple_big_node *b_node, int size,
+ unsigned char slot_cnt, unsigned long min)
+{
+ int split = size / 2; // Assume equal split.
+
+ printk("Guessing leaf split of %u (size is %d)\n", split, size);
+ /* Avoid ending a node in NULL and avoid having a range less than the
+ * slot count
+ */
+ while ((b_node->pivot[split] - min) < slot_cnt) {
+ printk("Skipping ahead due to min span\n");
+ split++;
+ }
+
+ if (!b_node->slot[split]) {
+ if (split < slot_cnt - 1)
+ split++;
+ else
+ split--;
+ }
+ printk("Setting split (leaf split) to %u\n", split);
+ return split;
+}
+
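+/* Private
+ *
+ * mas_mab_cp() - Copy slots [@mas_start, @mas_end) from the node in @mas
+ * into @b_node, starting at @mab_start.  The pivot beyond the node's
+ * pivot count is taken from mas->max; gaps are copied for non-leaf nodes
+ * in alloc trees.
+ *
+ * Returns the next free index in @b_node, or the index of the mas->max
+ * pivot when the end of the node was reached.
+ */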
+static inline int mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
+ unsigned char mas_end, struct maple_big_node *b_node,
+ unsigned char mab_start)
+{
+ int i, j;
+
+ for (i = mas_start, j = mab_start; i < mas_end; i++, j++) {
+ b_node->slot[j] = mas_get_rcu_slot(mas, i);
+ if (i < mt_pivot_count(mas->node))
+ b_node->pivot[j] = mas_get_safe_pivot(mas, i);
+ else
+ b_node->pivot[j] = mas->max;
+
+ printk("Set %d slot %p => piv %lu\n", j,
+ b_node->slot[j], b_node->pivot[j]);
+ if (i >= mt_pivot_count(mas->node))
+ break;
+ if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree))
+ b_node->gap[j] = mte_get_gap(mas->node, i);
+ }
+ return j;
+}
+
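+/* Private
+ *
+ * mab_mas_cp() - Copy slots [@mab_start, @mab_end) from @b_node into the
+ * node in @mas, starting at @mas_start.  Copying stops early at a zero
+ * pivot (node end).  mas->max is set to the last pivot copied.
+ *
+ * Returns the index of the first @b_node slot that was not copied.
+ */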
+static inline int mab_mas_cp(struct maple_big_node *b_node,
+ unsigned char mab_start, unsigned char mab_end,
+ struct ma_state *mas, unsigned char mas_start)
+{
+ int i, j;
+
+ for (i = mab_start, j = mas_start; i < mab_end; i++, j++) {
+ if (j && !b_node->pivot[i])
+ break;
+
+ mas->max = b_node->pivot[i];
+ mte_set_rcu_slot(mas->node, j, b_node->slot[i]);
+ if (j < mt_pivot_count(mas->node))
+ mte_set_pivot(mas->node, j, b_node->pivot[i]);
+
+ if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree))
+ mte_set_gap(mas->node, j, b_node->gap[i]);
+ }
+
+ return i;
+}
+
static inline int mas_split_leaf(struct ma_state *mas, struct ma_state *l_mas,
struct ma_state *r_mas, void *entry,
unsigned char new_end)
unsigned long piv;
void *contents;
int ret = 0;
- int i = 0, j = 0, end;
+ int i = 0, end;
b_node.slot[slot] = NULL;
printk("Slot %u put in %lu-%lu\n", slot, mas->index, mas->last);
- do {
- b_node.slot[i] = mas_get_rcu_slot(mas, i);
- b_node.pivot[i] = mas_get_safe_pivot(mas, i);
- printk("Set %u slot %p => piv %lu\n", i, b_node.slot[i], b_node.pivot[i]);
- } while (++i < slot);
+ i = mas_mab_cp(mas, 0, slot, &b_node, 0);
piv = b_node.pivot[i];
contents = b_node.slot[i];
b_node.slot[i++] = contents;
}
- j = i;
- printk("j = %u\n", j);
- for (i = slot + 1; i < slot_cnt; i++, j++) {
- b_node.slot[j] = mas_get_rcu_slot(mas, i);
- if (i < mt_pivot_count(mas->node))
- b_node.pivot[j] = mas_get_safe_pivot(mas, i);
- else {
- b_node.pivot[j] = mas->max;
- break;
- }
- printk("Set %u slot %p => piv %lu\n", j, b_node.slot[j], b_node.pivot[j]);
- }
- printk("Zero %u\n", j);
- b_node.slot[j] = NULL;
- b_node.pivot[j] = 0;
+ i = mas_mab_cp(mas, slot + 1, slot_cnt, &b_node, i);
+ printk("i = %u\n", i);
+ printk("Zero %u\n", i);
+ b_node.slot[i] = NULL;
+ b_node.pivot[i--] = 0;
end = i;
- ret = i / 2;
- printk("Guessing leaf split of %u (i is %d)\n", ret, i);
- /* Avoid ending a node in NULL and avoid having a range less than the
- * slot count
- */
- while ((b_node.pivot[ret] - mas->min) < slot_cnt) {
- printk("Skipping ahead due to min span\n");
- ret++;
- }
-
- if (!b_node.slot[ret]) {
- if (ret < slot_cnt - 1)
- ret++;
- else
- ret--;
- }
- printk("Setting ret (leaf split) to %u\n", ret);
+ ret = mab_calc_split(&b_node, i, slot_cnt, mas->min);
// Copy values up to ret to left.
- for (i = 0; i <= ret; i++) {
-// printk("left i %u\n", i);
- mte_set_rcu_slot(l_mas->node, i, b_node.slot[i]);
- piv = b_node.pivot[i];
- if (i >= slot_cnt - 1) {
- ret = i - 1;
- break;
- }
- mte_set_pivot(l_mas->node, i, piv);
- }
- printk("l_max is %lu\n", piv);
+ i = mab_mas_cp(&b_node, 0, ret + 1, l_mas, 0);
+ printk("l_max is %lu\n", l_mas->max);
mas_set_slot(l_mas, mte_parent_slot(mas->node));
- l_mas->max = piv;
printk("r_min is %lu\n", piv + 1);
r_mas->min = piv + 1;
- j = 0;
// In the case of empty.
- mte_set_pivot(r_mas->node, j, r_mas->max);
+ mte_set_pivot(r_mas->node, 0, r_mas->max);
printk("We are missing one here.. end %u ret %u\n", end, ret);
- for (i = ret + 2; i <= end; i++, j++) {
- mte_set_rcu_slot(r_mas->node, j, b_node.slot[i]);
- mte_set_pivot(r_mas->node, j, b_node.pivot[i]);
- printk("right j %u i %u piv %lu\n", j, i, b_node.pivot[i]);
- }
+ mab_mas_cp(&b_node, ret + 2, end + 1, r_mas, 0);
mas_set_slot(r_mas, mas_get_slot(l_mas) + 1);
- printk("Setting slots to %d\n", mas_get_slot(l_mas));
- printk("%p Last pivot is %u %lu\n", mas_mn(r_mas), j, b_node.pivot[i-1]);
return ret;
}
new_end);
} else {
unsigned char slot_cnt = mt_slot_count(mas->node); /* should be full. */
- unsigned char p_slot, j;
+ unsigned char p_slot;
/* TODO: Check rebalancing to avoid continuing walking
* up if there is space in a neighbour.
*/
/* internal node split - cut in half for now. */
- i = 0;
- do {
- printk("copy %u left\n", i);
- mte_set_rcu_slot(l_mas.node, i,
- big_node.slot[i]);
- mte_set_pivot(l_mas.node, i,
- big_node.pivot[i]);
- if (mt_is_alloc(mas->tree))
- mte_set_gap(l_mas.node, i,
- big_node.gap[i]);
- } while (++i < slot_cnt / 2);
-
+ i = mab_mas_cp(&big_node, 0, slot_cnt/2, &l_mas, 0);
l_mas.max = big_node.pivot[i];
r_mas.min = l_mas.max + 1;
-
- for (j = 0; i < slot_cnt; i++, j++) {
- printk("copy %u right\n", i);
- mte_set_rcu_slot(r_mas.node, j,
- big_node.slot[i]);
- mte_set_pivot(r_mas.node, j,
- big_node.pivot[i]);
- if (mt_is_alloc(mas->tree))
- mte_set_gap(r_mas.node, j,
- big_node.gap[i]);
- }
-
+ mab_mas_cp(&big_node, i, slot_cnt + 1, &r_mas, 0);
p_slot = mas_get_slot(&prev_l_mas);
if (p_slot < split) {
// prev left side is on left.
if (!mte_is_root(parent.node)) {
printk("Ascend parent %p\n", mas_mn(&parent));
mas_ascend(&parent); /* Go up a level */
- do {
- if (!big_node.pivot[i])
- break;
- printk("Copy parent slot %u\n", i);
- big_node.slot[i] = mas_get_rcu_slot(&parent, i);
- big_node.pivot[i] =
- mas_get_safe_pivot(&parent, i);
- if (mt_is_alloc(mas->tree))
- big_node.gap[i] =
- mte_get_gap(parent.node, i);
- } while (++i < mas_get_slot(&l_mas));
+ i = mas_mab_cp(&parent, 0, mas_get_slot(&l_mas),
+ &big_node, 0);
}
split = i;
big_node.slot[i + 1] = NULL;
if (!mte_is_root(mas->node)) {
mas_ascend(mas);
- for (++i; i < mt_slot_count(parent.node); i++) {
- printk("post-insert Copy parent slot %u\n", i);
- big_node.pivot[i + 1] =
- mas_get_safe_pivot(&parent, i);
- if (!big_node.pivot[i + 1])
- break;
- big_node.slot[i + 1] =
- mas_get_rcu_slot(&parent, i);
-
- if (mt_is_alloc(mas->tree))
- big_node.gap[i + 1] =
- mte_get_gap(parent.node, i);
- }
- printk("Zero %u\n", i + 1);
+ i++;
+ i = mas_mab_cp(&parent, i, mt_slot_count(parent.node),
+ &big_node, i + 1);
+ printk("Zero %u\n", i);
big_node.pivot[i] = 0;
big_node.slot[i] = NULL;
}
return new_end;
}
-static inline int mas_spanning_store(struct ma_state *mas, void *entry)
-{
- printk("Not implemented to store %lu-%lu, span starts at %p\n",
- mas->index, mas->last, mte_to_node(mas->span_enode));
- BUG_ON(1);
-
- if (mt_is_alloc(mas->tree))
- mas_update_gap(mas, false);
-
-}
static inline int mas_commit_store(struct ma_state *mas, unsigned long r_min,
unsigned long r_max, unsigned char new_end,
void *entry, bool active)
* @entry - the entry that is going to be written.
*
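+ * Return: true if this is a spanning write, false otherwise.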
*/
-static inline void mas_is_span_wr(struct ma_state *mas, unsigned long piv,
+static inline bool mas_is_span_wr(struct ma_state *mas, unsigned long piv,
void *entry)
{
if (mas->span_enode) // Already a spanning store.
- return;
+ return true;
if (piv > mas->last) // Contained in this pivot
- return;
+ return false;
if (!mte_is_leaf(mas->node)) {
if ( mas->last < piv) // Fits in the slot.
- return;
+ return false;
if (entry && piv == mas->last) // Writes to the end of the child node, but has a value.
- return;
+ return false;
} else {
if (mas->last < mas->max) // Fits in the node, but may span slots.
- return;
+ return false;
if (entry && mas->last == mas->max) // Writes to the end of the node but not null.
- return;
+ return false;
}
mas->span_enode = mas->node;
mas->last_cnt = 0;
+ return true;
}
static inline bool mas_node_walk(struct ma_state *mas, enum maple_type type,
* @entry - the value that will be written.
*/
static inline bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
- unsigned long *range_max, void *entry, bool overwrite)
+ unsigned long *range_max, void *entry)
{
enum maple_type type;
struct maple_enode *next;
if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
return false;
- mas_is_span_wr(mas, *range_max, entry);
+ if (mas_is_span_wr(mas, *range_max, entry))
+ return ma_is_leaf(type);
if (ma_is_leaf(type))
return true;
}
return ret;
}
+
+/* Private
+ *
+ * mas_move_slot() - Remove the contents of a node by shifting later slots over.
+ * @mas - the ma_state, note the node cannot be a leaf due to the gap use.
+ * @slot - Slot that contains the contents to be removed.
+ * @shift - Number of slots the contents are shifted by.
+ *
+ * Note: Not active node safe, not leaf node safe.
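+ *
+ * For example, mas_move_slot(mas, 1, 2) copies slot 3 over slot 1,
+ * slot 4 over slot 2, and so on, then clears the now-unused tail of the
+ * node.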
+ */
+static inline void mas_move_slot(struct ma_state *mas, unsigned char slot,
+ unsigned char shift)
+{
+ unsigned char end = mas_data_end(mas);
+ unsigned char old_slot = slot + shift;
+ unsigned long piv;
+
+ for (; old_slot < end; old_slot++, slot++) {
+ if (old_slot < mt_pivot_count(mas->node))
+ piv = mte_get_pivot(mas->node, old_slot);
+ else
+ piv = mas->max;
+
+ mte_set_pivot(mas->node, slot, piv);
+ mte_set_rcu_slot(mas->node, slot,
+ mte_get_rcu_slot(mas->node, old_slot, mas->tree));
+ if (mt_is_alloc(mas->tree))
+ mte_set_gap(mas->node, slot,
+ mte_get_gap(mas->node, old_slot));
+ }
+
+ // Zero the rest.
+ for(; slot < end; slot++) {
+ if (slot < mt_pivot_count(mas->node))
+ mte_set_pivot(mas->node, slot, 0);
+ mte_set_rcu_slot(mas->node, slot, NULL);
+ if (mt_is_alloc(mas->tree)) {
+ mte_set_gap(mas->node, slot, 0);
+ }
+ }
+}
+
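+/* Private
+ *
+ * mas_rebalance_coalesce() - Copy the contents of the right node into the
+ * free slots at the end of the left node.  For leaves in alloc trees, the
+ * larger of the two parent gaps is written to the left parent slot.
+ */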
+static inline void mas_rebalance_coalesce(struct ma_state *l_mas,
+ struct ma_state *p_l_mas,
+ unsigned char l_end,
+ struct ma_state *r_mas,
+ struct ma_state *p_r_mas,
+ unsigned char r_end)
+{
+ unsigned char slot;
+
+ // Copy data from right to left.
+ for (slot = 0; slot < r_end; slot++) {
+ mte_set_pivot(l_mas->node, l_end + slot,
+ mte_get_pivot(r_mas->node, slot));
+ mte_set_rcu_slot(l_mas->node, l_end + slot,
+ mas_get_rcu_slot(r_mas, slot));
+ if (!mte_is_leaf(l_mas->node) && mt_is_alloc(l_mas->tree))
+ mte_set_gap(l_mas->node, l_end + slot,
+ mte_get_gap(r_mas->node, slot));
+ }
+ if (mte_is_leaf(l_mas->node) && mt_is_alloc(l_mas->tree)) {
+ // Leaf nodes may need to update the gap.
+ unsigned long gap = mte_get_gap(p_r_mas->node,
+ mte_parent_slot(r_mas->node));
+ if (gap > mte_get_gap(p_l_mas->node,
+ mte_parent_slot(l_mas->node)))
+ mte_set_gap(p_l_mas->node,
+ mte_parent_slot(l_mas->node), gap);
+ }
+}
+
+static inline unsigned long mas_next_node(struct ma_state *mas,
+ unsigned long max);
+/* Private
+ * mas_inactive_rebalance() - Rebalance l_mas and r_mas to have the necessary
+ * number of occupied slots.
+ *
+ * @mas - the maple state used for allocations.
+ * @l_mas - the left maple state
+ * @p_l_mas - the left parent maple state.
+ * @r_mas - the right maple state
+ * @p_r_mas - the right parent maple state.
+ *
+ * Note: r_mas may be completely consumed in this operation.
+ *
+ */
+static inline void mas_inactive_rebalance(struct ma_state *mas,
+ struct ma_state *l_mas,
+ struct ma_state *p_l_mas,
+ struct ma_state *r_mas,
+ struct ma_state *p_r_mas)
+{
+ unsigned char l_end = mas_data_end(l_mas);
+ unsigned char r_end = mas_data_end(r_mas);
+ struct maple_big_node b_node;
+ unsigned char b = 0;
+ unsigned long min;
+ unsigned char slot_cnt, slot, split;
+ bool overwrite_left = false;
+ struct ma_state *target = l_mas;
+
+ MA_STATE(next, r_mas->tree, r_mas->index, r_mas->last);
+
+ if (l_end <= mt_min_slot_cnt(l_mas->node)) {
+ mas_mab_cp(l_mas, 0, l_end, &b_node, b);
+ b = l_end;
+ overwrite_left = true;
+ }
+
+ if (overwrite_left || (r_end <= mt_min_slot_cnt(r_mas->node))) {
+ mas_mab_cp(r_mas, 0, r_end, &b_node, b);
+ b += r_end;
+ }
+
+ if (!b)
+ return;
+
+ if ((overwrite_left && b <= mt_min_slot_cnt(l_mas->node)) ||
+ (!overwrite_left && b <= mt_min_slot_cnt(r_mas->node))) {
+ /* Left and Right don't have enough for a node, balance with the
+ * node to the right - if it exists.
+ */
+ mas_dup_state(&next, r_mas);
+ mas_next_node(&next, ULONG_MAX);
+ if (!mas_is_none(&next)) {
+ slot = mas_data_end(&next);
+ mas_mab_cp(&next, 0, slot, &b_node, b);
+ b += slot;
+ }
+ }
+
+ /* Set up to copy data out of the big node by calculating the split and
+ * setting the target.
+ */
+ split = b / 2;
+ if (overwrite_left) {
+ slot_cnt = mt_slot_count(l_mas->node);
+ min = l_mas->min;
+ } else {
+ slot_cnt = mt_slot_count(r_mas->node);
+ min = r_mas->min;
+ target = r_mas;
+ }
+
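+ /* Leaf data must not end in a NULL, so recalculate the split for leaves. */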
+ if (mte_is_leaf(l_mas->node))
+ split = mab_calc_split(&b_node, b, slot_cnt, min);
+
+ /* Now, redistribute the data in b_node across l_mas->node, r_mas->node,
+ * and a new node (if necessary).
+ */
+ /* First, put data into target */
+ slot = mab_mas_cp(&b_node, 0, split + 1, target, 0);
+ /* Then switch targets (either to a new node or to r_mas). */
+ if (overwrite_left) {
+ target = r_mas;
+ } else {
+ target = &next;
+ target->node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ mte_node_type(target->node));
+ }
+
+ /* Put the remainder of the data into the next target */
+ mab_mas_cp(&b_node, slot, b + 1, target, 0);
+
+ /* Rewrite parent pointer(s) and potentially create a new parent for
+ * ma_state next.
+ */
+
+ if (!mas_is_none(&next) && !mas_is_start(&next)) {
+ //FIXME: Create new parent and put it in the tree.
+ }
+
+ /* Fix up r_mas slot and p_r_mas */
+ if (overwrite_left) {
+ unsigned char r = mas_get_slot(r_mas) + l_end;
+ // update parent.
+ mas_dup_state(p_r_mas, p_l_mas);
+ mte_set_parent(r_mas->node, p_r_mas->node, r);
+ // Update the slot.
+ mas_set_slot(r_mas, r);
+ }
+}
+
+static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min,
+ unsigned long *range_max);
+/* Private
+ *
+ * mas_spanning_store() - Create a subtree with the store operation completed
+ * and new nodes where necessary, then place the sub-tree in the actual tree.
+ * Note that mas is expected to point to the node which caused the store to
+ * span.
+ * @mas - the maple state
+ * @entry - the entry to store; a NULL entry may be expanded over
+ * neighbouring NULL slots.
+ *
+ */
+static inline int mas_spanning_store(struct ma_state *mas, void *entry)
+{
+ unsigned long range_min, range_max;
+ struct maple_enode *r_node;
+ unsigned char slot, r_slot, l_slot = mas_get_slot(mas);
+ bool store_left = (entry ? true : false);
+ enum maple_type type;
+
+ printk("Not implemented to store %lu-%lu, span starts at %p\n",
+ mas->index, mas->last, mte_to_node(mas->span_enode));
+
+ BUG_ON(1);
+
+ // FIXME: Allocations..
+ mas_node_cnt(mas, 1 + 20 * 2);
+ if (mas_is_err(mas))
+ return 0;
+
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last); // right
+ MA_STATE(p_r_mas, mas->tree, mas->index, mas->last); // right parent
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last); // left
+ MA_STATE(p_l_mas, mas->tree, mas->index, mas->last); // left parent
+
+ mas_dup_state(&p_l_mas, mas);
+ if (!mte_is_root(mas->node)) {
+ mas_ascend(&p_l_mas);
+ mas_dup_state(&p_r_mas, &p_l_mas);
+ }
+
+ mas_dup_state(&l_mas, mas);
+ __mas_walk(&l_mas, &range_min, &range_max);
+ l_slot = mas_get_slot(&l_mas);
+
+ mas_dup_state(&r_mas, mas);
+ // range_max is used below, so keep r_mas walk after l_mas.
+ __mas_walk(&r_mas, &range_min, &range_max);
+ r_slot = mas_get_slot(&r_mas);
+ /* No entry means storing right, otherwise check for full nodes and swap
+ * to right if the left is full and the right is not.
+ */
+ if (store_left) {
+ if (mas_data_end(&l_mas) == mt_slot_count(l_mas.node) - 1) {
+ if (mas_data_end(&r_mas) < mt_slot_count(r_mas.node) - 1)
+ store_left = false;
+ }
+ }
+
+ /* Expand store of NULL, if necessary */
+ if (!entry) {
+ /* Check if there is a null in the previous slot on the left */
+ if (l_slot && (!mas_get_rcu_slot(&l_mas, l_slot - 1)))
+ mas->index = mte_get_pivot(l_mas.node,
+ l_slot - 1) + 1;
+ else if (!l_slot)
+ printk("NULL could be in the prev node?\n");
+
+ /* Check if there is a null in the next slot on the right */
+ if ((range_max != r_mas.max) &&
+ (!mas_get_rcu_slot(&r_mas, r_slot + 1)))
+ mas->last = mas_get_safe_pivot(&r_mas, r_slot + 1);
+ else if (range_max == r_mas.max)
+ printk("NULL could be in the next node?\n");
+ }
+ /* FIXME: What about detecting a split here? */
+
+ // if the parent is root
+ // check if a level can be removed.
+ // FIXME: Move this somewhere above here.
+ if (!mte_is_root(mas->node) && mte_is_root(p_l_mas.node)) {
+ printk("potentially drop a level?\n");
+// if (!l_slot && r_slot == mas_data_end(parent))
+// printk("Drop a level\n");
+ }
+
+ mas_dup_state(&r_mas, mas);
+ mas_dup_state(&l_mas, mas);
+ mas_dup_state(&p_r_mas, mas);
+ mas_dup_state(&p_l_mas, mas);
+
+ // Set up the right side maple state to point to the correct slot
+ r_mas.index = r_mas.last; // Just point to the right.
+ mas_node_walk(&r_mas, type, &range_min, &range_max);
+
+ // Set up the left side maple state to point to just the left.
+ l_mas.last = mas->index;
+
+ // Make a new node, etc.
+ type = mte_node_type(mas->node);
+ l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
+ r_mas.node = mas->node;
+ slot = l_slot; // Start filling the right at this position.
+ // Set the middle pivot of the ancestor.
+ mte_set_pivot(l_mas.node, l_slot,
+ (store_left ? mas->last : mas->index - 1));
+ mas_mn(&l_mas)->parent = mas_mn(mas)->parent; // Copy the parent.
+ mas->node = l_mas.node; // Save for later.
+
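+ /* Walk down the left and right boundaries of the spanning store,
+ * copying the retained slots of each level into the new nodes, then
+ * rebalance before descending to the next level.
+ */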
+ do {
+ // Copy right to new node.
+ for (; r_slot < mt_slot_count(r_node); r_slot++) {
+ unsigned long piv =
+ mas_get_safe_pivot(&p_r_mas, r_slot);
+ if (!piv) // Node end.
+ break;
+
+ mte_set_rcu_slot(r_node, ++slot,
+ mas_get_rcu_slot(&p_r_mas, r_slot));
+ if (r_slot < mt_pivot_count(r_node))
+ mte_set_pivot(r_node, slot, piv);
+
+ if (mt_is_alloc(mas->tree))
+ mte_set_gap(r_node, slot,
+ mte_get_gap(p_r_mas.node, r_slot));
+ }
+
+ // Copy left to new node.
+ type = mte_node_type(l_mas.node);
+ for (slot = 0; slot <= l_slot; slot++) {
+ mte_set_rcu_slot(l_mas.node, slot,
+ mas_get_rcu_slot(&p_l_mas, slot));
+ mte_set_pivot(l_mas.node, slot,
+ mte_get_pivot(p_l_mas.node, slot));
+ if (mt_is_alloc(mas->tree) && !ma_is_leaf(type))
+ mte_set_gap(l_mas.node, slot,
+ mte_get_gap(mas->node, slot));
+ }
+
+ // Rebalance if necessary.
+ // FIXME: Rebalance inactive needed, also set the slot correctly
+ // for index.
+ // FIXME: Mark modified nodes dead, set the parents of the new
+ // nodes only.
+
+ // Rebalance left + right + node to right of right
+ mas_inactive_rebalance(mas, &l_mas, &p_l_mas, &r_mas, &p_r_mas);
+ l_slot = mas_get_slot(&l_mas);
+ r_slot = mas_get_slot(&r_mas);
+
+ // Check parent for rebalance or removal?
+ //
+ /* FIXME: Rebalance across gap means changing pivots up the tree
+ * to the ancestor, r_mas may now fall on the same side as
+ * l_mas, what does that look like?
+ if (unlikely(0)
+ mas_spanning_rebalance(&l_mas, &r_mas);
+ */
+
+ if (ma_is_leaf(type)) // Don't descend if this is a leaf.
+ break;
+
+ // Set up for a new run.
+ mas_dup_state(&p_r_mas, &r_mas);
+ mas_dup_state(&p_l_mas, &l_mas);
+ mas_descend(&l_mas);
+ mas_descend(&r_mas);
+ mas_node_walk(&l_mas, mte_node_type(l_mas.node), &range_min,
+ &range_max);
+ type = mte_node_type(r_mas.node);
+ mas_node_walk(&r_mas, type, &range_min, &range_max);
+ // Create next nodes.
+ l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ mte_node_type(l_mas.node));
+ mte_set_parent(l_mas.node, p_l_mas.node, l_slot);
+ mte_set_rcu_slot(p_l_mas.node, l_slot, l_mas.node);
+
+ r_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ type);
+ mte_set_parent(r_mas.node, p_r_mas.node, r_slot);
+ mte_set_rcu_slot(p_r_mas.node, r_slot, r_mas.node);
+
+ l_slot = mas_get_slot(&l_mas);
+ r_slot = mas_get_slot(&r_mas);
+ slot = 0;
+ } while (1);
+
+ // mark modified node(s?) dead.
+ // NOTE: Rebalanced nodes not in this sub-tree are already marked dead.
+
+ // insert new sub-tree
+ //
+ //do
+ // ascend
+ // adopt children of nodes that don't have the correct parent
+ // mas_update_gap and friends (don't recursively do this)
+ // while (!ancestor)
+ //
+
+ if (mt_is_alloc(mas->tree))
+ mas_update_gap(mas, false);
+
+ return 1;
+}
static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)
{
unsigned long r_max, r_min;
else if (ret)
return content;
- if (!mas_wr_walk(mas, &r_min, &r_max, entry, overwrite)) {
- /* Not a leaf = broken tree. */
+ if (!mas_wr_walk(mas, &r_min, &r_max, entry) &&
+ !mas->span_enode) {
+ /* Not a leaf and not a spanning write = broken tree. */
// FIXME, rebuild?
printk("%s %d\n", __func__, __LINE__);
return NULL;
}
static inline bool _mas_walk(struct ma_state *mas);
-static inline bool mas_rebalance_node(struct ma_state *mas);
-static inline unsigned long mas_next_node(struct ma_state *mas,
- unsigned long max);
/* Private
*
do {
pivot = mas_get_safe_pivot(mas, slot);
if (pivot < limit)
- goto no_entry;
+ return false;
entry = mas_get_rcu_slot(mas, slot);
if (!mt_is_empty(entry))
- goto found;
+ break;
} while (slot--);
-no_entry:
- return false;
-
-found:
*max = pivot;
mas_set_slot(mas, slot);
return true;
return entry;
}
- if (!mas_wr_walk(mas, &r_max, &r_min, entry, true)) {
+ if (!mas_wr_walk(mas, &r_max, &r_min, entry)) {
+ if (mas->span_enode) {
+ // Walk down to the node to get the value
+ // entry = mas_load maybe?
+
+ // Need to span_store a null?
+ mas_spanning_store(mas, NULL);
+ return entry;
+ }
/* Not a leaf = broken tree. */
// FIXME, rebuild?
return 0;
slot = mas_get_slot(mas);
entry = mas_get_rcu_slot(mas, slot);
- if (mas->span_enode) // Could be writing NULL to the end of a node.
- mas_spanning_store(mas, NULL);
-
if (!entry)
return entry;