[maple_range_16] = MAPLE_RANGE16_SLOTS / 2,
[maple_range_32] = MAPLE_RANGE32_SLOTS / 2,
[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
- [maple_arange_64] = (MAPLE_ARANGE64_SLOTS + 1) / 2,
+ [maple_arange_64] = (MAPLE_ARANGE64_SLOTS) / 2,
};
#define mt_min_slot_cnt(x) mt_min_slots[mte_node_type(x)]
break;
case maple_range_64:
case maple_leaf_64:
- BUG_ON(slot >= 8);
+ BUG_ON(slot >= MAPLE_RANGE64_SLOTS);
rcu_assign_pointer(mn->mr64.slot[slot], val);
break;
case maple_arange_64:
- BUG_ON(slot >= 5);
+ BUG_ON(slot >= MAPLE_ARANGE64_SLOTS);
rcu_assign_pointer(mn->ma64.slot[slot], val);
break;
}
r_mas->node = MAS_NONE;
}
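+/*
+ * mab_shift_right() - Shift the data in @b_node right by @shift slots,
+ * walking from @b_end down to slot 0.  Gap data is only moved when @alloc
+ * is set, since only allocation trees track gaps.
+ */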
+static inline void mab_shift_right(struct maple_big_node *b_node,
+ unsigned char b_end,
+ unsigned char shift, bool alloc)
+{
+ do {
+ printk("move %u to %u\n", b_end, b_end + shift);
+ b_node->pivot[b_end + shift] = b_node->pivot[b_end];
+ b_node->slot[b_end + shift] = b_node->slot[b_end];
+ printk("piv %u is %lu\n", b_end + shift, b_node->pivot[b_end + shift]);
+ if (alloc)
+ b_node->gap[b_end + shift] = b_node->gap[b_end];
+ } while (b_end--);
+}
+
static inline int mab_calc_split(struct maple_big_node *b_node, int size,
unsigned char slot_cnt, unsigned long min)
{
return split;
}
+/*
+ * mas_mab_cp() - Copy slots and pivots from the node in @mas, starting at
+ * @mas_start, into @b_node starting at @mab_start.
+ * Note: @mas_end is exclusive.
+ */
static inline int mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
unsigned char mas_end, struct maple_big_node *b_node,
unsigned char mab_start)
printk("cp mas %u\n", i);
b_node->slot[j] = mas_get_rcu_slot(mas, i);
if (i < mt_pivot_count(mas->node)) {
+ printk("get safe pivot for %u\n", i);
b_node->pivot[j] = mas_get_safe_pivot(mas, i);
- printk("Set %u slot %p => piv %lu\n", j,
+ printk("Set b_node %u slot %p => piv %lu\n", j,
b_node->slot[j], b_node->pivot[j]);
} else {
b_node->pivot[j] = mas->max;
- printk("Set %u slot %p => piv %lu\n", j,
+ printk("Set b_node %u slot %p => piv %lu\n", j,
b_node->slot[j], b_node->pivot[j]);
break;
}
struct ma_state *mas, unsigned char mas_start)
{
int i, j;
- printk("%s: cp %u - %u\n", __func__, mab_start, mab_end - 1);
+ printk("%s: cp %u - %u to mas\n", __func__, mab_start, mab_end - 1);
for (i = mab_start, j = mas_start; i < mab_end; i++, j++) {
- if(j && !b_node->pivot[i])
+ if (j && !b_node->pivot[i]) {
+ printk("no pivot at %u\n", i);
break;
+ }
mas->max = b_node->pivot[i];
mte_set_rcu_slot(mas->node, j, b_node->slot[i]);
return i;
}
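+/* Forward declarations of the sibling walkers used by the rebalance code. */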
+static inline unsigned long mas_next_node(struct ma_state *mas,
+ unsigned long max);
+static inline void mas_prev_node(struct ma_state *mas, unsigned long limit);
+
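+/*
+ * mas_commit_rebalance() - Commit a store that leaves @mas->node (and
+ * possibly its ancestors) below the minimum occupancy.  Each pass pulls a
+ * sibling's contents into @b_node, splits the result back into one or two
+ * nodes, then ascends a level until a node has enough data or the root is
+ * reached.
+ */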
+static inline int mas_commit_rebalance(struct ma_state *mas,
+ struct maple_big_node *b_node,
+ unsigned char new_end)
+{
+ unsigned char p_end, end = 0, slot_cnt, split, b;
+ unsigned char l_child = 0, r_child = 0;
+ char empty = -mas->full_cnt;
+ int height = 0;
+ struct maple_enode *left, *right;
+ unsigned long l_piv;
+ printk("Need to rebalance %d levels\n", empty);
+
+ MA_STATE(orig_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(parent, mas->tree, mas->index, mas->last);
+ MA_STATE(cp, mas->tree, mas->index, mas->last);
+
+ mas_dup_state(&orig_mas, mas);
+ mas_dup_state(&parent, mas);
+ mas_ascend(&parent);
+
+ mas_node_cnt(mas, 1 + empty * 2);
+ if (mas_is_err(mas))
+ return 0;
+
+ while (height++ < empty) {
+ unsigned char p_slot = mte_parent_slot(mas->node);
+
+ p_end = mas_data_end(&parent);
+ slot_cnt = mt_slot_count(mas->node);
+
+ printk("\nLooking to rebalance node %p\n", mas_mn(mas));
+ printk("new_end %u p_slot %u p_end %u\n", new_end, p_slot, p_end);
+ if (height >= empty)
+ printk("Final entry.\n");
+
+ mas_dup_state(&cp, mas);
+ mas_set_slot(&cp, p_slot);
+ printk("p_slot %u p_end %u\n", p_slot, p_end);
+ if (p_slot < p_end) {
+ printk("Rebalance from next\n");
+ mas_next_node(&cp, ULONG_MAX);
+ end = mas_data_end(&cp);
+ mas_mab_cp(&cp, 0, end + 1, b_node, new_end + 1);
+ end += new_end + 1;
+ printk("End is now %u\n", end);
+ } else {
+ bool alloc = false;
+
+ printk("Rebalance from prev\n");
+ if (mt_is_alloc(mas->tree) && !mte_is_leaf(mas->node))
+ alloc = true;
+ mas_prev_node(&cp, 0);
+ printk("prev node is %p\n", cp.node);
+ end = mas_data_end(&cp);
+ printk("prev end %u and new_end %u\n", end, new_end);
+ mab_shift_right(b_node, new_end + 1, end + 1, alloc);
+ mas_mab_cp(&cp, 0, end + 1, b_node, 0);
+ end += new_end + 1;
+ p_slot--;
+ }
+
+ mas_dup_state(&cp, mas);
+ mas_set_slot(&cp, p_slot);
+ /* @end will now have the size of b_node. This can be used to
+ * determine if a right node is needed.
+ * @p_slot is also pointing to the left node entry in the parent.
+ */
+ left = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ mte_node_type(mas->node));
+
+ if (!mte_is_leaf(left)) {
+ mte_set_parent(b_node->slot[l_child], left, l_child);
+ mte_set_parent(b_node->slot[r_child], left, r_child);
+ }
+
+ /* Note, failure to allocate would set cp.node to error code
+ * here and not mas->node.
+ */
+
+ printk("\t%s: end %u slot_Cnt %u\n", __func__, end, slot_cnt);
+ if (end < slot_cnt) {
+ split = end;
+ printk("Using end %u\n", split);
+ } else if (mte_is_leaf(mas->node)) // leaf split is special
+ split = mab_calc_split(b_node, end, slot_cnt,
+ mas->min);
+ else // internal nodes share 1/2, with the larger half going left.
+ split = (end + 1) / 2;
+
+ printk("Cp 0-%u\n", split);
+ cp.node = left;
+ printk("%s: left will have %u - %u\n", __func__, 0, split + 1);
+ mab_mas_cp(b_node, 0, split + 1, &cp, 0);
+ l_piv = cp.max;
+
+ if (end >= slot_cnt) {
+ right = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ mte_node_type(mas->node));
+ cp.node = right;
+ printk("%s: right will have %u - %u\n", __func__, split + 1, end + 1);
+ mab_mas_cp(b_node, split + 1, end + 1, &cp, 0);
+ } else {
+ // Totally merged with the left.
+ right = NULL;
+ }
+
+ mas_ascend(mas);
+ printk("Next empty! %p\n\n", mas_mn(mas));
+
+ if (!mte_is_root(parent.node))
+ mas_ascend(&parent);
+
+ end = mas_data_end(mas);
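+ /* If the old root would be left with a single entry, drop a
+ * level and promote the new node to root instead.
+ */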
+ if (mte_is_root(mas->node)) {
+ if (!end || (end == 1 && !right)) {
+ mas_mn(&cp)->parent = mas_mn(&parent)->parent;
+ goto new_root_node;
+ }
+ }
+
+ memset(b_node, 0, sizeof(struct maple_big_node));
+ b = mas_mab_cp(mas, 0, p_slot, b_node, 0);
+ r_child = l_child = b;
+ printk("Set %u to %p\n", b, mte_to_node(left));
+ b_node->slot[b] = left;
+ b_node->pivot[b++] = l_piv;
+ p_slot++;
+ if (right) {
+ r_child = b;
+ b_node->slot[b] = right;
+ printk("Set %u to %p\n", b, mte_to_node(right));
+ b_node->pivot[b++] = mas_get_safe_pivot(mas, p_slot);
+ }
+ p_slot++;
+
+ if (end >= p_slot)
+ b = mas_mab_cp(mas, p_slot, end + 1, b_node, b) + 1;
+
+ // If the parent didn't decrease in size, then we are done.
+ if (b >= mt_min_slot_cnt(mas->node)) {
+ end = b;
+ break;
+ }
+
+ new_end = b - 1;
+ printk("Going with new_end = %u\n", new_end);
+ }
+
+ /* In the end, b_node will have the contents of the new parent, unless
+ * it is the root.
+ */
+ printk("\nThis is the end\n");
+ cp.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ mte_node_type(mas->node));
+ mas_mn(&cp)->parent = mas_mn(mas)->parent;
+ printk("the end copies 0-%u\n", end+1);
+ mab_mas_cp(b_node, 0, end + 1, &cp, 0);
+
+new_root_node:
+
+ mas->node = cp.node;
+ /* Kill leaf node(s) */
+ mas_set_node_dead(&orig_mas);
+ smp_wmb();
+
+ /* Insert new data */
+ _mas_replace(mas, false, false);
+
+ /* Adoption routine... */
+
+ return 0;
+}
static inline int mas_commit_split(struct ma_state *mas,
struct maple_big_node *b_node,
printk("l_max is %lu\n", l_mas.max);
mas_set_slot(&l_mas, mte_parent_slot(mas->node));
+ printk("l parent slot %u\n", mas_get_slot(&l_mas));
r_mas.min = l_mas.max + 1;
printk("r_min is %lu\n", r_mas.min);
p_slot + 1);
} else {
mte_set_parent(prev_r_mas.node, r_mas.node,
- p_slot + 1 - split + 1);
+ p_slot + 1 - split + 2);
}
}
mas_dup_state(&prev_r_mas, &r_mas);
if (!mte_is_root(mas->node)) {
mas_ascend(mas);
- i++;
- i = mas_mab_cp(&parent, i, mt_slot_count(parent.node),
- b_node, i + 1);
+ printk("get rest of parent %u-%u\n", i, mt_slot_count(parent.node) + 1);
+ i = mas_mab_cp(&parent, split + 1,
+ mt_slot_count(parent.node) + 1, b_node,
+ i + 1);
}
}
struct maple_enode *new_node;
printk("Commit\n");
- if (end < mt_min_slot_cnt(mas->node)) {
- // Rebalance?
+ if ((end < mt_min_slot_cnt(mas->node)) && !mte_is_root(mas->node)) {
+ printk("end %u min slot %u\n", end, mt_min_slot_cnt(mas->node));
+ return mas_commit_rebalance(mas, b_node, end);
}
- if (end >= mt_slot_count(mas->node))
+ else if (end >= mt_slot_count(mas->node))
return mas_commit_split(mas, b_node, end);
mas_node_cnt(mas, 1);
- if (mas_is_err(mas)) {
- printk("Failed\n");
+ if (mas_is_err(mas))
return 0;
- }
new_node = mt_mk_node(mas_next_alloc(mas), mte_node_type(mas->node));
mte_to_node(new_node)->parent = mas_mn(mas)->parent;
if (piv > mas->last) // Contained in this pivot
return false;
- if (!mte_is_leaf(mas->node)) {
+ /* A write ending at ULONG_MAX is not a spanning write, regardless of
+ * the value being written, as long as the range fits within this node.
+ */
+ if (mas->last == ULONG_MAX &&
+ mas->min <= mas->index &&
+ mas->last == mas->max)
+ return false;
+
+ if (!mte_is_leaf(mas->node)) { // Internal nodes.
if ( mas->last < piv) // Fits in the slot.
return false;
if (entry && piv == mas->last) // Writes to the end of the child node, but has a value.
return false;
- } else {
+ } else { // A leaf node.
if (mas->last < mas->max) // Fits in the node, but may span slots.
return false;
return ret;
}
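+/*
+ * mas_zero_to_end() - Clear the slots, pivots, and gap entries from @slot
+ * to the end of the node in @mas.
+ */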
+static inline void mas_zero_to_end(struct ma_state *mas, unsigned char slot)
+{
+ for (; slot < mt_slot_count(mas->node); slot++) {
+ if (slot < mt_pivot_count(mas->node))
+ mte_set_pivot(mas->node, slot, 0);
+ mte_set_rcu_slot(mas->node, slot, NULL);
+ if (mt_is_alloc(mas->tree))
+ mte_set_gap(mas->node, slot, 0);
+ }
+}
/* Private
*
* mas_remove_slot() - Remove the contents of a node by shifting slots over.
}
// Zero the rest.
- for(; slot < end; slot++) {
- if (slot < mt_pivot_count(mas->node))
- mte_set_pivot(mas->node, slot, 0);
- mte_set_rcu_slot(mas->node, slot, NULL);
- if (mt_is_alloc(mas->tree)) {
- mte_set_gap(mas->node, slot, 0);
- }
- }
+ mas_zero_to_end(mas, slot);
}
-static inline void mas_rebalance_coalesce(struct ma_state *l_mas,
- struct ma_state *p_l_mas,
- unsigned char l_end,
- struct ma_state *r_mas,
- struct ma_state *p_r_mas,
- unsigned char r_end)
-{
- unsigned char slot;
-
- // Copy data from right to left.
- for (slot = 0; slot < r_end; slot++) {
- mte_set_pivot(l_mas->node, l_end + slot,
- mte_get_pivot(r_mas->node, slot));
- mte_set_rcu_slot(l_mas->node, l_end + slot,
- mas_get_rcu_slot(r_mas, slot));
- if (!mte_is_leaf(l_mas->node) && mt_is_alloc(l_mas->tree))
- mte_set_gap(l_mas->node, l_end + slot,
- mte_get_gap(r_mas->node, slot));
- }
- if (mte_is_leaf(l_mas->node) && mt_is_alloc(l_mas->tree)) {
- // Leaf nodes may need to update the gap.
- unsigned long gap = mte_get_gap(p_r_mas->node,
- mte_parent_slot(r_mas->node));
- if (gap > mte_get_gap(p_l_mas->node,
- mte_parent_slot(l_mas->node)))
- mte_set_gap(p_l_mas->node,
- mte_parent_slot(l_mas->node), gap);
- }
-
-}
-static inline unsigned long mas_next_node(struct ma_state *mas,
- unsigned long max);
/* Private
* mas_inactive_rebalance() - Rebalance l_mas and r_mas to have the necessary
* number of occupied slots.
* Note: r_mas may be completely consumed in this operation.
*
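+ * Return: true if the slot tracked by @r_mas was merged into the left node.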
*/
-static inline void mas_inactive_rebalance(struct ma_state *mas,
+static inline bool mas_inactive_rebalance(struct ma_state *mas,
struct ma_state *l_mas,
- struct ma_state *p_l_mas,
- struct ma_state *r_mas,
- struct ma_state *p_r_mas)
+ struct ma_state *r_mas)
{
unsigned char l_end = mas_data_end(l_mas);
unsigned char r_end = mas_data_end(r_mas);
bool overwrite_left = false;
struct ma_state *target = l_mas;
struct maple_node *n_node;
+ unsigned long piv;
+ unsigned char p_slot = mte_parent_slot(l_mas->node);
+ bool ret = false;
memset(&b_node, 0, sizeof(struct maple_big_node));
MA_STATE(next, r_mas->tree, r_mas->index, r_mas->last);
+ MA_STATE(parent, l_mas->tree, l_mas->index, l_mas->last);
- if (l_end <= mt_min_slot_cnt(l_mas->node)) {
- mas_mab_cp(l_mas, 0, l_end, &b_node, b);
+ printk("l_end = %u min %u\n", l_end, mt_min_slot_cnt(l_mas->node));
+ if ((l_end + r_end + 1 < mt_slot_count(l_mas->node)) ||
+ (l_end < mt_min_slot_cnt(l_mas->node))) {
+ printk("Include left node\n");
+ mas_mab_cp(l_mas, 0, l_end + 1, &b_node, b);
b = l_end;
overwrite_left = true;
+ printk("b is %u\n", b);
}
- if (overwrite_left || (r_end <= mt_min_slot_cnt(r_mas->node))) {
- mas_mab_cp(r_mas, 0, r_end, &b_node, b);
+ if (overwrite_left || (r_end < mt_min_slot_cnt(r_mas->node))) {
+ printk("Include right node\n");
+ if (overwrite_left)
+ b++; // count slot 0 if there is data
+ mas_mab_cp(r_mas, 0, r_end + 1, &b_node, b);
b += r_end;
}
- if (!b)
- return;
+ if (!overwrite_left && (r_end + 1 >= mt_min_slot_cnt(r_mas->node))) {
+ printk("No rebalance\n");
+ return false; // No rebalancing.
+ }
+ printk("b is %u\n", b);
if ((overwrite_left && b <= mt_min_slot_cnt(l_mas->node)) ||
(!overwrite_left && b <= mt_min_slot_cnt(r_mas->node))) {
/* Left and Right don't have enough for a node, balance with the
* node to the right - if it exists.
*/
+ printk("b = %u\n", b);
mas_dup_state(&next, r_mas);
mas_next_node(&next, ULONG_MAX);
if (!mas_is_none(&next)) {
slot = mas_data_end(&next);
mas_mab_cp(&next, 0, slot, &b_node, b);
b += slot;
+ } else {
+ printk("Just created invalid tree?\n");
+ BUG_ON(1);
}
}
- /* Set up to copy data out of the big node by calculating the split and
- * setting the target. */
- split = b / 2;
- if (overwrite_left) {
- slot_cnt = mt_slot_count(l_mas->node);
- min = l_mas->min;
+ printk("b = %u mt_slot_count = %u\n", b, mt_slot_count(l_mas->node));
+ if (b < mt_slot_count(l_mas->node)) {
+ split = b;
} else {
- slot_cnt = mt_slot_count(r_mas->node);
- min = r_mas->min;
- target = r_mas;
- }
+ /* Set up to copy data out of the big node by calculating the split and
+ * setting the target. */
+ split = b / 2;
+ if (overwrite_left) {
+ slot_cnt = mt_slot_count(l_mas->node);
+ min = l_mas->min;
+ } else {
+ slot_cnt = mt_slot_count(r_mas->node);
+ min = r_mas->min;
+ target = r_mas;
+ }
- if (mte_is_leaf(l_mas->node))
- split = mab_calc_split(&b_node, b, slot_cnt, min);
+ if (mte_is_leaf(l_mas->node))
+ split = mab_calc_split(&b_node, b, slot_cnt, min);
+ }
/* Now, redistribute the data in b_node across l_mas->node, r_mas->node,
* and a new node (if necessary).
*/
/* First, put data into target */
+ printk("%s: left will have %u - %u\n", __func__, 0, split);
slot = mab_mas_cp(&b_node, 0, split + 1, target, 0);
- /* Then switch targets (either to a new node or to r_mas */
+ mas_zero_to_end(target, split + 1);
+
+ if (split == b) // Everything went left.
+ goto fixup_parent;
+
+ /* Then switch targets, either to a new node or to r_mas */
if (overwrite_left) {
target = r_mas;
} else {
+ printk("FIXME: Broken\n");
n_node = mte_to_node(next.node);
target = &next;
target->node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
}
/* Put the remainder of the data into the next target */
+ printk("%s: right will have %u - %u\n", __func__, slot, b+1);
mab_mas_cp(&b_node, slot, b + 1, target, 0);
+ mas_zero_to_end(target, b + 1 - slot);
/* Rewrite parent pointer(s) and potentially create a new parent for
* ma_state next.
*/
+fixup_parent:
+ mas_dup_state(&parent, l_mas);
+ mas_ascend(&parent);
+ // Set old pivot.
+ if (split == b) {
+ printk("Consumed right\n");
+ mte_set_pivot(parent.node, p_slot, r_mas->max);
+ piv = mas_get_safe_pivot(&parent, ++p_slot);
+ while (piv && p_slot < mt_slot_count(parent.node) - 1) {
+ mte_set_rcu_slot(parent.node, p_slot,
+ mas_get_rcu_slot(&parent, p_slot + 1));
+ piv = mas_get_safe_pivot(&parent, ++p_slot);
+ mte_set_pivot(parent.node, p_slot - 1, piv);
+ }
- if (!mas_is_none(&next) && !mas_is_start(&next)) {
- //FIXME: Create new parent and put it in the tree.
- n_node->parent = ma_parent_ptr(n_node);
- }
-
- /* Fix up r_mas slot and p_r_mas */
- if (overwrite_left) {
- unsigned char r = mas_get_slot(r_mas) + l_end;
- // update parent.
- mas_dup_state(p_r_mas, p_l_mas);
- mte_set_parent(r_mas->node, p_r_mas->node, r);
- // Update the slot.
- mas_set_slot(r_mas, l_end + r);
+ } else {
+ mte_set_pivot(parent.node, p_slot, l_mas->max);
+ if (!mas_is_none(&next) && !mas_is_start(&next)) {
+ //FIXME: Create new parent and put it in the tree.
+ }
+ /* Fix up r_mas slot and p_r_mas */
+ if (overwrite_left) {
+ unsigned char r = mas_get_slot(r_mas) + l_end + 1;
+ printk("r slot %u\n", r);
+ if (r > split)
+ r -= split - 1;
+ else
+ ret = true;
+
+ // Get parent
+ // update parent.
+ //mte_set_parent(r_mas->node, parent, r);
+ // Update the slot.
+ printk("Update slot to %u\n", r);
+ mas_set_slot(r_mas, r);
+ }
}
+ return ret;
}
static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min,
- unsigned long *range_max);
+ unsigned long *range_max);
/* Private
*
* mas_spanning_store() - Create a subtree with the store operation completed
{
unsigned char slot, r_slot, l_slot = mas_get_slot(mas);
unsigned long range_min, range_max;
+ unsigned long l_gap = 0, r_gap = 0;
bool store_left = (entry ? true : false);
enum maple_type type;
- struct maple_enode *left, *right;
+ struct maple_enode *left = NULL, *right, *ancestor;
-#if 0
- printk("Not implemented to store %lu-%lu, span starts at %p\n",
- mas->index, mas->last, mte_to_node(mas->span_enode));
- BUG_ON(1);
-#endif
-
- // FIXME: Allocations..
- mas_node_cnt(mas, 1 + 20* 2);
- if (mas_is_err(mas))
+ printk("%s: %d\n", __func__, __LINE__);
+ // FIXME: Allocation count, height * 2 + 1 ?
+ mas_node_cnt(mas, 32); // 32 is an entire page.
+ if (mas_is_err(mas)) {
+ printk("Error %d\n", xa_err(mas->node));
return 0;
+ }
MA_STATE(r_mas, mas->tree, mas->index, mas->last); // right
- MA_STATE(p_r_mas, mas->tree, mas->index, mas->last); // right previous
- MA_STATE(l_mas, mas->tree, mas->index, mas->last); // left
- MA_STATE(p_l_mas, mas->tree, mas->index, mas->last); // left previous
+ MA_STATE(old_r_mas, mas->tree, mas->index, mas->last); // right previous
- if(!mte_is_root(mas->node)) {
- mas_ascend(&p_l_mas);
- mas_dup_state(&p_r_mas, &p_l_mas);
- }
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last); // left
+ MA_STATE(old_l_mas, mas->tree, mas->index, mas->last); // left previous
mas_dup_state(&l_mas, mas);
l_mas.last = l_mas.index;
l_slot = mas_get_slot(&l_mas);
mas_dup_state(&r_mas, &l_mas);
- r_mas.last = mas->last;
+ r_mas.last++;
r_mas.index = r_mas.last;
mas_set_slot(&r_mas, mte_parent_slot(l_mas.node));
mas_next_node(&r_mas, ULONG_MAX);
// range_max is used below, so keep r_mas walk after l_mas.
__mas_walk(&r_mas, &range_min, &range_max);
r_slot = mas_get_slot(&r_mas);
+ r_mas.last = r_mas.index = mas->last;
printk("%s: l_slot %p[%u] r_slot %p[%u]\n", __func__,
mas_mn(&l_mas), l_slot,
/* Expand store of NULL, if necessary */
if (!entry) {
/* Check if there is a null in the previous slot on the left */
- if (!mas_get_rcu_slot(&l_mas, l_slot)) {
+ if (!l_slot) {
+ mas->index = mas->min;
+ } else if (!mas_get_rcu_slot(&l_mas, l_slot - 1)) {
if (l_slot > 1)
mas->index = mte_get_pivot(l_mas.node, l_slot - 2) + 1;
else
}
/* Check if there is a null in the next slot on the right */
- if ((r_slot < mt_slot_count(r_mas.node)) &&
- (!mas_get_rcu_slot(&r_mas, r_slot + 1))) {
- mas->last = mas_get_safe_pivot(&r_mas, r_slot + 1);
+ if (!mas_get_rcu_slot(&r_mas, r_slot)) {
+ mas->last = mas_get_safe_pivot(&r_mas, r_slot);
if (!mas->last)
mas->last = mas->max;
}
printk("NULL could be in the next node?\n");
}
- printk("%s: final range is %lu-%lu at slot %u\n", __func__,
- mas->index, mas->last, mas_get_slot(mas));
+ printk("%s: final range is %lu-%lu at slot %u or %u\n", __func__,
+ mas->index, mas->last, mas_get_slot(&l_mas), mas_get_slot(&r_mas));
/* FIXME: What about detecting a split here? */
// if the parent is root
// check if a level can be removed.
// FIXME: Move this somewhere above here.
- if (!mte_is_root(mas->node) && mte_is_root(p_l_mas.node)) {
+ if (!mte_is_root(mas->node)) {
printk("potentially drop a level?\n");
// if (!l_slot && r_slot == mas_data_end(parent))
// printk("Drop a level\n");
mas_dup_state(&r_mas, mas);
mas_dup_state(&l_mas, mas);
- mas_dup_state(&p_r_mas, mas);
- mas_dup_state(&p_l_mas, mas);
type = mte_node_type(mas->node);
+ l_mas.last = l_mas.index;
+ mas_node_walk(&l_mas, type, &range_min, &range_max);
// Set up the right side maple state to point to the correct slot
+ r_mas.last++;
r_mas.index = r_mas.last; // Just point to the right.
mas_node_walk(&r_mas, type, &range_min, &range_max);
+
+ mas_dup_state(&old_r_mas, &r_mas);
+ mas_dup_state(&old_l_mas, &l_mas);
r_slot = mas_get_slot(&r_mas);
- l_slot = mas_get_slot(mas);
- slot = l_slot + 1;
- printk("%s: copy %p[%u] over right\n", __func__, mas_mn(mas), r_slot);
+ l_slot = mas_get_slot(&l_mas);
+ slot = r_slot;
printk("%s: copy %p[0-%u] for left\n", __func__, mas_mn(mas), l_slot);
+ printk("%s: copy %p[%u] over right\n", __func__, mas_mn(mas), r_slot);
// Set up the left side maple state to point to just the left.
l_mas.last = mas->index;
// Make a new node, etc.
- l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
+ ancestor = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
+ l_mas.node = ancestor;
r_mas.node = l_mas.node;
// Set the middle pivot of the ancestor.
mas_mn(&l_mas)->parent = mas_mn(mas)->parent; // Copy the parent.
do {
if (mte_is_leaf(r_mas.node) && !store_left) {
// Store value in right side.
+ printk("r store %p[%u] %lu %p\n", mas_mn(&r_mas), slot,
+ r_mas.last - 1, entry);
mte_set_rcu_slot(r_mas.node, slot, entry);
- mte_set_pivot(r_mas.node, slot, r_mas.last);
- if (entry || mas_get_rcu_slot(&p_r_mas, r_slot))
+ mte_set_pivot(r_mas.node, slot, r_mas.last - 1);
+ if (entry || mas_get_rcu_slot(&old_r_mas, r_slot))
slot++;
}
// Copy right to new node.
for (; r_slot < mt_slot_count(r_mas.node); r_slot++) {
unsigned long piv =
- mas_get_safe_pivot(&p_r_mas, r_slot);
+ mas_get_safe_pivot(&old_r_mas, r_slot);
if (!piv) // Node end.
break;
- mte_set_rcu_slot(r_mas.node, slot,
- mas_get_rcu_slot(&p_r_mas, r_slot));
printk("r cp %p[%u] -> %p[%u]\n",
- mas_mn(&p_r_mas), r_slot,
+ mas_mn(&old_r_mas), r_slot,
mas_mn(&r_mas), slot);
+ printk("val is %lu -> %p\n", piv, mas_get_rcu_slot(&old_r_mas, r_slot));
+ mte_set_rcu_slot(r_mas.node, slot,
+ mas_get_rcu_slot(&old_r_mas, r_slot));
if (r_slot < mt_pivot_count(r_mas.node))
mte_set_pivot(r_mas.node, slot, piv);
+ r_mas.max = piv;
- if (mt_is_alloc(mas->tree))
+ if (mt_is_alloc(mas->tree) && !ma_is_leaf(type))
mte_set_gap(r_mas.node, slot,
- mte_get_gap(p_r_mas.node, r_slot));
+ mte_get_gap(old_r_mas.node, r_slot));
slot++;
}
// Copy left to new node.
type = mte_node_type(l_mas.node);
- for (slot = 0; slot <= l_slot; slot++) {
+ for (slot = 0; slot < l_slot; slot++) {
+ unsigned long piv;
+
printk("l cp %p[%u] -> %p[%u]\n",
- mas_mn(&p_l_mas), slot,
+ mas_mn(&old_l_mas), slot,
mas_mn(&l_mas), slot);
+ printk("val is %lu -> %p\n",
+ mte_get_pivot(old_l_mas.node, slot),
+ mas_get_rcu_slot(&old_l_mas, slot));
+
+ piv = mte_get_pivot(old_l_mas.node, slot);
mte_set_rcu_slot(l_mas.node, slot,
- mas_get_rcu_slot(&p_l_mas, slot));
- mte_set_pivot(l_mas.node, slot,
- mte_get_pivot(p_l_mas.node, slot));
+ mas_get_rcu_slot(&old_l_mas, slot));
+ mte_set_pivot(l_mas.node, slot, piv);
+ l_mas.max = piv;
if (mt_is_alloc(mas->tree) && !ma_is_leaf(type))
mte_set_gap(l_mas.node, slot,
- mte_get_gap(mas->node, slot));
+ mte_get_gap(old_l_mas.node, slot));
}
- if (ma_is_leaf(type) && store_left) {
- // Store the value at the end of the left node. Nodes
- // require a value at the end so terminate that range
- // and store a new one.
- mte_set_pivot(l_mas.node, --slot, l_mas.index - 1);
- // Store new range.
- mte_set_rcu_slot(l_mas.node, ++slot, entry);
- if (slot < mt_pivot_count(l_mas.node))
- mte_set_pivot(l_mas.node, slot, l_mas.last);
-
- } else if (l_mas.node == mas->node) {
+ if (ma_is_leaf(type)) {
+ unsigned long piv = l_mas.index - 1;
+ printk("piv = %lu\n", piv);
+
+ // If this truncates a range, terminate the range.
+ if (mte_get_pivot(l_mas.node, l_slot - 1) > piv)
+ mte_set_pivot(l_mas.node, l_slot - 1, piv);
+
+ if (store_left) {
+ // Store new range.
+ mte_set_rcu_slot(l_mas.node, l_slot, entry);
+ if (l_slot < mt_pivot_count(l_mas.node))
+ mte_set_pivot(l_mas.node, l_slot,
+ l_mas.last);
+ l_mas.max = l_mas.last;
+ }
+
+ } else {
// Overwrite the pivot between the two nodes with the
// correct value.
- mte_set_pivot(l_mas.node, l_slot,
- (store_left ? mas->last : mas->index - 1));
- }
+ if (store_left)
+ l_mas.max = mas->last;
+ else
+ l_mas.max = mas->index - 1;
- // Rebalance if necessary.
- // FIXME: Rebalance inactive needed, also set the slot correctly
- // for index.
- // FIXME: Mark modified nodes dead, set the parents of the new
- // nodes only.
- //
+ mte_set_pivot(l_mas.node, l_slot, l_mas.max);
+
+ if (!left) {
+ mte_set_rcu_slot(l_mas.node, l_slot,
+ mas_get_rcu_slot(&old_l_mas, l_slot));
+ if (mt_is_alloc(mas->tree))
+ mte_set_gap(l_mas.node, l_slot,
+ mte_get_gap(old_l_mas.node, l_slot));
+ }
+ }
// Rebalance left + right + (possibly) node to the right of right
- if (l_mas.node != r_mas.node)
- mas_inactive_rebalance(mas, &l_mas, &p_l_mas, &r_mas,
- &p_r_mas);
+ if (l_mas.node != r_mas.node) {
+ printk("Old %p and %p\n",
+ mas_mn(&old_l_mas), mas_mn(&old_r_mas));
+ printk("Rebalance between %p and %p\n",
+ mas_mn(&l_mas), mas_mn(&r_mas));
+ mas_inactive_rebalance(mas, &l_mas, &r_mas);
+ }
l_slot = mas_get_slot(&l_mas);
r_slot = mas_get_slot(&r_mas);
+ printk("l_slot %u and r_slot %u\n", l_slot, r_slot);
+ // FIXME: Mark modified nodes dead, set the parents of the new
+ // nodes only.
+
// Check parent for rebalance or removal?
//
/* FIXME: Rebalance across gap means changing pivots up the tree
* to the ancestor, r_mas may now fall on the same side as
* l_mas, what does that look like?
- if (unlikely(0)
- mas_spanning_rebalance(&l_mas, &r_mas);
- */
+ if (unlikely(0)
+ mas_spanning_rebalance(&l_mas, &r_mas);
+ */
if (ma_is_leaf(type)) // Don't descend if this is a leaf.
break;
// Set up for a new run.
- mas_descend(&l_mas);
- mas_descend(&r_mas);
- mas_set_slot(&p_r_mas, r_slot);
- mas_set_slot(&p_l_mas, l_slot);
- mas_descend(&p_r_mas);
- mas_descend(&p_l_mas);
- mas_node_walk(&l_mas, mte_node_type(l_mas.node), &range_min,
- &range_max);
- type = mte_node_type(r_mas.node);
- mas_node_walk(&r_mas, type, &range_min, &range_max);
+ mas_descend(&old_r_mas);
+ mas_descend(&old_l_mas);
+ type = mte_node_type(old_r_mas.node);
// Create next nodes.
- left = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
- mte_node_type(l_mas.node));
- mte_set_parent(left, p_l_mas.node, l_slot);
- mte_set_rcu_slot(p_l_mas.node, l_slot, left);
- l_mas.node = left;
+ left = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
+ printk("Set parent of %p to %p[%u]\n", left, mas_mn(&l_mas), l_slot);
+ mte_set_parent(left, l_mas.node, l_slot);
+ mte_set_rcu_slot(l_mas.node, l_slot, left);
+ mas_descend(&l_mas);
right = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
- mte_set_parent(right, p_r_mas.node, r_slot);
- mte_set_rcu_slot(p_r_mas.node, r_slot, right);
- r_mas.node = right;
+ printk("Set parent of %p to %p[%u]\n", right, mas_mn(&r_mas), r_slot);
+ mte_set_parent(right, r_mas.node, r_slot);
+ mte_set_rcu_slot(r_mas.node, r_slot, right);
+ mas_descend(&r_mas);
- l_slot = mas_get_slot(&l_mas);
- r_slot = mas_get_slot(&r_mas);
+ mas_set_slot(&old_l_mas, 0);
+ mas_node_walk(&old_l_mas, type, &range_min, &range_max);
+ mas_set_slot(&old_r_mas, 0);
+ mas_node_walk(&old_r_mas, type, &range_min, &range_max);
+
+ l_slot = mas_get_slot(&old_l_mas);
+ r_slot = mas_get_slot(&old_r_mas);
+ // Copy the slot info across to l_mas, r_mas even though the
+ // nodes are empty.
+ mas_set_slot(&l_mas, l_slot);
+ mas_set_slot(&r_mas, r_slot);
slot = 0;
printk("New loop, copy %p[0-%u] and %p[%u-end]\n",
- mas_mn(&p_l_mas), l_slot, mas_mn(&p_r_mas), r_slot);
+ mas_mn(&old_l_mas), l_slot, mas_mn(&old_r_mas), r_slot);
} while (1);
+ printk("Done loop\n");
// mark modified node(s?) dead.
// NOTE: Rebalanced nodes not in this sub-tree are already marked dead.
- mas_set_node_dead(&p_l_mas);
- mas_set_node_dead(&p_r_mas);
+ mas_set_node_dead(&old_l_mas);
+ mas_set_node_dead(&old_r_mas);
smp_wmb();
- _mas_replace(mas, false, false);
// insert new sub-tree
- //
- //do
+ mas->node = ancestor;
+ printk("Replace tree\n\n");
+ _mas_replace(mas, false, false);
+ mt_dump(mas->tree);
+
+ if (mt_is_alloc(mas->tree)) {
+ l_gap = mas_leaf_max_gap(&l_mas);
+ l_slot = mte_parent_slot(l_mas.node);
+ r_gap = mas_leaf_max_gap(&r_mas);
+ r_slot = mte_parent_slot(r_mas.node);
+ }
+
+ do {
// ascend
- // adopt children of nodes that don't have the correct parent
+ mas_dup_state(&old_l_mas, &l_mas);
+ mas_dup_state(&old_r_mas, &r_mas);
+ mas_ascend(&l_mas);
+ mas_ascend(&r_mas);
+ if (mt_is_alloc(mas->tree)) {
// mas_update_gap and friends (don't recursively do this)
- // while (!ancestor)
- //
+ mte_set_gap(l_mas.node, l_slot, l_gap);
+ l_gap = mas_max_gap(&l_mas, &l_slot);
+ l_slot = mte_parent_slot(l_mas.node);
+
+ mte_set_gap(r_mas.node, r_slot, r_gap);
+ r_gap = mas_max_gap(&r_mas, &r_slot);
+ r_slot = mte_parent_slot(r_mas.node);
+ }
+ // adopt children of nodes that don't have the correct parent
+ mas_adopt_children(&old_l_mas, l_mas.node);
+ mas_adopt_children(&old_r_mas, r_mas.node);
+
+ } while (l_mas.node != ancestor);
+
+ mas_update_gap(&l_mas, false);
+
int ret = 0;
printk("\nStart: %s %d store %lu-%lu %p\n", __func__, __LINE__,
- mas->index, mas->last, mas_mn(mas));
+ mas->index, mas->last, entry);
mt_dump(mas->tree);
if (mas_start(mas) || (mas_is_none(mas) || mas->node == MAS_ROOT))
if (mas->last < r_max)
mas->last = r_max;
}
+ if ((r_max != mas->max) && !mas_get_rcu_slot(mas, slot + 1)) {
+ printk("rcu slot %u is %p\n", slot + 1, mas_get_rcu_slot(mas, slot+1));
+ mas->last = mas_get_safe_pivot(mas, slot + 1);
+ if (!mas->last)
+ mas->last = mas->max;
+ r_max = mas->last;
+ }
if (slot && !mas_get_rcu_slot(mas, slot - 1)) {
if (slot > 1)
mas->index = mte_get_pivot(mas->node, slot - 2) + 1;
else
mas->index = mas->min;
r_min = mas->index;
+ mas_set_slot(mas, --slot);
}
- if ((r_max != mas->max) && !mas_get_rcu_slot(mas, slot + 1)) {
- printk("rcu slot %u is %p\n", slot+1, mas_get_rcu_slot(mas, slot+1));
- mas->last = mas_get_safe_pivot(mas, slot + 1);
- if (!mas->last)
- mas->last = mas->max;
- r_max = mas->last;
- }
- mas_set_slot(mas, --slot);
printk("store is now %lu-%lu at slot %u\n", mas->index,
mas->last, mas_get_slot(mas));
}
- end = mas_data_end(mas);
- if ((slot > end) || !end) {
- // Appending
- if (r_min < mas->index)
- mte_set_pivot(mas->node, slot++, mas->index - 1);
-
- mte_set_rcu_slot(mas->node, slot, entry);
- mte_set_pivot(mas->node, slot, mas->last);
- goto done;
- }
-
mt_dump(mas->tree);
memset(&b_node, 0, sizeof(struct maple_big_node));
printk("Copy 0-%u\n", slot);
}
+ // Check if this is an append operation.
+ end = mas_data_end(mas);
+ if ((new_end <= slot_cnt) && ((slot > end) || !end)) {
+ // Appending
+ printk("Appending to %p[%u] %lu\n", mas_mn(mas), slot, mas->index);
+ if (r_min < mas->index)
+ mte_set_pivot(mas->node, slot++, mas->index - 1);
+
+ mte_set_rcu_slot(mas->node, slot, entry);
+ mte_set_pivot(mas->node, slot, mas->last);
+ goto done;
+ }
+
+ // Skip overwritten data
do {
printk("Skip %u\n", slot);
r_max = mas_get_safe_pivot(mas, ++slot);
} while ((r_max <= mas->last) && (slot < slot_cnt));
new_end = mas_mab_cp(mas, slot, slot_cnt, &b_node, new_end);
+
// count the node as full if it has not already been counted.
if (new_end >= slot_cnt && end < slot_cnt)
mas_cnt_full(mas);
+ else if (new_end < mt_min_slot_cnt(mas->node))
+ mas_cnt_empty(mas);
printk("End %u new_end %u slot_cnt %u\n", end, new_end, slot_cnt);
ret = 3;
done:
+ if (mas_is_err(mas))
+ return NULL;
+
if (ret > 2)
return NULL;
+
mt_dump(mas->tree);
return content;
}
return _mas_store(mas, entry, true);
}
-
-static inline bool _mas_walk(struct ma_state *mas);
-
-/* Private
- *
- * mas_rebalance_gaps() - walk down to the mas->index location and update the
- * gaps.
- *
- *
- */
-static inline void mas_rebalance_gaps(struct ma_state *mas)
-{
- if (mt_is_alloc(mas->tree)) {
- MA_STATE(r_mas, mas->tree, mas->index, mas->last);
- mas->node = MAS_START;
- _mas_walk(mas); // return to the updated location in the tree.
- mas_dup_state(&r_mas, mas);
- mas_update_gap(mas, true);
- mas_dup_state(mas, &r_mas);
- mas_set_slot(&r_mas, mte_parent_slot(mas->node));
- mas_next_node(&r_mas, ULONG_MAX);
- if (!mas_is_none(&r_mas))
- mas_update_gap(&r_mas, true);
-
- }
-}
-
-static inline int mas_safe_slot(struct ma_state *mas, int *slot, int delta);
static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
static inline void mas_next_slot(struct ma_state *mas, unsigned long max)
mas_ascend(mas);
level++;
- if (!mas_safe_slot(mas, &slot, -1))
- goto ascend;
-
if (mas_dead_node(mas, start_piv))
goto restart_prev_node;
level++;
mas_ascend(mas);
- if (!mas_safe_slot(mas, &slot, 1))
- goto ascend;
-
if (mas_dead_node(mas, start_piv))
goto restart_next_node;
count = mt_slot_count(mas->node);
}
-ascend:
if (mte_is_root(mas->node))
goto no_entry;
mas_set_slot(mas, mte_parent_slot(mas->node));
break;
} while (slot--);
+ if (!entry)
+ return false;
+
*max = pivot;
mas_set_slot(mas, slot);
return true;
prev_min = mas->min;
prev_max = mas->max;
while (range_start < limit) {
+ printk("start %p\n", mas_mn(mas));
mas_set_slot(mas, slot);
if (!mas_next_nentry(mas, limit, &range_start)) {
+ printk("no next.. slot %u\n", slot);
void *entry = mas_get_rcu_slot(mas, slot - 1);
if (mte_is_leaf(mas->node)) {
+ printk("leaf at slot %u\n", slot);
mas->index = range_start - 1;
mas->index = mte_get_pivot(mas->node, slot - 1);
return entry;
unsigned char slot;
while (!mas_is_none(mas)) {
- if (mas_prev_nentry(mas, limit, &max))
+ if (mas_prev_nentry(mas, limit, &max)) {
+ printk("prev_nentry is %p\n", mas_mn(mas));
break;
+ }
mas_prev_node(mas, limit);
+ printk("prev_node is %p\n", mas_mn(mas));
mas_set_slot(mas, mt_slot_count(mas->node));
}
if (!mas->node)
mas->node = MAS_START;
+ printk("Prev of %p\n", mas_mn(mas));
if (mas_is_start(mas)) {
mas_start(mas);
+ printk("Get last entry %p\n", mas_mn(mas));
return mas_last_entry(mas, ULONG_MAX);
}
entry = _mas_prev(mas, min);
if (!mas_searchable(mas))
break;
-
} while (!entry);
return entry;
return _mas_range_walk(mas, &range_min, &range_max);
}
-/* Private
- * Skip any slots that have special values.
- * If the limit of the slot is hit, then return false.
- */
-static inline int mas_safe_slot(struct ma_state *mas, int *slot,
- int delta)
-{
- unsigned char max = mt_slot_count(mas->node);
- unsigned char limit = max;
- if (0 > delta)
- limit = 0;
- while (*slot != limit) {
- void *entry;
- if (!mas_get_safe_pivot(mas, (*slot) + delta))
- return false;
-
- entry = mas_get_rcu_slot(mas, (*slot) + delta);
- if (!mt_is_empty(entry) && !xa_is_retry(entry))
- return true;
- *slot += delta;
- }
- return false;
-}
static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
{
if (!mas_searchable(mas))
entry = mas_range_load(mas, &r_min, &r_max, true);
retry:
printk("Erase %lu-%lu\n", r_min, r_max);
+ mas->node = MAS_START;
mas->index = r_min;
mas->last = r_max;
_mas_store(mas, NULL, true);
- if (mas_nomem(mas, GFP_KERNEL|GFP_ATOMIC))
+ if (mas_nomem(mas, GFP_KERNEL))
goto retry;
return entry;