}
}
+/* mas_dup_state() - Copy the maple state @src into @dst.
+ *
+ * Copies the tree pointer, the search range (index/last), the node and the
+ * current slot so @dst can continue a walk independently of @src.
+ * NOTE(review): min/max are deliberately not copied here - callers appear
+ * to set them via mas_descend() or explicitly; confirm no caller relies on
+ * dst->min/max after a plain dup.
+ */
+static inline void mas_dup_state(struct ma_state *dst, struct ma_state *src)
+{
+ dst->tree = src->tree;
+ dst->index = src->index;
+ dst->last = src->last;
+ dst->node = src->node;
+ mas_set_slot(dst, mas_get_slot(src));
+}
+/* mas_descend() - Walk down one level: replace mas->node with the child in
+ * the current slot and narrow mas->min/mas->max to that child's range.
+ *
+ * The minimum becomes one past the previous slot's pivot (slot 0 keeps the
+ * parent's minimum); the maximum is this slot's (safe) pivot.
+ */
+static inline void mas_descend(struct ma_state *mas)
+{
+ unsigned char slot = mas_get_slot(mas);
+
+ if (slot)
+ mas->min = mas_get_safe_pivot(mas, slot - 1) + 1;
+ mas->max = mas_get_safe_pivot(mas, slot);
+ /* Reuse the cached slot instead of re-reading it from the state. */
+ mas->node = mte_get_rcu_slot(mas->node, slot);
+}
/** Private
* mte_cp_rcu_slot() = Copy from one node to anther. Upon seeing a retry,
* copies NULL.
entry = _mte_get_rcu_slot(mn, slot, type);
if (mt_will_coalesce(entry)) {
- if (piv == prev_piv)
+ if (piv == prev_piv) {
+ printk("will coalesce %d\n", slot);
(*coalesce)++;
+ }
+ //counted_null = true;
} else if (!entry) {
- if (counted_null)
+ if (counted_null) {
+ printk("has value\n");
(*coalesce)++;
+ }
counted_null = true;
} else {
counted_null = false;
return end - coalesce;
}
-#define mas_calc_split mas_no_dense_calc_split
+#define mas_do_split mas_no_dense_split
-static inline unsigned char mas_no_dense_calc_split(struct ma_state *mas,
- struct maple_enode **left, struct maple_enode **right)
+/*
+ * Private
+ *
+ * Trying to calculate a split is a rather difficult task. One has to
+ * remember:
+ * 1. Keep gaps at the end of a node
+ * 2. Coalescing may affect the middle position of data
+ * 3. Use the target slot and the new count to better get a 50/50 split.
+ *
+ * Copies the live entries of @mas's node into @left then @right (whose
+ * nodes are allocated here from mas's allocation list) and, when the node
+ * is a leaf, writes @entry for the range mas->index - mas->last during the
+ * copy.  @entry_cnt is the number of source entries to walk.
+ *
+ * Returns the slot count of the left node at the point the copy switched
+ * to the right node, or 0 if everything fit into @left.
+ */
+static inline unsigned char mas_no_dense_split(struct ma_state *mas,
+ struct ma_state *left, struct ma_state *right,
+ unsigned char entry_cnt, void *entry)
{
- char i, j;
- unsigned long min = mas->min;
- unsigned long max = min;
- enum maple_type type = mte_node_type(mas->node);
- unsigned long slot_cnt = mt_slots[type];
- unsigned long half = mt_slots[type] / 2;
- unsigned long last_pivot;
- unsigned char coalesce;
- unsigned char data_end = mas_data_end(mas, type, &last_pivot, &coalesce);
+ enum maple_type mas_type = mte_node_type(mas->node);
+ unsigned char ret = 0, half = mt_slots[mas_type] / 2;
+ unsigned char src_slot = 0, slot = 0;
+ unsigned long piv, prev_piv = mas->min, written_piv = mas->min;
+ bool attempt_insert = false;
- *left = (struct maple_enode *)mas_next_alloc(mas);
- *left = mt_mk_node(ma_mnode_ptr(*left), type);
- *right = (struct maple_enode *)mas_next_alloc(mas);
- *right = mt_mk_node(ma_mnode_ptr(*right), type);
+ MA_STATE(cp, mas->tree, mas->index, mas->last);
+ left->node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), mas_type);
+ right->node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), mas_type);
- if (mte_get_pivot(mas->node, half) > mas->index)
- return half - 1;
+ // Only leaves take the new entry; internal splits just redistribute.
+ if (ma_is_leaf(mas_type))
+ attempt_insert = true;
- return half;
+ printk("%s %u\n", __func__, half);
+ // cp tracks the current destination; start filling the left node.
+ mas_dup_state(&cp, left);
+ do {
+ void *existing = NULL;
- if (mte_is_root(mas->node))
- return half;
+ piv = mas_get_safe_pivot(mas, src_slot);
+ existing = mte_get_rcu_slot(mas->node, src_slot);
+ printk("%p[%u] (%lu)\n", mas_mn(mas), src_slot, piv);
- for (i = 0; i < data_end; i++) {
- max = mte_get_pivot(mas->node, i);
- if (max == mt_max[type] && i)
- return i - 1;
+ // Skip entries that coalesce away (retry/skip markers).
+ if (mt_will_coalesce(existing)) {
+ printk("coalesce\n");
+ goto skip_src_slot;
+ }
- if ((max - min) > 15)
- break;
- }
- if (i)
- i--;
+ if (prev_piv > piv) {// insert overwrote this pivot.
+ printk("prev_piv > piv\n");
+ goto skip_src_slot;
+ }
- if (i >= data_end)
- return i;
+ // Duplicate pivots (beyond slot 0) hold no extra data.
+ if (src_slot && piv == prev_piv) {
+ printk("piv == prev_piv..\n");
+ goto skip_src_slot;
+ }
- if (i >= half)
- return i;
+ // Insert happens in leaf
+ if (attempt_insert && (cp.index <= piv)) {
+ printk("%lu %lu\n", prev_piv, piv);
+ // Preserve the existing range up to index - 1 first.
+ if (written_piv != cp.index - 1) {
+ // Insert a previous entry..
+ mte_set_rcu_slot(cp.node, slot, existing);
+ mte_set_pivot(cp.node, slot, cp.index - 1);
+ slot++;
+ }
- if (data_end < slot_cnt)
- max = mte_get_pivot(mas->node, data_end - 1);
- else
- max = mas->max;
+ printk("inserting new data at %p[%u]\n", cp.node, slot);
+ mte_set_rcu_slot(cp.node, slot, entry);
+ mte_set_pivot(cp.node, slot, cp.last);
+ slot++;
+ // Restore the tail of the overwritten range, if any.
+ if (piv > cp.last) {
+ mte_set_rcu_slot(cp.node, slot, existing);
+ mte_set_pivot(cp.node, slot, piv);
+ } else
+ piv = cp.last;
- j = data_end;
- do {
- j--;
- min = mte_get_pivot(mas->node, j);
- if ((max - min) > 15) {
- j++;
- break;
+ attempt_insert = false;
+ } else {
+ // Plain copy of an existing slot (and gap for arange).
+ printk("Copy %u -> %p[%u]\n", src_slot, mas_mn(&cp), slot);
+ mte_set_pivot(cp.node, slot, piv);
+ if (mas_type == maple_arange_64)
+ mte_cp_gap(cp.node, slot, mas->node, src_slot);
+ mte_set_rcu_slot(cp.node, slot, existing);
}
- } while (j > 0);
- if (data_end - j >= half)
- return j;
+ // next
+ // Switch destination to the right node at the halfway mark.
+ if ((slot >= half) && (cp.node != right->node)) {
+ ret = slot;
+ left->max = piv;
+ printk("Set %p max %lu\n", mas_mn(left), piv);
+ right->min = piv + 1;
+ printk("Switch dst %d\n", entry_cnt);
+ mas_dup_state(&cp, right);
+ slot = 0;
+ } else {
+ slot++;
+ }
+
+ written_piv = piv;
+skip_src_slot:
- return half;
+ prev_piv = piv;
+ src_slot++;
+ } while (entry_cnt-- > 0 || attempt_insert);
+
+ right->max = piv;
+ return ret;
}
+
static inline unsigned char mas_dense_calc_split(struct ma_state *mas,
struct maple_enode **left, struct maple_enode **right)
{
mte_free(mn);
}
-static inline void mas_copy(struct ma_state *mas, struct ma_cp *cp)
+static inline unsigned long _mas_copy(struct ma_state *mas, struct ma_cp *cp,
+ unsigned long min)
{
unsigned char sloc = cp->src_start; // src location
unsigned char dloc = cp->dst_start; // dst location
enum maple_type type = mte_node_type(cp->src);
enum maple_type dtype;
unsigned char pivot_cnt = mt_pivots[type];
- unsigned long piv, prev_piv = mas->min;
+ unsigned long piv, prev_piv = min;
if (!cp->dst) {
/* Allocate a new node */
mas_node_cnt(mas, 1);
if (mas_is_err(mas))
- return;
+ return prev_piv;
cp->dst = mt_mk_node(mas_next_alloc(mas), type);
}
if (!mt_pivot_count(cp->dst)) {
mas_dense_cp(mas, cp);
- return;
+ return prev_piv;
}
dtype = mte_node_type(cp->dst);
while (sloc <= cp->src_end &&
dloc <= cp->dst_end) {
+ void *entry;
if (prev_piv >= mas->max)
break;
else
piv = mas->max;
+ if (sloc && !piv)
+ break;
+
+ entry = mte_get_rcu_slot(cp->src, sloc);
+ if (mt_will_coalesce(entry))
+ goto next_src_slot;
+
+
// Last entry.
if (dloc && (piv == mas->max || !piv)) {
if (!mte_get_rcu_slot(cp->dst, dloc -1) &&
- ! mte_get_rcu_slot(cp->src, sloc)) {
+ mt_is_empty(entry)) {
mte_set_pivot(cp->dst, dloc -1, 0);
break;
}
}
- if (sloc && !piv)
- break;
-
if (piv < cp->start_piv)
goto next_src_slot;
cp->dst_start = dloc;
cp->src_start = sloc;
+ return prev_piv;
+}
+
+static inline unsigned long mas_copy(struct ma_state *mas, struct ma_cp *cp)
+{
+ return _mas_copy(mas, cp, mas->min);
}
+
static inline int mas_split_data(struct ma_state *mas, struct maple_enode *left,
struct maple_enode *right, unsigned char split,
unsigned long r_max)
MA_CP(cp, mas->node, left, 0, split);
+ printk("cp %p[0-%u] to %p\n", mas_mn(mas), split, mte_to_node(left));
mas_copy(mas, &cp);
cp.src_start = split + 1;
*
*/
static inline int mas_split(struct ma_state *mas, unsigned char slot,
- bool active)
+ bool active, unsigned entry_cnt, void *entry)
{
struct maple_enode *full = mas->node;
unsigned char split, p_slot = 0, p_end = 0;
struct maple_enode *old_parent, *new_parent;
- struct maple_enode *left = NULL, *right = NULL;
enum maple_type ptype; // parent type.
- unsigned long pivot;
- unsigned long r_max, p_max = ULONG_MAX;
+
+ MA_STATE(parent, mas->tree, mas->index, mas->last);
+ MA_STATE(left, mas->tree, mas->index, mas->last);
+ MA_STATE(right , mas->tree, mas->index, mas->last);
if (mte_is_root(mas->node)) {
old_parent = full;
else
ptype = maple_range_64;
p_slot = 0;
+ printk("root\n");
} else {
unsigned long last_pivot;
unsigned char coalesce;
p_slot = mte_parent_slot(mas->node);
- mas_encoded_parent(mas);
- old_parent = mas->node;
- ptype = mte_node_type(mas->node);
- p_end = mas_data_end(mas, ptype, &last_pivot, &coalesce);
+ mas_dup_state(&parent, mas);
+ printk("parent slot %u\n", p_slot);
+ mas_encoded_parent(&parent);
+ old_parent = parent.node;
+
+ ptype = mte_node_type(parent.node);
+ p_end = mas_data_end(&parent, ptype, &last_pivot, &coalesce);
if (p_end - coalesce >= mt_slots[ptype] - 1) {
+ printk("Split the parent\n");
/* Must split the parent */
- split = mas_split(mas, p_slot, active);
- if (mas_is_err(mas)) {
+ split = mas_split(&parent, p_slot, active,
+ p_end - coalesce + 1, entry);
+ if (mas_is_err(&parent)) {
+ mas->node = parent.node;
return 0;
}
if (split < p_slot)
p_slot -= split;
// Split will return the parent.
- old_parent = mas->node;
- mas_set_slot(mas, p_slot);
+ old_parent = parent.node;
+ mas_set_slot(&parent, p_slot);
}
- ptype = mte_node_type(mas->node);
- p_max = mas->max;
- p_end = mas_data_end(mas, ptype, &last_pivot, &coalesce);
- mas_update_limits(mas, p_slot, ptype);
- mas->node = mte_get_rcu_slot(old_parent, p_slot);
+ ptype = mte_node_type(parent.node);
+ p_end = mas_data_end(&parent, ptype, &last_pivot, &coalesce);
+ mas_dup_state(mas, &parent);
+ mas_set_slot(mas, p_slot);
+ mas_descend(mas);
}
mas_node_cnt(mas, 3);
// Allocations.
new_parent = mt_mk_node(mas_next_alloc(mas), ptype);
+ // split the data into left & right and do the insert.
+ split = mas_do_split(mas, &left, &right, entry_cnt, entry);
+
+ // KEEP REWRITING THIS DOWN WITH left/right/parent states.
+ //
// Copy the parents information
if (!mte_is_root(full)) {
- unsigned long c_max = mas->max;
-
- // Copy the parent data except p_slot, which will later be
- // replaced.
MA_CP(cp, old_parent, new_parent, 0, p_slot - 1);
+ unsigned char skip = 1;
+
- mas->max = p_max;
+ // Copy the parent data up to p_slot - 1.
if (p_slot)
- mas_copy(mas, &cp);
+ mas_copy(&parent, &cp);
+ if (right.node)
+ skip++;
+
+ // Copy from p_slot + @skip to the end.
if (p_slot < mt_slots[ptype] - 2) {
cp.src_start = p_slot + 1;
- cp.dst_start += 2;
cp.src_end = p_end;
- mas_copy(mas, &cp);
+ cp.dst_start += skip;
+ printk("Copy anything larger than %lu\n", right.max);
+ _mas_copy(&parent, &cp, right.max);
}
- mas->max = c_max;
- }
-
- // calculate the split location.
- split = mas_calc_split(mas, &left, &right);
-
- r_max = mas->max;
- if (p_slot == p_end) // Right will be the end of the parent node.
- r_max = p_max;
- // the node type for the children types.
- // Node types must be set to copy data into them.
- mas_split_data(mas, left, right, split - 1, r_max);
- if (right) {
- pivot = mte_get_pivot(left, split - 1);
- if (!pivot) // dense node
- pivot = mas->min + split - 2;
- } else {
- pivot = mt_node_max(left);
- if (!p_slot || mte_node_type(left) == maple_dense)
- pivot += mas->min;
- else
- pivot += mte_get_pivot(new_parent, p_slot - 1);
- }
-
- mte_link(left, new_parent, p_slot, pivot, ptype);
- if (right) {
- if (mt_node_max(right) < r_max)
- r_max = pivot + mt_node_max(right);
- mte_link(right, new_parent, p_slot + 1, r_max, ptype);
}
-
+ // left will be placed in p_slot
+ mte_link(left.node, new_parent, p_slot, left.max, ptype);
+
+ // right (if it exists, will be placed in p_slot + 1;
+ if (right.node)
+ mte_link(right.node, new_parent, p_slot + 1,
+ right.max, ptype);
+#if 0
if (mt_is_alloc(mas->tree)) {
- if (right) {
+ if (right->node) {
unsigned long min = mas->min;
mas->node = right;
mas->min = min;
}
mas->node = left;
- mas_gap_link(mas, new_parent, p_slot, pivot);
+ mas_gap_link(mas, new_parent, p_slot, left.max);
}
-
+#endif
// Copy grand parent to the parent, including slot encoding.
mte_to_node(new_parent)->parent = mte_to_node(old_parent)->parent;
// Update encoded slots in children
mte_adopt_children(new_parent);
// Set up maple state to replace the parent node in the grandparent.
- mas->node = new_parent;
+ parent.node = new_parent;
// Replace the parent node & free the old parent.
- _mas_replace(mas, active, true);
+ _mas_replace(&parent, active, true);
// Set up the ma_state for the return. Point to the correct node for
// the insert or subsequent split.
- if (!mt_is_alloc(mas->tree))
+ if (!mt_is_alloc(mas->tree)) // FIXME: citation needed.
p_slot = slot - p_slot;
- if (mas->index <= pivot) {
- mas->node = left;
+ if (mas->index <= left.max) {
+ mas_dup_state(mas, &left);
p_slot += 1;
- mas->max = pivot;
} else {
- mas->node = right;
+ mas_dup_state(mas, &right);
p_slot += 2;
- mas->min = pivot + 1;
}
+ // If we are going to insert into the parent..?
if (mas->index > mt_node_max(mas->node) &&
mas->index > mt_node_max(mas->node) + mas->min) {
+ // FIXME: Do the insert
mas->node = new_parent;
split = p_slot;
}
return ret;
}
+
+// Set min/max for a given slot in mas->node.
+static inline void _mas_get_range(struct ma_state *mas, unsigned char slot,
+ unsigned long *min, unsigned long *max)
+{
+ *min = mas->min;
+ *max = mas_get_safe_pivot(mas, slot);
+ if (!(*max))
+ *max = mas->max;
+
+ if (slot)
+ *min = mte_get_pivot(mas->node, slot - 1) + 1;
+
+ if ((*min) == 1) // empty node.
+ *min = mas->min;
+}
+
+/* _mas_add_slot_cnt() - Calculate how many slots the node will need after
+ * storing the range mas->index - mas->last into @slot, whose current range
+ * is @min - @max.
+ *
+ * Starts from the live entry count (end - coalesce + 1) and adjusts:
+ * +1 when a slot is needed before the new range, +1 when one is needed
+ * after it, and subtracts slots the new range completely consumes.
+ * Returns the required slot count (used by the caller to decide on a
+ * split).
+ */
+static inline int _mas_add_slot_cnt(struct ma_state *mas,
+ const unsigned char slot, const unsigned long min,
+ const unsigned long max)
+{
+ unsigned char end, coalesce;
+ unsigned long last_piv;
+ void *last_entry;
+ int req_slots;
+
+ end = mas_data_end(mas, mte_node_type(mas->node), &last_piv, &coalesce);
+ req_slots = end - coalesce + 1;
+
+ printk("Start with %d, req_slots %d %lu\n", end, req_slots, last_piv);
+ if (max == mas->max && min == mas->min)
+ return req_slots; // overwriting a single entry.
+
+ if (min < mas->index) {
+ printk("min inc\n");
+ req_slots++; //slot needed at start.
+ }
+
+ if (max > mas->last) {
+ printk("max inc\n");
+ req_slots++; // slot needed at end.
+ }else {
+ // The new range spans existing slots: walk back from the end
+ // to find the last slot it covers and reclaim the rest.
+ unsigned char end_slot = end;
+ while (end_slot > slot) {
+ if (mas->last <=
+ mas_get_safe_pivot(mas, end_slot))
+ break;
+ end_slot--;
+ }
+ printk("Can go down slots?\n");
+ req_slots -= (end_slot - slot);
+ }
+
+
+ if (mte_is_dense(mas->node))
+ return req_slots;
+
+ // Find the last entry that will not coalesce away.
+ do {
+ last_entry = mte_get_rcu_slot(mas->node, end--);
+ } while (mt_will_coalesce(last_entry) && end);
+
+ // can't reuse last null.
+ if ((last_entry) && (req_slots >= mt_slot_count(mas->node) - 1))
+ return req_slots + 1;
+
+ return req_slots;
+}
/* Private
*
* Insert entry into a node.
unsigned char slot_cnt = mt_slots[this_type] - 1;
unsigned char pivot_cnt = mt_pivots[this_type];
unsigned char coalesce;
- unsigned char data_slot, slot = mas_get_slot(mas);
+ unsigned char slot = mas_get_slot(mas);
bool spans_range = false;
bool append = false;
- bool split = false;
bool null_entry = false;
- bool reuse_null_end = false;
int old_end = mas_data_end(mas, this_type, &last_piv, &coalesce);
int new_end = old_end;
int ret = 0;
+ void *contents = NULL;
if (mas->last > mas->max) {
BUG_ON(!active);
slot = old_end + 1;
/* Calculate the range of this slot */
- // Set the range max
- if (slot < pivot_cnt && mte_get_pivot(mas->node, slot) != 0)
- max = mte_get_pivot(mas->node, slot);
-
- // Set the range min.
- if (slot > 0)
- min = mte_get_pivot(mas->node, slot - 1) + 1;
+ _mas_get_range(mas, slot, &min, &max);
if (mas->last > max)
spans_range = true;
- if (!overwrite) {
- void *entry;
+ if (slot <= old_end)
+ contents = mte_get_rcu_slot(mas->node, slot);
+ if (!overwrite) {
if (spans_range) {
mas_set_err(mas, -ERANGE);
return 0;
}
-
- entry = mte_get_rcu_slot(mas->node, slot);
- if (!mt_is_empty(entry)) {
+ if (!mt_is_empty(contents)) {
mas_set_err(mas, -EBUSY);
return 0;
}
}
/* Calculate how many new pivots we will need. */
- // Possible pivot before the new range.
- data_slot = slot;
- if (mas->index > min) {
- if (!slot) // storing to slot 0 means we need the null.
- ret++;
- else if (append) // Appending will need a null.
- ret++;
- else { // a starting null is only needed if there isn't one
- // there.
- struct maple_enode *slot_val;
-
- slot_val = mte_get_rcu_slot(mas->node, slot);
- if (!mt_is_empty(slot_val))
- ret++;
- }
-
- null_entry = true;
- }
- data_slot += ret;
-
- // Possible pivot after the new range - should exists, but make sure
- // we don't overflow to the last slot which implies a pivot.
- if (mas->last < max) {
- unsigned char max_slots = old_end + ret - coalesce + 2;
-
- if (max_slots >= slot_cnt)
- ret++;
- }
-
- // If there is a null at the end already, then it can be reused.
- if (last_piv > mas->last && !mte_get_rcu_slot(mas->node, old_end))
- reuse_null_end = true;
/* Check for splitting */
- new_end += ret - coalesce + 1;
- if (new_end == slot_cnt && mas->max == mt_node_max(mas->node) &&
- !reuse_null_end)
- split = true; // Need a NULL for the maximum range.
- else if (new_end > slot_cnt)
- split = true; // There is not enough space.
-
+ new_end = _mas_add_slot_cnt(mas, slot, min, max);
+ printk("into %p old_end %u new %u %u\n", mas_mn(mas), old_end, new_end, slot);
+ if (mas->index > min)
+ null_entry = true;
- if (split) {
+ if (new_end > slot_cnt) {
/* Not enough space in the node */
- unsigned char split = mas_split(mas, slot, active);
-
+ printk("Split %u > %u, %lu %lu\n", new_end, slot_cnt, mas->min, mas->max);
+ unsigned char split = mas_split(mas, slot, active, new_end,
+ entry);
if (mas_is_err(mas))
return 0;
+ if (mte_is_leaf(mas->node))
+ return old_end - new_end;
+
if (split <= slot)
slot = slot - split;
if (!mte_is_leaf(mas->node)) {
struct maple_enode *leaf;
+ MA_STATE(leaf_state, mas->tree, mas->index, mas->last);
enum maple_type mt;
- // Old limits.
- unsigned long o_min = mas->min;
- unsigned long o_max = mas->max;
unsigned char child_slot = 0;
+ printk("not a leaf\n");
// Create a new node.
mas_node_cnt(mas, 1);
if (mas_is_err(mas))
leaf = mt_mk_node(mas_next_alloc(mas), mt);
mte_set_parent(leaf, mas->node, slot);
// Put the new range in the new leaf.
- mas->node = leaf;
- mas->min = mas->index;
- mas->max = max;
+
+ leaf_state.node = leaf;
+ leaf_state.min = mas->index;
+ leaf_state.max = max;
+ printk("leaf %lu %lu\n", mas->index, max);
if (null_entry)
child_slot = 1;
- mas_set_slot(mas, child_slot);
- _mas_add(mas, entry, overwrite, active);
+ mas_set_slot(&leaf_state, child_slot);
+ _mas_add(&leaf_state, entry, overwrite, active);
// Restore old values and continue inserting.
null_entry = false;
- mas->min = o_min;
- mas->max = o_max;
- mas->node = prev_enode;
- if (append)
- mas->index = mas->max;
entry = leaf;
if (mt == maple_dense)
mas->last = mas->index + mt_max[mt] - 1;
mte_set_pivot(mas->node, slot, mas->last);
/* Write NULL entry */
- mte_set_rcu_slot(mas->node, --slot, NULL);
+ mte_set_rcu_slot(mas->node, --slot, contents);
if (append)
wmb(); /* NULL needs to be written before pivot. */
}
slot += 2;
} else {
+ if (overwrite && slot) {
+ printk("Setting %p[%u] %lu\n", mas_mn(mas), slot - 1, mas->index-1);
+ mte_set_pivot(mas->node, slot-1, mas->index - 1);
+ }
+
/* Write the entry */
mte_set_rcu_slot(mas->node, slot, entry);
if (old_end == new_end + 1) // Append.
mas_update_gap(mas);
return ret;
-}
+ }
static inline int _mas_insert(struct ma_state *mas, void *entry,
unsigned char slot)
unsigned char this_p_slot, r_p_slot; // Parent slots (this one and right)
unsigned char all_slots; // Total slots needed for this and right node.
unsigned char l_slot_cnt, r_slot_cnt; // left and right slot counts
- unsigned char trimmed = 0; // Trimmed terminating null range from left
unsigned long l_piv, r_piv = 0; // left and right pivots
unsigned char r_end, r_coalesce; // right node end and values that can be coalesced.
unsigned char node_cnt; // Number of nodes to allocate
- unsigned long this_max = mas->max, this_min = mas->min; // ma state saves for this entry
- unsigned long r_max, r_min; // ma state saves for right.
enum maple_type new_type;
bool p_coalesce = false; // coalesce parent.
+ bool l_null_end = false, r_null_start = false;
+ MA_STATE(p_state, mas->tree, mas->index, mas->last);
+ MA_STATE(r_state, mas->tree, mas->index, mas->last);
MA_CP(cp, this_enode, NULL, 0, end);
l_slot_cnt = ma_hard_data(end, coalesce);
+ printk("l_slot_cnt %u %u %u\n", end, coalesce, l_slot_cnt);
if (mte_is_root(this_enode))
return mas_coalesce_node(mas, end, coalesce, true);
this_p_slot = mte_parent_slot(this_enode);
- mas_encoded_parent(mas);
- mas_set_slot(mas, this_p_slot + 1);
- if (!mas_next_nentry(mas, ULONG_MAX, &r_piv)) {
+ mas_dup_state(&p_state, mas); // set the parent node, etc.
+ mas_encoded_parent(&p_state); // Actually make it the parent.
+ mas_set_slot(&p_state, this_p_slot + 1);
+ if (!mas_next_nentry(&p_state, ULONG_MAX, &r_piv)) {
// this_enode is the right-most node of the parent.
- mas->node = this_enode;
- mas->max = this_max;
- mas->min = this_min;
- //mas_set_slot(mas, this_p_slot);
- mas_encoded_parent(mas);
- mas_set_slot(mas, this_p_slot);
- if (mas_prev_nentry(mas, 0, &r_piv)) {
+ // Reset parent info.
+ mas_dup_state(&p_state, mas);
+ mas_encoded_parent(&p_state);
+ mas_set_slot(&p_state, this_p_slot);
+ if (mas_prev_nentry(&p_state, 0, &r_piv)) {
// If there is a node to the left, rebalance the left
- mas->node =
- mte_get_rcu_slot(mas->node, mas_get_slot(mas));
- end = mas_data_end(mas, mte_node_type(mas->node),
- &l_piv, &coalesce);
- return mas_rebalance(mas, end, coalesce);
+ mt_dump(p_state.tree);
+ mas_descend(&p_state);
+ end = mas_data_end(&p_state,
+ mte_node_type(p_state.node), &l_piv, &coalesce);
+ printk("%p end %u\n", mas_mn(&p_state), end);
+ return mas_rebalance(&p_state, end, coalesce);
}
// No left or right, rebalance the parent.
// But first, remove this entry if it's empty.
- mas->node = this_enode;
- mas->max = this_max;
- mas->min = this_min;
- mas_set_slot(mas, this_p_slot);
if (end < coalesce) {
- enum maple_type p_type =
- mas_parent_enum(mas, this_enode);
- struct maple_enode *eparent =
- mt_mk_node(mte_parent(this_enode), p_type);
- mas_coalesce_empty(mas, eparent, this_p_slot, p_type);
+ mas_coalesce_empty(&p_state, p_state.node, this_p_slot,
+ mte_node_type(p_state.node));
}
- mas_encoded_parent(mas);
- mas_coalesce(mas);
- if (mas_is_err(mas))
+ mas_coalesce(&p_state);
+ if (mas_is_err(&p_state)) {
+ mas->node = p_state.node;
return 0;
+ }
return 1;
}
// If we reached here, then the node to the right exists.
// set the ma_state information and save a copy for this slot.
- mas->min = mas_get_safe_pivot(mas, mas_get_slot(mas) - 1); // safe as it is a right node.
- mas->max = mas_get_safe_pivot(mas, mas_get_slot(mas));
- mas->node = mte_get_rcu_slot(mas->node, mas_get_slot(mas));
- r_min = mas->min;
- r_max = mas->max;
- r_enode = mas->node;
+ mas_dup_state(&r_state, &p_state);
+ mas_descend(&r_state);
+ r_enode = r_state.node;
- r_end = mas_data_end(mas, mte_node_type(r_enode), &l_piv, &r_coalesce);
+ printk("Using right %p\n", mas_mn(&r_state));
+ r_end = mas_data_end(&r_state, mte_node_type(r_enode), &l_piv, &r_coalesce);
r_slot_cnt = ma_hard_data(r_end, r_coalesce);
+ printk("r_end %u %u\n", r_end, r_slot_cnt);
if (r_end < r_coalesce) {
+ // This node needs to be a skip entry.
all_slots = l_slot_cnt;
new_type = mte_node_type(this_enode);
- l_piv = r_max;
- mas->min = this_min;
+ l_piv = r_state.max;
goto right_empty;
}
// Add 1 for each slot 0.
all_slots = r_slot_cnt + 2 + l_slot_cnt;
-
- // check if left ends in NULL, right start in NULL..
- if ((mt_will_coalesce(mte_get_rcu_slot(this_enode, end)) ||
- !mte_get_rcu_slot(this_enode, end)) &&
- (mt_will_coalesce(mte_get_rcu_slot(mas->node, 0)) ||
- !mte_get_rcu_slot(mas->node, 0))) {
- all_slots--;
- trimmed = 1;
- }
+ printk("Total slot count %u\n", all_slots);
+ printk("Using right %p\n", mas_mn(&r_state));
node_cnt = 1;
if (all_slots > mt_slot_count(this_enode) - 1)
node_cnt++;
+ printk("Total slot count %u nodes %u\n", all_slots, node_cnt);
mas_node_cnt(mas, node_cnt);
if (mas_is_err(mas))
return 0;
- // Restore the left node (this_enode)
- mas->node = this_enode;
- mas->max = this_max;
- mas->min = this_min;
// Coalesce this_enode into a new node.
- cp.dst_end = l_slot_cnt - trimmed;
+ cp.dst_end = l_slot_cnt;
+ printk("copy from %p[%u-%u] to %p[%u-%u]\n", cp.src, cp.src_start, cp.src_end,
+ cp.dst, cp.dst_start, cp.dst_end);
mas_copy(mas, &cp); // cp.dst now has coalesced this_enode.
+ printk("dst_start = %u src_start %u\n", cp.dst_start, cp.src_start);
- // Restore the right state.
- mas->max = r_max;
- mas->min = r_min;
+ // check if left ends in NULL, right start in NULL..
+ printk("Check %p[%u] %p\n", cp.dst, cp.dst_start - 1, mte_get_rcu_slot(cp.dst, cp.dst_start - 1));
+ if (!mte_get_rcu_slot(cp.dst, cp.dst_start - 1))
+ l_null_end = true;
+
+ printk("Check %p[%u] %p\n", r_enode, 0, mte_get_rcu_slot(r_enode, 0));
+ if (mt_will_coalesce(mte_get_rcu_slot(r_enode, 0)) ||
+ !mte_get_rcu_slot(r_enode, 0))
+ r_null_start = true;
+
+ if (l_null_end && r_null_start)
+ {
+ printk("r_enode %p[0] = %p\n", mas_mn(&r_state), mte_get_rcu_slot(r_enode, 0));
+ all_slots--;
+ // Can remove a lost.
+ } else if (!l_null_end && !r_null_start) {
+ // Need an null if the pivots don't line up here.
+ mte_set_pivot(cp.dst, cp.dst_start++, r_state.min);
+ all_slots++;
+ }
+
+ // Copy from the right node.
cp.src = r_enode;
cp.src_start = 0;
cp.dst_end = mt_slot_count(cp.dst) - 1;
else
cp.dst_end = (all_slots + 1)/ 2; // Take 1/2 the entries.
- mas_copy(mas, &cp); // cp.dst is now complete, place it in the tree.
+ printk("copy from %p[%u-%u] to %p[%u-%u]\n", cp.src, cp.src_start, cp.src_end,
+ cp.dst, cp.dst_start, cp.dst_end);
+
+ mas_copy(&r_state, &cp); // cp.dst is now complete, place it in the tree.
mte_to_node(cp.dst)->parent = mte_to_node(this_enode)->parent;
new_type = mte_node_type(cp.dst);
mas->node = cp.dst;
mas_replace(mas);
l_piv = mas_get_safe_pivot(mas, cp.dst_start - 1);
ret = mt_slot_count(mas->node) - cp.dst_start + 1;
- mas_encoded_parent(mas);
- mte_set_pivot(mas->node, this_p_slot, l_piv);
+ mte_set_pivot(p_state.node, this_p_slot, l_piv);
- mas->node = r_enode;
- mas->max = r_max;
- mas->min = r_min;
right_empty:
if (all_slots <= mt_slots[new_type] - 1) {
// Right was entirely consumed.
void *entry = XA_SKIP_ENTRY;
- if (mas->max == ULONG_MAX)
+ if (r_state.max == ULONG_MAX)
entry = NULL;
ret = mt_slots[new_type] - all_slots;
if (!ret)
ret = 1;
r_p_slot = mte_parent_slot(r_enode);
- mas_encoded_parent(mas);
- mte_set_rcu_slot(mas->node, r_p_slot, entry);
+ mte_set_rcu_slot(p_state.node, r_p_slot, entry);
if (xa_is_skip(entry) &&
r_p_slot < mt_pivot_count(mas->node))
- mte_set_pivot(mas->node, r_p_slot, l_piv);
+ mte_set_pivot(p_state.node, r_p_slot, l_piv);
mte_free(r_enode);
p_coalesce = true;
goto right_done;
cp.dst = NULL;
cp.dst_start = 0;
cp.dst_end = r_end;
- mas_copy(mas, &cp); // cp.dst is coalesced remainder of r_enode.
+ mas_copy(&r_state, &cp); // cp.dst is coalesced remainder of r_enode.
mte_to_node(cp.dst)->parent = mte_to_node(r_enode)->parent;
- mas->node = cp.dst;
- r_piv = mas_get_safe_pivot(mas, cp.dst_start - 1);
+ r_state.node = cp.dst;
+ r_piv = mas_get_safe_pivot(&r_state, cp.dst_start - 1);
r_p_slot = mte_parent_slot(r_enode);
- mas_replace(mas);
- mas_encoded_parent(mas);
- if (r_p_slot < mt_pivot_count(mas->node)-1)
- mte_set_pivot(mas->node, r_p_slot, r_piv);
+ mas_replace(&r_state);
+ if (r_p_slot < mt_pivot_count(p_state.node)-1)
+ mte_set_pivot(p_state.node, r_p_slot, r_piv);
right_done:
// Set the parent slots to l_piv for all skip slots..
while (r_p_slot-- > this_p_slot)
- mte_set_pivot(mas->node, r_p_slot, l_piv);
+ mte_set_pivot(p_state.node, r_p_slot, l_piv);
/* If there is a freed node, then mas->node must point to the parent
* which contained the freed node so it can also be checked for
* coalescing.
*/
if (p_coalesce)
- mas_coalesce(mas);
+ mas_coalesce(&p_state);
return ret;
}
enum maple_type type;
struct maple_enode *next;
unsigned long pivot = 0;
- unsigned long max, min, i;
+ unsigned long max, min;
+ unsigned char i;
bool ret = false;
min = mas->min;
erase_check_erase(mt, 8);
}
-#define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, a[i],\
- a[i + 1] - 1, ptr)
+/* Store @ptr over the range a[i] - a[i + 1] - 1.  The index argument is
+ * parenthesized so expressions passed as @i index correctly.
+ * NOTE: @i is still evaluated twice - do not pass i++ or similar.
+ */
+#define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, \
+ a[(i)], a[(i + 1)] - 1, ptr)
#define STORE 1
#define ERASE 2
+void *mas_next(struct ma_state *mas, unsigned long max);
static noinline void check_erase2_testset(struct maple_tree *mt,
unsigned long *set, unsigned long size)
{
void *foo;
unsigned long addr = 0;
MA_STATE(mas, mt, 0, 0);
+ void *s_entry = NULL, *e_entry = NULL;
+ mt_set_non_kernel(60);
for (int i = 0; i < size; i += 3) {
+ unsigned long s_min, s_max;
+ unsigned long e_min, e_max;
+ MA_STATE(mas_start, mt, set[i+1], set[i+1]);
+ MA_STATE(mas_end, mt, set[i+2]-1, set[i+2]-1);
+ printk("%s: %s %lu - %lu\n", __func__,
+ set[i] == STORE ? "store" : "erase",
+ set[i+1], set[i+2]-1);
+
+ s_entry = mas_range_load(&mas_start, &s_min, &s_max);
+ e_entry = mas_range_load(&mas_end, &e_min, &e_max);
+ printk("start %lu-%lu => %p\n", s_min, s_max, s_entry);
+ printk("end %lu-%lu => %p\n", e_min, e_max, e_entry);
+
switch(set[i]) {
case STORE:
- if (!mtree_test_insert_range(mt, set[i + 1],
- set[i + 2] - 1,
- xa_mk_value(set[i+1])))
- entry_cnt++;
- else
- erase_check_store_range(mt, set, i + 1,
- xa_mk_value(set[i+1]));
+ printk("entry_cnt = %d\n", entry_cnt);
+ if ((s_min == e_min) && (s_max == e_max)) {
+ if (!entry_cnt)
+ entry_cnt++;
+ else if (!mt_is_empty(s_entry)) {
+ if (e_max > mas_end.last)
+ entry_cnt++;
+ else if (s_min < mas_start.index)
+ entry_cnt++;
+ } else {
+ entry_cnt++;
+ }
+ } else {
+ unsigned char cnt = 0;
+ printk("%lu and %lu\n", s_min, mas_start.index);
+ // check start
+ if (s_entry && (s_min <= mas_start.index))
+ cnt++;
+
+ printk("%lu and %lu\n", e_max, mas_end.last);
+ // check end
+ if (e_entry && (e_max >= mas_end.last))
+ cnt++;
+ // count slots
+ while (!mas_is_none(&mas_start) &&
+ (e_entry != s_entry))
+ {
+ if (s_entry) {
+ printk("inc\n");
+ cnt++;
+ }
+ s_entry = mas_next(&mas_start,
+ set[i+2] - 1);
+ }
+ printk("decrement by %d\n", cnt - 1);
+ entry_cnt = entry_cnt - cnt + 1;
+ }
+
+ erase_check_store_range(mt, set, i + 1,
+ xa_mk_value(set[i+1]));
+ printk("entry_cnt = %d\n", entry_cnt);
break;
case ERASE:
check_erase(mt, set[i+1], xa_mk_value(set[i+1]));
check++;
if (check > entry_cnt)
break;
+ printk("%lu\n", addr - 1);
}
+ printk("mt_for_each %d == %d\n", check, entry_cnt);
MT_BUG_ON(mt, check != entry_cnt);
check = 0;
if (check > entry_cnt)
break;
}
+ printk("Check = %d\n", check);
+ mt_dump(mas.tree);
MT_BUG_ON(mt, check != entry_cnt);
}
STORE, 47430432014336, 47430432018432,
STORE, 47430432018432, 47430432022528,
};
+ unsigned long set7[] = {
+STORE, 140737488347136, 140737488351232,
+STORE, 140729808330752, 140737488351232,
+ERASE, 140729808330752, 140737488351232,
+STORE, 140729808330752, 140729808334848,
+STORE, 94629632020480, 94629632192512,
+ERASE, 94629632020480, 94629632192512,
+STORE, 94629632020480, 94629632036864,
+STORE, 94629632036864, 94629632192512,
+ERASE, 94629632036864, 94629632192512,
+STORE, 94629632036864, 94629632139264,
+STORE, 94629632139264, 94629632180224,
+STORE, 94629632180224, 94629632192512,
+STORE, 47439981776896, 47439981948928,
+ERASE, 47439981776896, 47439981948928,
+STORE, 47439981776896, 47439981780992,
+STORE, 47439981780992, 47439981948928,
+ERASE, 47439981780992, 47439981948928,
+STORE, 47439981780992, 47439981903872,
+STORE, 47439981903872, 47439981936640,
+STORE, 47439981936640, 47439981944832,
+STORE, 47439981944832, 47439981948928,
+STORE, 140729808474112, 140729808478208,
+STORE, 140729808461824, 140729808474112,
+STORE, 47439981948928, 47439981957120,
+STORE, 47439981957120, 47439981965312,
+STORE, 47439981965312, 47439982129152,
+ERASE, 47439981965312, 47439982129152,
+STORE, 47439981965312, 47439981977600,
+STORE, 47439981977600, 47439982129152,
+STORE, 47439982075904, 47439982129152,
+STORE, 47439981977600, 47439982075904,
+ERASE, 47439981977600, 47439982075904,
+STORE, 47439981977600, 47439982075904,
+STORE, 47439982120960, 47439982129152,
+STORE, 47439982075904, 47439982120960,
+ERASE, 47439982075904, 47439982120960,
+STORE, 47439982075904, 47439982129152,
+ERASE, 47439982075904, 47439982129152,
+STORE, 47439982075904, 47439982120960,
+STORE, 47439982120960, 47439982129152,
+ERASE, 47439982120960, 47439982129152,
+STORE, 47439982120960, 47439982129152,
+STORE, 47439982129152, 47439985180672,
+STORE, 47439982673920, 47439985180672,
+STORE, 47439982129152, 47439982673920,
+ERASE, 47439982673920, 47439985180672,
+STORE, 47439982673920, 47439984959488,
+STORE, 47439984959488, 47439985180672,
+STORE, 47439984369664, 47439984959488,
+STORE, 47439982673920, 47439984369664,
+ERASE, 47439982673920, 47439984369664,
+STORE, 47439982673920, 47439984369664,
+STORE, 47439984955392, 47439984959488,
+STORE, 47439984369664, 47439984955392,
+ERASE, 47439984369664, 47439984955392,
+STORE, 47439984369664, 47439984955392,
+STORE, 47439985164288, 47439985180672,
+STORE, 47439984959488, 47439985164288,
+ERASE, 47439984959488, 47439985164288,
+STORE, 47439984959488, 47439985164288,
+ERASE, 47439985164288, 47439985180672,
+STORE, 47439985164288, 47439985180672,
+STORE, 47439985180672, 47439987019776,
+STORE, 47439985319936, 47439987019776,
+STORE, 47439985180672, 47439985319936,
+ERASE, 47439985319936, 47439987019776,
+STORE, 47439985319936, 47439986978816,
+STORE, 47439986978816, 47439987019776,
+STORE, 47439986663424, 47439986978816,
+STORE, 47439985319936, 47439986663424,
+ERASE, 47439985319936, 47439986663424,
+STORE, 47439985319936, 47439986663424,
+STORE, 47439986974720, 47439986978816,
+STORE, 47439986663424, 47439986974720,
+ERASE, 47439986663424, 47439986974720,
+STORE, 47439986663424, 47439986974720,
+STORE, 47439987003392, 47439987019776,
+STORE, 47439986978816, 47439987003392,
+ERASE, 47439986978816, 47439987003392,
+STORE, 47439986978816, 47439987003392,
+ERASE, 47439987003392, 47439987019776,
+STORE, 47439987003392, 47439987019776,
+STORE, 47439987019776, 47439987154944,
+ERASE, 47439987019776, 47439987154944,
+STORE, 47439987019776, 47439987044352,
+STORE, 47439987044352, 47439987154944,
+STORE, 47439987105792, 47439987154944,
+STORE, 47439987044352, 47439987105792,
+ERASE, 47439987044352, 47439987105792,
+STORE, 47439987044352, 47439987105792,
+STORE, 47439987130368, 47439987154944,
+STORE, 47439987105792, 47439987130368,
+ERASE, 47439987105792, 47439987130368,
+STORE, 47439987105792, 47439987154944,
+ERASE, 47439987105792, 47439987154944,
+STORE, 47439987105792, 47439987130368,
+STORE, 47439987130368, 47439987154944,
+STORE, 47439987138560, 47439987154944,
+STORE, 47439987130368, 47439987138560,
+ERASE, 47439987130368, 47439987138560,
+STORE, 47439987130368, 47439987138560,
+ERASE, 47439987138560, 47439987154944,
+STORE, 47439987138560, 47439987154944,
+STORE, 47439987154944, 47439987175424,
+ERASE, 47439987154944, 47439987175424,
+STORE, 47439987154944, 47439987159040,
+STORE, 47439987159040, 47439987175424,
+STORE, 47439987163136, 47439987175424,
+STORE, 47439987159040, 47439987163136,
+ERASE, 47439987159040, 47439987163136,
+STORE, 47439987159040, 47439987163136,
+STORE, 47439987167232, 47439987175424,
+STORE, 47439987163136, 47439987167232,
+ERASE, 47439987163136, 47439987167232,
+STORE, 47439987163136, 47439987175424,
+ERASE, 47439987163136, 47439987175424,
+STORE, 47439987163136, 47439987167232,
+STORE, 47439987167232, 47439987175424,
+ERASE, 47439987167232, 47439987175424,
+STORE, 47439987167232, 47439987175424,
+STORE, 47439987175424, 47439987183616,
+ERASE, 47439986978816, 47439987003392,
+STORE, 47439986978816, 47439986995200,
+STORE, 47439986995200, 47439987003392,
+ERASE, 47439987167232, 47439987175424,
+STORE, 47439987167232, 47439987171328,
+STORE, 47439987171328, 47439987175424,
+ERASE, 47439987130368, 47439987138560,
+STORE, 47439987130368, 47439987134464,
+STORE, 47439987134464, 47439987138560,
+ };
+ unsigned long set8[] = {
+STORE, 140737488347136, 140737488351232,
+STORE, 140722482974720, 140737488351232,
+ERASE, 140722482974720, 140737488351232,
+STORE, 140722482974720, 140722482978816,
+STORE, 94121505034240, 94121505206272,
+ERASE, 94121505034240, 94121505206272,
+STORE, 94121505034240, 94121505050624,
+STORE, 94121505050624, 94121505206272,
+ERASE, 94121505050624, 94121505206272,
+STORE, 94121505050624, 94121505153024,
+STORE, 94121505153024, 94121505193984,
+STORE, 94121505193984, 94121505206272,
+STORE, 47708483284992, 47708483457024,
+ERASE, 47708483284992, 47708483457024,
+STORE, 47708483284992, 47708483289088,
+STORE, 47708483289088, 47708483457024,
+ERASE, 47708483289088, 47708483457024,
+STORE, 47708483289088, 47708483411968,
+STORE, 47708483411968, 47708483444736,
+STORE, 47708483444736, 47708483452928,
+STORE, 47708483452928, 47708483457024,
+STORE, 140722483142656, 140722483146752,
+STORE, 140722483130368, 140722483142656,
+STORE, 47708483457024, 47708483465216,
+STORE, 47708483465216, 47708483473408,
+STORE, 47708483473408, 47708483637248,
+ERASE, 47708483473408, 47708483637248,
+STORE, 47708483473408, 47708483485696,
+STORE, 47708483485696, 47708483637248,
+STORE, 47708483584000, 47708483637248,
+STORE, 47708483485696, 47708483584000,
+ERASE, 47708483485696, 47708483584000,
+STORE, 47708483485696, 47708483584000,
+STORE, 47708483629056, 47708483637248,
+STORE, 47708483584000, 47708483629056,
+ERASE, 47708483584000, 47708483629056,
+STORE, 47708483584000, 47708483637248,
+ERASE, 47708483584000, 47708483637248,
+STORE, 47708483584000, 47708483629056,
+STORE, 47708483629056, 47708483637248,
+ERASE, 47708483629056, 47708483637248,
+STORE, 47708483629056, 47708483637248,
+STORE, 47708483637248, 47708486688768,
+STORE, 47708484182016, 47708486688768,
+STORE, 47708483637248, 47708484182016,
+ERASE, 47708484182016, 47708486688768,
+STORE, 47708484182016, 47708486467584,
+STORE, 47708486467584, 47708486688768,
+STORE, 47708485877760, 47708486467584,
+STORE, 47708484182016, 47708485877760,
+ERASE, 47708484182016, 47708485877760,
+STORE, 47708484182016, 47708485877760,
+STORE, 47708486463488, 47708486467584,
+STORE, 47708485877760, 47708486463488,
+ERASE, 47708485877760, 47708486463488,
+STORE, 47708485877760, 47708486463488,
+STORE, 47708486672384, 47708486688768,
+STORE, 47708486467584, 47708486672384,
+ERASE, 47708486467584, 47708486672384,
+STORE, 47708486467584, 47708486672384,
+ERASE, 47708486672384, 47708486688768,
+STORE, 47708486672384, 47708486688768,
+STORE, 47708486688768, 47708488527872,
+STORE, 47708486828032, 47708488527872,
+STORE, 47708486688768, 47708486828032,
+ERASE, 47708486828032, 47708488527872,
+STORE, 47708486828032, 47708488486912,
+STORE, 47708488486912, 47708488527872,
+STORE, 47708488171520, 47708488486912,
+STORE, 47708486828032, 47708488171520,
+ERASE, 47708486828032, 47708488171520,
+STORE, 47708486828032, 47708488171520,
+STORE, 47708488482816, 47708488486912,
+STORE, 47708488171520, 47708488482816,
+ERASE, 47708488171520, 47708488482816,
+STORE, 47708488171520, 47708488482816,
+STORE, 47708488511488, 47708488527872,
+STORE, 47708488486912, 47708488511488,
+ERASE, 47708488486912, 47708488511488,
+STORE, 47708488486912, 47708488511488,
+ERASE, 47708488511488, 47708488527872,
+STORE, 47708488511488, 47708488527872,
+STORE, 47708488527872, 47708488663040,
+ERASE, 47708488527872, 47708488663040,
+STORE, 47708488527872, 47708488552448,
+STORE, 47708488552448, 47708488663040,
+STORE, 47708488613888, 47708488663040,
+STORE, 47708488552448, 47708488613888,
+ERASE, 47708488552448, 47708488613888,
+STORE, 47708488552448, 47708488613888,
+STORE, 47708488638464, 47708488663040,
+STORE, 47708488613888, 47708488638464,
+ERASE, 47708488613888, 47708488638464,
+STORE, 47708488613888, 47708488663040,
+ERASE, 47708488613888, 47708488663040,
+STORE, 47708488613888, 47708488638464,
+STORE, 47708488638464, 47708488663040,
+STORE, 47708488646656, 47708488663040,
+STORE, 47708488638464, 47708488646656,
+ERASE, 47708488638464, 47708488646656,
+STORE, 47708488638464, 47708488646656,
+ERASE, 47708488646656, 47708488663040,
+STORE, 47708488646656, 47708488663040,
+STORE, 47708488663040, 47708488683520,
+ERASE, 47708488663040, 47708488683520,
+STORE, 47708488663040, 47708488667136,
+STORE, 47708488667136, 47708488683520,
+STORE, 47708488671232, 47708488683520,
+STORE, 47708488667136, 47708488671232,
+ERASE, 47708488667136, 47708488671232,
+STORE, 47708488667136, 47708488671232,
+STORE, 47708488675328, 47708488683520,
+STORE, 47708488671232, 47708488675328,
+ERASE, 47708488671232, 47708488675328,
+STORE, 47708488671232, 47708488683520,
+ERASE, 47708488671232, 47708488683520,
+STORE, 47708488671232, 47708488675328,
+STORE, 47708488675328, 47708488683520,
+ERASE, 47708488675328, 47708488683520,
+STORE, 47708488675328, 47708488683520,
+STORE, 47708488683520, 47708488691712,
+ERASE, 47708488486912, 47708488511488,
+STORE, 47708488486912, 47708488503296,
+STORE, 47708488503296, 47708488511488,
+ERASE, 47708488675328, 47708488683520,
+STORE, 47708488675328, 47708488679424,
+STORE, 47708488679424, 47708488683520,
+ERASE, 47708488638464, 47708488646656,
+STORE, 47708488638464, 47708488642560,
+STORE, 47708488642560, 47708488646656,
+ };
mt_set_non_kernel(3);
check_erase2_testset(mt, set, ARRAY_SIZE(set));
check_erase2_testset(mt, set6, ARRAY_SIZE(set6));
mtree_destroy(mt);
+ mtree_init(mt, 0);
+ check_erase2_testset(mt, set7, ARRAY_SIZE(set7));
+ mtree_destroy(mt);
+
+ mtree_init(mt, 0);
+ check_erase2_testset(mt, set8, ARRAY_SIZE(set8));
+ mt_dump(mt);
+ mtree_destroy(mt);
+
rcu_read_unlock();
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)