mas_set_alloc_req(mas, slot);
}
-static inline unsigned long _mte_get_pivot(const struct maple_enode *mn,
+static inline unsigned long ma_get_pivot(const struct maple_node *mn,
unsigned char slot, enum maple_type type)
{
switch (type) {
case maple_arange_64:
- return mte_to_node(mn)->ma64.pivot[slot];
+ return mn->ma64.pivot[slot];
case maple_range_64:
case maple_leaf_64:
- return mte_to_node(mn)->mr64.pivot[slot];
+ return mn->mr64.pivot[slot];
case maple_sparse_6:
- return mte_to_node(mn)->ms6.pivot;
+ return mn->ms6.pivot;
case maple_sparse_9:
- return mte_to_node(mn)->ms9.pivot[slot];
+ return mn->ms9.pivot[slot];
case maple_sparse_16:
- return mte_to_node(mn)->ms16.pivot[slot];
+ return mn->ms16.pivot[slot];
case maple_sparse_21:
- return mte_to_node(mn)->ms21.pivot[slot];
+ return mn->ms21.pivot[slot];
case maple_sparse_32:
- return mte_to_node(mn)->ms32.pivot[slot];
+ return mn->ms32.pivot[slot];
case maple_sparse_64:
- return mte_to_node(mn)->ms64.pivot[slot];
+ return mn->ms64.pivot[slot];
case maple_range_16:
case maple_leaf_16:
- return mte_to_node(mn)->mr16.pivot[slot];
+ return mn->mr16.pivot[slot];
case maple_range_32:
case maple_leaf_32:
- return mte_to_node(mn)->mr32.pivot[slot];
+ return mn->mr32.pivot[slot];
case maple_dense:
default:
return 0;
}
}
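+/*
+ * The ma_*() accessors below take a decoded maple_node and an explicit node
+ * type so callers that already have both can avoid re-decoding the enode.
+ * The mte_*() variants remain as thin wrappers that decode and forward.
+ */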
+static inline unsigned long _mte_get_pivot(const struct maple_enode *mn,
+ unsigned char slot, enum maple_type type)
+{
+ return ma_get_pivot(mte_to_node(mn), slot, type);
+}
static inline unsigned long mte_get_pivot(const struct maple_enode *mn,
unsigned char slot)
{
	return _mte_get_pivot(mn, slot, mte_node_type(mn));
}
-static inline void mte_set_pivot(struct maple_enode *mn, unsigned char slot,
- unsigned long val)
+static inline void ma_set_pivot(struct maple_node *mn, unsigned char slot,
+ enum maple_type type, unsigned long val)
{
- enum maple_type type = mte_node_type(mn);
-
switch (type) {
default:
case maple_range_64:
case maple_leaf_64:
- (&mte_to_node(mn)->mr64)->pivot[slot] = val;
+ (&mn->mr64)->pivot[slot] = val;
break;
case maple_arange_64:
- (&mte_to_node(mn)->ma64)->pivot[slot] = val;
+ (&mn->ma64)->pivot[slot] = val;
case maple_dense:
break;
case maple_sparse_6:
- (&mte_to_node(mn)->ms6)->pivot = val;
+ (&mn->ms6)->pivot = val;
break;
case maple_sparse_9:
- (&mte_to_node(mn)->ms9)->pivot[slot] = val;
+ (&mn->ms9)->pivot[slot] = val;
break;
case maple_sparse_16:
- (&mte_to_node(mn)->ms16)->pivot[slot] = val;
+ (&mn->ms16)->pivot[slot] = val;
break;
case maple_sparse_21:
- (&mte_to_node(mn)->ms21)->pivot[slot] = val;
+ (&mn->ms21)->pivot[slot] = val;
break;
case maple_sparse_32:
- (&mte_to_node(mn)->ms32)->pivot[slot] = val;
+ (&mn->ms32)->pivot[slot] = val;
break;
case maple_sparse_64:
- (&mte_to_node(mn)->ms64)->pivot[slot] = val;
+ (&mn->ms64)->pivot[slot] = val;
break;
case maple_range_16:
case maple_leaf_16:
- (&mte_to_node(mn)->mr16)->pivot[slot] = val;
+ (&mn->mr16)->pivot[slot] = val;
break;
case maple_range_32:
case maple_leaf_32:
- (&mte_to_node(mn)->mr32)->pivot[slot] = val;
+ (&mn->mr32)->pivot[slot] = val;
break;
}
}
+static inline void mte_set_pivot(struct maple_enode *mn, unsigned char slot,
+ unsigned long val)
+{
+ return ma_set_pivot(mte_to_node(mn), slot, mte_node_type(mn), val);
+}
static inline void mas_set_safe_pivot(struct ma_state *mas, unsigned char slot,
unsigned long val)
{
{
return _mte_get_rcu_slot(mn, slot, mte_node_type(mn));
}
-static inline void mte_set_rcu_slot(const struct maple_enode *mn,
- unsigned char slot, void *val)
+static inline void ma_set_rcu_slot(struct maple_node *mn,
+ unsigned char slot, enum maple_type type, void *val)
{
- enum maple_type type = mte_node_type(mn);
switch (type) {
default:
case maple_dense:
- RCU_INIT_POINTER(mte_to_node(mn)->slot[slot], val);
+ RCU_INIT_POINTER(mn->slot[slot], val);
break;
case maple_sparse_6:
- RCU_INIT_POINTER(mte_to_node(mn)->ms6.slot[slot], val);
+ RCU_INIT_POINTER(mn->ms6.slot[slot], val);
break;
case maple_sparse_9:
- RCU_INIT_POINTER(mte_to_node(mn)->ms9.slot[slot], val);
+ RCU_INIT_POINTER(mn->ms9.slot[slot], val);
break;
case maple_sparse_16:
- RCU_INIT_POINTER(mte_to_node(mn)->ms16.slot[slot], val);
+ RCU_INIT_POINTER(mn->ms16.slot[slot], val);
break;
case maple_sparse_21:
- RCU_INIT_POINTER(mte_to_node(mn)->ms21.slot[slot], val);
+ RCU_INIT_POINTER(mn->ms21.slot[slot], val);
break;
case maple_sparse_32:
- RCU_INIT_POINTER(mte_to_node(mn)->ms32.slot[slot], val);
+ RCU_INIT_POINTER(mn->ms32.slot[slot], val);
break;
case maple_sparse_64:
- RCU_INIT_POINTER(mte_to_node(mn)->ms64.slot[slot], val);
+ RCU_INIT_POINTER(mn->ms64.slot[slot], val);
break;
case maple_range_16:
case maple_leaf_16:
- RCU_INIT_POINTER(mte_to_node(mn)->mr16.slot[slot], val);
+ RCU_INIT_POINTER(mn->mr16.slot[slot], val);
break;
case maple_range_32:
case maple_leaf_32:
- RCU_INIT_POINTER(mte_to_node(mn)->mr32.slot[slot], val);
+ RCU_INIT_POINTER(mn->mr32.slot[slot], val);
break;
case maple_range_64:
case maple_leaf_64:
- RCU_INIT_POINTER(mte_to_node(mn)->mr64.slot[slot], val);
+ RCU_INIT_POINTER(mn->mr64.slot[slot], val);
break;
case maple_arange_64:
- RCU_INIT_POINTER(mte_to_node(mn)->ma64.slot[slot], val);
+ RCU_INIT_POINTER(mn->ma64.slot[slot], val);
break;
}
}
+static inline void mte_set_rcu_slot(const struct maple_enode *mn,
+ unsigned char slot, void *val)
+{
+ ma_set_rcu_slot(mte_to_node(mn), slot, mte_node_type(mn), val);
+}
static inline void mas_dup_state(struct ma_state *dst, struct ma_state *src)
{
dst->index = src->index;
dst->last = src->last;
dst->node = src->node;
+ dst->max = src->max;
+ dst->min = src->min;
mas_set_slot(dst, mas_get_slot(src));
}
static inline void mas_descend(struct ma_state *mas)
}
}
-static inline unsigned long _ma_get_gap(const struct maple_node *mn,
+static inline unsigned long ma_get_gap(const struct maple_node *mn,
unsigned char gap, enum maple_type type)
{
switch (type) {
static inline unsigned long mte_get_gap(const struct maple_enode *mn,
unsigned char gap)
{
- return _ma_get_gap(mte_to_node(mn), gap, mte_node_type(mn));
+ return ma_get_gap(mte_to_node(mn), gap, mte_node_type(mn));
}
-static inline void mte_set_gap(const struct maple_enode *mn,
- unsigned char gap, unsigned long val)
-{
- enum maple_type type = mte_node_type(mn);
+static inline void ma_set_gap(struct maple_node *mn, unsigned char gap,
+ enum maple_type type, unsigned long val)
+{
switch (type) {
default:
break;
case maple_arange_64:
- mte_to_node(mn)->ma64.gap[gap] = val;
+ mn->ma64.gap[gap] = val;
break;
}
}
+static inline void mte_set_gap(const struct maple_enode *mn,
+ unsigned char gap, unsigned long val)
+{
+ ma_set_gap(mte_to_node(mn), gap, mte_node_type(mn), val);
+}
static inline void mte_cp_gap(struct maple_enode *dst,
unsigned char dloc, struct maple_enode *src, unsigned long sloc)
{
smn = node->slot[(cnt/15) - 1];
smn->slot[cnt % 15] = reuse;
}
+ BUG_ON(!mas_get_alloc_cnt(mas));
}
static inline void mas_node_node(struct ma_state *ms, gfp_t gfp)
{
mas_set_alloc_req(ms, req);
list_failed:
- if (req > 0)
+ if (req > 0) {
mas_set_err(ms, -ENOMEM);
+ }
}
// Free the allocations.
struct maple_enode *mn = mas->node;
unsigned long piv = mas->min, prev_piv = mas->min;
unsigned char slot;
- bool counted_null = false;
+ unsigned char counted_null = 0;
*coalesce = 0;
for (slot = 0; slot < mt_slot_count(mn); slot++) {
piv = _mas_get_safe_pivot(mas, slot, type);
if (!piv && slot) { // Past the end of data.
slot--;
- *last_piv = prev_piv;
- return slot;
+ piv = prev_piv;
+ // At this point, we are saying the last slot is the
+ // end.
+			if (counted_null) { // if we ended in a run of NULLs
+ if (slot <= counted_null) {
+ slot = 0;
+ (*coalesce) = 0;
+ break;
+ }
+ printk("data end with counted nulls %p\n", mas_mn(mas));
+				printk("slot %u coalesce %u nulls %u\n",
+						slot, *coalesce, counted_null);
+ mt_dump(mas->tree);
+ (*coalesce) = (*coalesce) - counted_null + 1;
+ slot -= counted_null;
+ }
+ break;
}
entry = _mte_get_rcu_slot(mn, slot, type);
if (counted_null) {
(*coalesce)++;
}
- counted_null = true;
+ counted_null++;
} else {
- counted_null = false;
+ counted_null = 0;
}
if (piv == mas->max)
return end - coalesce;
}
-#define mas_do_split mas_no_dense_split
+#define mas_do_split mas_ma_cp
+
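+/*
+ * mas_switch_nodes() - finish the left node at @piv and make @right (starting
+ * at @piv + 1) the current copy target tracked by @cp.  Returns the right
+ * maple_node.
+ */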
+static inline struct maple_node *mas_switch_nodes(struct ma_state *cp,
+ struct ma_state *left, struct ma_state *right,
+ unsigned long piv)
+{
+ left->max = piv;
+ printk("going to use %lu as max\n", left->max);
+ right->min = piv + 1;
+ mas_dup_state(cp, right);
+ return mas_mn(right);
+}
+
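+/*
+ * mas_ma_cp_store() - store @entry and then @piv into @mn at @slot.  When
+ * @flush is set (appending to a live node) a write barrier orders the slot
+ * write before the pivot so readers never see a pivot without its data.
+ */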
+static inline void mas_ma_cp_store(struct maple_node *mn,
+ enum maple_type type, unsigned char slot,
+ unsigned long piv, void *entry, bool flush)
+{
+ printk("set %p[%u] -> %p\n", mn, slot, entry);
+ ma_set_rcu_slot(mn, slot, type, entry);
+ if (flush)
+ wmb(); // data needs to exist before pivot for readers
+
+ if (slot < mt_pivots[type])
+ ma_set_pivot(mn, slot, type, piv);
+}
+
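+/*
+ * mas_cp_calc_split() - pick the slot to split a node at during a copy.
+ * Non-leaf nodes split in the middle; leaves covering eight or fewer indices
+ * split at the last slot, otherwise the split lands on the first slot that
+ * spans at least eight indices from mas->min (but no earlier than half way).
+ */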
+static inline unsigned char mas_cp_calc_split(struct ma_state *mas,
+ enum maple_type mas_type)
+{
+ unsigned char ret = 7;
+ unsigned char slot;
+ unsigned long range = mas->max - mas->min;
+ unsigned long half;
+ if (mas_type == maple_arange_64) {
+ ret = 3;
+ printk("arange split\n");
+ }
+ half = ret / 2;
+ if (ma_is_leaf(mas_type)) {
+ if (range <= 8)
+ return ret;
+
+ for (slot = 0; slot <= mt_pivots[mas_type]; slot++) {
+ unsigned long piv = mas_get_safe_pivot(mas, slot);
+
+ if (!piv)
+ return ret;
+
+ range = piv - mas->min;
+ if (range >= 8)
+ return slot > half ? slot : half;
+ }
+ }
+ return half;
+}
+static inline unsigned long mas_leaf_max_gap(struct ma_state *mas);
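+/*
+ * mas_ma_update_gap() - track the largest gap seen while copying into @mn.
+ * Non-leaf slots carry their gap over from the source node; leaf slots only
+ * count as a gap when the entry is empty.  The running maximum is kept in
+ * @max_gap.
+ */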
+static inline void mas_ma_update_gap(struct ma_state *mas,
+ enum maple_type mas_type, struct maple_node *mn,
+ enum maple_type mn_type, unsigned long piv,
+ unsigned long written_piv, unsigned char src_slot,
+ unsigned char slot, unsigned long *max_gap, void *loop_entry)
+{
+ unsigned long gap = piv - written_piv;
+ printk("%s\n", __func__);
+
+ if (!ma_is_leaf(mn_type)) {// non-leaf have gaps already.
+ gap = ma_get_gap(mas_mn(mas), src_slot, mas_type);
+ ma_set_gap(mn, slot, mn_type, gap);
+ } else if (loop_entry) // zero gap in leaf.
+ return;
+
+ if (gap > (*max_gap))
+ (*max_gap) = gap;
+ printk("\tCalc gap %lu max %lu\n", gap, *max_gap);
+}
/*
* Private
*
+ * mas_ma_cp() - copy mas->node to mn (or left->node and right->node for
+ * splits).
+ *
* Trying to calculate a split is a rather difficult task. One has to
* remember:
* 1. Keep gaps at the end of a node
* 3. Use the target slot and the new count to better get a 50/50 split.
*
*/
-static inline unsigned char mas_no_dense_split(struct ma_state *mas,
- struct ma_state *left, struct ma_state *right,
- unsigned char entry_cnt, void *entry)
+static inline unsigned char mas_ma_cp(struct ma_state *mas,
+ unsigned char p_slot, struct ma_state *left,
+ struct ma_state *right, struct maple_node *mn,
+ enum maple_type mn_type, int entry_cnt, void *entry,
+ bool append)
{
enum maple_type mas_type = mte_node_type(mas->node);
- unsigned char ret = 0, half = mt_slots[mas_type] / 2;
+ struct maple_node *parent = mte_to_node(mas->node);
+ unsigned char ret = 0;
+ unsigned char split = mt_slots[mas_type] / 2;
unsigned char src_slot = 0, slot = 0;
- unsigned long piv = 0, prev_piv = mas->min, written_piv = mas->min;
+ unsigned long prev_piv = mas->min, written_piv = mas->min - 1;
+ unsigned long max_gap = 0;
bool attempt_insert = false;
bool appending = false;
+ void *existing = NULL;
+ bool null_run = false;
+ bool update_gaps = mt_is_alloc(mas->tree);
+ unsigned long piv[3] = {
+ mas->index - 1,
+ mas->last,
+ mas->min,
+ };
MA_STATE(cp, mas->tree, mas->index, mas->last);
- left->node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), mas_type);
- right->node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), mas_type);
-
if (ma_is_leaf(mas_type))
attempt_insert = true;
- mas_dup_state(&cp, left);
+
+ split = mas_cp_calc_split(mas, mas_type);
+ printk("Using split of %d\n", split);
+
+ if (left) {
+ mas_dup_state(&cp, left);
+ }
+
+ if (!mn) {
+ mn = mas_mn(&cp);
+ mn_type = mte_node_type(cp.node);
+ p_slot = mte_parent_slot(left->node);
+ parent = mte_parent(left->node);
+ mas_type = mas_parent_enum(mas, left->node);
+ printk("parent is %p\n", parent);
+ }
+
+ if (!entry_cnt)
+ piv[2] = mas->max;
+
+ if (append) { // start at a given slot, so append there.
+ printk("append\n");
+ //mt_dump(mas->tree);
+ mas_dup_state(&cp, mas);
+ src_slot = entry_cnt;
+ existing = mte_get_rcu_slot(mas->node, src_slot);
+ written_piv = mas_get_safe_pivot(mas, src_slot);
+ if (!written_piv) // empty node.
+ written_piv = mas->min;
+
+ printk("src_slot %u written %lu\n", src_slot, written_piv);
+ slot = entry_cnt;
+ if (existing) {
+ slot++;
+ src_slot++;
+ printk("there is an existing\n");
+ }
+
+ entry_cnt = 0;
+ appending = true;
+ prev_piv = written_piv;
+ attempt_insert = true;
+ if (mt_is_alloc(mas->tree))
+ max_gap = mas_leaf_max_gap(mas);
+ }
+
do {
- void *existing = NULL;
+ int i = 2; // Default to just writing the pivot.
+ int j = 3; // Loop limit.
+ printk("src slot %u entry_cnt %d slot %u\n", src_slot, entry_cnt, slot);
- if (entry_cnt <= 0) {
+ if (entry_cnt < 0) {
+ printk("Setting to appending mode\n");
appending = true;
existing = NULL;
+ piv[2] = mas->max;
} else {
- piv = mas_get_safe_pivot(mas, src_slot);
+ piv[2] = mas_get_safe_pivot(mas, src_slot);
+ if (!piv[2] && src_slot)
+ piv[2] = mas->max;
existing = mte_get_rcu_slot(mas->node, src_slot);
+ printk("getting %u (%lu->%p)\n", src_slot, piv[2],
+ existing);
}
+
if (!appending) {
if (mt_will_coalesce(existing)) {
+ printk("coalesce\n");
goto skip_src_slot;
}
- if (prev_piv > piv) {// insert overwrote this pivot.
+ if (src_slot && piv[2] == prev_piv) {
goto skip_src_slot;
}
- if (src_slot && piv == prev_piv) {
+ if (src_slot && written_piv >= piv[2]) // overwritten
goto skip_src_slot;
- }
}
- if (attempt_insert && (cp.index <= piv)) {
- if (written_piv != cp.index - 1) {
- // Insert a previous entry..
- mte_set_rcu_slot(cp.node, slot, existing);
- mte_set_pivot(cp.node, slot, cp.index - 1);
- slot++;
+ if (append || (attempt_insert && (cp.index <= piv[2]))) {
+ i = 0;
+ printk("Written piv %lu\n", written_piv);
+ if (written_piv == cp.index - 1)
+ i = 1; // previous pivot matches exactly.
+ else if (!src_slot && written_piv == cp.index) {
+ i = 1; // mas->min matches exactly.
}
- mte_set_rcu_slot(cp.node, slot, entry);
- mte_set_pivot(cp.node, slot, cp.last);
- slot++;
- if (!appending && (piv > cp.last)) {
- mte_set_rcu_slot(cp.node, slot, existing);
- mte_set_pivot(cp.node, slot, piv);
- } else
- piv = cp.last;
-
+ printk("piv2 %lu <= last %lu\n", piv[2], cp.last);
+ if (appending)
+ j--;
+ else if (!appending && (piv[2] <= cp.last))
+ j--; // skip last pivot.
+ printk("piv array %u-%u\n", i, j);
attempt_insert = false;
- } else {
- mte_set_pivot(cp.node, slot, piv);
- if (mas_type == maple_arange_64)
- mte_cp_gap(cp.node, slot, mas->node, src_slot);
- mte_set_rcu_slot(cp.node, slot, existing);
}
- // next
- if ((slot >= half) && (cp.node != right->node)) {
- ret = slot;
- left->max = piv;
- right->min = piv + 1;
- mas_dup_state(&cp, right);
- slot = 0;
- } else {
+
+ for (; i < j; i++) {
+ void *loop_entry = existing;
+ bool wmb = false;
+
+			if (!loop_entry && !ma_is_leaf(mn_type))
+				printk("BUG: NULL entry in non-leaf %p[%u]\n",
+						mn, slot);
+ printk("write array %d %lu to %u entry_cnt %d\n", i, piv[i], slot, entry_cnt);
+ if (i == 1)
+ loop_entry = entry;
+
+ if (append)
+ wmb = true;
+
+ if (mt_is_empty(loop_entry)) {
+ if (null_run) {
+ slot--;
+ }
+ null_run = true;
+ } else
+ null_run = false;
+ mas_ma_cp_store(mn, mn_type, slot, piv[i],
+ loop_entry, wmb);
+
+ if (update_gaps) {
+ mas_ma_update_gap(mas, mas_type, mn, mn_type,
+ piv[i], written_piv, src_slot,
+ slot, &max_gap, loop_entry);
+ }
+
+ written_piv = piv[i];
slot++;
+ // If this is a split operation, right will exist
+ // and the target may need to be switched.
+ if ((right) && (cp.node != right->node) &&
+ (slot > split)) {
+ ret = slot - 1;
+ if (update_gaps) {
+ printk("Write gap %p[%u] -> %lu\n", parent, p_slot, max_gap);
+ parent->ma64.gap[p_slot] = max_gap;
+ }
+
+ mn = mas_switch_nodes(&cp, left, right,
+ piv[i]);
+ slot = 0;
+ max_gap = 0;
+ p_slot++;
+ }
}
- written_piv = piv;
+ prev_piv = piv[2];
skip_src_slot:
-
- prev_piv = piv;
src_slot++;
} while (entry_cnt-- > 0 || attempt_insert);
- right->max = piv;
+ if (right && update_gaps)
+ parent->ma64.gap[p_slot] = max_gap;
+
+ printk("Done\n");
return ret;
}
if (!pend && i)
pend = mas->max;
gap = pend - pstart + 1;
+ printk("%d gap %lu %lu-%lu\n", i, gap, pend, pstart);
if (!mt_is_empty(mte_get_rcu_slot(mas->node, i)))
goto next;
unsigned long max, min;
struct maple_enode *mn;
-
if (!mte_is_leaf(mas->node))
return;
*/
max_gap = mas_leaf_max_gap(mas);
pslot = mte_parent_slot(mas->node);
- p_gap = _ma_get_gap(mte_parent(mas->node), pslot,
+ p_gap = ma_get_gap(mte_parent(mas->node), pslot,
mas_parent_enum(mas, mas->node));
if (p_gap != max_gap)
break;
entry = mte_get_rcu_slot(cp->src, sloc);
- if (mt_will_coalesce(entry))
- goto next_src_slot;
+ if (mt_will_coalesce(entry)) {
+ printk("slot %u will coalesce\n", sloc);
+ if (!sloc) {
+			printk("%s: coalesce at slot 0\n", __func__);
+ entry = NULL;
+ if (dloc) {
+ void *p_entry =
+ mte_get_rcu_slot(cp->dst,
+ dloc - 1);
+ if (!p_entry) {
+ mte_set_pivot(cp->dst, dloc - 1,
+ piv);
+ goto next_src_slot;
+ }
+ }
+		} else
+			goto next_src_slot;
+ }
// Last entry.
mte_adopt_children(new);
}
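+/*
+ * mas_ptype_leaf() - return the leaf node type to use below the current node
+ * type.  All range types currently map to maple_leaf_64.
+ */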
+static inline enum maple_type mas_ptype_leaf(struct ma_state *mas)
+{
+ enum maple_type pt = mte_node_type(mas->node);
+
+ switch (pt) {
+ case maple_arange_64:
+ case maple_range_64:
+ default:
+ return maple_leaf_64;
+ }
+}
/*
* split late, that is to say.. the parent may be full and need to be split.
* Once we know there is space (we need only a single location), then we can
unsigned char split, p_slot = 0, p_end = 0;
struct maple_enode *old_parent, *new_parent;
enum maple_type ptype; // parent type.
+ enum maple_type type; // split type.
MA_STATE(parent, mas->tree, mas->index, mas->last);
MA_STATE(left, mas->tree, mas->index, mas->last);
MA_STATE(right , mas->tree, mas->index, mas->last);
- printk("%s: %p\n", __func__, mas_mn(mas));
+ type = mte_node_type(mas->node);
//mt_dump(mas->tree);
if (mte_is_root(mas->node)) {
old_parent = full;
p_end = mas_data_end(&parent, ptype, &last_pivot, &coalesce);
if (p_end - coalesce >= mt_slots[ptype] - 1) {
/* Must split the parent */
- split = mas_split(&parent, p_slot, active,
+ mas_dup_state(mas, &parent);
+ split = mas_split(mas, p_slot, active,
p_end - coalesce + 1, entry);
- if (mas_is_err(&parent)) {
- mas->node = parent.node;
+ if (mas_is_err(mas))
return 0;
- }
+
+ //mt_dump(mas->tree);
+ mas_dup_state(&parent, mas);
if (split < p_slot)
p_slot -= split;
// Split will return the parent.
// Allocations.
new_parent = mt_mk_node(mas_next_alloc(mas), ptype);
+ mas_dup_state(&left, mas);
+ mas_dup_state(&right, mas);
+ left.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
+ right.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), type);
+
+ mte_set_parent(left.node, new_parent, p_slot);
+ mte_set_parent(right.node, new_parent, p_slot+1);
// split the data into left & right and do the insert.
- split = mas_do_split(mas, &left, &right, entry_cnt, entry);
+ split = mas_ma_cp(mas, p_slot, &left, &right, NULL, type, entry_cnt,
+ entry, false);
+ right.max = mas->max;
// Copy the parents information
if (!mte_is_root(full)) {
// right (if it exists, will be placed in p_slot + 1;
if (right.node)
mte_link(right.node, new_parent, p_slot + 1,
- right.max, ptype);
+ right.max, ptype);
// Copy grand parent to the parent, including slot encoding.
mte_to_node(new_parent)->parent = mte_to_node(old_parent)->parent;
// Set up maple state to replace the parent node in the grandparent.
parent.node = new_parent;
+ mas_dup_state(mas, &parent);
// Replace the parent node & free the old parent.
- _mas_replace(&parent, active, true);
+ _mas_replace(mas, active, true);
+
+ if (mt_is_alloc(mas->tree) && !mte_is_root(mas->node)) {
+ unsigned char gap_slot = 0;
+ unsigned long max_gap = mas_max_gap(mas, &gap_slot);
+ gap_slot = mte_parent_slot(mas->node);
+ mas_parent_gap(mas, gap_slot, max_gap);
+ }
+
+ mas_dup_state(mas, &parent);
// Set up the ma_state for the return. Point to the correct node for
// the insert or subsequent split.
p_slot += 2;
}
- // Free the full node.
+ // Free the full node, this may have happened in _mas_replace
if (old_parent != full) {
if (!active)
mas_push_node(mas, full);
mte_free(full);
}
+
return split;
}
-static inline enum maple_type mas_ptype_leaf(struct ma_state *mas)
-{
- enum maple_type pt = mte_node_type(mas->node);
-
- switch (pt) {
- case maple_arange_64:
- case maple_range_64:
- default:
- return maple_leaf_64;
- }
-}
/* Private
*
* When inserting into non-leaf nodes in _mas_insert, a type is needed.
unsigned char end, coalesce;
unsigned long last_piv;
void *last_entry;
- int req_slots;
+ int req_slots = 0;
+ mt_dump(mas->tree);
end = mas_data_end(mas, mte_node_type(mas->node), &last_piv, &coalesce);
- req_slots = end - coalesce + 1;
+ printk("end of %p %u %u %lu\n", mas_mn(mas), end, coalesce, last_piv);
if (max == mas->max && min == mas->min)
- return req_slots; // overwriting a single entry.
+ return end - coalesce; // overwriting a single entry.
if (min < mas->index) {
req_slots++; //slot needed at start.
+ printk("slot at start\n");
}
if (max > mas->last) {
req_slots++; // slot needed at end.
- }else {
+ printk("slot at end\n");
+ } else {
+		printk("%s: write reaches slot max\n", __func__);
unsigned char end_slot = end;
while (end_slot > slot) {
- if (mas->last <=
- mas_get_safe_pivot(mas, end_slot))
+ unsigned long piv = mas_get_safe_pivot(mas, end_slot);
+ if (mas->last <= piv) {
+ if (mas->last < piv)
+ req_slots++;
break;
+ }
end_slot--;
}
- req_slots -= (end_slot - slot);
}
if (mte_is_dense(mas->node))
return req_slots;
+
do {
- last_entry = mte_get_rcu_slot(mas->node, end--);
- } while (mt_will_coalesce(last_entry) && end);
+		printk("%s: checking last entry at slot %d\n", __func__, end);
+ last_entry = mte_get_rcu_slot(mas->node, end);
+ } while (mt_will_coalesce(last_entry) && --end);
+
+ req_slots = end - coalesce + 1;
// can't reuse last null.
- if ((last_entry) && (req_slots >= mt_slot_count(mas->node) - 1))
- return req_slots + 1;
+ if (req_slots >= mt_slot_count(mas->node) - 1) {
+ if (last_entry)
+ return req_slots + 1;
+ else if (mas->last == ULONG_MAX)
+ return req_slots;
+ else if (mas->max == ULONG_MAX)
+ return req_slots + 1;
+ }
return req_slots;
}
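+/*
+ * __mas_add() - insert @entry using mas_ma_cp().  An append writes straight
+ * into the existing node; otherwise the contents are rebuilt either into a
+ * newly allocated node (active tree) or into an on-stack node that is then
+ * copied over the original (inactive tree).
+ */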
+static inline int __mas_add(struct ma_state *mas, void *entry,
+ int entry_cnt, bool active, bool append)
+{
+ enum maple_type mas_type = mte_node_type(mas->node);
+ struct maple_node space;
+	struct maple_node *mn = NULL;
+ int ret = 0;
+
+ MA_STATE(cp, mas->tree, mas->index, mas->last);
+
+ if (append) {
+ printk("Appending.\n");
+ mn = mas_mn(mas);
+ } else if (active) {
+ cp.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ mas_type);
+ mn = mas_mn(&cp);
+ } else {
+ printk("Inactive\n");
+ mn = &space;
+ cp.node = NULL;
+ }
+
+ ret = mas_ma_cp(mas, 0, &cp, NULL, mn, mas_type, entry_cnt, entry,
+ append);
+
+ mn->parent = mas_mn(mas)->parent;
+ // FIXME: Propagate gaps.
+
+ if (append)
+ return ret;
+ if (active)
+ mas->node = cp.node;
+ else
+ memcpy(mas_mn(mas), mn, sizeof(struct maple_node));
+
+ return ret;
+}
/* Private
*
* Insert entry into a node.
static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
bool active)
{
- struct maple_enode *prev_enode = NULL;
enum maple_type this_type = mte_node_type(mas->node);
unsigned long last_piv;
+ unsigned char coalesce;
+ unsigned char old_end, new_end;
unsigned long max = mas->max;
unsigned long min = mas->min;
- unsigned char slot_cnt = mt_slots[this_type] - 1;
- unsigned char pivot_cnt = mt_pivots[this_type];
- unsigned char coalesce;
unsigned char slot = mas_get_slot(mas);
- bool spans_range = false;
- bool append = false;
- bool null_entry = false;
- int old_end = mas_data_end(mas, this_type, &last_piv, &coalesce);
- int new_end = old_end;
- int ret = 0;
+ unsigned char slot_cnt = mt_slots[this_type] - 1;
+ struct maple_enode *prev_enode = NULL;
void *contents = NULL;
+ int ret = 0;
+
- if (mas->last > mas->max) {
- BUG_ON(!active);
- /* Replace the current tree with a new tree with the requested
- * value. This is sub-optimal, but *a lot* less work. The
- * initial user of the tree will pretty much _never_ do this.
- */
- ret = mas_replace_tree(mas, entry);
- goto update_gap;
- }
- /* Dense node type */
- if (!pivot_cnt) {
+	printk("%s\n", __func__);
+ if (ma_is_dense(this_type)) {
ret = _mas_add_dense(mas, entry, slot, this_type, overwrite,
active);
if (!ret)
goto update_gap;
}
- printk("slot %u\n", slot);
- if (slot > slot_cnt)
+ old_end = mas_data_end(mas, this_type, &last_piv, &coalesce);
+ if (slot > slot_cnt) // search returned MAPLE_NODE_SLOTS
slot = old_end + 1;
- /* Calculate the range of this slot */
- _mas_get_range(mas, slot, &min, &max);
+ printk("Using %p[%u]\n", mas_mn(mas), slot);
- if (mas->last > max)
- spans_range = true;
+ _mas_get_range(mas, slot, &min, &max);
if (slot <= old_end)
contents = mte_get_rcu_slot(mas->node, slot);
+ // Check early failures.
if (!overwrite) {
- if (spans_range) {
+ printk("check early failures? %lu > %lu and !contents %s\n",
+ mas->last, max, !contents ? "yes" : "no");
+ if (mas->last > max) { // spans range.
mas_set_err(mas, -ERANGE);
return 0;
}
return 0;
}
}
+ printk("do the add\n");
- if (slot >= old_end)
- append = true;
+	// At this point, we can perform the add.
+ // spans node means we will replace the tree.
+ if (mas->last > mas->max)
+ return mas_replace_tree(mas, entry);
+
+ // Fits neatly into a slot.
if (mas->index == min && mas->last == max) {
mte_set_rcu_slot(mas->node, slot, entry);
if (slot < slot_cnt)
goto complete;
}
- /* Calculate how many new pivots we will need. */
-
- /* Check for splitting */
+ printk("Adding %lu-%lu\n", mas->index, mas->last);
new_end = _mas_add_slot_cnt(mas, slot, min, max);
- if (mas->index > min)
- null_entry = true;
-
+ printk("%p new_end %u slot_cnt %u\n", mas_mn(mas), new_end, slot_cnt);
if (new_end > slot_cnt) {
- /* Not enough space in the node */
- mas_split(mas, slot, active, new_end, entry);
-
- if (mas_is_err(mas))
+ mas_split(mas, slot, active, old_end, entry);
+ if (mas_is_err(mas)) {
return 0;
-
+ }
return old_end - new_end;
}
- prev_enode = mas->node;
-
- if (!mte_is_leaf(mas->node)) {
- struct maple_enode *leaf;
- MA_STATE(leaf_state, mas->tree, mas->index, mas->last);
- enum maple_type mt;
- unsigned char child_slot = 0;
-
- // Create a new node.
- mas_node_cnt(mas, 1);
- if (mas_is_err(mas))
- return 0;
-
- // Set the new leaf parent and type.
- mt = mas_determine_type(mas, min, slot);
- leaf = mt_mk_node(mas_next_alloc(mas), mt);
- mte_set_parent(leaf, mas->node, slot);
- // Put the new range in the new leaf.
-
- leaf_state.node = leaf;
- leaf_state.min = mas->index;
- leaf_state.max = max;
- if (null_entry)
- child_slot = 1;
- mas_set_slot(&leaf_state, child_slot);
- _mas_add(&leaf_state, entry, overwrite, active);
- // Restore old values and continue inserting.
- null_entry = false;
- entry = leaf;
- if (mt == maple_dense)
- mas->last = mas->index + mt_max[mt] - 1;
+	if (!mte_is_leaf(mas->node)) {
+		printk("trying to add %lu-%lu to %p\n", mas->index,
+				mas->last, mas_mn(mas));
+	}
+ BUG_ON(!mte_is_leaf(mas->node));
-
- if (append) {
- old_end = new_end;
- } else {
- mas_partial_copy(mas, slot);
- if (mas_is_err(mas))
+ if (active) {
+ printk("%s %p\n", __func__, mas);
+ printk("Active and needs a node\n");
+ mas_node_cnt(mas, 1);
+ if (mas_is_err(mas)) {
+ printk("Failed\n");
return 0;
-
- old_end = slot;
- }
-
- if (null_entry) {
- /* When writing a NULL entry, the order must be reversed to
- * ensure readers don't get incorrect data on appends
- */
-
- /* Write the entry */
- mte_set_rcu_slot(mas->node, ++slot, entry);
- if (slot < pivot_cnt)
- mte_set_pivot(mas->node, slot, mas->last);
-
- /* Write NULL entry */
- mte_set_rcu_slot(mas->node, --slot, contents);
- if (append)
- wmb(); /* NULL needs to be written before pivot. */
-
- mte_set_pivot(mas->node, slot, mas->index - 1);
- if (mt_is_alloc(mas->tree)) {
- unsigned long gap = mas->min;
-
- if (slot > 0)
- gap = mte_get_pivot(mas->node, slot - 1);
- gap = (mas->index - 1) - gap;
- mte_set_gap(mas->node, slot, gap);
- }
- slot += 2;
- } else {
- if (overwrite && slot) {
- mte_set_pivot(mas->node, slot-1, mas->index - 1);
}
-
- /* Write the entry */
- mte_set_rcu_slot(mas->node, slot, entry);
- if (old_end == new_end + 1) // Append.
- wmb(); /* Entry needs to be visible prior to pivot. */
-
- if (slot < pivot_cnt)
- mte_set_pivot(mas->node, slot++, mas->last);
}
+ prev_enode = mas->node;
+ printk("slot %u old_end %u\n", slot, old_end);
+ __mas_add(mas, entry, old_end, active, slot > old_end);
- /* Skip possible duplicate entry that contains a NULL */
- if (!append) {
- MA_CP(cp, prev_enode, mas->node, old_end, slot_cnt - 1);
- if (mas->last >= last_piv)
- goto complete;
-
- while (old_end < slot_cnt) {
- if (old_end >= slot_cnt -1 )
- break;
-
- if (mte_get_pivot(prev_enode, old_end) <= mas->last)
- old_end++;
- else
- break;
- }
-
- /* Copy remainder of node if this isn't an append */
- cp.dst_start = slot;
- cp.dst_end = mt_slot_count(cp.dst) - 1;
- cp.src_start = old_end;
- cp.start_piv = mas->last;
- cp.src_end = mt_slot_count(cp.src) - 1; // copy to the end to collapse last slot null.
- mas_copy(mas, &cp);
- }
complete:
if (prev_enode != mas->node)
_mas_replace(mas, active, true);
+
update_gap:
if (mt_is_alloc(mas->tree))
mas_update_gap(mas);
return ret;
- }
+}
+
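+/*
+ * ma_inactive_insert() - insert @entry into a tree that is not yet visible to
+ * readers: restart the walk from the root and add without the active path
+ * used on live trees.
+ */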
+static inline void ma_inactive_insert(struct ma_state *mas, void *entry)
+{
+ // Restart search for where to insert.
+ mas->node = MAS_START;
+ mas_start(mas);
+ mas_add(mas, entry, false, false);
+}
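+/* mas_insert() - insert @entry into a live tree (no overwrite). */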
+static inline void mas_insert(struct ma_state *mas, void *entry)
+{
+ mas_add(mas, entry, false, true);
+}
static inline int _mas_insert(struct ma_state *mas, void *entry,
- unsigned char slot)
+ unsigned char slot, bool active)
{
mas_set_slot(mas, slot);
- return _mas_add(mas, entry, false, true);
+ printk("%s %u -> %p\n", __func__, slot, entry);
+ return _mas_add(mas, entry, false, active);
}
static inline void mas_root_expand(struct ma_state *mas, void *entry)
mn->parent = ma_parent_ptr(
((unsigned long)mas->tree | MA_ROOT_PARENT));
- if (mas->index != 0) {
- slot++;
- /* Assign the old entry to slot 0, or set it to null. */
- mte_set_rcu_slot(mas->node, 0, r_entry);
- if (!r_entry)
- mte_set_pivot(mas->node, 0, mas->index - 1);
- }
+ mte_set_rcu_slot(mas->node, slot, r_entry);
+ mte_set_pivot(mas->node, slot, 0);
+
// FIXME: When task_size / page_size -1 works, check to ensure we are
// not inserting above this.
- _mas_insert(mas, entry, slot);
+ __mas_add(mas, entry, slot++, false, true);
+ if (mas_is_err(mas))
+ return;
+
+ if (mas->last != 1)
+ slot++;
+ //_mas_insert(mas, entry, slot, false);
if (mas_is_err(mas))
return;
// FIXME: mas->index = TASK_SIZE / PAGE_SIZE - 1;
mas->index = 0x2000000000000UL;
mas->last = mt_max[mt];
- _mas_insert(mas, XA_ZERO_ENTRY, 2);
+ __mas_add(mas, XA_ZERO_ENTRY, slot, false, true);
if (mas_is_err(mas))
return;
}
if (slot)
r_start = mas_get_safe_pivot(mas, slot - 1) + 1;
- printk("next entry starting slot %u\n", slot);
while (slot < count) {
pivot = mas_get_safe_pivot(mas, slot);
goto next_node;
}
}
- if (mas_next_nentry(mas, limit, range_start)) {
- printk("nentry break\n");
+
+ if (mas_next_nentry(mas, limit, range_start))
break;
- }
if (*range_start > limit)
return NULL;
unsigned long max = mas->max;
unsigned char slot;
+ printk("Starting at %p\n", mas->node);
while(!mas_is_none(mas)) {
if (mas_prev_nentry(mas, limit, &max))
break;
+ printk("going to prev node\n");
mas_prev_node(mas, limit);
+ printk("here %p\n", mas->node);
mas_set_slot(mas, mt_slot_count(mas->node) - 1);
}
unsigned long piv_val = _mas_get_safe_pivot(mas, slot, type);
unsigned char pivot_cnt = mt_pivots[type];
- int last_null = slot;
while ((slot < pivot_cnt - 1)) {
unsigned long this_pivot = mte_get_pivot(mas->node, slot + 1);
while (--slot >= 0) {
void *entry = mte_get_rcu_slot(mas->node, slot);
- if (!mt_is_empty(entry)) {
- last_null = slot + 1;
+ if (!mt_is_empty(entry))
break;
- }
- if (!slot)
- last_null = slot;
if (skip)
mte_set_rcu_slot(mas->node, slot, XA_SKIP_ENTRY);
mte_set_pivot(mas->node, slot, piv_val);
}
- if (skip)
- mte_set_rcu_slot(mas->node, last_null, NULL);
}
+
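+/*
+ * mas_update_empty_pivots() - extend the previous node's trailing NULL pivot
+ * (and the matching parent pivot) to mas->max so the range covered by a node
+ * that is about to be emptied is not lost.  Returns 1 if the pivots cannot
+ * be updated safely.
+ */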
+static inline int mas_update_empty_pivots(struct ma_state *mas,
+ unsigned char p_slot)
+{
+ unsigned long piv;
+ unsigned char coalesce, old_end, slot;
+ enum maple_type type = mte_node_type(mas->node);
+
+ MA_STATE(prev, mas->tree, mas->index, mas->last);
+
+ mas_dup_state(&prev, mas);
+ printk("Looking for prev val of %p[%u]\n", mas_mn(&prev),
+ mas_get_slot(&prev));
+ if (mte_is_root(prev.node)) {
+ mas_prev_nentry(mas, 0, &piv);
+ } else {
+ mas_set_slot(&prev, p_slot);
+ mas_encoded_parent(&prev);
+ mas_prev_node(&prev, 0);
+ }
+ printk("Found in node %p[%u]\n", mas_mn(&prev), mas_get_slot(&prev));
+ if (mas_is_none(&prev))
+ return 1;
+
+ old_end = mas_data_end(&prev, type, &piv, &coalesce);
+ if (old_end >= mt_pivots[type])
+ return 1; // setting the pivot would cause incorrect data.
+
+ printk("old end is %u\n", old_end);
+ if (!mte_get_rcu_slot(prev.node, old_end)) { // ends in NULL
+ mte_set_pivot(prev.node, old_end, mas->max);
+ printk("%s: Setting %p[%u] to %lu\n", __func__, mas_mn(&prev),
+ old_end, mas->max);
+ }
+
+	// FIXME: walk up the parents (do/while) to propagate the pivot.
+ p_slot = mte_parent_slot(prev.node);
+ mas_encoded_parent(&prev);
+ printk("%s: Setting %p[%u] to %lu\n", __func__, mas_mn(&prev),
+ p_slot, mas->max);
+ mte_set_pivot(prev.node, p_slot, mas->max);
+ return 0;
+}
+
static inline void mas_coalesce_empty(struct ma_state *mas,
struct maple_enode *eparent, unsigned char p_slot,
enum maple_type p_type)
{
- mte_set_rcu_slot(eparent, p_slot, NULL);
+ printk("Coalesce empty\n");
+ mte_set_rcu_slot(eparent, p_slot, XA_SKIP_ENTRY);
mas_set_slot(mas, p_slot);
mas_coalesce_pivots(mas, p_slot, p_type, true);
}
this_p_slot = mte_parent_slot(this_enode);
mas_dup_state(&p_state, mas); // set the parent node, etc.
mas_encoded_parent(&p_state); // Actually make it the parent.
+
+ // Get the next entry.
mas_set_slot(&p_state, this_p_slot + 1);
if (!mas_next_nentry(&p_state, ULONG_MAX, &r_piv)) {
// this_enode is the right-most node of the parent.
mas_set_slot(&p_state, this_p_slot);
if (mas_prev_nentry(&p_state, 0, &r_piv)) {
// If there is a node to the left, rebalance the left
- mt_dump(p_state.tree);
+ //mt_dump(p_state.tree);
mas_descend(&p_state);
end = mas_data_end(&p_state,
mte_node_type(p_state.node), &l_piv, &coalesce);
if (l_null_end && r_null_start)
{
all_slots--;
- } else if (!l_null_end && !r_null_start) {
- // Need an null if the pivots don't line up here.
+ } else if (!l_null_end && !r_null_start &&
+ r_state.min != mas->max + 1) {
+ // Need a null if the pivots don't line up here.
mte_set_pivot(cp.dst, cp.dst_start++, r_state.min);
all_slots++;
}
cp.src_end = r_end;
else
cp.dst_end = (all_slots + 1)/ 2; // Take 1/2 the entries.
+ printk("Copy %p[%u-%u] to %p[%u-%u]\n", mas_mn(&r_state), cp.src_start,
+ cp.src_end, mte_to_node(cp.dst), cp.dst_start, cp.dst_end);
- mas_copy(&r_state, &cp); // cp.dst is now complete, place it in the tree.
+ printk("min of %lu or %lu\n", mte_get_pivot(cp.dst, cp.dst_start-1),
+ r_state.min);
+ _mas_copy(&r_state, &cp,
+ mte_get_pivot(cp.dst, cp.dst_start-1)); // cp.dst is now complete, place it in the tree.
mte_to_node(cp.dst)->parent = mte_to_node(this_enode)->parent;
+ printk("%p == %p\n", this_enode, mas->node);
new_type = mte_node_type(cp.dst);
+ printk("Replace %p with %p\n", mas_mn(mas), mte_to_node(cp.dst));
mas->node = cp.dst;
mas_replace(mas);
+ mt_dump(mas->tree);
l_piv = mas_get_safe_pivot(mas, cp.dst_start - 1);
ret = mt_slot_count(mas->node) - cp.dst_start + 1;
mte_set_pivot(p_state.node, this_p_slot, l_piv);
right_empty:
if (all_slots <= mt_slots[new_type] - 1) {
+ printk("right consumed %p\n", mas_mn(&r_state));
// Right was entirely consumed.
void *entry = XA_SKIP_ENTRY;
- if (r_state.max == ULONG_MAX)
- entry = NULL;
-
ret = mt_slots[new_type] - all_slots;
if (!ret)
ret = 1;
r_p_slot = mte_parent_slot(r_enode);
- mte_set_rcu_slot(p_state.node, r_p_slot, entry);
- if (xa_is_skip(entry) &&
- r_p_slot < mt_pivot_count(mas->node))
- mte_set_pivot(p_state.node, r_p_slot, l_piv);
- mte_free(r_enode);
+ printk("Zero %p[%u] (%u)\n", mas_mn(&p_state), r_p_slot,
+ mt_pivot_count(mas->node));
+ if (r_p_slot < mt_pivot_count(mas->node)) {
+ if (!mas_update_empty_pivots(&r_state, r_p_slot)) {
+ mte_set_rcu_slot(p_state.node, r_p_slot, entry);
+ mte_free(r_enode);
+ }
+ }
p_coalesce = true;
goto right_done;
}
*/
if (p_coalesce)
mas_coalesce(&p_state);
+ //mt_dump(mas->tree);
return ret;
}
if (mas_is_err(mas)) {
if (h_data >= 1)
return;
+
// Single entry and allocation failed, free this_enode
mas->node = this_enode;
mas_encoded_parent(mas);
+		printk("%s: freeing single-entry node\n", __func__);
+ if (mas_update_empty_pivots(mas, p_slot))
+ return;
+
mas_coalesce_empty(mas, eparent, p_slot, p_type);
check_parent = true;
- mas->node = this_enode;
goto done;
}
}
goto check_start;
if (end <= coalesce) {
+		printk("%s: node is empty\n", __func__);
+ if (mas_update_empty_pivots(mas, p_slot))
+ return;
mas_coalesce_empty(mas, eparent, p_slot, p_type);
check_parent = true;
+ goto done;
}
// Check if next node starts with null.
if (!slot) {
// Empty node...
+ if (mas_update_empty_pivots(mas, p_slot))
+ return;
mas_coalesce_empty(mas, eparent, p_slot, p_type);
check_parent = true;
piv = mas->min;
}
EXPORT_SYMBOL(mt_find);
-static inline void ma_inactive_insert(struct ma_state *mas, void *entry);
-
-
/* Private
* mas_replace_tree() - Build a new tree and replace the entire structure.
*
unsigned long piv;
unsigned char coalesce;
unsigned int slot_cnt = 0;
- long node_cnt = 0, leaves= 0;
+	long node_cnt = 0, leaves = 1;
struct maple_enode *last = NULL;
enum maple_type p_type = mas_parent_enum(mas, mas->node);
if (slot_cnt > mt_slot_count(mas->node))
leaves++;
+	printk("%ld leaves\n", leaves);
node_cnt = 1; // Root node.
while (leaves) { // add the number of nodes at each level.
node_cnt += leaves;
leaves /= mt_slots[p_type];
}
+	printk("Using node count %ld\n", node_cnt);
mas_node_cnt(mas, node_cnt);
if (mas_is_err(mas))
return 0;
ma_inactive_insert(&new_mas, entry);
if (mas_is_err(&new_mas))
BUG_ON(1);
- mt_dump(new_mas.tree);
+ //mt_dump(new_mas.tree);
}
// Insert the new value.
if (mas_is_none(&r_mas)) // No entry beyond new_mas.last
goto skip_right;
+ printk("node %p\n", new_mas.node);
mas_for_each(&l_mas, entry, ULONG_MAX) {
if (l_mas.index < r_mas.index)
continue;
if (mas->tree->ma_root && mas->last == 0)
goto exists;
+ printk("%s\n", __func__);
if (mas->last != 0)
mas_root_expand(mas, entry);
else if (((unsigned long) (entry) & 3) == 2)
}
/* Do the add */
+ printk("Adding to %p[%u]\n", mas_mn(mas), slot);
ret = _mas_add(mas, entry, overwrite, active);
if (mas_is_err(mas) && xa_err(mas->node) == -ERANGE)
mas_set_err(mas, -EEXIST);
return 0;
}
-static inline void ma_inactive_insert(struct ma_state *mas, void *entry)
-{
- mas_add(mas, entry, false, false);
-}
-static inline void mas_insert(struct ma_state *mas, void *entry)
-{
- mas_add(mas, entry, false, true);
-}
-
static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot,
unsigned long size, unsigned long *index)
{
mas->min = mas_get_safe_pivot(mas, pslot - 1) + 1;
mas->node = mn;
- _mas_insert(mas, entry, slot);
+ printk("%s %d\n", __func__, __LINE__);
+ _mas_insert(mas, entry, slot, true);
return 0;
}
mas_start(mas);
if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ printk("%s\n", __func__);
mas_root_expand(mas, entry);
if (mas_is_err(mas))
return xa_err(mas->node);
}
if (mas_is_ptr(mas)) {
+ printk("%s\n", __func__);
mas_root_expand(mas, entry);
if (!mas->index)
return mte_get_pivot(mas->node, 0);
mtree_lock(ms.tree);
retry:
- mas_insert(&ms, entry);
+ printk("Add %lu\n", first);
+ mas_add(&ms, entry, false, true);
if (mas_nomem(&ms, gfp))
goto retry;
p_slot = mte_parent_slot(mas->node);
p_mn = mte_parent(mte);
MT_BUG_ON(mas->tree, max_gap > mas->max);
+ if (ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) !=
+ max_gap) {
+ printk("get_gap %p[%u] != gap in %p of %lu\n", p_mn, p_slot,
+ mas_mn(mas), max_gap);
+ }
MT_BUG_ON(mas->tree,
- _ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) !=
+ ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) !=
max_gap);
}