#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
+#undef XA_RETRY_ENTRY
+#undef XA_SKIP_ENTRY
+#undef XA_DELETED_ENTRY
static struct kmem_cache *maple_node_cache;
static inline bool mt_is_empty(const void *entry)
{
- return (!entry) || xa_is_deleted(entry) || xa_is_skip(entry);
+ return !entry;
}
-static inline bool mt_will_coalesce(const void *entry)
-{
- return (xa_is_deleted((entry)) || xa_is_skip(entry) ||
- xa_is_retry(entry));
-}
-
static inline void mas_set_err(struct ma_state *mas, long err)
{
mas->node = MA_ERROR(err);
return mte_to_node(mas->node);
}
+static inline void mas_set_node_dead(struct ma_state *mas)
+{
+ mas_mn(mas)->parent = ma_parent_ptr(mas_mn(mas));
+}
static void mte_free(struct maple_enode *enode)
{
ma_free(mte_to_node(enode));
mas->node = p_enode;
}
+static inline void mte_set_safe_piv(struct ma_state *mas, unsigned char slot,
+ unsigned long val)
+{
+ if (slot > mt_pivot_count(mas->node))
+ mte_set_pivot(mas->node, slot, val);
+}
+
+static inline bool mas_touches_null(struct ma_state *mas)
+{
+ unsigned char slot = mas_get_slot(mas);
+ if (slot && !mas_get_rcu_slot(mas, slot - 1))
+ return true;
+ if ((slot < mt_slot_count(mas->node) - 1) &&
+ !mas_get_rcu_slot(mas, slot + 1))
+ return true;
+
+ return false;
+}
+
static inline void mas_set_safe_pivot(struct ma_state *mas, unsigned char slot,
unsigned long val)
{
* This exists when moving gaps across many levels of a tree. Basically, walk
* backwards until the nodes meet and set the pivots accordingly.
*
- * Special cases for XA_SKIP_ENTRY is needed.
*/
void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree);
static inline void mas_shift_pivot(struct ma_state *curr,
mas_dup_state(&right, next);
do {
bool leaf = true;
- void *entry, *adv_ent = XA_RETRY_ENTRY;
+ void *entry;
// Ends with NULL side
l_p_slot = mte_parent_slot(left.node);
mas_set_safe_pivot(&left, l_p_slot, piv);
if (!mte_is_leaf(left.node)) {
leaf = false;
- adv_ent = XA_SKIP_ENTRY;
}
if (left.node == right.node) {
do {
entry = mas_get_rcu_slot(&left, slot);
mte_set_pivot(left.node, slot, piv);
- mte_set_rcu_slot(left.node, slot, adv_ent);
if (!leaf) {
if (mt_is_alloc(left.tree))
mte_set_gap(left.node, slot, 0);
while (r_p_slot--) {
entry = mas_get_rcu_slot(&right, r_p_slot);
mte_set_pivot(right.node, r_p_slot, piv);
- mte_set_rcu_slot(right.node, r_p_slot, adv_ent);
if (!leaf ) {
if (mt_is_alloc(right.tree))
mte_set_gap(right.node, r_p_slot, 0);
break; // last entry and it's empty.
mte_set_pivot(left.node, l_p_slot, piv);
- mte_set_rcu_slot(left.node, l_p_slot, adv_ent);
if (!leaf ) {
if (mt_is_alloc(left.tree))
mte_set_gap(left.node, l_p_slot, 0);
* to check the space a node has when coalescing and rebalancing.
*/
static inline unsigned char _mas_data_end(const struct ma_state *mas,
- const enum maple_type type, unsigned long *last_piv,
- unsigned char *coalesce)
+ const enum maple_type type, unsigned long *last_piv)
{
- struct maple_enode *mn = mas->node;
- unsigned long piv = mas->min, prev_piv = mas->min - 1;
- unsigned char slot;
- unsigned char counted_null = 0;
-
- *coalesce = 0;
- for (slot = 0; slot < mt_slot_count(mn); slot++) {
- void *entry;
-
+ int slot = 0;
+ unsigned long piv = mas->min, prev_piv = mas->min;
+ for (; slot < mt_slot_count(mas->node); slot++) {
piv = _mas_get_safe_pivot(mas, slot, type);
- if ((piv == 0 && slot != 0) ||
- (piv > mas->max)) {
- // Past the end of data.
- slot--;
+ if (!piv && slot) {
piv = prev_piv;
- // At this point, we are saying the previous slot is
- // the end.
- if (counted_null) {
- // if this node has ended in a run of NULL
- if (slot <= counted_null) {
- slot = 0;
- (*coalesce) = 0;
- piv = mas->min - 1;
- break;
- }
- (*coalesce) = (*coalesce) - counted_null + 1;
- piv = _mas_get_safe_pivot(mas,
- slot - counted_null, type);
- }
+ slot--;
break;
}
-
- entry = _mte_get_rcu_slot(mn, slot, type, mas->tree);
- if (entry == NULL || xa_is_deleted(entry)) {
- if (counted_null)
- (*coalesce)++;
- counted_null++;
-
- } else if (mt_will_coalesce(entry)) {
- if (piv == prev_piv)
- (*coalesce)++;
- } else {
- counted_null = 0;
- }
-
- if (piv == mas->max)
- break;
-
prev_piv = piv;
}
+ printk("%s: %u\n", __func__, slot);
*last_piv = piv;
return slot;
}
static inline unsigned char mas_data_end(const struct ma_state *mas)
{
unsigned long l;
- unsigned char c;
-
- return _mas_data_end(mas, mte_node_type(mas->node), &l, &c);
-}
-/** Private
- * ma_hard_data - return the number of slots required to store what is
- * currently in this node.
- *
- * @end the last slot with a valid pivot/contents
- * @coalesce the number of slots that would be removed if copied/coalesced.
- *
- */
-static inline int ma_hard_data(unsigned long end,
- unsigned long coalesce)
-{
- if (end < coalesce)
- return 0;
- return end - coalesce;
+ return _mas_data_end(mas, mte_node_type(mas->node), &l);
}
// Set min/max for a given slot in mas->node.
static inline unsigned char mas_append_entry(struct ma_state *mas, void *entry)
{
unsigned long wr_pivot = mas->min ? mas->min - 1 : 0;
- unsigned char coalesce, dst_slot = mas_get_slot(mas);
+ unsigned char dst_slot = mas_get_slot(mas);
if (!mas_get_rcu_slot(mas, 0) && !mte_get_pivot(mas->node, 0))
dst_slot = 0; // empty node.
else if (dst_slot > mt_slot_count(mas->node)) { // Should not happen.
dst_slot = _mas_data_end(mas, mte_node_type(mas->node),
- &wr_pivot, &coalesce); // slot not set.
+ &wr_pivot); // slot not set.
} else if (dst_slot)
wr_pivot = mas_get_safe_pivot(mas, dst_slot - 1);
else
src_piv = ma_get_pivot(smn, src_slot, stype);
- if (!mte_is_leaf(mas->node) && mt_will_coalesce(src_data))
- continue;
-
- if (!src_data || mt_will_coalesce(src_data)) {
- src_data = NULL;
+ if (!src_data) {
if (prev_null && dst_slot) {
mas_set_safe_pivot(mas, dst_slot - 1, src_piv);
next_dst = false;
{
enum maple_type mt = mte_node_type(mas->node);
unsigned long pstart, pend;
- unsigned long prev_gap = 0;
unsigned long max_gap = 0;
unsigned long gap = 0;
void *entry = NULL;
pstart = mas->min;
for (i = 0; i < mt_slots[mt]; i++) {
+ printk("Checking %u\n", i);
pend = mas_get_safe_pivot(mas, i);
if (!pend && i)
pend = mas->max;
- if (pend > mas->max) // possibly a retry.
- break;
-
gap = pend - pstart + 1;
entry = mas_get_rcu_slot(mas, i);
- if (!mt_is_empty(entry) || xa_is_retry(entry)) {
- prev_gap = 0;
+ if (!mt_is_empty(entry) || xa_is_retry(entry))
goto next;
- }
- prev_gap += gap;
- if (prev_gap > max_gap)
- max_gap = prev_gap;
+ printk("gap %lu in %u\n", gap, i);
+ printk("%lu - %lu + 1\n", pend, pstart);
+
+ if (gap > max_gap)
+ max_gap = gap;
next:
if (pend >= mas->max)
pstart = pend + 1;
}
done:
+ printk("max gap is %lu\n", max_gap);
return max_gap;
}
break;
child = _mte_get_rcu_slot(parent, slot, type, mas->tree);
- if (!mt_is_empty(child))
+ if (!mt_is_empty(child)) {
+ printk("set parent of %p\n", child);
mte_set_parent(child, parent, slot);
+ }
}
}
/* Private
parent = mt_mk_node(mte_parent(mas->node), ptype);
slot = mte_parent_slot(mas->node);
+ printk("Get slot %u of %p\n", slot, parent);
prev = mte_get_rcu_slot(parent, slot, mas->tree);
}
mas_ascend(&parent);
old_parent = parent.node;
ptype = mas_parent_enum(mas, mas->node);
- p_end = _mas_data_end(&parent, ptype, &last_pivot, &coalesce);
+ p_end = _mas_data_end(&parent, ptype, &last_pivot);
if (p_end - coalesce >= mt_slots[ptype] - 1) {
/* Must split the parent */
mas_dup_state(mas, &parent);
if (new_end < mt_pivot_count(curr->node))
mte_set_pivot(curr->node, new_end, last_piv);
// The location storing these values has moved.
- mte_set_rcu_slot(curr->node, new_end, XA_RETRY_ENTRY);
+ // FIXME: This doesn't exist.
+// mte_set_rcu_slot(curr->node, new_end, XA_RETRY_ENTRY);
}
}
slot = mte_parent_slot(curr->node);
mas_dup_state(&parent, curr);
mas_ascend(&parent);
- mte_set_rcu_slot(parent.node, slot, XA_RETRY_ENTRY);
+ // FIXME: What do we do here?
+ //mte_set_rcu_slot(parent.node, slot, XA_RETRY_ENTRY);
mas_set_safe_pivot(&parent, slot, last_piv);
if (mt_is_alloc(mas->tree))
mte_set_gap(parent.node, slot, 0);
{
unsigned long last_piv;
- unsigned char coalesce;
unsigned char end;
unsigned char new_end;
unsigned char next_start = 0;
continue;
/* Start by checking the back of the current node. */
- end = _mas_data_end(&curr, mte_node_type(curr.node), &last_piv,
- &coalesce);
+ end = _mas_data_end(&curr, mte_node_type(curr.node), &last_piv);
new_end = end;
do {
entry = mas_get_rcu_slot(&curr, new_end);
next_entry = mas_get_rcu_slot(&next, ++next_start);
}
- if (next_entry != NULL && !mt_will_coalesce(next_entry))
+ if (next_entry)
continue; // Next does not start with null.
if (next_start) {
} while ((curr.node == mas->node) && (mas_move_gap_swap(&curr, &next)));
}
-static inline int mas_add(struct ma_state *mas, void *entry, bool overwrite,
- bool active);
-static inline int _mas_add_dense(struct ma_state *mas, void *entry,
- unsigned char slot, bool overwrite, enum maple_type this_type,
- bool active)
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_NODE_SLOTS + 1)
+struct maple_big_node {
+ struct maple_pnode *parent;
+ struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
+ unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
+ unsigned long gap[MAPLE_BIG_NODE_SLOTS];
+};
+
+static bool mas_check_split_parent(struct ma_state *mas, unsigned char slot,
+ struct ma_state *child_mas)
{
- int ret = 0;
- unsigned long min = mas->index - mas->min;
- unsigned long max = mas->last - mas->min;
+ printk("Checking slot %u\n", slot);
+ if (mte_parent(mas_get_rcu_slot(mas, slot)) != mas_mn(mas))
+ return false;
- if (max > mt_max[this_type])
- max = mt_max[this_type];
+ mas_set_slot(child_mas, slot);
+ mas_descend(child_mas);
+ return true;
+}
- // FIXME: Check entire range, not what we would insert this time.
- if (!overwrite) {
- do {
- if (mas_get_rcu_slot(mas, min++))
- return 0;
- } while (min < max);
+static inline void mas_find_l_split(struct ma_state *mas, struct ma_state *l_mas)
+{
+ unsigned char i, end = mas_data_end(mas);
+ for (i = 0; i <= end; i++) {
+ if (mas_check_split_parent(mas, i, l_mas))
+ return;
}
+ l_mas->node = MAS_NONE;
+}
+static inline void mas_find_r_split(struct ma_state *mas, struct ma_state *r_mas)
+{
+ unsigned char i = mas_data_end(mas);
do {
- mte_update_rcu_slot(mas->node, min++, entry);
- } while (min < max);
-
- if (max != mas->last - mas->min) {
- mas->index = mas->min + max + 1;
- mas_add(mas, entry, overwrite, active);
- }
-
- ret = max - min + 1;
-
- return ret;
+ if (mas_check_split_parent(mas, i, r_mas))
+ return;
+ } while (i--);
+ r_mas->node = MAS_NONE;
}
-
-
-static inline int __mas_add_slot_cnt(struct ma_state *mas,
- unsigned long prev_piv, unsigned char this_slot,
- const unsigned char slot, bool prev_null, bool start)
+static inline int mas_split_leaf(struct ma_state *mas, struct ma_state *l_mas,
+ struct ma_state *r_mas, void *entry,
+ unsigned char new_end)
{
- unsigned long this_piv = mas->min;
- int slot_cnt = 0;
- void *data;
+ unsigned char slot = mas_get_slot(mas); // The store of entry here.
+ unsigned char slot_cnt = mt_slot_count(mas->node);
+ struct maple_big_node b_node;
+ unsigned long piv;
+ void *contents;
+ int ret = 0;
+ int i = 0, j = 0, end;
- while (this_slot < slot) {
- this_piv = mas_get_safe_pivot(mas, this_slot);
- if (!this_piv && this_slot)
- break;
+ b_node.slot[slot] = NULL;
- if (this_piv > mas->max) // possibly a retry.
+ printk("Slot %u put in %lu-%lu\n", slot, mas->index, mas->last);
+ do {
+ b_node.slot[i] = mas_get_rcu_slot(mas, i);
+ b_node.pivot[i] = mas_get_safe_pivot(mas, i);
+ printk("Set %u slot %p => piv %lu\n", i, b_node.slot[i], b_node.pivot[i]);
+ } while (++i < slot);
+ piv = b_node.pivot[i];
+ contents = b_node.slot[i];
+
+ if (mas->index > piv) {
+ b_node.pivot[i++] = mas->index - 1;
+ printk("!Set %u slot %p => piv %lu\n", i, b_node.slot[i-1], b_node.pivot[i-1]);
+ }
+
+ b_node.pivot[i] = mas->last;
+ printk(".Set %u slot %p => piv %lu\n", i, entry, b_node.pivot[i]);
+ b_node.slot[i++] = entry;
+ if (mas->last < piv) {
+ printk("+Set %u slot %p => piv %lu\n", i, contents, piv);
+ b_node.pivot[i] = piv;
+ b_node.slot[i++] = contents;
+ }
+
+ j = i;
+ printk("j = %u\n", j);
+ for (i = slot + 1; i < slot_cnt; i++, j++) {
+ b_node.slot[j] = mas_get_rcu_slot(mas, i);
+ if (i < mt_pivot_count(mas->node))
+ b_node.pivot[j] = mas_get_safe_pivot(mas, i);
+ else {
+ b_node.pivot[j] = mas->max;
break;
+ }
+ printk("Set %u slot %p => piv %lu\n", j, b_node.slot[j], b_node.pivot[j]);
+ }
+ printk("Zero %u\n", j);
+ b_node.slot[j] = NULL;
+ b_node.pivot[j] = 0;
- if (this_piv == prev_piv && this_slot)
- goto skip_slot;
-
- if (this_piv < prev_piv)
- goto skip_slot;
-
- data = mas_get_rcu_slot(mas, this_slot);
- if (!data || mt_will_coalesce(data)) {
- if (xa_is_retry(data))
- goto skip_slot;
-
- if (prev_null)
- goto skip_slot;
-
- prev_null = true;
- } else
- prev_null = false;
-
- slot_cnt++;
-skip_slot:
- prev_piv = this_piv;
- this_slot++;
+ end = i;
+ ret = i / 2;
+ printk("Guessing leaf split of %u (i is %d)\n", ret, i);
+ /* Avoid ending a node in NULL and avoid having a range less than the
+ * slot count
+ */
+ while ((b_node.pivot[ret] - mas->min) < slot_cnt) {
+ printk("Skipping ahead due to min span\n");
+ ret++;
}
- if (start)
- return slot_cnt;
+ if (!b_node.slot[ret]) {
+ if (ret < slot_cnt - 1)
+ ret++;
+ else
+ ret--;
+ }
+ printk("Setting ret (leaf split) to %u\n", ret);
- if (prev_null != true && this_piv != mas->max)
- slot_cnt++;
+ // Copy values up to ret to left.
+ for (i = 0; i <= ret; i++) {
+// printk("left i %u\n", i);
+ mte_set_rcu_slot(l_mas->node, i, b_node.slot[i]);
+ piv = b_node.pivot[i];
+ if (i >= slot_cnt - 1) {
+ ret = i - 1;
+ break;
+ }
+ mte_set_pivot(l_mas->node, i, piv);
+ }
+ printk("l_max is %lu\n", piv);
+ mas_set_slot(l_mas, mte_parent_slot(mas->node));
+ l_mas->max = piv;
+ printk("r_min is %lu\n", piv + 1);
+ r_mas->min = piv + 1;
+
+ j = 0;
+ // In the case of empty.
+ mte_set_pivot(r_mas->node, j, r_mas->max);
+ printk("We are missing one here.. end %u ret %u\n", end, ret);
+ for (i = ret + 2; i <= end; i++, j++) {
+ mte_set_rcu_slot(r_mas->node, j, b_node.slot[i]);
+ mte_set_pivot(r_mas->node, j, b_node.pivot[i]);
+ printk("right j %u i %u piv %lu\n", j, i, b_node.pivot[i]);
+ }
+ mas_set_slot(r_mas, mas_get_slot(l_mas) + 1);
+ printk("Setting slots to %d\n", mas_get_slot(l_mas));
+ printk("%p Last pivot is %u %lu\n", mas_mn(r_mas), j, b_node.pivot[i-1]);
- return slot_cnt;
+ return ret;
}
-static inline int _mas_add_slot_cnt(struct ma_state *mas,
- const unsigned char slot, const unsigned long min,
- const unsigned long max, void *entry)
+static inline int mas_commit_store_split(struct ma_state *mas, void *entry,
+ unsigned char new_end)
{
- int slot_cnt;
- unsigned char slot_max = mt_slot_count(mas->node);
- bool prev_null = false;
- unsigned long prev_piv = (mas->min ? mas->min - 1 : mas->min);
- slot_cnt = __mas_add_slot_cnt(mas, prev_piv, 0, slot, false, true);
- slot_cnt++; // maintains the same slot (this_slot) (1)
- if (min < mas->index) // starts after this_slot.
- slot_cnt++; // (2?)
-
- if (max > mas->last) { // ends before this_slot.
- void *prev_val = mas_get_rcu_slot(mas, slot);
-
- slot_cnt++; // (2 or 3?)
- prev_piv = max;
- if (!prev_val || mt_will_coalesce(prev_val))
- prev_null = true;
- } else {
- if (!entry)
- prev_null = true;
- prev_piv = mas->last;
- }
-
- if (max == mas->max)
- return slot_cnt;
+ struct maple_enode *ancestor = MAS_NONE;
+ struct maple_big_node big_node;
+ unsigned char split = 0;
+ int height = 0;
+ int i;
- slot_cnt += __mas_add_slot_cnt(mas, prev_piv, slot + 1, slot_max,
- prev_null, false);
+ mt_dump(mas->tree);
+ MA_STATE(orig_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(parent, mas->tree, mas->index, mas->last);
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
- return slot_cnt;
-}
+ mas_dup_state(&orig_mas, mas);
+ mas_dup_state(&parent, mas);
-static inline int __mas_add(struct ma_state *mas, void *entry,
- int entry_cnt, bool active, bool append)
-{
- enum maple_type mas_type = mte_node_type(mas->node);
- struct maple_node space;
- struct maple_node *mn = NULL;
- unsigned char data_end = mas_data_end(mas);
- unsigned char slot = mas_get_slot(mas);
- unsigned char end_slot = slot;
- unsigned long src_max = mas->max;
- unsigned long piv, prev_piv = mas->min - 1;
- void *existing_entry = NULL;
- int ret = 0;
+// mas->full_cnt++; // For leaf node.
- MA_STATE(cp, mas->tree, mas->index, mas->last);
+ // Allocation failures will happen early.
+ // TODO: Increase this by one when optimizing with a rebalance.
+ mas_node_cnt(mas, 1 + mas->full_cnt * 2);
+ if (mas_is_err(mas))
+ return 0;
- /* Append only if we are appending AND the slot is truly empty.
- * If it's delete, skip, etc, then RCU requires a new node.
+ /* At the start of the loop, the maple state points to the leaf that
+ * needs to be split. The loop walks up and splits all nodes that need
+ * to be split. Obviously it will overflow nodes that need to be split,
+ * so leaf is split into two and then placed in a maple_big_node which
+ * has enough space. Each iteration after that will use the
+ * maple_big_node data to split equally left and right until the height
+ * is larger than mas->full_cnt which will place the maple_big_node data
+ * into the highest common ancestor necessary to be modified (which may
+ * be a new root).
*/
- if (append && !mas_get_rcu_slot(mas, data_end + 1)) {
- mas_set_slot(mas, data_end + 1);
- mas_append_entry(mas, entry);
- return ret;
- }
+ printk("full cnt = %u\n", mas->full_cnt);
+ while (height++ <= mas->full_cnt) {
+ struct maple_node *l, *r;
+ enum maple_type type = mte_node_type(mas->node);
+ printk("height %d\n", height);
+
+ if (height > mas->full_cnt) {
+ // The last node to be created.
+ if (mte_is_root(mas->node)) {
+ if (mt_is_alloc(mas->tree))
+ type = maple_arange_64;
+ else
+ type = maple_range_64;
+ }
+ /* Only a single node is used here, could be root.
+ * Big_node should just fit in a single node.
+ */
+ ancestor = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+ type);
+ for (i = 0; i < mt_slots[type]; i++) {
+ if (!big_node.pivot[i])
+ break;
+ mte_set_rcu_slot(ancestor, i, big_node.slot[i]);
+ if (i < mt_pivots[type])
+ mte_set_pivot(ancestor, i,
+ big_node.pivot[i]);
+ if (mt_is_alloc(mas->tree))
+ mte_set_gap(ancestor, i,
+ big_node.gap[i]);
+ }
+ // Set the parent for the children.
+ printk("Placing left %u and right %u\n",
+ mas_get_slot(&l_mas), mas_get_slot(&r_mas));
+ mte_set_parent(l_mas.node, ancestor,
+ mas_get_slot(&l_mas));
+ mte_set_parent(r_mas.node, ancestor,
+ mas_get_slot(&r_mas));
+ mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
+ continue;
+ }
- mas_dup_state(&cp, mas);
+ l = ma_mnode_ptr(mas_next_alloc(mas));
+ r = ma_mnode_ptr(mas_next_alloc(mas));
- if (slot)
- prev_piv = mte_get_pivot(mas->node, slot - 1);
+ mas_dup_state(&l_mas, mas);
+ mas_dup_state(&r_mas, mas);
+ l_mas.node = mt_mk_node(l, type);
+ r_mas.node = mt_mk_node(r, type);
+ if (mte_is_leaf(mas->node)) {
+ printk("Splitting leaf %p\n", mas_mn(mas));
+ split = mas_split_leaf(mas, &l_mas, &r_mas, entry,
+ new_end);
+ } else {
+ unsigned char slot_cnt = mt_slot_count(mas->node); /* should be full. */
+ unsigned char p_slot, j;
+
+ /* TODO: Check rebalancing to avoid continuing walking
+ * up if there is space in a neighbour.
+ *
+ * TODO: Use the same splitting as leaves, using height
+ * to figure if there is enough room below.
+ */
- if (active) {
- cp.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
- mas_type);
- mn = mas_mn(mas);
- } else {
- // Note cp.node == mas->node here.
- mn = &space;
- memcpy(mn, mas_mn(mas), sizeof(struct maple_node));
- memset(mas_mn(&cp), 0, sizeof(struct maple_node));
- }
- mas_mn(&cp)->parent = mn->parent;
- if (prev_piv == mas->index - 1) {
- if (slot) // slot - 1 will translate to slot - 1 + 1.
- end_slot = _mas_append(&cp, mn, mas_type, src_max, 0,
- slot - 1);
- } else {
- end_slot = _mas_append(&cp, mn, mas_type, src_max, 0, slot);
- if (end_slot < mt_pivot_count(cp.node))
- mte_set_pivot(cp.node, end_slot, mas->index - 1);
- }
+ /* internal node split - cut in half for now. */
+ i = 0;
+ do {
+ printk("copy %u left\n", i);
+ mte_set_rcu_slot(l_mas.node, i,
+ big_node.slot[i]);
+ mte_set_pivot(l_mas.node, i,
+ big_node.pivot[i]);
+ if (mt_is_alloc(mas->tree))
+ mte_set_gap(l_mas.node, i,
+ big_node.gap[i]);
+ } while (++i < slot_cnt / 2);
+
+ l_mas.max = big_node.pivot[i];
+ r_mas.min = l_mas.max + 1;
+
+ for (j = 0; i < slot_cnt; i++, j++) {
+ printk("copy %u right\n", i);
+ mte_set_rcu_slot(r_mas.node, j,
+ big_node.slot[i]);
+ mte_set_pivot(r_mas.node, j,
+ big_node.pivot[i]);
+ if (mt_is_alloc(mas->tree))
+ mte_set_gap(r_mas.node, j,
+ big_node.gap[i]);
+ }
- mas_set_slot(&cp, end_slot);
- end_slot = mas_append_entry(&cp, entry) + 1;
+ p_slot = mas_get_slot(&prev_l_mas);
+ if (p_slot < split) {
+ // prev left side is on left.
+ mte_set_parent(prev_l_mas.node, l_mas.node,
+ p_slot);
+ } else {
+ mte_set_parent(prev_l_mas.node, r_mas.node,
+ p_slot - split + 1);
+ }
+ if (p_slot + 1 < split) {
+ // prev right side is on left.
+ mte_set_parent(prev_r_mas.node, l_mas.node,
+ p_slot + 1);
+ } else {
+ mte_set_parent(prev_r_mas.node, r_mas.node,
+ p_slot + 1 - split + 1);
+ }
+ }
- // Partial slot overwrite
- slot = mas_skip_overwritten(mas, data_end, slot);
- if (slot >= mt_slot_count(mas->node))
- goto done; // potential spanning add.
+ i = 0;
+ if (!mte_is_root(parent.node)) {
+ printk("Ascend parent %p\n", mas_mn(&parent));
+ mas_ascend(&parent); /* Go up a level */
+ do {
+ if (!big_node.pivot[i])
+ break;
+ printk("Copy parent slot %u\n", i);
+ big_node.slot[i] = mas_get_rcu_slot(&parent, i);
+ big_node.pivot[i] =
+ mas_get_safe_pivot(&parent, i);
+ if (mt_is_alloc(mas->tree))
+ big_node.gap[i] =
+ mte_get_gap(parent.node, i);
+ } while (++i < mas_get_slot(&l_mas));
+ }
- mas_get_range(mas, slot, &prev_piv, &piv);
- existing_entry = mas_get_rcu_sanitized(mas, slot);
- if (prev_piv <= mas->last && piv > mas->last) {
- mte_set_rcu_slot(cp.node, end_slot, existing_entry);
- mas_set_safe_pivot(&cp, end_slot++, piv);
- cp.max = piv;
- slot++;
+ split = i;
+ printk("Split is %u\n", split);
+ big_node.slot[i] = l_mas.node;
+ big_node.pivot[i] = l_mas.max;
+ mas_set_slot(&l_mas, i);
+ if (mt_is_alloc(mas->tree)) {
+ if (mte_is_leaf(l_mas.node)) {
+ big_node.gap[i] = mas_leaf_max_gap(&l_mas);
+ big_node.gap[i + 1] = mas_leaf_max_gap(&r_mas);
+ } else {
+ unsigned char slot;
+ big_node.gap[i] = mas_max_gap(&l_mas, &slot);
+ big_node.gap[i + 1] =
+ mas_max_gap(&r_mas, &slot);
+ }
+ }
+ big_node.slot[++i] = r_mas.node;
+ big_node.pivot[i] = r_mas.max;
+ mas_set_slot(&r_mas, i);
+ printk("New right is in %u (%p)\n", i, big_node.slot[i]);
+ printk("piv right is %lu\n", r_mas.max);
+ mas_dup_state(&prev_l_mas, &l_mas);
+ mas_dup_state(&prev_r_mas, &r_mas);
+ printk("Zero %u\n", i + 1);
+ big_node.pivot[i + 1] = 0;
+ big_node.slot[i + 1] = NULL;
+ if (!mte_is_root(mas->node)) {
+ mas_ascend(mas);
+ for (++i; i < mt_slot_count(parent.node); i++) {
+ printk("post-insert Copy parent slot %u\n", i);
+ big_node.pivot[i + 1] =
+ mas_get_safe_pivot(&parent, i);
+ if (!big_node.pivot[i + 1])
+ break;
+ big_node.slot[i + 1] =
+ mas_get_rcu_slot(&parent, i);
+
+ if (mt_is_alloc(mas->tree))
+ big_node.gap[i + 1] =
+ mte_get_gap(parent.node, i);
+ }
+ printk("Zero %u\n", i + 1);
+ big_node.pivot[i] = 0;
+ big_node.slot[i] = NULL;
+ }
}
- if (slot <= data_end && cp.max < mas->max)
- _mas_append(&cp, mn, mas_type, src_max, slot, data_end);
-done:
- if (active)
- mas->node = cp.node;
- return ret;
-}
-static inline bool _mas_walk(struct ma_state *mas);
-static inline int mas_replace_tree(struct ma_state *mas, void *new_entry);
-static inline bool mas_rebalance_node(struct ma_state *mas);
-static inline unsigned long mas_next_node(struct ma_state *mas,
- unsigned long max);
+ mas->node = ancestor;
+ printk("Using %p\n", ancestor);
+ BUG_ON(mas_is_none(mas));
+ // Set the original node as dead
+ mas_set_node_dead(&orig_mas);
+ smp_wmb();
-/* Private
- *
- * mas_rebalance_gaps() - walk down to the mas->index location and update the
- * gaps.
- *
- *
- */
-static inline void mas_rebalance_gaps(struct ma_state *mas)
-{
- if (mt_is_alloc(mas->tree)) {
- MA_STATE(r_mas, mas->tree, mas->index, mas->last);
- mas->node = MAS_START;
- _mas_walk(mas); // return to the updated location in the tree.
- mas_dup_state(&r_mas, mas);
- mas_update_gap(mas, true);
- mas_dup_state(mas, &r_mas);
- mas_set_slot(&r_mas, mte_parent_slot(mas->node));
- mas_next_node(&r_mas, ULONG_MAX);
- if (!mas_is_none(&r_mas))
- mas_update_gap(&r_mas, true);
+ // Insert the new data in the tree
+ _mas_replace(mas, false, false);
+
+ /* The new nodes have the correct parent set, so follow the child with
+ * the correct parent on each split. If there is no child with the
+ * correct parent, then the other side of the split will have two
+ * children with the correct parent. Once the new children are found,
+ * then set the correct parent in all of of the parent's children.
+ */
+ mas_dup_state(&prev_l_mas, mas);
+ mas_dup_state(&prev_r_mas, mas);
+ while (!mte_is_leaf(l_mas.node)) {
+
+ mas_find_l_split(&prev_l_mas, &l_mas);
+ printk("new child of %p is %p\n", mas_mn(&prev_l_mas), mas_mn(&l_mas));
+ if (mas_is_none(&l_mas)) {
+ mas_dup_state(&l_mas, &r_mas);
+ mas_adopt_children(&prev_l_mas, prev_l_mas.node);
+ mas_dup_state(&prev_l_mas, &prev_r_mas);
+ mas_find_l_split(&prev_l_mas, &l_mas);
+ }
+ mas_find_r_split(&prev_r_mas, &r_mas);
+ printk("new child of %p is %p\n", mas_mn(&prev_r_mas), mas_mn(&r_mas));
+ if (mas_is_none(&r_mas)) {
+ mas_dup_state(&r_mas, &l_mas);
+ mas_dup_state(&prev_r_mas, &prev_l_mas);
+ mas_find_r_split(&prev_r_mas, &l_mas);
+ }
+ mas_adopt_children(&prev_l_mas, prev_l_mas.node);
+ mas_descend(&l_mas);
+ mas_dup_state(&prev_l_mas, &l_mas);
+ if (prev_r_mas.node != prev_l_mas.node)
+ mas_adopt_children(&prev_r_mas, prev_r_mas.node);
+ mas_descend(&r_mas);
+ mas_dup_state(&prev_r_mas, &r_mas);
}
+ mt_dump(mas->tree);
+ return 1;
}
/* Private
+ * mas_wr_slot_cnt() - Calculate the slots required to write @entry.
+ * @mas - the maple state
+ * @r_min - the minimum range of the slot in @mas
+ * @r_max - the maximum range of the slot in @mas
+ * @entry - the entry that is going to be written.
*
- * mas_spanning_add() - Add a value which spans the nodes range. This is
- * handled separately than other adds because the tree may need significant
- * alterations.
- *
- * Current plan:
- * Alter in-node data to use the new maximum, walk up the tree setting the
- * pivots & inserting skip/retry values as well as rebalance once the nodes
- * have been altered.
- *
- *
+ * Note: May return larger than the node can hold.
*/
-static inline void mas_spanning_cleanup(struct ma_state *p, struct ma_state *c,
- unsigned long new_pivot)
+static inline unsigned char mas_wr_slot_cnt(struct ma_state *mas,
+ unsigned long r_min, unsigned long r_max,
+ void *entry)
{
- struct ma_state prev, curr;
- unsigned char p_pslot, p_cslot; // parent previous and current slot.
- enum maple_type p_type;
- struct maple_node *parent;
+ unsigned char new_end = mas_data_end(mas);
+ unsigned char i, slot = mas_get_slot(mas);
+ void *contents = mas_get_rcu_slot(mas, slot);
+ unsigned long end_max = r_max;
- mas_dup_state(&prev, p);
- mas_dup_state(&curr, c);
- p_type = mas_parent_enum(&prev, prev.node);
- parent = mte_parent(prev.node);
- p_pslot = mte_parent_slot(prev.node);
- p_cslot = mte_parent_slot(curr.node);
+ printk("Checking slot %u with %lu-%lu\n", slot, r_min, r_max);
+ printk(" entry %p with %lu-%lu\n", entry, mas->index, mas->last);
+ if (!contents && !entry)
+ goto check_spans_slots; // Overwriting a null with nulls.
- if (mte_parent(prev.node) == mte_parent(curr.node)) {
- // Set all pivots up to p_cslot to new_pivot.
- while (++p_pslot < p_cslot)
- ma_set_pivot(parent, p_pslot, p_type, new_pivot);
+ if (mas->index > r_min)
+ new_end++;
- return;
- }
+ if (mas->last < r_max)
+ new_end++;
+ else if (mas->last > r_max)
+ goto check_spans_slots;
- // Not the same parent, clear out to the end of p_pslot and the start of
- // p_cslot.
- while(++p_pslot < mt_pivots[p_type]) {
- if (!ma_get_pivot(parent, p_pslot, p_type))
- break;
- ma_set_pivot(parent, p_pslot, p_type, new_pivot);
- }
+ printk("%s: new_end is now %u\n", __func__, new_end);
+ return new_end;
- if (!p_cslot)
- return;
+check_spans_slots: // A store may span slots but
- parent = mte_parent(curr.node);
- p_type = mas_parent_enum(&curr, curr.node);
+ i = slot;
+ while (end_max < mas->last && i < mt_slot_count(mas->node) - 1) {
+ printk("Checking %u\n", i);
+ if (new_end == slot)
+ break;
+ end_max = mte_get_pivot(mas->node, ++i);
+ new_end--;
+ }
- do {
- ma_set_pivot(parent, --p_cslot, p_type, new_pivot);
- } while (p_cslot);
+ printk("%s: check_spans_slot new_end is now %u\n", __func__, new_end);
+ return new_end;
}
-static inline int mas_spanning_add(struct ma_state *mas, void *entry,
- unsigned long old_max)
-{
- unsigned char p_slot;
- unsigned long new_pivot = mas->last;
- int i;
-
- MA_STATE(r_mas, mas->tree, mas->index, mas->last); // right mas.
- MA_STATE(p_mas, mas->tree, mas->index, mas->last); // parent mas.
- mas_dup_state(&p_mas, mas); // point to the start node.
- mas_ascend(&p_mas);
- p_slot = mte_parent_slot(mas->node);
- do {
- MA_STATE(tmp, mas->tree, mas->index, mas->last); // prev mas.
+static inline int mas_spanning_store(struct ma_state *mas, void *entry)
+{
+ printk("Not implemented to store %lu-%lu, span starts at %p\n",
+ mas->index, mas->last, mte_to_node(mas->span_enode));
+ BUG_ON(1);
- mas_set_slot(mas, p_slot); // for mas_next_node.
- mas_set_slot(&p_mas, p_slot); // for pivot changes in parent.
+ if (mt_is_alloc(mas->tree))
+ mas_update_gap(mas, false);
- mas_dup_state(&r_mas, mas); // point to the start node.
- mas_dup_state(&tmp, &r_mas);
- mas_set_slot(&r_mas, mte_parent_slot(r_mas.node));
+}
+static inline int mas_commit_store(struct ma_state *mas, unsigned long r_min,
+ unsigned long r_max, unsigned char new_end,
+ void *entry, bool active)
+{
+ void *this_entry;
+ unsigned long piv;
+ unsigned char slot, end, i, j;
+ struct maple_enode *new_node;
- mas_next_node(&r_mas, ULONG_MAX);
- // Update the pivots.
- mas->max = new_pivot;
- mas_set_safe_pivot(&p_mas, p_slot, mas->max);
-
- if (mas_is_none(&r_mas))
- goto done;
-
- mas_set_slot(&r_mas, mte_parent_slot(r_mas.node));
-
- while (!mas_is_none(&r_mas)) {
- mas_spanning_cleanup(&tmp, &r_mas, new_pivot);
- if (r_mas.max <= r_mas.last) {
- struct maple_enode *enode = r_mas.node;
-
- i = mte_parent_slot(enode);
- mas_ascend(&r_mas);
- mte_set_rcu_slot(r_mas.node, i, XA_SKIP_ENTRY);
- mas_set_safe_pivot(&r_mas, i, r_mas.last);
- if (mt_is_alloc(r_mas.tree))
- mte_set_gap(r_mas.node, i, 0);
- mas_dup_state(&r_mas, &tmp);
- mte_free(enode);
- } else {
- unsigned long piv = mas->min;
+ printk("Commit store %lu\n", mas->index);
+ if (new_end > mt_slot_count(mas->node))
+ return mas_commit_store_split(mas, entry, new_end);
- for (i = 0; i < mt_slot_count(r_mas.node); i++) {
- void *val = XA_RETRY_ENTRY;
+ piv = mas->last;
+ this_entry = entry;
+ end = mas_data_end(mas);
+ slot = mas_get_slot(mas);
- piv = mas_get_safe_pivot(&r_mas, i);
- if (!piv)
- break;
+ if ((slot > end) || !end) {
+ printk("Appending new end %u and %u\n", new_end, end);
+ // Appending
+ if (new_end != end + 1) {
+ mte_set_rcu_slot(mas->node, slot + 1, entry);
+ mte_set_pivot(mas->node, slot + 1, mas->last);
+ this_entry = NULL;
+ piv = mas->index - 1;
+ }
- if (piv > r_mas.last)
- break;
+ mte_set_rcu_slot(mas->node, slot, this_entry);
+ mte_set_pivot(mas->node, slot, piv);
+ goto done;
+ }
- if (!mte_is_leaf(r_mas.node))
- val = XA_SKIP_ENTRY;
+ mas_node_cnt(mas, 1);
+ if (mas_is_err(mas))
+ return 0;
- mte_set_rcu_slot(r_mas.node, i, val);
- if (i < mt_pivot_count(r_mas.node))
- mte_set_pivot(r_mas.node, i,
- r_mas.last);
+ new_node = mt_mk_node(mas_next_alloc(mas), mte_node_type(mas->node));
+ mte_to_node(new_node)->parent = mas_mn(mas)->parent;
+ printk("Copy 0 - %u\n", slot);
+ for (i = 0; i <= slot; i++) {
+ this_entry = mas_get_rcu_slot(mas, i);
+ mte_set_rcu_slot(new_node, i, this_entry);
+ piv = mte_get_pivot(mas->node, i);
+ mte_set_pivot(new_node, i, piv);
+ }
+ i--;
- if (!mte_is_leaf(r_mas.node) &&
- mt_is_alloc(r_mas.tree))
- mte_set_gap(r_mas.node, i, 0);
+ j = i;
+ printk("r_min %lu\n", r_min);
+ if (j && mas->index != r_min) {
+ printk("Stop pivot %u at %lu\n", j - 1, mas->index - 1);
+ mte_set_pivot(new_node, j - 1, mas->index - 1);
+ }
- }
- break;
- }
- mas_dup_state(&tmp, &r_mas);
- mas_set_slot(&r_mas, mte_parent_slot(r_mas.node));
- mas_next_node(&r_mas, ULONG_MAX);
- }
+ printk("last piv is %lu\n", piv);
+ printk("Setting %u to %lu\n", j, mas->last);
+ mte_set_rcu_slot(new_node, j, entry);
+ mte_set_pivot(new_node, j++, mas->last);
- if (mas_is_none(&r_mas))
- mas_dup_state(&r_mas, &tmp);
+ if (mas->last < r_max ) {
+ printk("Setting %u\n", j);
+ mte_set_rcu_slot(new_node, j, this_entry);
+ mte_set_pivot(new_node, j++, piv);
+ }
- if (r_mas.max > mas->last && !mas_rebalance_node(mas)) {
- // Best effort, no allocation required.
- if (mt_is_alloc(mas->tree))
- mas_update_gap(&r_mas, true);
+ for (i = slot + 1; i <= end; i++, j++) {
+ piv = mas_get_safe_pivot(mas, i);
+ if (mas->last >= piv)
+ continue;
- if (mas_is_err(mas))
- return 0;
- }
+ printk("Setting %u\n", j);
+ mte_set_rcu_slot(new_node, j, mas_get_rcu_slot(mas, i));
+ if (j < mt_pivot_count(new_node))
+ mte_set_pivot(new_node, j, piv);
- if (mas_is_err(mas))
- return 0;
+ }
+ printk("Replace %p with %p\n", mas_mn(mas), mte_to_node(new_node));
+ mas->node = new_node;
+ mt_dump(mas->tree);
+ mas_replace(mas);
+done:
+ if (mt_is_alloc(mas->tree))
+ mas_update_gap(mas, false);
- mas_dup_state(&p_mas, mas); // parent may be replaced.
- mas_ascend(&p_mas);
+ return new_end - end;
+}
- if (mas_is_err(mas))
- return 0; // FIXME: Broken tree?
+// FIXME: When task_size / page_size -1 works, check to ensure we are
+// not inserting above this.
+static inline int mas_root_expand(struct ma_state *mas, void *entry)
+{
+ void *contents = rcu_dereference_protected(mas->tree->ma_root,
+ lockdep_is_held(&mas->tree->ma_lock));
+ enum maple_type mt = mas_ptype_leaf(mas);
+ int slot = 0;
- // parent slot may have changed during rebalance.
- p_slot = mte_parent_slot(mas->node);
- // Set up for the next loop.
- if (!mte_is_root(mas->node)) {
- // Set the current parent slot for ascend.
- mas_set_slot(mas, p_slot);
- mas_ascend(mas);
- // Get the new levels parent slot (grand-parent slot)
- p_slot = mte_parent_slot(mas->node);
- if (!mte_is_root(p_mas.node)) {
- // Set the slot for ascending.
- mas_set_slot(&p_mas, p_slot);
- mas_ascend(&p_mas);
- }
- }
+ mas_node_cnt(mas, 1);
+ if (mas_is_err(mas))
+ return 0;
- if (mas->max > new_pivot)
- new_pivot = mas->max;
+ mas->node = mt_mk_node(mas_next_alloc(mas), mt);
+ mas_mn(mas)->parent = ma_parent_ptr(
+ ((unsigned long)mas->tree | MA_ROOT_PARENT));
- } while (mas->max <= mas->last);
+ if (mas->index) {
+ mte_set_rcu_slot(mas->node, slot, contents);
+ if (mas->index > 1)
+ mte_set_pivot(mas->node, slot, mas->index - 1);
+ slot++;
+ }
-done:
+ mte_set_rcu_slot(mas->node, slot, entry);
+ mte_set_pivot(mas->node, slot++, mas->last);
- if (!mte_is_root(mas->node))
- mas_set_safe_pivot(&p_mas, p_slot, mas->max);
+ if (mt_is_alloc(mas->tree)) {
+ //FIXME: arch_get_mmap_end? mas->index = TASK_SIZE / PAGE_SIZE - 1;
+ unsigned long mmap_end = 0x2000000000000UL;
+ if (mas->index < mmap_end - 1)
+ mte_set_pivot(mas->node, slot++, mmap_end - 1);
+ mte_set_rcu_slot(mas->node, slot, XA_ZERO_ENTRY);
+ mte_set_pivot(mas->node, slot++, mt_max[mt]);
+ }
- mas_rebalance_node(mas);
- if (mas_is_err(mas))
- return 0; // FIXME: Broken tree?
+ /* swap the new root into the tree */
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ return slot;
+}
+static inline int ma_root_ptr(struct ma_state *mas, void *entry,
+ void *contents, bool overwrite)
+{
+ int ret = 1;
+ if (xa_is_node(mas->tree->ma_root))
+ return 0;
- mas_rebalance_gaps(mas);
+ if (mas->tree->ma_root && mas->last == 0) {
+ contents = rcu_dereference_protected(mas->tree->ma_root,
+ lockdep_is_held(&mas->tree->ma_lock));
+ if (!overwrite)
+ goto exists;
+ } else {
+ contents = NULL;
+ }
- return 1;
+ if (mas->last != 0)
+ ret = mas_root_expand(mas, entry);
+ else if (((unsigned long) (entry) & 3) == 2)
+ ret = mas_root_expand(mas, entry);
+ else
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ return ret;
+
+exists:
+ mas_set_err(mas, -EEXIST);
+ return 0;
}
/* Private
*
- * Insert entry into a node.
- * If this is not an append, a new node will be generated.
- * If this node is full, split the node & insert or overwrite
- *
- * This is done by:
- * 1. Calculating the range of slot.
- * 2. Figure out how many slots are needed for the entry. (0, 1, 2)
- * 3. Copy the data over
- * 4. Write the entry.
+ * mas_is_span_() - Set span_enode if there is no value already and the
+ * entry being written spans this nodes slot or touches the end of this slot and
+ * is NULL.
+ * @piv - the pivot of the slot in this node
+ * @entry - the entry that is going to be written.
*
- * Returns the number of slots used on success, the slot number on failure.
*/
-static inline int _mas_add(struct ma_state *mas, void *entry, bool overwrite,
- bool active)
+static inline void mas_is_span_wr(struct ma_state *mas, unsigned long piv,
+ void *entry)
{
- enum maple_type this_type = mte_node_type(mas->node);
- unsigned long last_piv;
- unsigned char coalesce;
- unsigned char old_end, new_end;
- unsigned long max = mas->max;
- unsigned long min = mas->min;
- unsigned char slot = mas_get_slot(mas);
- unsigned char slot_cnt = mt_slots[this_type] - 1;
- struct maple_enode *prev_enode = NULL;
- void *contents = NULL;
- bool append = false;
- unsigned long spans_node = 0;
- int ret = 0;
+ if (mas->span_enode) // Already a spanning store.
+ return;
+
+ if (piv > mas->last) // Contained in this pivot
+ return;
+ if (!mte_is_leaf(mas->node)) {
+ if ( mas->last < piv) // Fits in the slot.
+ return;
- if (ma_is_dense(this_type)) {
- ret = _mas_add_dense(mas, entry, slot, this_type, overwrite,
- active);
- if (!ret)
- return ret;
- old_end = 0; // fixme.
- goto update_gap;
+ if (entry && piv == mas->last) // Writes to the end of the child node, but has a value.
+ return;
+ } else {
+ if (mas->last < mas->max) // Fits in the node, but may span slots.
+ return;
+
+ if (entry && mas->last == mas->max) // Writes to the end of the node but not null.
+ return;
}
+ mas->span_enode = mas->node;
+ mas->last_cnt = 0;
+}
- // Bug if we are adding an entry to a non-leaf node.
- MT_BUG_ON(mas->tree, !ma_is_leaf(this_type));
+static inline bool mas_node_walk(struct ma_state *mas, enum maple_type type,
+ unsigned long *range_min, unsigned long *range_max)
+{
+ unsigned char i;
+ unsigned long min = mas->min, pivot = 0;
+ bool ret = true;
- old_end = _mas_data_end(mas, this_type, &last_piv, &coalesce);
- if (slot > slot_cnt) // search returned MAPLE_NODE_SLOTS
- slot = old_end + 1;
+ switch (type) {
+ default:
+ for (i = mas_get_slot(mas); i < mt_slots[type]; i++) {
+ pivot = _mas_get_safe_pivot(mas, i, type);
- mas_get_range(mas, slot, &min, &max);
- if (mas_get_slot(mas) > slot_cnt)
- max = mas->max;
+ if (!pivot && i) {
+ if (mas->max < mas->index) {
+ i = MAPLE_NODE_SLOTS;
+ ret = false;
+ }
+ pivot = mas->max;
+ break;
+ }
- if (slot <= old_end)
- contents = mas_get_rcu_slot(mas, slot);
+ if (mas->index <= pivot)
+ break;
+ min = pivot + 1;
+ }
+ break;
+ case maple_dense:
+ // Linear node.
+ // What if mas->index != mas->last?
+ pivot = min = mas->index;
+ i = mas->index = mas->min;
+ break;
+ }
- // Check early failures.
- if (!overwrite) {
- if (mas->last > max) { // spans range.
- // FIXME, this may be fine if the range isn't
- // coalesced, or such?
- mas_set_err(mas, -ERANGE);
- return 0;
- }
- if (!mt_is_empty(contents)) {
- mas_set_err(mas, -EBUSY);
- return 0;
- }
+ if (ret) {
+ *range_min = min;
+ *range_max = pivot;
}
+ mas_set_slot(mas, i);
+ return ret;
+}
+/* Private
+ *
+ * mas_wr_walk(): Walk the tree for a write. Tracks extra information which
+ * is used in special cases of a write.
+ * @range_min - pointer that will be set to the minimum of the slot range
+ * @range_max - pointer that will be set to the maximum of the slot range
+ * @entry - the value that will be written.
+ */
+static inline bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
+ unsigned long *range_max, void *entry, bool overwrite)
+{
+ enum maple_type type;
+ struct maple_enode *next;
+ unsigned char end;
+ bool ret = false;
- if (mas->last > mas->max) // spans node.
- spans_node = mas->max;
+ mas->span_enode = NULL;
+ mas->full_cnt = 0;
+ mas->last_cnt = 0;
- // At this point, the we can perform the add.
- if (!mte_is_leaf(mas->node)) {
- // An allocation failed previously during a rebalance. There
- // is no way to know how broken things are, so try to rebuild
- // the tree.
- mas_reset(mas);
- mas_first_node(mas, ULONG_MAX);
- return mas_replace_tree(mas, entry);
- }
-
- // Fits neatly into a slot.
- if (mas->index == min && mas->last == max) {
- mte_set_rcu_slot(mas->node, slot, entry);
- if (slot < slot_cnt)
- mte_set_pivot(mas->node, slot, mas->last);
- ret = 1;
- goto complete;
- }
- new_end = _mas_add_slot_cnt(mas, slot, min, max, entry);
- if (new_end > slot_cnt + 1) {
- mas_split(mas, slot, active, old_end, entry);
- if (mas_is_err(mas))
- return 0;
+ while (true) {
+ type = mte_node_type(mas->node);
- ret = old_end - new_end;
- goto complete;
- }
- if (active) {
- mas_node_cnt(mas, 1);
- if (mas_is_err(mas))
- return 0;
- }
- prev_enode = mas->node;
- if (slot > old_end && !coalesce)
- append = true;
+ mas->last_cnt++;
+ mas->full_cnt++;
+ end = mas_data_end(mas);
+ printk("End of %p is %u type %u\n", mas_mn(mas), end, mt_slots[type] - 1);
+ if (end < mt_slots[type] - 1)
+ mas->full_cnt = 0;
- mas_set_slot(mas, slot);
- __mas_add(mas, entry, old_end, active, append);
- mas_set_slot(mas, slot);
+ if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
+ return false;
-complete:
- if (prev_enode != mas->node)
- _mas_replace(mas, active, true);
+ mas_is_span_wr(mas, *range_max, entry);
- // Spanning a node can be complex.
- if (spans_node)
- ret = mas_spanning_add(mas, entry, spans_node);
+ if (ma_is_leaf(type))
+ return true;
- // FIXME: Allocation failures from mas_spanning_add?
-update_gap:
- if (mt_is_alloc(mas->tree)) {
- mas_update_gap(mas, false);
- if (!entry && (slot >= old_end || !slot))
- mas_may_move_gap(mas);
- }
+ next = mas_get_rcu_slot(mas, mas_get_slot(mas));
- return ret;
-}
+ // Traverse.
+ mas->max = *range_max;
+ mas->min = *range_min;
+ if (unlikely(mt_is_empty(next)))
+ return false;
-static inline void ma_inactive_insert(struct ma_state *mas, void *entry)
-{
- // Restart search for where to insert.
- mas->node = MAS_START;
- mas_start(mas);
- mas_add(mas, entry, true, false);
+ mas->node = next;
+ mas_set_slot(mas, 0);
+ }
+ return ret;
}
-static inline void mas_insert(struct ma_state *mas, void *entry)
+static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)
{
- mas_add(mas, entry, false, true);
-}
+ unsigned long r_max, r_min;
+ unsigned char new_end;
+ void *content = NULL;
-static inline int _mas_insert(struct ma_state *mas, void *entry,
- unsigned char slot, bool active)
-{
- mas_set_slot(mas, slot);
- return _mas_add(mas, entry, false, active);
-}
+ int ret = 0;
-static inline void mas_root_expand(struct ma_state *mas, void *entry)
-{
- void *r_entry = rcu_dereference_protected(mas->tree->ma_root,
- lockdep_is_held(&mas->tree->ma_lock));
- struct maple_node *mn;
- enum maple_type mt = mas_ptype_leaf(mas);
- int slot = 0;
+ printk("Start: %s %d store %lu-%lu\n", __func__, __LINE__,
+ mas->index, mas->last);
+
+ if (mas_start(mas) || (mas_is_none(mas) || mas->node == MAS_ROOT))
+ ret = ma_root_ptr(mas, entry, content, overwrite);
- mas_node_cnt(mas, 1);
if (mas_is_err(mas))
- return;
+ return NULL;
- mn = mas_next_alloc(mas);
- mas->node = mt_mk_node(mn, mt);
- mn->parent = ma_parent_ptr(
- ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ if (ret > 2)
+ return NULL;
+ else if (ret)
+ return content;
- mte_set_rcu_slot(mas->node, slot, r_entry);
- mte_set_pivot(mas->node, slot, 0);
- if (r_entry)
- mas_set_slot(mas, 1);
+ if (!mas_wr_walk(mas, &r_min, &r_max, entry, overwrite)) {
+ /* Not a leaf = broken tree. */
+ // FIXME, rebuild?
+ printk("%s %d\n", __func__, __LINE__);
+ return NULL;
+ }
- // FIXME: When task_size / page_size -1 works, check to ensure we are
- // not inserting above this.
- __mas_add(mas, entry, slot++, false, false);
- if (mas_is_err(mas))
- return;
+ if (mas->span_enode) {
+ printk("%s %d\n", __func__, __LINE__);
+ if (!overwrite) {
+ mas_set_err(mas, -EEXIST);
+ return NULL; // spanning writes always overwrite something.
+ }
+ ret = mas_spanning_store(mas, entry);
+ goto done;
+ }
- if (mas->last != 1)
- slot++;
- //_mas_insert(mas, entry, slot, false);
+ /* At this point, we are at the leaf node that needs to be altered. */
+ /* Calculate needed space */
+ new_end = mas_wr_slot_cnt(mas, r_min, r_max, entry);
+ content = mas_get_rcu_slot(mas, mas_get_slot(mas));
+ if (!overwrite && ((mas->last > r_max) || content )) {
+ mas_set_err(mas, -EEXIST);
+ goto done;
+ }
+ ret = mas_commit_store(mas, r_min, r_max, new_end, entry, true);
- if (mas_is_err(mas))
- return;
+done:
+ if (ret > 2)
+ return NULL;
- if (mt_is_alloc(mas->tree)) {
- //FIXME: arch_get_mmap_end? mas->index = TASK_SIZE / PAGE_SIZE - 1;
- mas_set_slot(mas, 2);
- mas->index = 0x2000000000000UL;
- mas->last = mt_max[mt];
- __mas_add(mas, XA_ZERO_ENTRY, slot, false, false);
- if (mas_is_err(mas))
- return;
+ return content;
+}
+void *mas_store(struct ma_state *mas, void *entry)
+{
+ if (mas->index > mas->last) {
+ mas_set_err(mas, -EINVAL);
+ return NULL;
}
- /* swap the new root into the tree */
- rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ return _mas_store(mas, entry, true);
+}
+
+static inline bool _mas_walk(struct ma_state *mas);
+static inline bool mas_rebalance_node(struct ma_state *mas);
+static inline unsigned long mas_next_node(struct ma_state *mas,
+ unsigned long max);
+
+/* Private
+ *
+ * mas_rebalance_gaps() - walk down to the mas->index location and update the
+ * gaps.
+ *
+ *
+ */
+static inline void mas_rebalance_gaps(struct ma_state *mas)
+{
+ if (mt_is_alloc(mas->tree)) {
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ mas->node = MAS_START;
+ _mas_walk(mas); // return to the updated location in the tree.
+ mas_dup_state(&r_mas, mas);
+ mas_update_gap(mas, true);
+ mas_dup_state(mas, &r_mas);
+ mas_set_slot(&r_mas, mte_parent_slot(mas->node));
+ mas_next_node(&r_mas, ULONG_MAX);
+ if (!mas_is_none(&r_mas))
+ mas_update_gap(&r_mas, true);
+
+ }
}
static inline int mas_safe_slot(struct ma_state *mas, int *slot, int delta);
static inline void mas_prev_slot(struct ma_state *mas, unsigned long min)
__must_hold(ms->tree->lock)
{
- unsigned char slot, coalesce;
+ unsigned char slot;
if (mte_is_root(mas->node))
goto no_entry;
if (mte_is_leaf(mas->node))
goto done;
- slot = _mas_data_end(mas, mte_node_type(mas->node), &mas->max,
- &coalesce);
+ slot = _mas_data_end(mas, mte_node_type(mas->node), &mas->max);
} while (1);
done:
struct maple_enode *mn;
unsigned long last_pivot;
unsigned long pivot = mas_get_safe_pivot(mas, slot);
- unsigned char coalesce;
if (slot)
min = mas_get_safe_pivot(mas, slot - 1) + 1;
mas->max = pivot;
mas->min = min;
slot = _mas_data_end(mas, mte_node_type(mn),
- &last_pivot, &coalesce) + 1;
+ &last_pivot) + 1;
} while (slot-- > 0);
ascend:
return entry;
}
-void *mas_range_load(struct ma_state *mas, unsigned long *range_min,
- unsigned long *range_max, bool skip_retry);
-/* Private
- *
- * _mas_next() - Finds the next entry, sets index to the start of the range.
- *
- */
-static inline void *_mas_next(struct ma_state *mas, unsigned long limit,
- unsigned long *range_start)
-{
- void *entry = NULL;
- unsigned long range_max;
-
- if (mas->node && !mas_searchable(mas))
- return NULL;
-
- if (!mas->node || mas_is_start(mas)) {// First run.
- *range_start = 0;
- mas_start(mas);
- entry = mas_range_load(mas, range_start, &range_max, false);
- mas->last = range_max;
- }
-
- if (entry)
- return entry;
-
- return __mas_next(mas, limit, range_start);
-}
-
-/*
- * mas_next() - Get the next entry. Can return the zero entry. mas->node
- * must be a valid node and not a special value. Unsafe for single entry
- * trees.
- *
- */
-void *mas_next(struct ma_state *mas, unsigned long max)
-{
- unsigned long index = 0;
-
- return _mas_next(mas, max, &index);
-}
-EXPORT_SYMBOL_GPL(mas_next);
-
/* Private
*
* _mas_prev() - Find the previous entry from the current ma state.
if (!mas_searchable(mas))
break;
- } while (!entry || mt_will_coalesce(entry));
+ } while (!entry);
return entry;
}
enum maple_type this_type = mte_node_type(this_enode);
unsigned long piv;
unsigned long min, max;
- unsigned char coalesce, hard_data;
- unsigned char end = _mas_data_end(mas, this_type, &piv, &coalesce);
+ unsigned char end = _mas_data_end(mas, this_type, &piv);
MA_STATE(old_mas, mas->tree, mas->index, mas->last);
- hard_data = ma_hard_data(end, coalesce);
- if (hard_data > mt_min_slots[this_type] - 1)
+ if (end > mt_min_slots[this_type] - 1)
return;
/* Check for a single entry in the root node.
* 2. slot count == coalesce
* 3. one entry and one null.
*/
- if (!hard_data ||
- (end + 1 == coalesce) ||
- (end == 1 && !mte_get_rcu_slot(this_enode, 1, mas->tree))) {
+ if (end == 1 && !mte_get_rcu_slot(this_enode, 1, mas->tree)) {
unsigned long piv;
min = mas->min;
}
/* it's not a leaf, remove a level from the tree. */
goto remove_level;
- } else if (hard_data <= mt_min_slots[this_type] - 1) {
+ } else if (end <= mt_min_slots[this_type] - 1) {
goto coalesce; // Compact the node.
}
//into the first slot which is empty.
mas_set_safe_pivot(p_mas, mte_parent_slot(mas->node),
mas->min);
- mte_set_rcu_slot(p_mas->node, mte_parent_slot(mas->node),
- XA_SKIP_ENTRY);
+ // FIXME: Don't reuse nodes, ever.
+// mte_set_rcu_slot(p_mas->node, mte_parent_slot(mas->node),
+// XA_SKIP_ENTRY);
if (mt_is_alloc(mas->tree))
mte_set_gap(p_mas->node, mte_parent_slot(mas->node),
0);
// Redirect reads to the new node.
mas_set_safe_pivot(p_mas, mte_parent_slot(mas->node), r_mas->max);
// indicate to skip this slot.
- mte_set_rcu_slot(p_mas->node, mte_parent_slot(r_mas->node),
- XA_SKIP_ENTRY);
+ // FIXME: Remove skips.
+// mte_set_rcu_slot(p_mas->node, mte_parent_slot(r_mas->node),
+// XA_SKIP_ENTRY);
if (mt_is_alloc(mas->tree))
mte_set_gap(p_mas->node, mte_parent_slot(r_mas->node), 0);
mas_dup_state(&p_mas, mas);
mas_ascend(&p_mas);
l_p_slot = mte_parent_slot(mas->node);
- l_end_slot = _mas_data_end(mas, l_type, &l_end_piv, &l_coalesce);
- if (!try_anyways &&
- (ma_hard_data(l_end_slot, l_coalesce) >= mt_min_slots[l_type])) {
+ l_end_slot = _mas_data_end(mas, l_type, &l_end_piv);
+ if (!try_anyways && (l_end_slot >= mt_min_slots[l_type])) {
goto no_rebalancing; // Everything's perfectly all right now.
}
- printk("Going ahead as l hard data is %u and min is %u\n", ma_hard_data(l_end_slot, l_coalesce),
- mt_min_slots[l_type]);
try_anyways = false;
// Not really r_mas, previous node is left.
mas_descend(&r_mas);
r_type = mte_node_type(r_mas.node);
- r_end_slot = _mas_data_end(&r_mas, r_type, &r_end_piv,
- &r_coalesce);
+ r_end_slot = _mas_data_end(&r_mas, r_type, &r_end_piv);
if (r_end_slot - r_coalesce + l_end_slot - l_coalesce + 2
< mt_slots[l_type]) {
// Force a coalesce of these nodes
// We have a left and a right, check if they can be coalesced.
r_type = mte_node_type(r_mas.node); // not for racing.
- r_end_slot = _mas_data_end(&r_mas, r_type, &r_end_piv, &r_coalesce);
+ r_end_slot = _mas_data_end(&r_mas, r_type, &r_end_piv);
r_p_slot = mte_parent_slot(r_mas.node);
printk("Taking from %p\n", mas_mn(&r_mas));
do {
// relocated.
printk("Set %p[%u] to retry\n", mas_mn(&r_mas), copy_count);
- mte_set_rcu_slot(r_mas.node, copy_count, XA_RETRY_ENTRY);
+//mte_set_rcu_slot(r_mas.node, copy_count, XA_RETRY_ENTRY);
+ //FIXME ..
if (mt_is_alloc(r_mas.tree))
mte_set_gap(r_mas.node, copy_count, 0);
mte_set_pivot(r_mas.node, copy_count, mas->max);
if (!ma_is_leaf(type)) { //descend
struct maple_enode *next;
- unsigned char coalesce;
next = mas_get_rcu_slot(mas, i);
mas->min = min;
mas->max = max;
if (!mt_is_empty(next)) {
mas->node = next;
- i = _mas_data_end(mas, mte_node_type(next), &max,
- &coalesce);
+ i = _mas_data_end(mas, mte_node_type(next), &max);
} else {
goto ascend;
}
goto next;
entry = mas_get_rcu_slot(mas, i);
- if (unlikely(xa_is_skip(entry)))
- goto next;
-
if (!mt_is_empty(entry))
goto next;
static inline bool __mas_walk(struct ma_state *mas, unsigned long *range_min,
unsigned long *range_max)
{
- enum maple_type type;
struct maple_enode *next;
- unsigned long pivot = 0;
- unsigned long max, min;
- unsigned char i;
+ enum maple_type type;
bool ret = false;
- min = mas->min;
- max = mas->max;
-
while (true) {
type = mte_node_type(mas->node);
- if (ma_is_leaf(type)) // Leaf.
- ret = true;
-
-skip_entry:
- switch (type) {
- default:
- for (i = mas_get_slot(mas); i < mt_slots[type]; i++) {
- pivot = _mas_get_safe_pivot(mas, i, type);
-
- if (i != 0 && pivot == 0) {
- i = MAPLE_NODE_SLOTS;
- goto done;
- }
-
- if (min > pivot) // coalescing value was in the last slot.
- min = pivot;
-
- if (mas->index <= pivot) {
- max = pivot;
- break;
- }
- min = pivot + 1;
- }
-
- if (ret)
- goto done;
- break;
+ if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
+ return false;
- case maple_dense:
- // Linear node.
- i = mas->index - mas->min;
- mas->min = mas->max = mas->index;
- goto done;
- }
+ if (ma_is_leaf(type)) // Leaf.
+ return true;
- next = mas_get_rcu_slot(mas, i);
- if (unlikely(xa_is_skip(next))) {
- if (unlikely(i == mt_slots[type] - 1)) {
- i = MAPLE_NODE_SLOTS;
- goto done;
- }
- mas_set_slot(mas, i + 1);
- goto skip_entry;
- }
+ next = mas_get_rcu_slot(mas, mas_get_slot(mas));
// Traverse.
- mas->max = max;
- mas->min = min;
- if (mt_is_empty(next)) // Not found.
- goto done;
+ mas->max = *range_max;
+ mas->min = *range_min;
+ if (unlikely(mt_is_empty(next)))
+ return false;
mas->node = next;
mas_set_slot(mas, 0);
}
-done:
- mas_set_slot(mas, i);
- *range_max = max;
- *range_min = min;
return ret;
}
/** Private
return true;
}
-/**
- * mas_find: If mas->node == MAS_START, find the first
- * non-NULL entry >= mas->index.
- * Otherwise, find the first non-NULL entry > mas->index
- *
- * If an entry exists, last and index are updated accordingly.
- *
- * returns entry or null and set mas->node to MAS_NONE.
- */
-void *mas_find(struct ma_state *mas, unsigned long max)
-{
- unsigned long index = mas->min;
- void *entry = NULL;
-
- while (mas_search_cont(mas, index, max, entry)) {
- entry = _mas_next(mas, max, &index);
- if (mt_is_empty(entry))
- entry = NULL;
- }
-
- if (entry)
- mas->index = index;
-
- return entry;
-}
-EXPORT_SYMBOL_GPL(mas_find);
-
+
/**
* mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
*
}
EXPORT_SYMBOL_GPL(mas_pause);
-/* mt_find() - Search from start up until an entry is found.
- *
- * Note: Does not return the zero entry.
- * returns an entry.
- */
-void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
- bool start)
-{
- unsigned long range_start = 0, range_end = 0;
- void *entry = NULL;
- bool leaf;
- unsigned char slot;
- MA_STATE(mas, mt, *index, *index);
-
- if (!start && !(*index))
- return NULL;
-
- rcu_read_lock();
- leaf = _mas_range_walk(&mas, &range_start, &range_end);
- slot = mas_get_slot(&mas);
- if (leaf == true && slot != MAPLE_NODE_SLOTS)
- entry = mas_get_rcu_slot(&mas, slot);
-
- mas.last = range_end;
- if (mt_is_empty(entry) || xa_is_zero(entry) || xa_is_retry(entry))
- entry = NULL;
-
- while (mas_search_cont(&mas, range_start, max, entry)) {
- entry = _mas_next(&mas, max, &range_start);
- if (mt_is_empty(entry) || xa_is_zero(entry) ||
- xa_is_retry(entry))
- entry = NULL;
- }
-
- rcu_read_unlock();
- if (entry)
- *index = mas.last + 1;
-
- return entry;
-}
-void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) {
- return _mt_find(mt, index, max, true);
-}
-EXPORT_SYMBOL(mt_find);
-
-static inline int mas_build_replacement(struct ma_state *mas, void *new_entry,
- long node_cnt)
-
-{
- struct maple_enode *last = NULL;
- unsigned long new_index, new_last;
- unsigned long r_index, r_last;
- struct maple_tree new_tree = MTREE_INIT(name, mas->tree->ma_flags);
- void *entry;
-
- MA_STATE(new_mas, &new_tree, 0, 0);
-
-
- if (!node_cnt)
- return 0;
- node_cnt += 3; // Room for an extra split.
-
- mas_node_cnt(mas, node_cnt);
- if (mas_is_err(mas))
- return 0;
-
- new_index = mas->index;
- new_last = mas->last;
-
- /* Move allocations from mas to new_mas.
- * NOTE: This is necessary as mas will pass back errors and will retry
- * the allocation, so it has to be done in mas and has to be moved for
- * below.
- */
- new_mas.alloc = mas->alloc;
- mas->alloc = NULL;
-
- // Copy left side
- mas_reset(mas);
- mas->index = 0;
- mas->last = 0;
- mas_for_each(mas, entry, new_index - 1) {
- new_mas.index = mas->index;
- new_mas.last = mas_get_safe_pivot(mas, mas_get_slot(mas));
- MT_BUG_ON(mas->tree, entry == XA_DELETED_ENTRY);
- ma_inactive_insert(&new_mas, entry);
- if (mas_is_err(&new_mas))
- goto error;
- }
-
- // Insert the new value.
- new_mas.index = new_index;
- new_mas.last = new_last;
- ma_inactive_insert(&new_mas, new_entry);
- if (mas_is_err(&new_mas))
- goto error;
-
-
- /*
- * We need to run through a few things:
- * - new_mas.last goes beyond anything right now (no entries)
- * - new_mas.last cuts a range
- * - new_mas.last ends in a null
- * - new_mas.last has a sequentially next value
- */
-
- mas_reset(mas);
- mas->index = new_last + 1;
- mas->last = new_last + 1;
- _mas_range_walk(mas, &r_index, &r_last);
-
- if (mas_get_slot(mas) == MAPLE_NODE_SLOTS)
- goto skip_right;
-
-
- if (mte_is_leaf(mas->node)) {
- entry = mas_get_rcu_slot(mas, mas_get_slot(mas));
- if (!mt_is_empty(entry))
- {
- new_mas.index = r_index;
- new_mas.last = r_last;
- ma_inactive_insert(&new_mas, entry);
- if (mas_is_err(&new_mas))
- goto error;
- }
- }
-
- mas_for_each(mas, entry, ULONG_MAX) {
- if (mas->index < new_index)
- continue;
-
- new_mas.index = mas->index;
- new_mas.last = mas_get_safe_pivot(mas, mas_get_slot(mas));
- ma_inactive_insert(&new_mas, entry);
- if (mas_is_err(&new_mas))
- goto error;
- }
-
-skip_right:
-
- last = mas->tree->ma_root;
- mas->node = new_tree.ma_root;
- _mas_replace(mas, false, false);
- if (mt_is_alloc(mas->tree))
- mas_update_gap(mas, false);
-
- mas->node = MAS_START;
- mas->alloc = new_mas.alloc;
- mte_destroy_walk(last, mas->tree);
-
- return node_cnt;
-
-error:
- if (new_mas.tree)
- mte_destroy_walk(new_mas.tree->ma_root, new_mas.tree);
- return 0;
-}
-
-/* Private
- * mas_replace_tree() - Build a new tree and replace the entire structure.
- *
- */
-static inline int mas_replace_tree(struct ma_state *mas, void *new_entry)
-{
- unsigned int slot_cnt = 0;
- long node_cnt = 0, leaves= 1;
- struct maple_enode *last = NULL;
- enum maple_type p_type = mas_parent_enum(mas, mas->node);
-
- // Create a new tree.
- MA_STATE(r_mas, mas->tree, mas->last + 1, mas->last + 1);
-
- // Count the slots that will be used in the node we landed.
- slot_cnt = 3 + mas_get_slot(mas); // 3 is the max a new entry can create.
-
- // Count the nodes that are currently used to the left.
- mas_set_slot(mas, mte_parent_slot(mas->node));
- while (!mas_is_none(mas)) {
- last = mas->node;
- mas_prev_node(mas, 0);
- leaves++;
- }
- // Set mas->node to a valid node.
- mas->node = last;
-
- // Walk down to the right side of the tree.
- _mas_walk(&r_mas);
- // Add the slots to the right of where the search landed.
- if (mas_get_slot(&r_mas) == MAPLE_NODE_SLOTS) {
- r_mas.node = MAS_NONE;
- slot_cnt++; //entry for oo
- goto skip_r_count;
- }
- slot_cnt -= mas_get_slot(&r_mas);
- slot_cnt += mas_data_end(&r_mas);
-
- // Count the nodes to the right.
- mas_set_slot(&r_mas, mte_parent_slot(r_mas.node));
- while (!mas_is_none(&r_mas)) {
- last = r_mas.node;
- mas_next_node(&r_mas, ULONG_MAX);
- leaves++;
- }
-
-skip_r_count:
- // Calculate all the nodes needed for a new tree.
- if (slot_cnt > mt_slot_count(mas->node))
- leaves++;
-
- node_cnt = 1; // Root node. and room to split.
- while (leaves) { // add the number of nodes at each level.
- node_cnt += leaves;
- leaves /= mt_slots[p_type];
- }
- return mas_build_replacement(mas, new_entry, node_cnt);
-}
static inline bool mas_rewind_node(struct ma_state *mas);
static inline void mas_rev_awalk(struct ma_state *mas, unsigned long size)
}
}
-static inline int ma_root_ptr(struct ma_state *mas, void *entry,
- bool overwrite)
-{
- if (xa_is_node(mas->tree->ma_root))
- return 0;
-
- if (!overwrite)
- if (mas->tree->ma_root && mas->last == 0)
- goto exists;
-
- if (mas->last != 0)
- mas_root_expand(mas, entry);
- else if (((unsigned long) (entry) & 3) == 2)
- mas_root_expand(mas, entry);
- else
- rcu_assign_pointer(mas->tree->ma_root, entry);
- return 1;
-
-exists:
- mas_set_err(mas, -EEXIST);
- return 0;
-}
-
-static inline int mas_add(struct ma_state *mas, void *entry, bool overwrite,
- bool active)
-{
- unsigned char slot = MAPLE_NODE_SLOTS;
- bool leaf;
- int ret = 0;
-
- ret = ma_root_ptr(mas, entry, overwrite);
- if (mas_is_err(mas))
- return 0;
-
- if (ret)
- return ret;
-
- leaf = _mas_walk(mas);
- slot = mas_get_slot(mas);
- if (leaf == true) {
- if (slot == MAPLE_NODE_SLOTS) {
- if (mas->index == 0 && !overwrite)
- goto exists;
- } else if (!overwrite) {
- void *entry = mas_get_rcu_slot(mas, slot);
-
- if (!mt_is_empty(entry))
- goto exists;
- }
- }
-
- /* Do the add */
- ret = _mas_add(mas, entry, overwrite, active);
- if (mas_is_err(mas) && xa_err(mas->node) == -ERANGE)
- mas_set_err(mas, -EEXIST);
-
- return ret;
-
-exists:
- mas_set_err(mas, -EEXIST);
- return 0;
-}
-
static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot,
unsigned long size, unsigned long *index)
{
mas->min = mas_get_safe_pivot(mas, pslot - 1) + 1;
mas->node = mn;
- _mas_insert(mas, entry, slot, true);
+ mas_set_slot(mas, slot);
+ _mas_store(mas, entry, false);
return 0;
}
return mas_range_load(mas, &range_min, &range_max, true);
}
+
+/* Private
+ *
+ * _mas_next() - Finds the next entry, sets *range_start to the start of the
+ * range.
+ */
+static inline void *_mas_next(struct ma_state *mas, unsigned long limit,
+ unsigned long *range_start)
+{
+ void *entry = NULL;
+ unsigned long range_max;
+
+ if (mas->node && !mas_searchable(mas))
+ return NULL;
+
+ if (!mas->node || mas_is_start(mas)) { // First run.
+ *range_start = 0;
+ mas_start(mas);
+ entry = mas_range_load(mas, range_start, &range_max, false);
+ mas->last = range_max;
+ }
+
+ if (entry)
+ return entry;
+
+ return __mas_next(mas, limit, range_start);
+}
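+
+/*
+ * First-run sketch (illustrative only, not part of this patch): starting
+ * from MAS_START, the call above loads the range containing mas->index
+ * directly; "tree" is an assumed, already-populated maple tree.
+ *
+ *     MA_STATE(mas, &tree, 5, 5);
+ *     unsigned long start;
+ *     void *entry = _mas_next(&mas, ULONG_MAX, &start);
+ *     // If index 5 is occupied, entry is its value and
+ *     // start/mas.last bound its range.
+ */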
+
+/**
+ * mas_find() - If mas->node == MAS_START, find the first non-NULL entry
+ * >= mas->index.  Otherwise, find the first non-NULL entry > mas->index.
+ *
+ * If an entry exists, mas->index and mas->last are updated accordingly.
+ *
+ * Returns: the entry, or %NULL; mas->node is set to MAS_NONE when the
+ * search is exhausted.
+ */
+void *mas_find(struct ma_state *mas, unsigned long max)
+{
+ unsigned long index = mas->min;
+ void *entry = NULL;
+
+ while (mas_search_cont(mas, index, max, entry)) {
+ entry = _mas_next(mas, max, &index);
+ if (mt_is_empty(entry))
+ entry = NULL;
+ }
+
+ if (entry)
+ mas->index = index;
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_find);
+
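+/*
+ * Usage sketch (illustrative only, not part of this patch): walk every
+ * entry up to ULONG_MAX with mas_find() under the rcu read lock; "tree"
+ * is an assumed, already-populated maple tree.
+ *
+ *     MA_STATE(mas, &tree, 0, 0);
+ *     void *entry;
+ *
+ *     rcu_read_lock();
+ *     while ((entry = mas_find(&mas, ULONG_MAX)))
+ *             pr_debug("entry %p at %lu-%lu\n", entry, mas.index, mas.last);
+ *     rcu_read_unlock();
+ */
+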
+/* _mt_find() - Search from *index up to max until an entry is found.
+ *
+ * Note: Does not return the zero entry.
+ *
+ * Returns: the entry, or %NULL.
+ */
+void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
+ bool start)
+{
+ unsigned long range_start = 0, range_end = 0;
+ void *entry = NULL;
+ bool leaf;
+ unsigned char slot;
+ MA_STATE(mas, mt, *index, *index);
+
+ if (!start && !(*index))
+ return NULL;
+
+ rcu_read_lock();
+ leaf = _mas_range_walk(&mas, &range_start, &range_end);
+ slot = mas_get_slot(&mas);
+ if (leaf && slot != MAPLE_NODE_SLOTS)
+ entry = mas_get_rcu_slot(&mas, slot);
+
+ mas.last = range_end;
+ if (mt_is_empty(entry) || xa_is_zero(entry) || xa_is_retry(entry))
+ entry = NULL;
+
+ while (mas_search_cont(&mas, range_start, max, entry)) {
+ entry = _mas_next(&mas, max, &range_start);
+ if (mt_is_empty(entry) || xa_is_zero(entry) ||
+ xa_is_retry(entry))
+ entry = NULL;
+ }
+
+ rcu_read_unlock();
+ if (entry)
+ *index = mas.last + 1;
+
+ return entry;
+}
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
+{
+ return _mt_find(mt, index, max, true);
+}
+EXPORT_SYMBOL(mt_find);
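+
+/*
+ * Usage sketch (illustrative only): *index is advanced past each returned
+ * entry, so repeated calls iterate the whole tree; "mt" is an assumed,
+ * already-populated maple tree.
+ *
+ *     unsigned long index = 0;
+ *     void *entry;
+ *
+ *     while ((entry = mt_find(&mt, &index, ULONG_MAX)))
+ *             pr_debug("entry %p, next search from %lu\n", entry, index);
+ */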
+/*
+ * mas_next() - Get the next entry. Can return the zero entry. mas->node
+ * must be a valid node and not a special value. Unsafe for single entry
+ * trees.
+ *
+ */
+void *mas_next(struct ma_state *mas, unsigned long max)
+{
+ unsigned long index = 0;
+
+ return _mas_next(mas, max, &index);
+}
+EXPORT_SYMBOL_GPL(mas_next);
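+
+/*
+ * Usage sketch (illustrative only): mas_next() continues an iteration
+ * that mas_find() started, so the state is already on a valid node.
+ *
+ *     entry = mas_find(&mas, ULONG_MAX);
+ *     while (entry) {
+ *             // ... use entry ...
+ *             entry = mas_next(&mas, ULONG_MAX);
+ *     }
+ */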
static inline bool mas_rewind_node(struct ma_state *mas)
{
unsigned char slot;
*/
static inline void *mas_erase(struct ma_state *mas)
{
- int slot;
+ unsigned char slot;
+ unsigned long r_max, r_min;
void *entry = NULL;
+ struct maple_enode *new_node;
- _mas_walk(mas);
- if (mas_is_ptr(mas)) {
- entry = mas->tree->ma_root;
- mas->tree->ma_root = NULL;
+
+ if (!xa_is_node(mas->tree->ma_root)) {
+ if (mas->index)
+ return NULL;
+ entry = rcu_dereference_protected(mas->tree->ma_root,
+ lockdep_is_held(&mas->tree->ma_lock));
+ rcu_assign_pointer(mas->tree->ma_root, NULL);
return entry;
}
- slot = mas_get_slot(mas);
- if (slot == MAPLE_NODE_SLOTS)
- return NULL;
+ if (!mas_wr_walk(mas, &r_max, &r_min, entry, true)) {
+ /* Not a leaf means a broken tree. FIXME: rebuild? */
+ return NULL;
+ }
+ /* At this point, we are at the leaf node that needs to be altered. */
+ /* Calculate needed space */
+ mas_wr_slot_cnt(mas, r_min, r_max, entry);
+ slot = mas_get_slot(mas);
entry = mas_get_rcu_slot(mas, slot);
- mte_update_rcu_slot(mas->node, slot, XA_DELETED_ENTRY);
- // dense nodes only need to set a single value.
- mas_rebalance(mas);
- if (mas_is_err(mas)) {
- mas_empty_alloc(mas);
+ if (mas->span_enode) { // Could be writing NULL to the end of a node.
+ mas_spanning_store(mas, NULL);
+ return entry;
+ }
+
+ if (!entry)
+ return NULL;
- }
- if (mt_is_alloc(mas->tree))
- mas_may_move_gap(mas);
+ mte_set_rcu_slot(mas->node, slot, NULL);
+
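+ /*
+ * A leaf must not contain two adjacent NULL ranges; if the erased
+ * slot now borders a NULL neighbour, the node is rebuilt so the
+ * ranges can coalesce (see mas_touches_null()).
+ */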
+ if (!mas_touches_null(mas))
+ return entry;
+
+ // Need to replace the node as there are two nulls.
+ mas_node_cnt(mas, 1);
+ if (mas_is_err(mas))
+ return NULL;
+
+ new_node = mt_mk_node(mas_next_alloc(mas), mte_node_type(mas->node));
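+ // FIXME: new_node is allocated but not yet populated or installed.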
+
return entry;
}
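+
+/*
+ * Erase sketch (illustrative only): callers hold the tree lock and
+ * position the state on the index to remove; the old entry is returned.
+ *
+ *     MA_STATE(mas, &tree, index, index);
+ *     void *old;
+ *
+ *     mas_lock(&mas);
+ *     old = mas_erase(&mas);
+ *     mas_unlock(&mas);
+ */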
+
/* Interface */
void __init maple_tree_init(void)
{
mas_lock(&mas);
retry:
- mas_add(&mas, entry, true, true);
+ _mas_store(&mas, entry, true);
if (mas_nomem(&mas, gfp))
goto retry;
mtree_lock(ms.tree);
retry:
- mas_add(&ms, entry, false, true);
+ _mas_store(&ms, entry, false);
if (mas_nomem(&ms, gfp))
goto retry;
for (i = 0; i < mt_slot_count(mas->node); i++) {
unsigned long piv = mas_get_safe_pivot(mas, i);
- void *entry;
if (!piv)
break;
- entry = mas_get_rcu_slot(mas, i);
if (prev_piv > piv) {
- if (!mt_will_coalesce(entry)) {
- pr_err(MA_PTR"[%u] piv %lu < prev_piv %lu\n",
- mas_mn(mas), i, piv, prev_piv);
- mt_dump(mas->tree);
- MT_BUG_ON(mas->tree, piv < prev_piv);
- }
+ pr_err(MA_PTR"[%u] piv %lu < prev_piv %lu\n",
+ mas_mn(mas), i, piv, prev_piv);
+ mt_dump(mas->tree);
+ MT_BUG_ON(mas->tree, piv < prev_piv);
}
if (piv < mas->min) {
-
- if (!mt_will_coalesce(entry)) {
- if (piv < mas->min)
- mt_dump(mas->tree);
- pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i,
- piv, mas->min);
- MT_BUG_ON(mas->tree, piv < mas->min);
- }
+ pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i,
+ piv, mas->min);
+ mt_dump(mas->tree);
+ MT_BUG_ON(mas->tree, piv < mas->min);
}
- if (!xa_is_retry(entry)) {
- if ((piv > mas->max)) {
- pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i,
- piv, mas->max);
- mt_dump(mas->tree);
- MT_BUG_ON(mas->tree, piv > mas->max);
- }
- prev_piv = piv;
+ if (piv > mas->max) {
+ pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i,
+ piv, mas->max);
+ mt_dump(mas->tree);
+ MT_BUG_ON(mas->tree, piv > mas->max);
}
+ prev_piv = piv;
}
}
*/
void mt_validate(struct maple_tree *mt)
{
+ pr_info("%s: not supported yet\n", __func__);
+ return;
MA_STATE(mas, mt, 0, 0);
rcu_read_lock();
mas_start(&mas);