};
#define mt_min_slot_cnt(x) mt_min_slots[mte_node_type(x)]
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_NODE_SLOTS * 2 + 1)
+struct maple_big_node {
+ struct maple_pnode *parent;
+ struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
+ unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
+ unsigned long gap[MAPLE_BIG_NODE_SLOTS];
+ unsigned long min;
+ unsigned char b_end;
+ enum maple_type type;
+};
+
+struct maple_subtree_state {
+ struct ma_state *orig_l; /* Original left side of subtree */
+ struct ma_state *orig_r; /* Original right side of subtree */
+ struct ma_state *l; /* New left side of subtree */
+ struct ma_state *r; /* New right side of subtree */
+ struct ma_topiary *free; /* nodes to be freed */
+ struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
+ struct maple_big_node *bn;
+};
+
// Functions
static struct maple_node *mt_alloc_one(gfp_t gfp)
{
}
}
-#define MAPLE_BIG_NODE_SLOTS (MAPLE_NODE_SLOTS * 2 + 1)
-struct maple_big_node {
- struct maple_pnode *parent;
- struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
- unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
- unsigned long gap[MAPLE_BIG_NODE_SLOTS];
- unsigned long min;
- unsigned char b_end;
- enum maple_type type;
-};
static inline struct maple_enode *mas_check_split_parent(struct ma_state *mas,
unsigned char slot)
mas_descend(mas);
return true;
}
-/* mas_consume() - Add the portions of the tree which will be replaced by the
+/* mast_consume() - Add the portions of the tree which will be replaced by the
* operation to the removal list; either to be @free or @discard (destroy walk).
*/
-static inline void mas_consume(struct ma_state *l_mas,
- struct ma_state *r_mas,
- struct ma_topiary *destroy)
+static inline void mast_consume(struct maple_subtree_state *mast)
{
unsigned char l_slot, r_slot, slot, end;
unsigned long l_min, range_min, range_max;
// The left node is consumed, so add to the free list.
- l_min = l_mas->index;
- l_mas->index = l_mas->last;
- mas_node_walk(l_mas, mte_node_type(l_mas->node), &range_min, &range_max);
- l_mas->index = l_min;
- l_slot = mas_get_slot(l_mas);
- r_slot = mas_get_slot(r_mas);
- if (l_mas->node == r_mas->node) {
+ l_min = mast->orig_l->index;
+ mast->orig_l->index = mast->orig_l->last;
+ mas_node_walk(mast->orig_l, mte_node_type(mast->orig_l->node),
+ &range_min, &range_max);
+ mast->orig_l->index = l_min;
+ l_slot = mas_get_slot(mast->orig_l);
+ r_slot = mas_get_slot(mast->orig_r);
+ if (mast->orig_l->node == mast->orig_r->node) {
/* If the contents up to l_slot and from r_slot to end are
* still valid and it's the same node, then the rest may need to
- * be destroyed. The exception is if a there is data taken
+ * be destroyed. The exception is if there is data taken
* from the parent in the previous iteration which is still in
* use.
*/
for (slot = l_slot + 1; slot < r_slot; slot++)
- mat_add(destroy, mas_get_rcu_slot(l_mas, slot));
+ mat_add(mast->destroy,
+ mas_get_rcu_slot(mast->orig_l, slot));
return;
}
- /* r_mas is different and consumed. */
- if (mte_is_leaf(r_mas->node))
+ /* mast->orig_r is different and consumed. */
+ if (mte_is_leaf(mast->orig_r->node))
return;
/* Now free l_slot + 1 -> end and 0 -> r_slot - 1 */
- end = mas_data_end(l_mas);
+ end = mas_data_end(mast->orig_l);
for (slot = l_slot + 1; slot <= end; slot++)
- mat_add(destroy, mas_get_rcu_slot(l_mas, slot));
+ mat_add(mast->destroy, mas_get_rcu_slot(mast->orig_l, slot));
for (slot = 0; slot < r_slot; slot++)
- mat_add(destroy, mas_get_rcu_slot(r_mas, slot));
+ mat_add(mast->destroy, mas_get_rcu_slot(mast->orig_r, slot));
}
static inline void
static inline unsigned long mas_next_node(struct ma_state *mas,
unsigned long max);
static inline int mas_combine_separate(struct ma_state *mas,
- struct ma_state *orig_l_mas,
- struct ma_state *orig_r_mas,
- struct maple_big_node *b_node,
- unsigned char count)
+ struct maple_subtree_state *mast, unsigned char count)
{
unsigned char split, r_end, mid_split;
- unsigned char slot = 0, l_slot, r_slot = 0;
+ unsigned char slot = 0, l_slot = 0, r_slot = 0;
struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
unsigned long range_min, range_max;
MA_TOPIARY(free, mas->tree);
MA_TOPIARY(destroy, mas->tree);
+ mast->l = &l_mas;
+ mast->r = &r_mas;
+ mast->free = &free;
+ mast->destroy = &destroy;
+
/* MA_START doesn't work here */
l_mas.node = r_mas.node = m_mas.node = NULL;
- mas_consume(orig_l_mas, orig_r_mas, &destroy);
+ mast_consume(mast);
while(count--) {
- b_node->b_end--;
- b_node->type = mte_node_type(orig_l_mas->node);
- b_node->min = orig_l_mas->min;
- split = mas_mab_to_node(mas, b_node, &left, &right, &middle,
+ mast->bn->b_end--;
+ mast->bn->min = mast->orig_l->min;
+ mast->bn->type = mte_node_type(mast->orig_l->node);
+ split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
&mid_split);
/* Set parents from previous run */
mas_separate_set_parent(&m_mas, left, right, &slot, split);
mas_separate_set_parent(&r_mas, left, right, &slot, split);
- /* Copy data from b_node to new nodes */
+ /* Copy data from mast->bn to new nodes */
l_mas.node = left;
m_mas.node = middle;
r_mas.node = right;
- l_mas.min = orig_l_mas->min;
- mab_mas_cp(b_node, 0, split, &l_mas, 0);
- l_mas.max = b_node->pivot[split];
+ l_mas.min = mast->orig_l->min;
+ mab_mas_cp(mast->bn, 0, split, &l_mas, 0);
+ l_mas.max = mast->bn->pivot[split];
r_mas.max = l_mas.max;
if (middle) {
- mab_mas_cp(b_node, 1 + split, mid_split, &m_mas, 0);
- m_mas.min = b_node->pivot[split] + 1;
- m_mas.max = b_node->pivot[mid_split];
+ mab_mas_cp(mast->bn, 1 + split, mid_split, &m_mas, 0);
+ m_mas.min = mast->bn->pivot[split] + 1;
+ m_mas.max = mast->bn->pivot[mid_split];
split = mid_split;
}
if (right) {
- mab_mas_cp(b_node, 1 + split, b_node->b_end, &r_mas, 0);
- r_mas.min = b_node->pivot[split] + 1;
- r_mas.max = b_node->pivot[b_node->b_end];
+ mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, &r_mas, 0);
+ r_mas.min = mast->bn->pivot[split] + 1;
+ r_mas.max = mast->bn->pivot[mast->bn->b_end];
}
- /* Copy data from next level in the tree to b_node from next iteration */
- memset(b_node, 0, sizeof(struct maple_big_node));
- orig_l_mas->depth++;
+ /* Copy data from the next level in the tree to mast->bn for the next iteration */
+ memset(mast->bn, 0, sizeof(struct maple_big_node));
+ mast->orig_l->depth++;
if (!l_mas.min && l_mas.max == ULONG_MAX) {
mas_mn(&l_mas)->parent = ma_parent_ptr(
((unsigned long)mas->tree | MA_ROOT_PARENT));
- mas->depth = orig_l_mas->depth;
- b_node->b_end = 0;
- if (mte_is_root(orig_l_mas->node)) {
- if ((orig_l_mas->node != mas->node) &&
+ mas->depth = mast->orig_l->depth;
+ mast->bn->b_end = 0;
+ if (mte_is_root(mast->orig_l->node)) {
+ if ((mast->orig_l->node != mas->node) &&
(l_mas.depth > mas->tree->ma_height)) {
mat_add(&free, mas->node);
}
} else {
do {
- mas_ascend_free(orig_l_mas, orig_r_mas,
+ mas_ascend_free(mast->orig_l, mast->orig_r,
&free);
- mas_set_slot(orig_l_mas, 0);
- mas_set_slot(orig_r_mas, 0);
- mas_node_walk(orig_l_mas,
- mte_node_type(orig_l_mas->node),
+ mas_set_slot(mast->orig_l, 0);
+ mas_set_slot(mast->orig_r, 0);
+ mas_node_walk(mast->orig_l,
+ mte_node_type(mast->orig_l->node),
&range_min, &range_max);
- mas_node_walk(orig_r_mas,
- mte_node_type(orig_r_mas->node),
+ mas_node_walk(mast->orig_r,
+ mte_node_type(mast->orig_r->node),
&range_min, &range_max);
- mas_consume(orig_l_mas, orig_r_mas,
- &destroy);
- } while (!mte_is_root(orig_l_mas->node));
+ mast_consume(mast);
+ } while (!mte_is_root(mast->orig_l->node));
}
- mat_add(&free, orig_l_mas->node);
- mas_dup_state(orig_l_mas, &l_mas);
+ mat_add(&free, mast->orig_l->node);
+ mas_dup_state(mast->orig_l, &l_mas);
goto done;
}
- mas_ascend_free(orig_l_mas, orig_r_mas, &free);
+ mas_ascend_free(mast->orig_l, mast->orig_r, &free);
/* Set up the right side of things */
- r_end = mas_data_end(orig_r_mas);
- mas_set_slot(orig_r_mas, 0);
- orig_r_mas->index = r_mas.max;
+ r_end = mas_data_end(mast->orig_r);
+ mas_set_slot(mast->orig_r, 0);
+ mast->orig_r->index = r_mas.max;
/* last should be larger than or equal to index */
- if (orig_r_mas->last < orig_r_mas->index)
- orig_r_mas->last = orig_r_mas->index;
+ if (mast->orig_r->last < mast->orig_r->index)
+ mast->orig_r->last = mast->orig_r->index;
/* The node may not contain the value so set slot to ensure all
* of the nodes contents are freed or destroyed.
*/
- if (!mas_node_walk(orig_r_mas,
- mte_node_type(orig_r_mas->node),
+ if (!mas_node_walk(mast->orig_r,
+ mte_node_type(mast->orig_r->node),
&range_min, &range_max)) {
- mas_set_slot(orig_r_mas, r_end + 1);
+ mas_set_slot(mast->orig_r, r_end + 1);
}
- r_slot = mas_get_slot(orig_r_mas);
+ r_slot = mas_get_slot(mast->orig_r);
/* Set up the left side of things */
- mas_set_slot(orig_l_mas, 0);
- orig_l_mas->index = l_mas.min;
- mas_node_walk(orig_l_mas, mte_node_type(orig_l_mas->node),
+ mas_set_slot(mast->orig_l, 0);
+ mast->orig_l->index = l_mas.min;
+ mas_node_walk(mast->orig_l, mte_node_type(mast->orig_l->node),
&range_min, &range_max);
- l_slot = mas_get_slot(orig_l_mas);
+ l_slot = mas_get_slot(mast->orig_l);
- b_node->b_end = 0;
+ mast->bn->b_end = 0;
if (l_slot)
- b_node->b_end = mas_mab_cp(orig_l_mas, 0, l_slot - 1,
- b_node, 0);
+ mast->bn->b_end = mas_mab_cp(mast->orig_l, 0, l_slot - 1,
+ mast->bn, 0);
- slot = b_node->b_end;
- mab_set_b_end(b_node, &l_mas, left);
- mab_set_b_end(b_node, &m_mas, middle);
- mab_set_b_end(b_node, &r_mas, right);
+ slot = mast->bn->b_end;
+ mab_set_b_end(mast->bn, &l_mas, left);
+ mab_set_b_end(mast->bn, &m_mas, middle);
+ mab_set_b_end(mast->bn, &r_mas, right);
// Copy anything necessary out of the right node.
- if (b_node->pivot[b_node->b_end - 1] < orig_r_mas->max) {
- b_node->b_end = mas_mab_cp(orig_r_mas, r_slot + 1,
- r_end, b_node, b_node->b_end);
- orig_r_mas->last = orig_r_mas->max;
+ if (mast->bn->pivot[mast->bn->b_end - 1] < mast->orig_r->max) {
+ mast->bn->b_end = mas_mab_cp(mast->orig_r, r_slot + 1,
+ r_end, mast->bn, mast->bn->b_end);
+ mast->orig_r->last = mast->orig_r->max;
}
- mas_consume(orig_l_mas, orig_r_mas, &destroy);
- orig_l_mas->last = orig_l_mas->max;
+ mast_consume(mast);
+ mast->orig_l->last = mast->orig_l->max;
// Attempt to balance from this parent
- if (b_node->b_end - 1 < mt_min_slot_cnt(orig_l_mas->node)) {
+ if (mast->bn->b_end - 1 < mt_min_slot_cnt(mast->orig_l->node)) {
unsigned char end;
- struct maple_enode *left = orig_l_mas->node;
- struct maple_enode *right = orig_r_mas->node;
- if (mas_next_sibling(orig_r_mas)) {
- end = mas_data_end(orig_r_mas);
- b_node->b_end = mas_mab_cp(orig_r_mas, 0, end,
- b_node, b_node->b_end);
+ struct maple_enode *left = mast->orig_l->node;
+ struct maple_enode *right = mast->orig_r->node;
+ if (mas_next_sibling(mast->orig_r)) {
+ end = mas_data_end(mast->orig_r);
+ mast->bn->b_end = mas_mab_cp(mast->orig_r, 0, end,
+ mast->bn, mast->bn->b_end);
mat_add(&free, right);
if (right == left)
- orig_l_mas->node = orig_r_mas->node;
- //free[i++] = orig_r_mas->node;
- orig_r_mas->last = orig_r_mas->max;
+ mast->orig_l->node = mast->orig_r->node;
+ mast->orig_r->last = mast->orig_r->max;
if (!count)
count++;
- } else if (mas_prev_sibling(orig_l_mas)) {
- end = mas_data_end(orig_l_mas);
- // shift b_node by prev size
- mab_shift_right(b_node, end + 1,
+ } else if (mas_prev_sibling(mast->orig_l)) {
+ end = mas_data_end(mast->orig_l);
+ // shift mast->bn by prev size
+ mab_shift_right(mast->bn, end + 1,
(mt_is_alloc(mas->tree) ? true : false));
// copy in prev.
- mas_mab_cp(orig_l_mas, 0, end, b_node, 0);
+ mas_mab_cp(mast->orig_l, 0, end, mast->bn, 0);
mat_add(&free, left);
if (right == left)
- orig_r_mas->node = orig_l_mas->node;
- l_mas.min = orig_l_mas->min;
- orig_l_mas->index = orig_l_mas->min;
- b_node->b_end += end + 1;
+ mast->orig_r->node = mast->orig_l->node;
+ l_mas.min = mast->orig_l->min;
+ mast->orig_l->index = mast->orig_l->min;
+ mast->bn->b_end += end + 1;
slot += end + 1;
if (!count)
count++;
// Ensure there is enough data for the next iteration.
- if (b_node->b_end - 1 < mt_min_slot_cnt(orig_l_mas->node)) {
+ if (mast->bn->b_end - 1 < mt_min_slot_cnt(mast->orig_l->node)) {
unsigned char end;
- struct maple_enode *left = orig_l_mas->node;
- struct maple_enode *right = orig_r_mas->node;
+ struct maple_enode *left = mast->orig_l->node;
+ struct maple_enode *right = mast->orig_r->node;
- mas_set_slot(orig_r_mas,
- mte_parent_slot(orig_r_mas->node));
- mas_next_node(orig_r_mas, ULONG_MAX);
- if (!mas_is_none(orig_r_mas)) {
+ mas_set_slot(mast->orig_r,
+ mte_parent_slot(mast->orig_r->node));
+ mas_next_node(mast->orig_r, ULONG_MAX);
+ if (!mas_is_none(mast->orig_r)) {
// Add r_mas.node to clean_list.
// r_mas is now at the next node..
- end = mas_data_end(orig_r_mas);
- b_node->b_end = mas_mab_cp(orig_r_mas, 0, end,
- b_node, b_node->b_end);
- orig_r_mas->last = orig_r_mas->max;
+ end = mas_data_end(mast->orig_r);
+ mast->bn->b_end = mas_mab_cp(mast->orig_r, 0, end,
+ mast->bn, mast->bn->b_end);
+ mast->orig_r->last = mast->orig_r->max;
mat_add(&free, right);
if (right == left)
- orig_l_mas->node = orig_r_mas->node;
- //free[i++] = orig_r_mas->node;
+ mast->orig_l->node = mast->orig_r->node;
} else {
// Put left into right.
- mas_dup_state(orig_r_mas, orig_l_mas);
+ mas_dup_state(mast->orig_r, mast->orig_l);
mas_dup_state(&r_mas, &l_mas);
r_slot = l_slot;
// Use prev for left.
- mas_prev_node(orig_l_mas, 0);
- if (mas_is_none(orig_l_mas)) {
+ mas_prev_node(mast->orig_l, 0);
+ if (mas_is_none(mast->orig_l)) {
// This is going to be a new root of
- // only what is in b_node
- mas_dup_state(orig_l_mas, orig_r_mas);
- b_node->b_end--;
+ // only what is in mast->bn
+ mas_dup_state(mast->orig_l, mast->orig_r);
+ mast->bn->b_end--;
break;
}
mat_add(&free, left);
if (right == left)
- orig_r_mas->node = orig_l_mas->node;
- //free[i++] = orig_l_mas->node;
+ mast->orig_r->node = mast->orig_l->node;
- mas_set_slot(orig_l_mas, 0);
- orig_l_mas->index = orig_l_mas->min;
+ mas_set_slot(mast->orig_l, 0);
+ mast->orig_l->index = mast->orig_l->min;
l_slot = 0;
- end = mas_data_end(orig_l_mas);
- // shift b_node by prev size
- mab_shift_right(b_node, end + 1,
+ end = mas_data_end(mast->orig_l);
+ // shift mast->bn by prev size
+ mab_shift_right(mast->bn, end + 1,
(mt_is_alloc(mas->tree) ? true : false));
// copy in prev.
- mas_mab_cp(orig_l_mas, 0, end, b_node, 0);
- l_mas.min = orig_l_mas->min;
+ mas_mab_cp(mast->orig_l, 0, end, mast->bn, 0);
+ l_mas.min = mast->orig_l->min;
slot += end + 1;
- b_node->b_end += end + 1;
+ mast->bn->b_end += end + 1;
}
if (!count)
count++;
}
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
- mte_node_type(orig_l_mas->node));
- orig_l_mas->depth++;
+ mte_node_type(mast->orig_l->node));
+ mast->orig_l->depth++;
- mab_mas_cp(b_node, 0, b_node->b_end, &l_mas, 0);
+ mab_mas_cp(mast->bn, 0, mast->bn->b_end, &l_mas, 0);
mte_set_parent(left, l_mas.node, slot);
if (middle)
mte_set_parent(middle, l_mas.node, ++slot);
if (right)
mte_set_parent(right, l_mas.node, ++slot);
- if (!b_node->b_end) {
+ if (!mast->bn->b_end) {
mas_mn(&l_mas)->parent =
ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
} else {
- mas_mn(&l_mas)->parent = mas_mn(orig_l_mas)->parent;
+ mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
}
- mat_add(&free, orig_l_mas->node);
- mas_dup_state(orig_l_mas, &l_mas);
- mas->depth = orig_l_mas->depth;
+ mat_add(&free, mast->orig_l->node);
+ mas_dup_state(mast->orig_l, &l_mas);
+ mas->depth = mast->orig_l->depth;
done:
mte_set_node_dead(mas->node);
// Set up mas for insertion.
- mas_dup_state(mas, orig_l_mas);
+ mas_dup_state(mas, mast->orig_l);
mas_wmb_replace(mas, &free, &destroy);
- return b_node->b_end;
+ return mast->bn->b_end;
}
static inline int mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
char empty_cnt = mas->full_cnt * -1;
+ struct maple_subtree_state mast;
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
if (mas_is_err(mas))
return 0;
+
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+ mast.bn = b_node;
+
mas_dup_state(&l_mas, mas);
mas_dup_state(&r_mas, mas);
l_mas.index = l_mas.last = l_mas.min;
}
- return mas_combine_separate(mas, &l_mas, &r_mas, b_node, empty_cnt);
+ return mas_combine_separate(mas, &mast, empty_cnt);
}
static inline int mas_split(struct ma_state *mas,
unsigned long range_min, range_max;
unsigned char count = 0;
struct maple_big_node b_node;
+ struct maple_subtree_state mast;
int node_cnt = 0;
// Holds new left and right sub-tree
if (mas_is_err(mas))
return 0;
+ mast.bn = &b_node;
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+
mas_dup_state(&l_mas, mas);
mas->last = mas->index;
mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max);
// Combine l_mas and r_mas and split them up evenly again.
l_mas.depth = 0;
- return mas_combine_separate(mas, &l_mas, &r_mas, &b_node, count);
+ return mas_combine_separate(mas, &mast, count);
}
static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)