}
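+/*
+ * wr_mas_setup() - Fill in the write state for the node in @mas.
+ * @wr_mas: The maple write state
+ * @mas: The maple state
+ *
+ * Sets the node, type, and pivots along with the range (r_min to r_max) of
+ * the slot at mas->offset.
+ */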
static inline
-void wr_mas_ascend(struct ma_wr_state *wr_mas)
+void wr_mas_setup(struct ma_wr_state *wr_mas, struct ma_state *mas)
{
- struct ma_state *mas = wr_mas->mas;
-
- mas_ascend(mas);
wr_mas->node = mas_mn(mas);
wr_mas->type = mte_node_type(mas->node);
wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, mas->offset);
wr_mas->r_max = mas_safe_pivot(mas, wr_mas->pivots, mas->offset,
wr_mas->type);
+}
+
+static inline
+void wr_mas_ascend(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ mas_ascend(mas);
+ wr_mas_setup(wr_mas, mas);
/* Careful, this may be wrong.. */
wr_mas->end_piv = wr_mas->r_max;
wr_mas->offset_end = mas->offset;
mast->l->offset += end;
}
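+/*
+ * rebalance_sib() - Point @sib at the sibling to use for a rebalance of @mas.
+ * @mas: The maple state of the insufficient node
+ * @sib: The maple state to set to the chosen sibling
+ *
+ * The right sibling is preferred so that data is pulled to the left; the left
+ * sibling is used when @mas is already in the last slot of its parent.
+ */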
+static inline void rebalance_sib(struct ma_state *mas, struct ma_state *sib)
+{
+ unsigned char end;
+
+ *sib = *mas;
+ mas_ascend(sib);
+ end = mas_data_end(sib);
+ /* Prioritize move right to pull data left */
+ if (sib->offset < end)
+ sib->offset++;
+ else
+ sib->offset--;
+
+ mas_descend(sib);
+ sib->end = mas_data_end(sib);
+}
+
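+/*
+ * spanning_sib() - Point @nneighbour at a sibling of the spanning store to
+ * rebalance against.
+ * @l_wr_mas: The maple write state for the left side of the spanning store
+ * @r_wr_mas: The maple write state for the right side of the spanning store
+ * @nneighbour: The maple state to set to the chosen sibling
+ */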
static inline
-void mas_spanning_move(struct ma_wr_state *l_wr_mas,
+void spanning_sib(struct ma_wr_state *l_wr_mas,
struct ma_wr_state *r_wr_mas, struct ma_state *nneighbour)
{
struct ma_state l_tmp = *l_wr_mas->mas;
unsigned long *s_gaps, *d_gaps;
unsigned long d_max;
-
-
d_slots = ma_slots(dst, d_mt) + d_start;
d_pivots = ma_pivots(dst, d_mt) + d_start;
s_slots = ma_slots(src, s_mt) + start;
{
unsigned char end = 0;
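+ /* Height of the new data; starts at the leaf level */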
+ cp->height = 1;
/* Create entries to insert including split entries to left and right */
if (l_wr_mas->r_min < mas->index) {
end++;
cp->end = end;
}
+
/*
+ * __rebalance_data_calc() - Calculate the size of the data (1 indexed).
+ * @cp: The maple copy struct with the new data populated.
+ * @l_wr_mas: The maple write state containing the data to the left of the write
+ * @r_wr_mas: The maple write state containing the data to the right of the
+ * write
+ *
* cp->data will not be 0 indexed.
*/
-static inline void spanning_data_calc(struct maple_copy *cp,
- struct ma_state *mas, struct ma_wr_state *l_wr_mas,
- struct ma_wr_state *r_wr_mas, struct ma_state *sib)
+static inline void __rebalance_data_calc(struct maple_copy *cp,
+ struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas)
{
/* Add 1 every time for the 0th element */
cp->data = l_wr_mas->mas->offset;
-
+ /* Add the new data and any partial overwrites */
cp->data += cp->end + 1;
-
- /* Data from right (offset + 1 to end), +1 for zero */
+ /* Data from right (offset_end + 1 to end), +1 for zero */
- cp->data += r_wr_mas->mas->end - r_wr_mas->mas->offset;
+ cp->data += r_wr_mas->mas->end - r_wr_mas->offset_end;
+}
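+
+/*
+ * Example for __rebalance_data_calc() with hypothetical values: overwriting
+ * slots 3-5 of a node with end == 9, where the new entries occupy two slots
+ * (cp->end == 1), gives cp->data = 3 + (1 + 1) + (9 - 5) = 9.
+ */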
+
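+/*
+ * rebalance_data_calc() - Calculate the size of the data for a rebalance
+ * (1 indexed).
+ * @cp: The maple copy struct with the new data populated
+ * @wr_mas: The maple write state
+ * @sib: The maple state set to the sibling, if one is needed
+ *
+ * If the node does not span the whole tree and the data is at or below the
+ * minimum occupancy, a sibling is selected with rebalance_sib() and its data
+ * is added to cp->data; otherwise sib->end is set to 0.
+ */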
+static inline void rebalance_data_calc(struct maple_copy *cp,
+ struct ma_wr_state *wr_mas, struct ma_state *sib)
+{
+ __rebalance_data_calc(cp, wr_mas, wr_mas);
+
+ if (((wr_mas->mas->min != 0) || (wr_mas->mas->max != ULONG_MAX)) &&
+ (cp->data <= mt_min_slots[wr_mas->type])) {
+ rebalance_sib(wr_mas->mas, sib);
+ cp->data += sib->end + 1;
+ } else {
+ sib->end = 0;
+ }
+}
+
+/*
+ * cp->data will not be 0 indexed.
+ */
+static inline void spanning_data_calc(struct maple_copy *cp,
+ struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
+ struct ma_state *sib)
+{
+ __rebalance_data_calc(cp, l_wr_mas, r_wr_mas);
if (((l_wr_mas->mas->min != 0) || (r_wr_mas->mas->max != ULONG_MAX)) &&
(cp->data <= mt_min_slots[l_wr_mas->type])) {
- mas_spanning_move(l_wr_mas, r_wr_mas, sib);
+ spanning_sib(l_wr_mas, r_wr_mas, sib);
cp->data += sib->end + 1;
} else {
sib->end = 0;
}
static inline
-void spanning_split_dest_setup(struct maple_copy *cp, struct ma_state *mas,
+void rebalance_dest_setup(struct maple_copy *cp, struct ma_state *mas,
enum maple_type mt)
{
/* Data is 1 indexed, every src has +1 added. */
* src->start and end are 0 indexed
*/
static inline
-void spanning_split_src_setup(struct maple_copy *cp, struct ma_state *mas,
- struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
- struct ma_state *sib)
+void multi_src_setup(struct maple_copy *cp, struct ma_wr_state *l_wr_mas,
+ struct ma_wr_state *r_wr_mas, struct ma_state *sib)
{
cp->s_count = 0;
if (sib->end && sib->max < l_wr_mas->mas->min)
spanning_init_cp_src(cp);
- /* Copy right either from offset or offset + 1 pending on r_max */
+ /* Copy right either from offset_end or offset_end + 1 depending on r_max */
- if (r_wr_mas->mas->end != r_wr_mas->mas->offset)
- append_node_cp(cp, r_wr_mas->mas, r_wr_mas->mas->offset + 1,
+ if (r_wr_mas->mas->end != r_wr_mas->offset_end)
+ append_node_cp(cp, r_wr_mas->mas, r_wr_mas->offset_end + 1,
r_wr_mas->mas->end);
if (sib->end && sib->min > r_wr_mas->mas->max)
}
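+/*
+ * cp_data_write() - Copy the data from the sources in @cp to the destination
+ * nodes.
+ * @cp: The maple copy struct with the sources and destinations populated
+ * @mas: The maple state
+ */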
static inline
-void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
+void cp_data_write(struct maple_copy *cp, struct ma_state *mas)
{
struct maple_node *dst, *src;
unsigned char s, d;
}
-static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas,
- struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
- struct ma_state *sib)
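+/*
+ * ma_cp_to_slots() - Convert the destination nodes in @cp into slot entries
+ * for the next level up.
+ * @cp: The maple copy struct with the destinations populated
+ * @min: The minimum of the first destination node's range
+ * @mas: The maple state
+ */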
+static inline void ma_cp_to_slots(struct maple_copy *cp, unsigned long min,
+ struct ma_state *mas)
{
unsigned char d;
- unsigned long min;
- if (sib->end) {
- if (sib->max < l_wr_mas->mas->min)
- *l_wr_mas->mas = *sib;
- else
- *r_wr_mas->mas = *sib;
- }
-
- min = l_wr_mas->mas->min;
for (d = 0; d < cp->d_count; d++) {
struct maple_node *mn = cp->dst[d].node;
enum maple_type mt = cp->dst[d].mt;
min = max + 1;
}
+}
+
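+/*
+ * rebalance_new_root() - Install the copied data as the new root of the tree.
+ * @cp: The maple copy struct with the destinations populated
+ * @mas: The maple state
+ *
+ * If more than one destination node remains, a new root is created above them
+ * and the height is increased.  The root's parent is then set and @mas is
+ * walked up to the old root in preparation for the replacement.
+ */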
+static inline void rebalance_new_root(struct maple_copy *cp, struct ma_state *mas)
+{
+ if (cp->d_count != 1) {
+ enum maple_type mt = maple_arange_64;
+
+ if (!mt_is_alloc(mas->tree))
+ mt = maple_range_64;
+
+ cp->data = cp->d_count;
+ cp->s_count = 0;
+ rebalance_dest_setup(cp, mas, mt);
+ spanning_init_cp_src(cp);
+ node_copy(mas, cp->src[0].node, 0, cp->data, cp->max, maple_copy,
+ cp->dst[0].node, 0, mt);
+ node_finalise(cp->dst[0].node, mt, cp->end + 1);
+ cp->slot[0] = mt_mk_node(cp->dst[0].node, mt);
+ cp->height++;
+ }
+ WARN_ON_ONCE(cp->dst[0].node != mte_to_node(cp->slot[0]));
+ cp->dst[0].node->parent = ma_parent_ptr(mas_tree_parent(mas));
+ while (!mte_is_root(mas->node))
+ mas_ascend(mas);
+}
+
+static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas,
+ struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
+ struct ma_state *sib)
+{
+ if (sib->end) {
+ if (sib->max < l_wr_mas->mas->min)
+ *l_wr_mas->mas = *sib;
+ else
+ *r_wr_mas->mas = *sib;
+ }
+
+ ma_cp_to_slots(cp, l_wr_mas->mas->min, mas);
cp->end = cp->d_count - 1;
cp->min = l_wr_mas->mas->min;
cp->max = r_wr_mas->mas->max;
if (!cp->min && cp->max == ULONG_MAX) {
- if (cp->d_count != 1) {
- enum maple_type mt = maple_arange_64;
-
- if (!mt_is_alloc(mas->tree))
- mt = maple_range_64;
-
- cp->data = cp->d_count;
- cp->s_count = 0;
- spanning_split_dest_setup(cp, mas, mt);
- spanning_init_cp_src(cp);
- node_copy(mas, cp->src[0].node, 0, cp->data, cp->max, maple_copy,
- cp->dst[0].node, 0, mt);
- node_finalise(cp->dst[0].node, mt, cp->end + 1);
- cp->slot[0] = mt_mk_node(cp->dst[0].node, mt);
- cp->height++;
- }
- WARN_ON_ONCE(cp->dst[0].node != mte_to_node(cp->slot[0]));
- cp->dst[0].node->parent = ma_parent_ptr(mas_tree_parent(mas));
- while (!mte_is_root(mas->node))
- mas_ascend(mas);
+ rebalance_new_root(cp, mas);
return false;
}
return true;
}
-static void mas_spanning_rebalance_loop(struct ma_state *mas,
- struct maple_subtree_state *mast, unsigned char count)
-{
-
- unsigned char split, mid_split;
- unsigned char slot = 0;
- unsigned char new_height = 0; /* used if node is a new root */
- struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
- struct maple_enode *old_enode;
-
- /*
- * Each level of the tree is examined and balanced, pushing data to the left or
- * right, or rebalancing against left or right nodes is employed to avoid
- * rippling up the tree to limit the amount of churn. Once a new sub-section of
- * the tree is created, there may be a mix of new and old nodes. The old nodes
- * will have the incorrect parent pointers and currently be in two trees: the
- * original tree and the partially new tree. To remedy the parent pointers in
- * the old tree, the new data is swapped into the active tree and a walk down
- * the tree is performed and the parent pointers are updated.
- * See mas_topiary_replace() for more information.
- */
- while (count--) {
- mast->bn->b_end--;
- mast->bn->type = mte_node_type(mast->orig_l->node);
- split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
- &mid_split);
- mast_set_split_parents(mast, left, middle, right, split,
- mid_split);
- mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
- new_height++;
-
- /*
- * Copy data from next level in the tree to mast->bn from next
- * iteration
- */
- memset(mast->bn, 0, sizeof(struct maple_big_node));
- mast->bn->type = mte_node_type(left);
-
- /* Root already stored in l->node. */
- if (mas_is_root_limits(mast->l))
- goto new_root;
-
- mast_ascend(mast);
- mast_combine_cp_left(mast);
- mast->l->offset = mast->bn->b_end;
- mab_set_b_end(mast->bn, mast->l, left);
- mab_set_b_end(mast->bn, mast->m, middle);
- mab_set_b_end(mast->bn, mast->r, right);
-
- /* Copy anything necessary out of the right node. */
- mast_combine_cp_right(mast);
- mast->orig_l->last = mast->orig_l->max;
-
- if (mast_sufficient(mast)) {
- if (mast_overflow(mast))
- continue;
-
- if (mast->orig_l->node == mast->orig_r->node) {
- /*
- * The data in b_node should be stored in one
- * node and in the tree
- */
- slot = mast->l->offset;
- break;
- }
-
- continue;
- }
-
- /* May be a new root stored in mast->bn */
- if (mas_is_root_limits(mast->orig_l))
- break;
-
- mast_spanning_rebalance(mast);
-
- /* rebalancing from other nodes may require another loop. */
- if (!count)
- count++;
- }
-
- mast->l->node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
- mte_node_type(mast->orig_l->node));
-
- mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
- new_height++;
- mas_set_parent(mas, left, mast->l->node, slot);
- if (middle)
- mas_set_parent(mas, middle, mast->l->node, ++slot);
-
- if (right)
- mas_set_parent(mas, right, mast->l->node, ++slot);
-
- if (mas_is_root_limits(mast->l)) {
-new_root:
- mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
- while (!mte_is_root(mast->orig_l->node))
- mast_ascend(mast);
- } else {
- mas_mn(mast->l)->parent = mas_mn(mast->orig_l)->parent;
- }
-
- old_enode = mast->orig_l->node;
- mas->depth = mast->l->depth;
- mas->node = mast->l->node;
- mas->min = mast->l->min;
- mas->max = mast->l->max;
- mas->offset = mast->l->offset;
- mas_wmb_replace(mas, old_enode, new_height);
- mtree_range_walk(mas);
- return;
-}
-
-/*
- * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
- * @mas: The starting maple state
- * @mast: The maple_subtree_state, keeps track of 4 maple states.
- * @count: The estimated count of iterations needed.
- *
- * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
- * is hit. First @b_node is split into two entries which are inserted into the
- * next iteration of the loop. @b_node is returned populated with the final
- * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
- * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
- * to account of what has been copied into the new sub-tree. The update of
- * orig_l_mas->last is used in mas_consume to find the slots that will need to
- * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
- * the new sub-tree in case the sub-tree becomes the full tree.
- */
-static void mas_spanning_rebalance(struct ma_state *mas,
- struct maple_subtree_state *mast, unsigned char count)
-{
-
- MA_STATE(l_mas, mas->tree, mas->index, mas->index);
- MA_STATE(r_mas, mas->tree, mas->index, mas->last);
- MA_STATE(m_mas, mas->tree, mas->index, mas->index);
-
- /*
- * The tree needs to be rebalanced and leaves need to be kept at the same level.
- * Rebalancing is done by use of the ``struct maple_topiary``.
- */
- mast->l = &l_mas;
- mast->m = &m_mas;
- mast->r = &r_mas;
- l_mas.status = r_mas.status = m_mas.status = ma_none;
-
- /* Check if this is not root and has sufficient data. */
- if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
- unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
- mast_spanning_rebalance(mast);
-
- mas_spanning_rebalance_loop(mas, mast, count);
-}
-
-
static void mas_wr_spanning_rebalance(struct ma_state *mas,
struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas)
{
struct maple_enode *old_enode;
- struct ma_state sib;
struct maple_copy cp;
+ struct ma_state sib;
/*
* Spanning store is different in that the write is actually from
* being stored to the last slot of the left node.
*/
- cp.height = 1;
rebalance_leaf_init(&cp, mas, l_wr_mas, r_wr_mas);
do {
- spanning_data_calc(&cp, mas, l_wr_mas, r_wr_mas, &sib);
- spanning_split_src_setup(&cp, mas, l_wr_mas, r_wr_mas, &sib);
- spanning_split_dest_setup(&cp, mas, l_wr_mas->type);
- spanning_data_write(&cp, mas);
+ spanning_data_calc(&cp, l_wr_mas, r_wr_mas, &sib);
+ multi_src_setup(&cp, l_wr_mas, r_wr_mas, &sib);
+ rebalance_dest_setup(&cp, mas, l_wr_mas->type);
+ cp_data_write(&cp, mas);
} while (spanning_ascend(&cp, mas, l_wr_mas, r_wr_mas, &sib));
old_enode = mas->node;
}
/*
- * mas_rebalance() - Rebalance a given node.
- * @mas: The maple state
- * @b_node: The big maple node.
+ * rebalance_ascend() - Ascend the tree and set up for the next loop, if
+ * necessary.
+ * @cp: The maple copy struct
+ * @wr_mas: The maple write state
+ * @sib: The maple state of the sibling
*
- * Rebalance two nodes into a single node or two new nodes that are sufficient.
- * Continue upwards until tree is sufficient.
+ * Return: True if there is more rebalancing to do, false otherwise.
*/
-static inline void mas_rebalance(struct ma_state *mas,
- struct maple_big_node *b_node)
+static inline bool rebalance_ascend(struct maple_copy *cp,
+ struct ma_wr_state *wr_mas, struct ma_state *sib)
{
- char empty_count = mas_mt_height(mas);
- struct maple_subtree_state mast;
- unsigned char shift, b_end = ++b_node->b_end;
-
- MA_STATE(l_mas, mas->tree, mas->index, mas->last);
- MA_STATE(r_mas, mas->tree, mas->index, mas->last);
-
- trace_ma_op(__func__, mas);
+ struct ma_state *mas;
+ unsigned long min;
+ struct ma_state *l, *r;
- /*
- * Rebalancing occurs if a node is insufficient. Data is rebalanced
- * against the node to the right if it exists, otherwise the node to the
- * left of this node is rebalanced against this node. If rebalancing
- * causes just one node to be produced instead of two, then the parent
- * is also examined and rebalanced if it is insufficient. Every level
- * tries to combine the data in the same way. If one node contains the
- * entire range of the tree, then that node is used as a new root node.
- */
+
+ mas = wr_mas->mas;
+ if (sib->min > mas->max) { /* Move right succeeded */
+ min = mas->min;
+ l = mas;
+ r = sib;
+ } else {
+ min = sib->min;
+ l = sib;
+ r = mas;
+ }
- mast.orig_l = &l_mas;
- mast.orig_r = &r_mas;
- mast.bn = b_node;
- mast.bn->type = mte_node_type(mas->node);
+ ma_cp_to_slots(cp, min, mas);
+ cp->end = cp->d_count - 1;
+ cp->min = l->min;
+ cp->max = r->max;
- l_mas = r_mas = *mas;
+ if (!cp->min && cp->max == ULONG_MAX) {
+ rebalance_new_root(cp, mas);
+ return false;
+ }
- if (mas_next_sibling(&r_mas)) {
- mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
- r_mas.last = r_mas.index = r_mas.max;
- } else {
- mas_prev_sibling(&l_mas);
- shift = mas_data_end(&l_mas) + 1;
- mab_shift_right(b_node, shift);
- mas->offset += shift;
- mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
- b_node->b_end = shift + b_end;
- l_mas.index = l_mas.last = l_mas.min;
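+ /* One destination and no sibling consumed: keep the parent and stop */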
+ if (cp->d_count == 1 && !sib->end) {
+ cp->dst[0].node->parent = ma_parent_ptr(mas_mn(mas)->parent);
+ return false;
}
- return mas_spanning_rebalance(mas, &mast, empty_count);
+ MAS_WR_BUG_ON(wr_mas, cp->height > 4);
+ cp->height++;
+ mas_ascend(mas);
+ wr_mas_setup(wr_mas, mas);
+ wr_mas->offset_end = mas->offset;
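+ /* Extend the parent-level write to cover the consumed sibling's slot */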
+ if (r == sib)
+ wr_mas->offset_end++;
+ else
+ wr_mas->mas->offset--;
+ return true;
}
+
/*
* mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
* state.
* a rebalance is required for the operation to complete and an overflow
* of data may happen.
*/
- mas = wr_mas->mas;
- trace_ma_op(__func__, mas);
- //mt_dump(mas->tree, mt_dump_hex);
- //printk ("%p %s %lx - %lx => %p\n", mas->tree, __func__, mas->index, mas->last, wr_mas->entry);
+ trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+ mas = wr_mas->mas;
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);
/*
* mas_wr_rebalance() - Insufficient data in one node needs to either get data
* from a sibling or absorb a sibling all together.
* @wr_mas: The write maple state
+ *
+ * Rebalance differs from a spanning store in that the write state is
+ * already at the leaf node that is being altered.
*/
-static noinline_for_kasan void mas_wr_rebalance(struct ma_wr_state *wr_mas)
+static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
{
- struct maple_big_node b_node;
+ struct maple_enode *old_enode;
+ struct ma_state *mas;
+ struct maple_copy cp;
+ struct ma_state sib;
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
- memset(&b_node, 0, sizeof(struct maple_big_node));
- mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- WARN_ON_ONCE(wr_mas->mas->store_type != wr_rebalance);
- return mas_rebalance(wr_mas->mas, &b_node);
+
+ /*
+ * Rebalancing occurs if a node is insufficient. Data is rebalanced
+ * against the node to the right if it exists, otherwise the node to the
+ * left of this node is rebalanced against this node. If rebalancing
+ * causes just one node to be produced instead of two, then the parent
+ * is also examined and rebalanced if it is insufficient. Every level
+ * tries to combine the data in the same way. If one node contains the
+ * entire range of the tree, then that node is used as a new root node.
+ */
+
+ mas = wr_mas->mas;
+ rebalance_leaf_init(&cp, mas, wr_mas, wr_mas);
+ do {
+ rebalance_data_calc(&cp, wr_mas, &sib);
+ multi_src_setup(&cp, wr_mas, wr_mas, &sib);
+ rebalance_dest_setup(&cp, mas, wr_mas->type);
+ cp_data_write(&cp, mas);
+ } while (rebalance_ascend(&cp, wr_mas, &sib));
+
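+ /* Replace the old subtree with the new data and rewalk the range */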
+ old_enode = mas->node;
+ mas->node = cp.slot[0];
+ mas_wmb_replace(mas, old_enode, cp.height);
+ mtree_range_walk(mas);
}
/*