};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
-#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
-#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
-
-struct maple_big_node {
- unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
- union {
- struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
- struct {
- unsigned long padding[MAPLE_BIG_NODE_GAPS];
- unsigned long gap[MAPLE_BIG_NODE_GAPS];
- };
- };
- unsigned char b_end;
- enum maple_type type;
-};
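/*
 * Editor's note (illustrative sketch, not part of the patch): assuming the
 * usual 64-bit slot counts MAPLE_RANGE64_SLOTS == 16 and
 * MAPLE_ARANGE64_SLOTS == 10, the big node is sized to hold the contents of
 * two full nodes plus the extra entries a store can create:
 *
 *	MAPLE_BIG_NODE_SLOTS = 16 * 2 + 2 = 34 slots (33 pivots)
 *	MAPLE_BIG_NODE_GAPS  = 10 * 2 + 1 = 21 gaps
 *
 * which could be checked at build time with:
 */
#ifdef CONFIG_64BIT
static_assert(MAPLE_BIG_NODE_SLOTS == 34);
static_assert(MAPLE_BIG_NODE_GAPS == 21);
#endif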
-
-/*
- * The maple_subtree_state is used to build a tree to replace a segment of an
- * existing tree in a more atomic way. Any walkers of the older tree will hit a
- * dead node and restart on updates.
- */
-struct maple_subtree_state {
- struct ma_state *orig_l; /* Original left side of subtree */
- struct ma_state *orig_r; /* Original right side of subtree */
- struct ma_state *l; /* New left side of subtree */
- struct ma_state *m; /* New middle of subtree (rare) */
- struct ma_state *r; /* New right side of subtree */
- struct ma_topiary *free; /* nodes to be freed */
- struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
- struct maple_big_node *bn;
-};
-
-#ifdef CONFIG_KASAN_STACK
-/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
-#define noinline_for_kasan noinline_for_stack
-#else
-#define noinline_for_kasan inline
-#endif
-
/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}
-/*
- * mab_shift_right() - Shift the data in @b_node to the right.  Note: this does
- * not clean out the old data or set @b_node->b_end.
- * @b_node: the maple_big_node
- * @shift: the shift count
- */
-static inline void mab_shift_right(struct maple_big_node *b_node,
- unsigned char shift)
-{
- unsigned long size = b_node->b_end * sizeof(unsigned long);
-
- memmove(b_node->pivot + shift, b_node->pivot, size);
- memmove(b_node->slot + shift, b_node->slot, size);
- if (b_node->type == maple_arange_64)
- memmove(b_node->gap + shift, b_node->gap, size);
-}
-
-/*
- * mab_middle_node() - Check if a middle node is needed (unlikely)
- * @b_node: the maple_big_node that contains the data.
- * @split: the potential split location
- * @slot_count: the number of slots in a single node being considered.
- *
- * Return: true if a middle node is required.
- */
-static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
- unsigned char slot_count)
-{
- unsigned char size = b_node->b_end;
-
- if (size >= 2 * slot_count)
- return true;
-
- if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
- return true;
-
- return false;
-}
-
-/*
- * mab_no_null_split() - ensure the split doesn't fall on a NULL
- * @b_node: the maple_big_node with the data
- * @split: the suggested split location
- * @slot_count: the number of slots in the node being considered.
- *
- * Return: the split location.
- */
-static inline int mab_no_null_split(struct maple_big_node *b_node,
- unsigned char split, unsigned char slot_count)
-{
- if (!b_node->slot[split]) {
-		/*
-		 * If the split is below the last slot and the right side would
-		 * still be sufficient, move the split right past the NULL;
-		 * otherwise move it left.
-		 */
- if ((split < slot_count - 1) &&
- (b_node->b_end - split) > (mt_min_slots[b_node->type]))
- split++;
- else
- split--;
- }
- return split;
-}
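/*
 * Editor's note (illustrative, not part of the patch): a worked example of
 * mab_no_null_split(), assuming the typical 64-bit values mt_slots[] == 16 and
 * mt_min_slots[] == 6 for maple_range_64:
 *
 *	b_end = 14, suggested split = 7, slot[7] == NULL:
 *	split (7) < slot_count - 1 (15) and b_end - split (7) > 6,
 *	so the split is incremented to 8 and the left node no longer ends on
 *	the NULL entry.
 */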
-
-/*
- * mab_calc_split() - Calculate the split location and if there needs to be two
- * splits.
- * @mas: The maple state
- * @bn: The maple_big_node with the data
- * @mid_split: The second split, if required. 0 otherwise.
- *
- * Return: The first split location. The middle split is set in @mid_split.
- */
-static inline int mab_calc_split(struct ma_state *mas,
- struct maple_big_node *bn, unsigned char *mid_split)
-{
- unsigned char b_end = bn->b_end;
- int split = b_end / 2; /* Assume equal split. */
- unsigned char slot_count = mt_slots[bn->type];
-
- /*
- * To support gap tracking, all NULL entries are kept together and a node cannot
- * end on a NULL entry, with the exception of the left-most leaf. The
- * limitation means that the split of a node must be checked for this condition
- * and be able to put more data in one direction or the other.
- */
- if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
- *mid_split = 0;
- split = b_end - mt_min_slots[bn->type];
-
- if (!ma_is_leaf(bn->type))
- return split;
-
- mas->mas_flags |= MA_STATE_REBALANCE;
- if (!bn->slot[split])
- split--;
- return split;
- }
-
- /*
- * Although extremely rare, it is possible to enter what is known as the 3-way
- * split scenario. The 3-way split comes about by means of a store of a range
- * that overwrites the end and beginning of two full nodes. The result is a set
- * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
- * also be located in different parent nodes which are also full. This can
- * carry upwards all the way to the root in the worst case.
- */
- if (unlikely(mab_middle_node(bn, split, slot_count))) {
- split = b_end / 3;
- *mid_split = split * 2;
- } else {
- *mid_split = 0;
- }
-
- /* Avoid ending a node on a NULL entry */
- split = mab_no_null_split(bn, split, slot_count);
-
- if (unlikely(*mid_split))
- *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
-
- return split;
-}
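/*
 * Editor's note (illustrative, not part of the patch): two worked examples of
 * mab_calc_split() for a normal (non-bulk) store, assuming the typical 64-bit
 * value mt_slots[] == 16 for maple_range_64:
 *
 *	b_end = 20: 20 < 2 * 16, so no middle node is needed;
 *	split = 20 / 2 = 10, *mid_split = 0, and mab_no_null_split() may then
 *	nudge the split off a NULL entry.
 *
 *	b_end = 33: 33 >= 2 * 16, so the 3-way split is required;
 *	split = 33 / 3 = 11 and *mid_split = 22, giving three nodes of roughly
 *	a third of the data each.
 */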
-
-/*
- * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
- * and set @b_node->b_end to the next free slot.
- * @mas: The maple state
- * @mas_start: The starting slot to copy
- * @mas_end: The end slot to copy (inclusively)
- * @b_node: The maple_big_node to place the data
- * @mab_start: The starting location in maple_big_node to store the data.
- */
-static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
- unsigned char mas_end, struct maple_big_node *b_node,
- unsigned char mab_start)
-{
- enum maple_type mt;
- struct maple_node *node;
- void __rcu **slots;
- unsigned long *pivots, *gaps;
- int i = mas_start, j = mab_start;
- unsigned char piv_end;
-
- node = mas_mn(mas);
- mt = mte_node_type(mas->node);
- pivots = ma_pivots(node, mt);
- if (!i) {
- b_node->pivot[j] = pivots[i++];
- if (unlikely(i > mas_end))
- goto complete;
- j++;
- }
-
- piv_end = min(mas_end, mt_pivots[mt]);
- for (; i < piv_end; i++, j++) {
- b_node->pivot[j] = pivots[i];
- if (unlikely(!b_node->pivot[j]))
- goto complete;
-
- if (unlikely(mas->max == b_node->pivot[j]))
- goto complete;
- }
-
- b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
-
-complete:
- b_node->b_end = ++j;
- j -= mab_start;
- slots = ma_slots(node, mt);
- memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
- if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
- gaps = ma_gaps(node, mt);
- memcpy(b_node->gap + mab_start, gaps + mas_start,
- sizeof(unsigned long) * j);
- }
-}
-
/*
* mas_leaf_set_meta() - Set the metadata of a leaf if possible.
* @node: The maple node
ma_set_meta(node, mt, 0, end);
}
-/*
- * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
- * @b_node: the maple_big_node that has the data
- * @mab_start: the start location in @b_node.
- * @mab_end: The end location in @b_node (inclusive)
- * @mas: The maple state with the maple encoded node.
- * @new_max: Update @mas->max to the last pivot copied when true.
- */
-static inline void mab_mas_cp(struct maple_big_node *b_node,
- unsigned char mab_start, unsigned char mab_end,
- struct ma_state *mas, bool new_max)
-{
- int i, j = 0;
- enum maple_type mt = mte_node_type(mas->node);
- struct maple_node *node = mte_to_node(mas->node);
- void __rcu **slots = ma_slots(node, mt);
- unsigned long *pivots = ma_pivots(node, mt);
- unsigned long *gaps = NULL;
- unsigned char end;
-
- if (mab_end - mab_start > mt_pivots[mt])
- mab_end--;
-
- if (!pivots[mt_pivots[mt] - 1])
- slots[mt_pivots[mt]] = NULL;
-
- i = mab_start;
- do {
- pivots[j++] = b_node->pivot[i++];
- } while (i <= mab_end && likely(b_node->pivot[i]));
-
- memcpy(slots, b_node->slot + mab_start,
- sizeof(void *) * (i - mab_start));
-
- if (new_max)
- mas->max = b_node->pivot[i - 1];
-
- end = j - 1;
- if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
- unsigned long max_gap = 0;
- unsigned char offset = 0;
-
- gaps = ma_gaps(node, mt);
- do {
- gaps[--j] = b_node->gap[--i];
- if (gaps[j] > max_gap) {
- offset = j;
- max_gap = gaps[j];
- }
- } while (j);
-
- ma_set_meta(node, mt, offset, end);
- } else {
- mas_leaf_set_meta(node, mt, end);
- }
-}
-
/*
* mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
* @mas: The maple state
}
}
-/*
- * mas_store_b_node() - Store an @entry into the b_node while also copying the
- * data from a maple encoded node.
- * @wr_mas: the maple write state
- * @b_node: the maple_big_node to fill with data
- * @offset_end: the offset to end copying
- *
- * The end of the stored data is recorded in @b_node->b_end.
- */
-static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
- struct maple_big_node *b_node, unsigned char offset_end)
-{
- unsigned char slot;
- unsigned char b_end;
- /* Possible underflow of piv will wrap back to 0 before use. */
- unsigned long piv;
- struct ma_state *mas = wr_mas->mas;
-
- b_node->type = wr_mas->type;
- b_end = 0;
- slot = mas->offset;
- if (slot) {
- /* Copy start data up to insert. */
- mas_mab_cp(mas, 0, slot - 1, b_node, 0);
- b_end = b_node->b_end;
- piv = b_node->pivot[b_end - 1];
- } else
- piv = mas->min - 1;
-
- if (piv + 1 < mas->index) {
- /* Handle range starting after old range */
- b_node->slot[b_end] = wr_mas->content;
- if (!wr_mas->content)
- b_node->gap[b_end] = mas->index - 1 - piv;
- b_node->pivot[b_end++] = mas->index - 1;
- }
-
- /* Store the new entry. */
- mas->offset = b_end;
- b_node->slot[b_end] = wr_mas->entry;
- b_node->pivot[b_end] = mas->last;
-
- /* Appended. */
- if (mas->last >= mas->max)
- goto b_end;
-
- /* Handle new range ending before old range ends */
- piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
- if (piv > mas->last) {
- if (piv == ULONG_MAX)
- mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
-
- if (offset_end != slot)
- wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
- offset_end);
-
- b_node->slot[++b_end] = wr_mas->content;
- if (!wr_mas->content)
- b_node->gap[b_end] = piv - mas->last + 1;
- b_node->pivot[b_end] = piv;
- }
-
- slot = offset_end + 1;
- if (slot > mas->end)
- goto b_end;
-
- /* Copy end data to the end of the node. */
- mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
- b_node->b_end--;
- return;
-
-b_end:
- b_node->b_end = b_end;
-}
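/*
 * Editor's note (illustrative, not part of the patch): a worked example of how
 * a leaf ends up in the big node.  Take a hypothetical leaf with min = 1,
 * max = 30 holding A over [1, 10], B over [11, 20] and C over [21, 30], and a
 * store of X over [12, 25] (mas->offset = 1, offset_end = 2, content = B):
 *
 *	slot 0: A, pivot 10	(copied start data)
 *	slot 1: B, pivot 11	(old range truncated in front of the store)
 *	slot 2: X, pivot 25	(the new entry)
 *	slot 3: C, pivot 30	(old range truncated after the store)
 *
 * b_node->b_end is left at 3, the offset of the last entry.
 */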
-
/*
* mas_prev_sibling() - Find the previous node with the same parent.
* @mas: the maple state
wr_mas->offset_end = mas->offset = offset;
}
-/*
- * mast_rebalance_next() - Rebalance against the next node
- * @mast: The maple subtree state
- */
-static inline void mast_rebalance_next(struct maple_subtree_state *mast)
-{
- unsigned char b_end = mast->bn->b_end;
-
- mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
- mast->bn, b_end);
- mast->orig_r->last = mast->orig_r->max;
-}
-
-/*
- * mast_rebalance_prev() - Rebalance against the previous node
- * @mast: The maple subtree state
- */
-static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
-{
- unsigned char end = mas_data_end(mast->orig_l) + 1;
- unsigned char b_end = mast->bn->b_end;
-
- mab_shift_right(mast->bn, end);
- mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
- mast->l->min = mast->orig_l->min;
- mast->orig_l->index = mast->orig_l->min;
- mast->bn->b_end = end + b_end;
- mast->l->offset += end;
-}
-
static inline void rebalance_sib(struct ma_state *mas, struct ma_state *sib)
{
unsigned char end;
WARN_ON_ONCE(1);
}
-/*
- * mast_spanning_rebalance() - Rebalance against the nearest neighbour,
- * favouring the node to the right.  Check the node to the right, then the
- * left, at each level upwards until the root is reached.
- * Data is copied into @mast->bn.
- * @mast: The maple_subtree_state.
- *
- * Return: true if a neighbour was found and its data copied into @mast->bn,
- * false otherwise.
- */
-static inline
-bool mast_spanning_rebalance(struct maple_subtree_state *mast)
-{
- struct ma_state r_tmp = *mast->orig_r;
- struct ma_state l_tmp = *mast->orig_l;
- unsigned char depth = 0;
-
- do {
- mas_ascend(mast->orig_r);
- mas_ascend(mast->orig_l);
- depth++;
- if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
- mast->orig_r->offset++;
- do {
- mas_descend(mast->orig_r);
- mast->orig_r->offset = 0;
- } while (--depth);
-
- mast_rebalance_next(mast);
- *mast->orig_l = l_tmp;
- return true;
- } else if (mast->orig_l->offset != 0) {
- mast->orig_l->offset--;
- do {
- mas_descend(mast->orig_l);
- mast->orig_l->offset =
- mas_data_end(mast->orig_l);
- } while (--depth);
-
- mast_rebalance_prev(mast);
- *mast->orig_r = r_tmp;
- return true;
- }
- } while (!mte_is_root(mast->orig_r->node));
-
- *mast->orig_r = r_tmp;
- *mast->orig_l = l_tmp;
- return false;
-}
-
-/*
- * mast_ascend() - Ascend the original left and right maple states.
- * @mast: the maple subtree state.
- *
- * Ascend the original left and right sides. Set the offsets to point to the
- * data already in the new tree (@mast->l and @mast->r).
- */
-static inline void mast_ascend(struct maple_subtree_state *mast)
-{
- MA_WR_STATE(wr_mas, mast->orig_r, NULL);
- mas_ascend(mast->orig_l);
- mas_ascend(mast->orig_r);
-
- mast->orig_r->offset = 0;
- mast->orig_r->index = mast->r->max;
- /* last should be larger than or equal to index */
- if (mast->orig_r->last < mast->orig_r->index)
- mast->orig_r->last = mast->orig_r->index;
-
- wr_mas.type = mte_node_type(mast->orig_r->node);
- mas_wr_node_walk(&wr_mas);
- /* Set up the left side of things */
- mast->orig_l->offset = 0;
- mast->orig_l->index = mast->l->min;
- wr_mas.mas = mast->orig_l;
- wr_mas.type = mte_node_type(mast->orig_l->node);
- mas_wr_node_walk(&wr_mas);
-
- mast->bn->type = wr_mas.type;
-}
-
-/*
- * mas_new_ma_node() - Create and return a new maple node. Helper function.
- * @mas: the maple state with the allocations.
- * @b_node: the maple_big_node with the type encoding.
- *
- * Use the node type from the maple_big_node to allocate a new node from the
- * ma_state. This function exists mainly for code readability.
- *
- * Return: A new maple encoded node
- */
-static inline struct maple_enode
-*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
-{
- return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
-}
-
-/*
- * mas_mab_to_node() - Set up the new left, right and middle nodes.
- *
- * @mas: the maple state that contains the allocations.
- * @b_node: the node which contains the data.
- * @left: The pointer which will have the left node
- * @right: The pointer which may have the right node
- * @middle: the pointer which may have the middle node (rare)
- * @mid_split: the split location for the middle node
- *
- * Return: the split location for the left node.
- */
-static inline unsigned char mas_mab_to_node(struct ma_state *mas,
- struct maple_big_node *b_node, struct maple_enode **left,
- struct maple_enode **right, struct maple_enode **middle,
- unsigned char *mid_split)
-{
- unsigned char split = 0;
- unsigned char slot_count = mt_slots[b_node->type];
-
- *left = mas_new_ma_node(mas, b_node);
- *right = NULL;
- *middle = NULL;
- *mid_split = 0;
-
- if (b_node->b_end < slot_count) {
- split = b_node->b_end;
- } else {
- split = mab_calc_split(mas, b_node, mid_split);
- *right = mas_new_ma_node(mas, b_node);
- }
-
- if (*mid_split)
- *middle = mas_new_ma_node(mas, b_node);
-
- return split;
-
-}
-
-/*
- * mab_set_b_end() - Add an entry to @b_node at @b_node->b_end and increment
- * @b_node->b_end.
- * @b_node: the big node to add the entry
- * @mas: the maple state to get the pivot (mas->max)
- * @entry: the entry to add; if NULL, nothing happens.
- */
-static inline void mab_set_b_end(struct maple_big_node *b_node,
- struct ma_state *mas,
- void *entry)
-{
- if (!entry)
- return;
-
- b_node->slot[b_node->b_end] = entry;
- if (mt_is_alloc(mas->tree))
- b_node->gap[b_node->b_end] = mas_max_gap(mas);
- b_node->pivot[b_node->b_end++] = mas->max;
-}
-
/*
* mas_set_split_parent() - combine_then_separate helper function. Sets the parent
* of @mas->node to either @left or @right, depending on @slot and @split
*split = mid_split;
}
-/*
- * mast_set_split_parents() - Helper function to set three nodes' parents.  The
- * slot is taken from @mast->l.
- * @mast: the maple subtree state
- * @left: the left node
- * @middle: the middle node (may be NULL)
- * @right: the right node
- * @split: the split location
- * @mid_split: the second split location, if any
- */
-static inline void mast_set_split_parents(struct maple_subtree_state *mast,
- struct maple_enode *left,
- struct maple_enode *middle,
- struct maple_enode *right,
- unsigned char split,
- unsigned char mid_split)
-{
- unsigned char slot;
- struct maple_enode *l = left;
- struct maple_enode *r = right;
-
- if (mas_is_none(mast->l))
- return;
-
- if (middle)
- r = middle;
-
- slot = mast->l->offset;
-
- mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
- mas_set_split_parent(mast->l, l, r, &slot, split);
-
- mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
- mas_set_split_parent(mast->m, l, r, &slot, split);
-
- mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
- mas_set_split_parent(mast->r, l, r, &slot, split);
-}
-
/*
* mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
mas_update_gap(mas);
}
-/*
- * mast_cp_to_nodes() - Copy data out to nodes.
- * @mast: The maple subtree state
- * @left: The left encoded maple node
- * @middle: The middle encoded maple node
- * @right: The right encoded maple node
- * @split: The location to split between left and (middle ? middle : right)
- * @mid_split: The location to split between middle and right.
- */
-static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
- struct maple_enode *left, struct maple_enode *middle,
- struct maple_enode *right, unsigned char split, unsigned char mid_split)
-{
- bool new_lmax = true;
-
- mas_node_or_none(mast->l, left);
- mas_node_or_none(mast->m, middle);
- mas_node_or_none(mast->r, right);
-
- mast->l->min = mast->orig_l->min;
- if (split == mast->bn->b_end) {
- mast->l->max = mast->orig_r->max;
- new_lmax = false;
- }
-
- mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
-
- if (middle) {
- mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
- mast->m->min = mast->bn->pivot[split] + 1;
- split = mid_split;
- }
-
- mast->r->max = mast->orig_r->max;
- if (right) {
- mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
- mast->r->min = mast->bn->pivot[split] + 1;
- }
-}
-
-/*
- * mast_combine_cp_left - Copy in the original left side of the tree into the
- * combined data set in the maple subtree state big node.
- * @mast: The maple subtree state
- */
-static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
-{
- unsigned char l_slot = mast->orig_l->offset;
-
- if (!l_slot)
- return;
-
- mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
-}
-
-/*
- * mast_combine_cp_right() - Copy in the original right side of the tree into
- * the combined data set in the maple subtree state big node.
- * @mast: The maple subtree state
- */
-static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
-{
- if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
- return;
-
- mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
- mt_slot_count(mast->orig_r->node), mast->bn,
- mast->bn->b_end);
- mast->orig_r->last = mast->orig_r->max;
-}
-
-/*
- * mast_sufficient() - Check if the maple subtree state has enough data in the
- * big node to create at least one sufficient node
- * @mast: the maple subtree state
- *
- * Return: true if there is enough data, false otherwise.
- */
-static inline bool mast_sufficient(struct maple_subtree_state *mast)
-{
- if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
- return true;
-
- return false;
-}
-
-/*
- * mast_overflow() - Check if there is too much data in the subtree state for a
- * single node.
- * @mast: The maple subtree state
- *
- * Return: true if the data overflows a single node, false otherwise.
- */
-static inline bool mast_overflow(struct maple_subtree_state *mast)
-{
- if (mast->bn->b_end > mt_slot_count(mast->orig_l->node))
- return true;
-
- return false;
-}
-
static inline void *mtree_range_walk(struct ma_state *mas)
{
unsigned long *pivots;
mas_update_gap(mas);
}
-/*
- * mas_split_final_node() - Split the final node in a subtree operation.
- * @mast: the maple subtree state
- * @mas: The maple state
- */
-static inline void mas_split_final_node(struct maple_subtree_state *mast,
- struct ma_state *mas)
-{
- struct maple_enode *ancestor;
-
- if (mte_is_root(mas->node)) {
- if (mt_is_alloc(mas->tree))
- mast->bn->type = maple_arange_64;
- else
- mast->bn->type = maple_range_64;
- }
- /*
-	 * Only a single node is used here, which could be the root.
-	 * The big node data should just fit in a single node.
- */
- ancestor = mas_new_ma_node(mas, mast->bn);
- mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
- mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
- mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
-
- mast->l->node = ancestor;
- mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
- mas->offset = mast->bn->b_end - 1;
-}
-
-/*
- * mast_fill_bnode() - Copy data into the big node in the subtree state
- * @mast: The maple subtree state
- * @mas: the maple state
- * @skip: The number of entries to skip when inserting the new nodes.
- */
-static inline void mast_fill_bnode(struct maple_subtree_state *mast,
- struct ma_state *mas,
- unsigned char skip)
-{
- bool cp = true;
- unsigned char split;
-
- memset(mast->bn, 0, sizeof(struct maple_big_node));
-
- if (mte_is_root(mas->node)) {
- cp = false;
- } else {
- mas_ascend(mas);
- mas->offset = mte_parent_slot(mas->node);
- }
-
- if (cp && mast->l->offset)
- mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
-
- split = mast->bn->b_end;
- mab_set_b_end(mast->bn, mast->l, mast->l->node);
- mast->r->offset = mast->bn->b_end;
- mab_set_b_end(mast->bn, mast->r, mast->r->node);
- if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
- cp = false;
-
- if (cp)
- mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
- mast->bn, mast->bn->b_end);
-
- mast->bn->b_end--;
- mast->bn->type = mte_node_type(mas->node);
-}
-
-/*
- * mast_split_data() - Split the data in the subtree state big node into regular
- * nodes.
- * @mast: The maple subtree state
- * @mas: The maple state
- * @split: The location to split the big node
- */
-static inline void mast_split_data(struct maple_subtree_state *mast,
- struct ma_state *mas, unsigned char split)
-{
- unsigned char p_slot;
-
- mab_mas_cp(mast->bn, 0, split, mast->l, true);
- mte_set_pivot(mast->r->node, 0, mast->r->max);
- mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
- mast->l->offset = mte_parent_slot(mas->node);
- mast->l->max = mast->bn->pivot[split];
- mast->r->min = mast->l->max + 1;
- if (mte_is_leaf(mas->node))
- return;
-
- p_slot = mast->orig_l->offset;
- mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
- &p_slot, split);
- mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
- &p_slot, split);
-}
-
-/*
- * mas_push_data() - Instead of splitting a node, it is beneficial to push the
- * data to the right or left node if there is room.
- * @mas: The maple state
- * @mast: The maple subtree state
- * @left: Push left or not.
- *
- * Keeping the height of the tree low means faster lookups.
- *
- * Return: True if pushed, false otherwise.
- */
-static inline bool mas_push_data(struct ma_state *mas,
- struct maple_subtree_state *mast, bool left)
-{
- unsigned char slot_total = mast->bn->b_end;
- unsigned char end, space, split;
-
- MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
- tmp_mas = *mas;
- tmp_mas.depth = mast->l->depth;
-
- if (left && !mas_prev_sibling(&tmp_mas))
- return false;
- else if (!left && !mas_next_sibling(&tmp_mas))
- return false;
-
- end = mas_data_end(&tmp_mas);
- slot_total += end;
- space = 2 * mt_slot_count(mas->node) - 2;
- /* -2 instead of -1 to ensure there isn't a triple split */
- if (ma_is_leaf(mast->bn->type))
- space--;
-
- if (mas->max == ULONG_MAX)
- space--;
-
- if (slot_total >= space)
- return false;
-
- /* Get the data; Fill mast->bn */
- mast->bn->b_end++;
- if (left) {
- mab_shift_right(mast->bn, end + 1);
- mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
- mast->bn->b_end = slot_total + 1;
- } else {
- mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
- }
-
- /* Configure mast for splitting of mast->bn */
- split = mt_slots[mast->bn->type] - 2;
- if (left) {
- /* Switch mas to prev node */
- *mas = tmp_mas;
- /* Start using mast->l for the left side. */
- tmp_mas.node = mast->l->node;
- *mast->l = tmp_mas;
- } else {
- tmp_mas.node = mast->r->node;
- *mast->r = tmp_mas;
- split = slot_total - split;
- }
- split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
- /* Update parent slot for split calculation. */
- if (left)
- mast->orig_l->offset += end + 1;
-
- mast_split_data(mast, mas, split);
- mast_fill_bnode(mast, mas, 2);
- mas_split_final_node(mast, mas);
- return true;
-}
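/*
 * Editor's note (illustrative, not part of the patch): the space check above,
 * assuming the typical 64-bit value mt_slot_count() == 16 for a maple_range_64
 * node:
 *
 *	space = 2 * 16 - 2 = 30
 *	leaf node:             space = 29
 *	mas->max == ULONG_MAX:  space = 28
 *
 * The push is only attempted when the combined data (slot_total, the big node
 * end plus the sibling's end) is below this limit, so the result is
 * guaranteed to fit in two nodes without a further split.
 */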
-
-/*
- * mas_split() - Split data that is too big for one node into two.
- * @mas: The maple state
- * @b_node: The maple big node
- */
-static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
-{
- struct maple_subtree_state mast;
- int height = 0;
- unsigned int orig_height = mas_mt_height(mas);
- unsigned char mid_split, split = 0;
- struct maple_enode *old;
-
- /*
- * Splitting is handled differently from any other B-tree; the Maple
- * Tree splits upwards. Splitting up means that the split operation
- * occurs when the walk of the tree hits the leaves and not on the way
- * down. The reason for splitting up is that it is impossible to know
- * how much space will be needed until the leaf is (or leaves are)
- * reached. Since overwriting data is allowed and a range could
- * overwrite more than one range or result in changing one entry into 3
- * entries, it is impossible to know if a split is required until the
- * data is examined.
- *
- * Splitting is a balancing act between keeping allocations to a minimum
- * and avoiding a 'jitter' event where a tree is expanded to make room
- * for an entry followed by a contraction when the entry is removed. To
- * accomplish the balance, there are empty slots remaining in both left
- * and right nodes after a split.
- */
- MA_STATE(l_mas, mas->tree, mas->index, mas->last);
- MA_STATE(r_mas, mas->tree, mas->index, mas->last);
- MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
- MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
-
- trace_ma_op(__func__, mas);
-
- mast.l = &l_mas;
- mast.r = &r_mas;
- mast.orig_l = &prev_l_mas;
- mast.orig_r = &prev_r_mas;
- mast.bn = b_node;
-
- while (height++ <= orig_height) {
- if (mt_slots[b_node->type] > b_node->b_end) {
- mas_split_final_node(&mast, mas);
- break;
- }
-
- l_mas = r_mas = *mas;
- l_mas.node = mas_new_ma_node(mas, b_node);
- r_mas.node = mas_new_ma_node(mas, b_node);
-		/*
-		 * Another way that 'jitter' is avoided is to terminate a split
-		 * up early if the left or right node has space to spare.  This
-		 * is referred to as "pushing left" or "pushing right" and is
-		 * similar to the B* tree, except that the nodes to the left or
-		 * right can rarely be reused due to RCU.  The ripple upwards
-		 * is halted, however, which is a significant saving.
-		 */
- /* Try to push left. */
- if (mas_push_data(mas, &mast, true)) {
- height++;
- break;
- }
- /* Try to push right. */
- if (mas_push_data(mas, &mast, false)) {
- height++;
- break;
- }
-
- split = mab_calc_split(mas, b_node, &mid_split);
- mast_split_data(&mast, mas, split);
-		/*
-		 * mab_mas_cp() in the call above overwrites r->max; mas->max
-		 * is usually the correct value, so restore it here.
-		 */
- mast.r->max = mas->max;
- mast_fill_bnode(&mast, mas, 1);
- prev_l_mas = *mast.l;
- prev_r_mas = *mast.r;
- }
-
- /* Set the original node as dead */
- old = mas->node;
- mas->node = l_mas.node;
- mas_wmb_replace(mas, old, height);
- mtree_range_walk(mas);
- return;
-}
-
/*
* mas_root_expand() - Expand a root to a node
* @mas: The maple state