/*
* Maple Tree implementation
* Copyright (c) 2018 Oracle Corporation
- * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Authors: Liam R. Howlett <jedix@infradead.org>
* Matthew Wilcox <willy@infradead.org>
*/
return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}
+/* Private
+ * mt_is_empty() - true if @entry holds no user data: NULL, a deleted
+ * entry, or a skip entry.
+ */
+static inline bool mt_is_empty(const void *entry)
+{
+ return (!entry) || xa_is_deleted((entry)) || xa_is_skip(entry);
+}
+/* Private
+ * mt_will_coalesce() - true if @entry is a placeholder (deleted, skip,
+ * or retry) that can be dropped when the node contents are coalesced.
+ * Unlike mt_is_empty(), NULL is not included.
+ */
+static inline bool mt_will_coalesce(const void *entry)
+{
+ return (xa_is_deleted((entry)) || xa_is_skip(entry) ||
+ xa_is_retry(entry));
+}
+
static inline void mas_set_err(struct ma_state *mas, long err)
{
mas->node = MA_ERROR(err);
return (struct maple_node *)((unsigned long)entry & ~127);
}
+/* Private
+ * mte_free() - Free the maple node backing the encoded node @enode.
+ */
+static void mte_free(struct maple_enode *enode)
+{
+ mt_free(mt_to_node(enode));
+}
+
static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
enum maple_type type) {
return (void *)((unsigned long)node | (type << 3) | 4);
}
static inline bool _ma_is_root(struct maple_node *node)
{
- if (((unsigned long)node->parent & MA_ROOT_PARENT) == 1)
- return true;
- return false;
+ return ((unsigned long)node->parent & MA_ROOT_PARENT);
}
+
static inline bool ma_is_root(struct maple_enode *node)
{
return _ma_is_root(mt_to_node(node));
*
* Return: The pivot (including mas->max for the final slot)
*/
-static inline unsigned long ma_get_safe_pivot(const struct ma_state *mas,
- unsigned char slot)
+static inline unsigned long _ma_get_safe_pivot(const struct ma_state *mas,
+ unsigned char slot, enum maple_type type)
{
- enum maple_type type = mt_node_type(mas->node);
-
if (slot >= mt_pivots[type])
return mas->max;
+
return _ma_get_pivot(mas->node, slot, type);
}
+/* Private
+ * ma_get_safe_pivot() - Wrapper around _ma_get_safe_pivot() that looks
+ * up the node type itself.  Returns mas->max for slots beyond the
+ * node's pivot count.
+ */
+static inline unsigned long ma_get_safe_pivot(const struct ma_state *mas,
+ unsigned char slot)
+{
+ enum maple_type type = mt_node_type(mas->node);
+ return _ma_get_safe_pivot(mas, slot, type);
+}
+
static inline void ma_set_pivot(struct maple_enode *mn, unsigned char slot,
unsigned long val)
{
break;
}
}
+/* Private
+ * mas_set_safe_pivot() - Set the pivot for @slot to @val.  If @slot is
+ * beyond the pivots stored by this node type, the bounding pivot is
+ * updated at slot p_slot + 1 instead (no-op when the parent is root).
+ *
+ * NOTE(review): the recursive call is made without first ascending via
+ * ma_encoded_parent(), so mas->node is unchanged when recursing with
+ * p_slot + 1 — confirm the intended node's pivot is updated.
+ */
+static inline void mas_set_safe_pivot(struct ma_state *mas, unsigned char slot,
+ unsigned long val)
+{
+ enum maple_type type = mt_node_type(mas->node);
+
+ if (slot >= mt_pivots[type]) {
+ unsigned char p_slot;
+ struct maple_enode *me = mas->node;
+
+ if (_ma_is_root(mt_parent(mas->node)))
+ return;
+
+ p_slot = mt_parent_slot(mas->node);
+ mas_set_safe_pivot(mas, p_slot + 1, val);
+ mas->node = me;
+ return;
+ }
+ ma_set_pivot(mas->node, slot, val);
+}
static inline void ma_cp_pivot(struct maple_enode *dst,
unsigned char dloc, struct maple_enode *src, unsigned long sloc)
{
return _ma_get_rcu_slot(mn, slot, mt_node_type(mn));
}
static inline void ma_set_rcu_slot(const struct maple_enode *mn,
- unsigned char slot, struct maple_enode *val)
+ unsigned char slot, void *val)
{
enum maple_type type = mt_node_type(mn);
{
void *entry = ma_get_rcu_slot(src, sloc);
- if (xa_is_retry(entry))
+ if (mt_is_empty(entry) || xa_is_retry(entry))
entry = NULL;
ma_set_rcu_slot(dst, dloc, entry);
smn = node->slot[cnt/15];
smn->slot[cnt % 15] = reuse;
}
+ if (cnt != ma_get_alloc_cnt(mas) + 1)
+ BUG_ON(0);
}
static inline void ma_new_node(struct ma_state *ms, gfp_t gfp)
{
return mas->node;
}
+/* Private
+ * Returns the last slot that contains data.
+ */
static inline unsigned char ma_data_end(const struct ma_state *mas,
const enum maple_type type, unsigned long *last_piv,
unsigned char *coalesce)
struct maple_enode *mn = mas->node;
*coalesce = 0;
- for (data_end = 0; data_end < mt_slot_count(mn) - 1; data_end++) {
- *last_piv = _ma_get_pivot(mn, data_end, type);
- if (*last_piv == 0 && data_end > 0) {
+ prev_piv = _ma_get_safe_pivot(mas, 0, type);
+ for (data_end = 1; data_end < mt_slot_count(mn); data_end++) {
+ *last_piv = _ma_get_safe_pivot(mas, data_end, type);
+ if (*last_piv == 0) {
*last_piv = prev_piv;
return data_end - 1;
}
if (prev_piv == *last_piv)
(*coalesce)++;
+ else if (mt_will_coalesce(_ma_get_rcu_slot(mn, data_end, type)))
+ (*coalesce)++;
+
+ if (*last_piv == mas->max)
+ return data_end;
prev_piv = *last_piv;
}
-
- if (!_ma_get_rcu_slot(mn, data_end, type))
- return data_end - 1;
-
-
return data_end;
}
+/** Private
+ * ma_hard_data - return the number of slots required to store what is
+ * currently in this node.
+ *
+ * @end the last slot with a valid pivot/contents
+ * @coalesce the number of slots that would be removed if copied/coalesced.
+ *
+ * Return: end - coalesce, i.e. the slot count once coalescible slots
+ * are dropped.
+ */
+static inline unsigned char ma_hard_data(unsigned long end,
+ unsigned long coalesce)
+{
+ return end - coalesce;
+}
+
#define ma_calc_split ma_no_dense_calc_split
static inline unsigned char ma_no_dense_calc_split(struct ma_state *mas,
*right = (struct maple_enode *)ma_next_alloc(mas);
*right = mt_mk_node(ma_mnode_ptr(*right), type);
+
+ if (ma_get_pivot(mas->node, half) > mas->index)
+ return half;
+ return half + 1;
+
if (ma_is_root(mas->node))
return half;
if (data_end - j >= half)
return j;
- return i > 2 ? i : half - 1;
+ return half;
}
static inline unsigned char ma_dense_calc_split(struct ma_state *mas,
struct maple_enode **left, struct maple_enode **right)
}
-// First non-null entry in mas->node
+/* Private
+ * mas_first_node() - Finds the first node in mas->node and returns the
+ * pivot; returns mas->max if no node is found.  The node is returned in
+ * mas->node, which may be MAS_NONE.
+ *
+ * @mas: maple state
+ * @max: The maximum index to consider valid.
+ */
static inline unsigned long mas_first_node(struct ma_state *mas,
unsigned long max)
{
mn = ma_get_rcu_slot(mas->node, slot);
- if (!mn)
+ if (mt_is_empty(mn))
continue;
if (!mt_is_leaf(mas->node))
mas->node = MAS_NONE;
return mas->max;
}
-
+/* Private
+ *
+ * Returns the first entry.
+ *
+ */
static inline unsigned long mas_first_entry(struct ma_state *mas,
unsigned long max)
{
case maple_arange_64:
for (i = 0; i < slot_cnt; i++) {
node = ma_get_rcu_slot(mn, i);
- if (node)
+ if (!mt_is_empty(node))
ma_destroy_walk(node);
}
break;
else
piv = mas->max;
+ // Last entry.
+ if (dloc && (piv == mas->max || !piv)) {
+ if (!ma_get_rcu_slot(cp->dst, dloc -1) &&
+ ! ma_get_rcu_slot(cp->src, sloc)) {
+ ma_set_pivot(cp->dst, dloc -1, 0);
+ break;
+ }
+ }
+
if (sloc && !piv)
break;
if (dtype == maple_arange_64)
ma_cp_gap(cp->dst, dloc, cp->src, sloc);
- ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc);
+ ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc);
prev_piv = piv;
next_src_slot:
sloc++;
}
cp->dst_start = dloc;
+ cp->src_start = sloc;
}
static inline int ma_split_data(struct ma_state *mas, struct maple_enode *left,
struct maple_enode *right, unsigned char split)
}
/* Private
- * Replace mn with mas->node in the tree
+ * _mt_replace() - Replace a maple node in the tree with mas->node. Uses the
+ * parent encoding to locate the maple node in the tree.
+ * @free: Free the old node
+ * @push: push the old node onto the allocated nodes in mas->alloc
+ *
*/
static inline void _mt_replace(struct ma_state *mas, bool free, bool push)
{
static inline void ma_gap_link(struct ma_state *mas, struct maple_enode *parent,
unsigned char slot, unsigned long pivot)
{
- unsigned long gap;
+ unsigned long gap, max;
unsigned char max_slot;
+ max = mas->max;
if (slot)
mas->min = ma_get_pivot(parent, slot - 1) + 1;
gap = ma_leaf_max_gap(mas);
ma_set_gap(parent, slot, gap);
+ mas->max = max;
}
static inline void ma_link(struct maple_enode *new, struct maple_enode *parent,
unsigned char slot, unsigned long pivot, enum maple_type type)
}
ptype = mt_node_type(mas->node);
p_max = mas->max;
- mas_update_limits(mas, p_slot, ptype);
p_end = ma_data_end(mas, ptype, &last_pivot, &coalesce);
+ mas_update_limits(mas, p_slot, ptype);
mas->node = ma_get_rcu_slot(old_parent, p_slot);
}
mas_node_cnt(mas, 3);
- if (mas_is_err(mas))
+ if (mas_is_err(mas)) {
return 0;
+ }
// Allocations.
new_parent = mt_mk_node(ma_next_alloc(mas), ptype);
goto busy;
entry = ma_get_rcu_slot(mas->node, slot);
- if (entry && !xa_is_retry(entry))
+ if (!mt_is_empty(entry))
goto busy;
}
// Old limits.
unsigned long o_min = mas->min;
unsigned long o_max = mas->max;
+ unsigned char child_slot = 0;
// Create a new node.
mas_node_cnt(mas, 1);
mas->node = leaf;
mas->min = mas->index;
mas->max = max;
- ma_set_slot(mas, 0);
+ if (null_entry)
+ child_slot = 1;
+ ma_set_slot(mas, child_slot);
_ma_add(mas, entry, overwrite, active);
// Restore old values and continue inserting.
+ null_entry = false;
mas->min = o_min;
mas->max = o_max;
mas->node = prev_enode;
+ if (append)
+ mas->index = mas->max;
entry = leaf;
if (mt == maple_dense)
mas->last = mas->index + mt_max[mt] - 1;
if (mas->last >= last_piv)
goto complete;
- while (ma_get_pivot(prev_enode, old_end) <= mas->last &&
- old_end < slot_cnt)
- old_end++;
+ while (old_end < slot_cnt) {
+ if (old_end >= slot_cnt -1 )
+ break;
+
+ if (ma_get_pivot(prev_enode, old_end) <= mas->last)
+ old_end++;
+ else
+ break;
+ }
/* Copy remainder of node if this isn't an append */
MA_CP(cp, prev_enode, mas->node, old_end, slot_cnt - 1);
cp.dst_start = slot;
cp.dst_end = mt_slot_count(cp.dst) - 1;
cp.start_piv = mas->last;
+ cp.src_end = mt_slot_count(cp.src) - 1; // copy to the end to collapse last slot null.
ma_copy(mas, &cp);
}
rcu_assign_pointer(ms->tree->ma_root, mt_mk_root(ms->node));
}
+static inline int mas_safe_slot(struct ma_state *mas, unsigned char *slot,
+ int delta);
static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
-/*
- * Find the prev non-null entry at the same level in the tree. The prev value
- * will be mas->node[ma_get_slot(mas)] or MAS_NONE.
+/** Private
+ * mas_next_slot() - Advance mas to the next leaf slot, regardless of
+ * whether it holds an entry.  Walks up until a later sibling slot with a
+ * pivot exists, then walks back down to a leaf.  On success the slot is
+ * stored via ma_set_slot(); otherwise mas->node is set to MAS_NONE
+ * (walk passed @max, hit the root, or found an empty slot).
+ */
+static inline void mas_next_slot(struct ma_state *mas, unsigned long max)
+ __must_hold(ms->tree->lock)
+{
+ unsigned char slot;
+
+ // walk up.
+ while (1) {
+ slot = mt_parent_slot(mas->node);
+// NOTE(review): entry from walk_down keeps the stale slot value from the
+// lower level rather than re-reading mt_parent_slot() — confirm intended.
+walk_again:
+ ma_encoded_parent(mas);
+ if (mas->max > max)
+ goto no_entry;
+
+ if (slot < mt_slot_count(mas->node) - 1) {
+ if (!ma_get_safe_pivot(mas, slot + 1))
+ continue;
+ slot++;
+ goto walk_down;
+ }
+
+ if (ma_is_root(mas->node))
+ goto no_entry;
+ }
+
+
+walk_down:
+ do {
+ void *entry = NULL;
+ if (slot)
+ mas->min = ma_get_safe_pivot(mas, slot - 1);
+ mas->max = ma_get_safe_pivot(mas, slot);
+ entry = ma_get_rcu_slot(mas->node, slot);
+ // Skip entries stand in for data consumed by a neighbour; step over
+ // them, or ascend again when the node is exhausted.
+ if (xa_is_skip(entry)) {
+ if (mas->max >= max) {
+ goto no_entry;
+ } else if (slot < mt_slot_count(mas->node)) {
+ slot++;
+ goto walk_down;
+ } else {
+ goto walk_again;
+ }
+ }
+
+ mas->node = entry;
+ if (mt_is_empty(mas->node))
+ goto no_entry;
+
+ if (mt_is_leaf(mas->node))
+ goto done;
+ slot = 0;
+
+ } while (1);
+
+done:
+ ma_set_slot(mas, slot);
+ return;
+
+no_entry:
+ mas->node = MAS_NONE;
+ return;
+}
+/** Private
+ * mas_prev_slot() - Find the previous leaf slot, regardless of having an
+ * entry or not
+ *
+ * @mas: maple state; mas->node is repositioned and the slot recorded
+ * via ma_set_slot() on success.
+ * @min: lower bound; the walk stops with MAS_NONE once mas->min < min.
+ *
+ * NOTE: Not read safe - does not check for dead nodes.
+ * Not root safe, cannot be the root node.
+ */
+static inline void mas_prev_slot(struct ma_state *mas, unsigned long min)
+ __must_hold(ms->tree->lock)
+{
+ unsigned char slot, coalesce;
+
+ // Walk up.
+ while (1) {
+ slot = mt_parent_slot(mas->node);
+ ma_encoded_parent(mas);
+ if (mas->min < min)
+ goto no_entry;
+
+ if (slot) {
+ slot--;
+ goto walk_down;
+ }
+ if (ma_is_root(mas->node))
+ goto no_entry;
+ }
+
+walk_down:
+ do {
+ if (slot)
+ mas->min = ma_get_safe_pivot(mas, slot - 1);
+ mas->max = ma_get_safe_pivot(mas, slot);
+ mas->node = ma_get_rcu_slot(mas->node, slot);
+ // Unlike mas_next_slot(), an empty slot still lands on done: the
+ // caller may observe an empty mas->node here.
+ if (mt_is_empty(mas->node))
+ goto done;
+
+ if (mt_is_leaf(mas->node))
+ goto done;
+
+ // Descend through the last slot holding data at each level.
+ slot = ma_data_end(mas, mt_node_type(mas->node), &mas->max,
+ &coalesce);
+ } while (1);
+
+done:
+ ma_set_slot(mas, slot);
+ return;
+
+no_entry:
+ mas->node = MAS_NONE;
+ return;
+}
+
+/** Private
+ * mas_prev_node() - Find the prev non-null entry at the same level in the
+ * tree. The prev value will be mas->node[ma_get_slot(mas)] or MAS_NONE.
*/
static inline void mas_prev_node(struct ma_state *mas, unsigned long min)
{
unsigned char slot;
unsigned long start_piv;
+ slot = ma_get_slot(mas);
+ start_piv = ma_get_safe_pivot(mas, slot);
restart_prev_node:
level = 0;
slot = ma_get_slot(mas);
- start_piv = ma_get_safe_pivot(mas, slot);
if (ma_is_root(mas->node))
goto no_entry;
ma_encoded_parent(mas);
level++;
+ if (!mas_safe_slot(mas, &slot, -1))
+ goto ascend;
+
if (mas_dead_node(mas, start_piv))
goto restart_prev_node;
* Find the next non-null entry at the same level in the tree. The next value
* will be mas->node[ma_get_slot(mas)] or MAS_NONE.
*
+ *
+ * Note: Not safe to call with mas->node == root
*/
static inline unsigned long mas_next_node(struct ma_state *mas,
unsigned long max)
if (!ma_is_root(mas->node))
ma_encoded_parent(mas);
+ if (!mas_safe_slot(mas, &slot, 1)) {
+ if (ma_is_root(mas->node))
+ goto no_entry;
+ goto restart_next_node;
+ }
+
if (mas_dead_node(mas, start_piv))
goto restart_next_node;
}
-// Next node entry or return 0 on none.
+/** Private
+ * mas_prev_nentry() - Scan backwards from the slot before the current
+ * one for a slot in mas->node holding data with a pivot >= @min.
+ *
+ * @piv is set to the last pivot examined.  Returns true and records the
+ * slot via ma_set_slot() on success; false when slot 0 is passed or a
+ * pivot below @min is reached.
+ */
+static inline bool mas_prev_nentry(struct ma_state *mas, unsigned long min,
+ unsigned long *piv)
+{
+ unsigned long pivot;
+ unsigned char slot = ma_get_slot(mas);
+ void *entry;
+
+ if (!slot)
+ return false;
+
+ slot--;
+ do {
+ pivot = ma_get_safe_pivot(mas, slot);
+ if (pivot < min)
+ goto no_entry;
+
+ *piv = pivot;
+ entry = ma_get_rcu_slot(mas->node, slot);
+ if (!mt_is_empty(entry))
+ goto found;
+ } while (slot--);
+
+no_entry:
+ return false;
+
+found:
+ ma_set_slot(mas, slot);
+ return true;
+}
+
+/** Private
+ * next node entry
+ */
static inline bool mas_next_nentry(struct ma_state *mas, unsigned long max,
unsigned long *piv)
{
unsigned char slot = ma_get_slot(mas);
unsigned char count = mt_slot_count(mas->node);
void *entry;
- bool ret = false;
while (slot < count) {
pivot = ma_get_safe_pivot(mas, slot);
*piv = pivot;
- /* Valid pivot */;
-
entry = ma_get_rcu_slot(mas->node, slot);
- if (entry) {
- ret = true;
- break;
- }
+ if (!mt_is_empty(entry))
+ goto found;
/* Ran over the limit, this is was the last slot to try */
if (pivot >= max)
slot++;
}
- ma_set_slot(mas, slot);
- return ret;
no_entry:
return false;
+
+found:
+ ma_set_slot(mas, slot);
+ return true;
}
+
static inline unsigned long mas_next_entry(struct ma_state *mas,
unsigned long max)
{
goto retry;
return piv;
}
-/* Private
- * Combine nulls with the same pivot value
+
+/* Private
+ * mas_coalesce_node() - Re-copy mas->node via mas_partial_copy()
+ * (presumably dropping coalescible slots — see callers) and optionally
+ * replace it in the tree.
+ *
+ * @replace: replace the old node in the tree with the copy.
+ * Return: 0 if mas is in an error state (allocation failure), otherwise
+ * the hard data count of the node.
+ */
+static inline int mas_coalesce_node(struct ma_state *mas, unsigned char end,
+ unsigned char coalesce, bool replace)
+{
+ mas_partial_copy(mas, mt_slot_count(mas->node));
+ if (mas_is_err(mas))
+ return 0;
+ if (replace)
+ mt_replace(mas);
+ return ma_hard_data(end, coalesce);
+}
+
+/* Private
+ * mas_coalesce_empty() - Mark parent slot @p_slot as XA_DELETED_ENTRY
+ * (its child is empty) and record @p_slot in the maple state.
+ */
+static inline void mas_coalesce_empty(struct ma_state *mas,
+ struct maple_enode *eparent, unsigned char p_slot)
+{
+
+ ma_set_rcu_slot(eparent, p_slot, XA_DELETED_ENTRY);
+ ma_set_slot(mas, p_slot);
+}
+
+/** Private
+ *
+ * mas_rebalance() - Rebalance mas->node by acquiring other pivots from the
+ * node to the right or rebalance the left node.
*
- * Allocation of a new node can occur in mas_partial_copy.
- * If allocation fails, gaps must still be updated.
*
+ *
+ * Try to allocate 1-2 nodes, depending on if the right node will be
+ * completely consumed.
+ *
+ * returns 0 on success or the number of slots that were not filled.
+ *
+ */
+/*
+ *
+ * FIXME: Test spanning multiple levels of the tree.
+ *
+ */
+static inline void mas_coalesce(struct ma_state *mas);
+static inline int mas_rebalance(struct ma_state *mas, unsigned char end,
+ unsigned char coalesce)
+{
+ struct maple_enode *r_enode, *this_enode = mas->node;
+ unsigned long ret = 0;
+ unsigned char this_p_slot, r_p_slot; // Parent slots (this one and right)
+ unsigned char all_slots; // Total slots needed for this and right node.
+ unsigned char l_slot_cnt, r_slot_cnt; // left and right slot counts
+ unsigned char trimmed = 0; // Trimmed terminating null range from left
+ unsigned long l_piv, r_piv; // left and right pivots
+ unsigned char r_end, r_coalesce; // right node end and values that can be coalesced.
+ unsigned char node_cnt; // Number of nodes to allocate
+ unsigned long this_max = mas->max, this_min = mas->min; // ma state saves for this entry
+ unsigned long r_max, r_min; // ma state saves for right.
+ enum maple_type new_type;
+ bool p_coalesce = false; // coalesce parent.
+
+ if (coalesce)
+ coalesce--;
+ l_slot_cnt = ma_hard_data(end, coalesce);
+ // The root has no siblings to pull from; just coalesce in place.
+ if (ma_is_root(this_enode))
+ return mas_coalesce_node(mas, end, coalesce, true);
+
+ this_p_slot = mt_parent_slot(this_enode);
+ ma_encoded_parent(mas);
+ ma_set_slot(mas, this_p_slot + 1);
+ if (!mas_next_nentry(mas, ULONG_MAX, &r_piv)) {
+ // this_enode is the right-most node.
+ BUG_ON(!l_slot_cnt);
+ mas->node = this_enode;
+ mas->max = this_max;
+ mas->min = this_min;
+ ma_set_slot(mas, this_p_slot);
+ ma_encoded_parent(mas);
+ ma_set_slot(mas, this_p_slot);
+ // If there is a single entry, rebalance the parent.
+ if (mas_prev_nentry(mas, 0, &r_piv)) {
+ // Recurse on the left sibling instead.
+ mas->node =
+ ma_get_rcu_slot(mas->node, ma_get_slot(mas));
+ end = ma_data_end(mas, mt_node_type(mas->node),
+ &l_piv, &coalesce);
+ return mas_rebalance(mas, end, coalesce);
+ }
+ mas->node = this_enode;
+ mas->max = this_max;
+ mas->min = this_min;
+ ma_set_slot(mas, this_p_slot);
+ ma_encoded_parent(mas);
+ mas_coalesce(mas);
+ // NOTE(review): execution falls through after mas_coalesce() even
+ // though no right node exists here — verify a return is not missing.
+ }
+
+ // If we reached here, then the node to the right exists.
+ // set the ma_state information and save a copy for this slot.
+ mas->min = ma_get_safe_pivot(mas, ma_get_slot(mas) - 1); // safe as it is a right node.
+ mas->max = ma_get_safe_pivot(mas, ma_get_slot(mas));
+ mas->node = ma_get_rcu_slot(mas->node, ma_get_slot(mas));
+ r_min = mas->min;
+ r_max = mas->max;
+ r_enode = mas->node;
+
+ r_end = ma_data_end(mas, mt_node_type(r_enode), &l_piv, &r_coalesce);
+ r_slot_cnt = ma_hard_data(r_end, r_coalesce);
+ // Add 1 for slot 0 on the right.
+ all_slots = r_slot_cnt + 1 + l_slot_cnt;
+
+ // check if left ends in NULL, right start in NULL..
+ if ((mt_will_coalesce(ma_get_rcu_slot(this_enode, end)) ||
+ !ma_get_rcu_slot(this_enode, end)) &&
+ (mt_will_coalesce(ma_get_rcu_slot(mas->node, 0)) ||
+ !ma_get_rcu_slot(mas->node, 0))) {
+ all_slots--;
+ trimmed = 1;
+ }
+
+ // One new node unless the combined data spans two nodes.
+ node_cnt = 1;
+ if (all_slots > mt_slot_count(this_enode) - 1)
+ node_cnt++;
+
+ mas_node_cnt(mas, node_cnt);
+ if (mas_is_err(mas))
+ return ret;
+
+ // Restore the left node (this_enode)
+ mas->node = this_enode;
+ mas->max = this_max;
+ mas->min = this_min;
+ // Coalesce this_enode into a new node.
+ MA_CP(cp, mas->node, NULL, 0, end);
+ cp.dst_end = l_slot_cnt - trimmed;
+ ma_copy(mas, &cp); // cp.dst now has coalesced this_enode.
+
+ // Restore the right state.
+ mas->max = r_max;
+ mas->min = r_min;
+ cp.src = r_enode;
+ cp.src_start = 0;
+ cp.dst_end = mt_slot_count(cp.dst) - 1;
+ if (all_slots <= mt_slot_count(mas->node) - 1)
+ cp.src_end = r_end;
+ else
+ cp.dst_end = (all_slots + 1)/ 2; // Take 1/2 the entries.
+
+ ma_copy(mas, &cp); // cp.dst is now complete, place it in the tree.
+ mt_to_node(cp.dst)->parent = mt_to_node(this_enode)->parent;
+ new_type = mt_node_type(cp.dst);
+ mas->node = cp.dst;
+ mt_replace(mas);
+ l_piv = ma_get_safe_pivot(mas, cp.dst_start - 1);
+ ret = mt_slot_count(mas->node) - cp.dst_start + 1;
+ ma_encoded_parent(mas);
+ ma_set_pivot(mas->node, this_p_slot, l_piv);
+
+ mas->node = r_enode;
+ mas->max = r_max;
+ mas->min = r_min;
+ if (all_slots <= mt_slots[new_type] - 1) {
+ // Right was entirely consumed.
+ void *entry = XA_SKIP_ENTRY;
+
+ if (mas->max == ULONG_MAX)
+ entry = NULL;
+
+ ret = mt_slots[new_type] - all_slots;
+ r_p_slot = mt_parent_slot(r_enode);
+ ma_encoded_parent(mas);
+ ma_set_rcu_slot(mas->node, r_p_slot, entry);
+ mt_free(mt_to_node(r_enode));
+ p_coalesce = true;
+ goto right_done;
+ }
+
+ // Right was only partially consumed: rebuild its remainder.
+ cp.src_end = r_end;
+ cp.dst = NULL;
+ cp.dst_start = 0;
+ cp.dst_end = r_end;
+ ma_copy(mas, &cp); // cp.dst is coalesced remainder of r_enode.
+ mt_to_node(cp.dst)->parent = mt_to_node(r_enode)->parent;
+ mas->node = cp.dst;
+ r_piv = ma_get_safe_pivot(mas, cp.dst_start - 1);
+ r_p_slot = mt_parent_slot(r_enode);
+ mt_replace(mas);
+ ma_encoded_parent(mas);
+ ma_set_pivot(mas->node, r_p_slot, r_piv);
+
+
+right_done:
+ // Propagate the new left pivot across the parent slots in between.
+ while (r_p_slot-- > this_p_slot) {
+ ma_set_pivot(mas->node, r_p_slot, l_piv);
+ }
+
+ /* If there is a freed node, then mas->node must point to the parent
+ * which contained the freed node so it can also be checked for
+ * coalescing.
+ */
+ if (p_coalesce)
+ mas_coalesce(mas);
+ return ret;
+}
+
+/** Private
*
*/
-static inline int mas_coalesce(struct ma_state *mas)
+static inline void mas_coalesce_root(struct ma_state *mas)
{
- int ret = 0;
- unsigned char slot_cnt;
- unsigned long pivot, last = 0;
- struct maple_enode *m_en = mas->node;
- unsigned char s_slot = ma_get_slot(mas);
- unsigned char slot;
+ unsigned char end, coalesce, hard_data;
+ struct maple_enode *this_enode;
+ enum maple_type this_type;
+ unsigned long piv;
- slot_cnt = mt_slot_count(mas->node);
- for (slot = 0; slot < slot_cnt; slot++) {
- pivot = ma_get_safe_pivot(mas, slot);
- if (slot && !pivot)
- break;
+ this_enode = mas->node;
+ this_type = mt_node_type(this_enode);
+ end = ma_data_end(mas, this_type, &piv, &coalesce);
+ hard_data = ma_hard_data(end, coalesce);
+ if ((end <= coalesce)) {
+ if(!mt_is_leaf(this_enode)) {
+ // Remove level in tree.
+ ma_set_slot(mas, 0);
+ mas_first_node(mas, ULONG_MAX);
+ mt_to_node(mas->node)->parent =
+ mt_to_node(this_enode)->parent;
+ mas->node = mt_mk_root(mas->node);
+ mt_replace(mas);
+ return;
+ }
- if (slot && last == pivot) {
- mas_partial_copy(mas, mt_slot_count(mas->node));
- if (mas_is_err(mas)) {
- mas->node = m_en;
- goto mas_error;
+ mas->tree->ma_root = NULL;
+ mte_free(this_enode);
+ return;
+ }
+
+ if (mt_is_leaf(this_enode) && hard_data == 1) {
+ void *entry;
+ ma_set_slot(mas, 0);
+ if (!mas_first_entry(mas, ULONG_MAX)) {
+ entry = ma_get_rcu_slot(mas->node,
+ ma_get_slot(mas));
+ if (((unsigned long) (entry) & 3) != 2) {
+ rcu_assign_pointer(mas->tree->ma_root,
+ entry);
+ mte_free(this_enode);
+ return;
}
+ }
+ }
+ if (hard_data < mt_min_slots[this_type] - 1) {
+ MA_CP(cp, mas->node, NULL, 0, end);
+ ma_copy(mas, &cp);
+ if (mas_is_err(mas))
+ return;
- ret = 1;
+ mas->node = cp.dst;
+ mt_to_node(mas->node)->parent =
+ mt_to_node(this_enode)->parent;
+ mas->node = mt_mk_root(mas->node);
+ mt_replace(mas);
+ }
+}
+
+/** Private
+ * mas_coalesce() - Check prev/next node for end/start NULLs and adjust the
+ * gaps and parent pivot accordingly.
+ *
+ *
+ * Attempt to move things left with a rebalance. Upon failure, check if there
+ * is a contiguous gap from the end of this node to the start of the next.
+ *
+ * Notes:
+ * - Entries move Left, appending data at the end of the leaf
+ * - Holes move Right.
+ * - Either way, parent pivots need to be changed.
+ *
+ *
+ */
+static inline void mas_coalesce(struct ma_state *mas)
+{
+ unsigned char end, p_slot, coalesce;
+ struct maple_enode *this_enode, *eparent;
+ enum maple_type this_type;
+ unsigned long piv;
+ void *entry;
+ bool check_parent = false;
+
+ if (ma_is_root(mas->node))
+ return mas_coalesce_root(mas);
+start:
+ this_enode = mas->node;
+ this_type = mt_node_type(this_enode);
+ end = ma_data_end(mas, this_type, &piv, &coalesce);
+
+
+ /* If there is any space to save, try to reallocate */
+ if (ma_hard_data(end, coalesce) < mt_min_slots[this_type] - 1) {
+ if (mas_rebalance(mas, end, coalesce))
goto done;
+
+ if (mas_is_err(mas))
+ return;
+ }
+
+ /* Group the gaps together. Acquire any data from the next node, if
+ * necessary
+ */
+ p_slot = mt_parent_slot(this_enode);
+ eparent = mt_mk_node(mt_parent(this_enode),
+ mt_parent_enum(mas, this_enode));
+
+ entry = ma_get_rcu_slot(this_enode, end);
+ if (!mt_is_empty(entry))
+ goto check_start;
+
+ if (!end || end + 1 <= coalesce) {
+ mas_coalesce_empty(mas, eparent, p_slot);
+ check_parent = true;
+ }
+
+ // Check if next node starts with null.
+ mas_next_slot(mas, ULONG_MAX);
+ if (mas->node == MAS_NONE)
+ goto check_start;
+
+ if (mt_is_empty(mas->node) ||
+ mt_is_empty(ma_get_rcu_slot(mas->node, 0))) {
+ unsigned char slot = end;
+
+ if (check_parent)
+ slot = 0;
+
+ while (slot > 0) {
+ if (!mt_is_empty(ma_get_rcu_slot(this_enode, slot)))
+ break;
+ slot--;
}
- if (pivot == mas->max)
- break;
+ mas->node = eparent;
+ mas_update_limits(mas, p_slot, mt_node_type(mas->node));
+ mas->node = this_enode;
- last = pivot;
+ if (!slot) {
+ // Empty node...
+ mas_coalesce_empty(mas, eparent, p_slot);
+ check_parent = true;
+ piv = mas->min;
+ } else {
+ piv = ma_get_safe_pivot(mas, slot);
+ }
+
+ if (p_slot <= mt_pivot_count(eparent))
+ ma_set_pivot(eparent, p_slot, piv);
+ // Walk up checking for the old pivot and set to piv.
+
+ if (!slot)
+ goto done;
+
+ // Indicate value has been moved.
+ while (++slot <= end)
+ ma_set_rcu_slot(this_enode, slot, XA_RETRY_ENTRY);
}
+check_start:
+ mas->node = this_enode;
+ entry = ma_get_rcu_slot(this_enode, 0);
+ if (mt_is_empty(entry)) {
+ unsigned char prev_end;
+ // Check the previous node.
+ mas_prev_slot(mas, 0);
+ if (mas->node == MAS_NONE)
+ goto done;
+
+ if (!mt_is_empty(mas->node)) {
+ prev_end = ma_data_end(mas, mt_node_type(mas->node),
+ &piv, &coalesce);
+
+ if (!mt_is_empty(ma_get_rcu_slot(mas->node, prev_end)))
+ goto done;
+ } else {
+ piv = mas->min;
+ }
+
+ if (p_slot)
+ ma_set_pivot(eparent, p_slot - 1, piv);
+
+ // Walk up and set all the pivots to piv
+
+ }
done:
- // Restore slot.
- if (ret)
- mt_replace(mas);
+ mas->node = this_enode;
+ if (check_parent) {
+ check_parent = false;
+ ma_encoded_parent(mas);
+ mte_free(this_enode);
+ goto start;
+ }
-mas_error: // Regardless of allocation, update gaps.
if (mt_is_alloc(mas->tree))
ma_update_gap(mas);
- ma_set_slot(mas, s_slot);
- return ret;
+ return;
}
unsigned long this_gap = 0;
void *entry = NULL;
- if (i == pivot_cnt - 1)
- pivot = max;
- else
- pivot = _ma_get_pivot(mas->node, i, type);
+ pivot = _ma_get_safe_pivot(mas, i, type);
/* End of data in this leaf */
if (i && !pivot)
goto next;
entry = _ma_get_rcu_slot(mas->node, i, type);
- if (entry)
+ if (unlikely(xa_is_skip(entry)))
+ goto next;
+
+ if (!mt_is_empty(entry))
goto next;
this_gap = pivot - mas->index;
next = _ma_get_rcu_slot(mas->node, i, type);
mas->min = min;
mas->max = max;
- if (next) {
+ if (!mt_is_empty(next)) {
mas->node = next;
i = 0;
} else {
if (ma_is_leaf(type)) // Leaf.
ret = true;
+skip_entry:
switch (type) {
default:
for (i = ma_get_slot(mas); i < pivot_cnt; i++) {
}
next = _ma_get_rcu_slot(mas->node, i, type);
- if (!next) // Not found.
+ if (unlikely(xa_is_skip(next))) {
+ ma_set_slot(mas, i + 1);
+ goto skip_entry;
+ }
+
+ if (mt_is_empty(next)) // Not found.
goto done;
// Traverse.
ma_set_slot(mas, 0);
return __mas_walk(mas);
}
+
+/* Private
+ * mas_safe_slot() - Skip any slots that have special values.
+ *
+ * Steps *slot by @delta (expected +1 or -1) while the neighbouring slot
+ * at *slot + delta is empty/deleted/skip.  Returns true when that
+ * neighbour holds data (*slot is left adjacent to it); false when the
+ * node edge (0 or the slot count) is reached first.
+ */
+static inline int mas_safe_slot(struct ma_state *mas, unsigned char *slot,
+ int delta)
+{
+ unsigned char max = mt_slot_count(mas->node);
+ unsigned char limit = max;
+ if (0 > delta)
+ limit = 0;
+ while (*slot != limit) {
+ if (!mt_is_empty(ma_get_rcu_slot(mas->node, *slot + delta)))
+ return true;
+ *slot += delta;
+ }
+ return false;
+}
static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
{
if (!mt_dead_node(mas->node))
* Note: Does not return the zero entry.
* returns an entry.
*/
-static inline void *mt_find(struct maple_tree *mt, unsigned long start,
+void *mt_find(struct maple_tree *mt, unsigned long start,
unsigned long max)
{
MA_STATE(mas, mt, start, start);
*
* Modifies index and last to point to the newly found range.
*/
-static inline void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
unsigned long max)
{
MA_STATE(mas, mt, *index, *index);
static inline void ma_inactive_insert(struct ma_state *mas, void *entry);
static inline int mas_replace_tree(struct ma_state *mas, void *new_entry)
{
- long l_node_cnt = -1, r_node_cnt = 0;
+ long l_node_cnt = 0, r_node_cnt = 0;
unsigned int r_slot_cnt = 0, slot_cnt = 0;
long node_cnt = 0, nodes = 0;
void *entry;
mas_prev_node(mas, 0);
l_node_cnt++;
}
+
mas->node = last;
_mas_walk(&r_mas);
-
if (ma_get_slot(&r_mas) != MAPLE_NODE_SLOTS) {
unsigned long piv;
unsigned char coalesce;
nodes++;
nodes = l_node_cnt + r_node_cnt + 2;
-
node_cnt = 1; // Root node.
while (nodes) {
node_cnt += nodes;
nodes /= 8;
}
+ // FIXME: When splitting & reusing we need an extra node.
mas_node_cnt(mas, node_cnt + 1);
if (mas_is_err(mas))
return 0;
new_mas.index = mas->min;
new_mas.last = mas->min;
while (slot < mt_slot_count(mas->node)) {
- if (new_mas.index == mas->index)
+ if (new_mas.index == mas->index) {
break;
+ }
new_mas.last = ma_get_safe_pivot(mas, slot);
if (!new_mas.last && slot)
break;
entry = ma_get_rcu_slot(mas->node, slot);
if (entry) {
+ int cnt = ma_get_alloc_cnt(&new_mas);
ma_inactive_insert(&new_mas, entry);
if (mas_is_err(&new_mas))
BUG_ON(1);
+ if (cnt < ma_get_alloc_cnt(&new_mas) - 1)
+ BUG_ON(1);
}
if (new_mas.last == mas->index)
ma_set_slot(mas, mt_parent_slot(mas->node));
mas_next_node(mas, mas->index);
slot = 0;
- } while (mas->node != MAS_NONE);
+ } while (mas->node != MAS_NONE && mas->min < mas->index);
// Insert the new value.
new_mas.index = mas->index;
if (!overwrite) {
void *entry = ma_get_rcu_slot(mas->node, slot);
- if (entry && !xa_is_retry(entry))
+ if (!mt_is_empty(entry))
goto exists;
}
}
* range.
*
* Any previous pivots with no value will be set to the same pivot value.
- * Return: the number of concurrent slots that are NULL or XA_RETRY_ENTRY.
+ * Return: the number of concurrent slots that are NULL or XA_DELETED_ENTRY.
*/
static inline int ma_erase(struct ma_state *mas)
{
int slot, ret = 1;
slot = ma_get_slot(mas);
-
- ma_update_rcu_slot(mas->node, slot, XA_RETRY_ENTRY);
-
- if ((slot >= slot_cnt - 1))
- return ret;
-
+ ma_update_rcu_slot(mas->node, slot, XA_DELETED_ENTRY);
// dense nodes only need to set a single value.
if (!pivot_cnt)
return ret;
- piv_val = ma_get_pivot(mas->node, slot);
+ if ((slot >= slot_cnt - 1))
+ piv_val = mas->max;
+ else
+ piv_val = ma_get_pivot(mas->node, slot);
while ((slot < pivot_cnt - 1)) {
unsigned long this_pivot = ma_get_pivot(mas->node, slot + 1);
while (--slot >= 0) {
void *entry = ma_get_rcu_slot(mas->node, slot);
- if (entry && entry != XA_RETRY_ENTRY)
+ if (!mt_is_empty(entry))
break;
ma_set_pivot(mas->node, slot, piv_val);
ret++;
}
- /* The error on allocation failure can be ignored */
mas_coalesce(mas);
return ret;
}
entry = mas_walk(&mas);
rcu_read_unlock();
- if (xa_is_zero(entry) || xa_is_retry(entry))
+ if (xa_is_zero(entry) || mt_is_empty(entry))
return NULL;
+
return entry;
}
EXPORT_SYMBOL(mtree_load);
xa_to_value(entry), entry);
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
+ else if (xa_is_deleted(entry))
+ pr_cont("deleted (%ld)\n", xa_to_internal(entry));
+ else if (xa_is_skip(entry))
+ pr_cont("skip (%ld)\n", xa_to_internal(entry));
+ else if (xa_is_retry(entry))
+ pr_cont("retry (%ld)\n", xa_to_internal(entry));
else if (mt_is_reserved(entry))
pr_cont("UNKNOWN ENTRY (%p)\n", entry);
else
if (i < (MAPLE_RANGE64_SLOTS - 1))
last = node->pivot[i];
- else if (node->slot[i] == NULL)
+ else if (node->slot[i] == NULL && max != mt_max[mt_node_type(entry)])
break;
if (last == 0 && i > 0)
break;
if (leaf)
mt_dump_entry(node->slot[i], first, last, depth + 1);
+ else if (xa_is_deleted(node->slot[i]))
+ mt_dump_entry(node->slot[i], first, last, depth + 1);
+ else if (xa_is_skip(node->slot[i]))
+ mt_dump_entry(node->slot[i], first, last, depth + 1);
else if (node->slot[i])
mt_dump_node(node->slot[i], first, last, depth + 1);
break;
if (leaf)
mt_dump_entry(node->slot[i], first, last, depth + 1);
+ else if (xa_is_deleted(node->slot[i]))
+ mt_dump_entry(node->slot[i], first, last, depth + 1);
+ else if (xa_is_skip(node->slot[i]))
+ mt_dump_entry(node->slot[i], first, last, depth + 1);
else if (node->slot[i])
mt_dump_node(node->slot[i], first, last, depth + 1);
val = 1;
}
-
/* Test mas_pause */
val = 0;
mas_reset(&mas);
}
+
+
+#define erase_ptr(i) entry[i%2]
+#define erase_check_load(mt, i) check_load(mt, set[i], entry[i%2])
+#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2])
+
static noinline void check_erase_testset(struct maple_tree *mt)
{
- unsigned long set[] = {5015, 5014, 5017, 25, 1000,
- 1001, 1002, 1003, 1005, 0,
- 5003, 5002};
+ unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 6003, 6002, 6008, 6012, 6015,
+ 7003, 7002, 7008, 7012, 7015,
+ 8003, 8002, 8008, 8012, 8015,
+ 9003, 9002, 9008, 9012, 9015,
+ 10003, 10002, 10008, 10012, 10015,
+ 11003, 11002, 11008, 11012, 11015,
+ 12003, 12002, 12008, 12012, 12015,
+ 13003, 13002, 13008, 13012, 13015,
+ 14003, 14002, 14008, 14012, 14015,
+ 15003, 15002, 15008, 15012, 15015,
+
+ };
void *ptr = &set;
+ void *entry[2] = { ptr, mt };
void *root_node;
- check_insert(mt, set[0], ptr); // 5015
- check_insert(mt, set[1], mt); // 5014
- check_insert(mt, set[2], ptr); // 5017
- check_insert(mt, set[3], mt); // 25
- check_load(mt, set[0], ptr);
- check_load(mt, set[1], mt);
- check_load(mt, set[2], ptr);
- check_load(mt, set[3], mt);
- mt_set_non_kernel(1);
+
+ for (int i = 0; i < 4; i++)
+ erase_check_insert(mt, i);
+ for (int i = 0; i < 4; i++)
+ erase_check_load(mt, i);
+
+ mt_set_non_kernel(2);
check_erase(mt, set[1]);
- check_load(mt, set[0], ptr);
+ erase_check_load(mt, 0);
check_load(mt, set[1], NULL);
- check_load(mt, set[2], ptr);
- check_load(mt, set[3], mt);
+ for (int i = 2; i < 4; i++)
+ erase_check_load(mt, i);
- check_insert(mt, set[1], mt);
- // Check erase and load without an allocation.
+ check_erase(mt, set[2]);
+ erase_check_load(mt, 0);
+ check_load(mt, set[1], NULL);
+ check_load(mt, set[2], NULL);
+ erase_check_load(mt, 3);
+
+ erase_check_insert(mt, 1);
+ erase_check_insert(mt, 2);
+
+ for (int i = 0; i < 4; i++)
+ erase_check_load(mt, i);
+
+ // Check erase and load without an allocation.
check_erase(mt, set[1]);
- check_load(mt, set[0], ptr);
+ erase_check_load(mt, 0);
check_load(mt, set[1], NULL);
- check_load(mt, set[2], ptr);
- check_load(mt, set[3], mt);
+ for (int i = 2; i < 4; i++)
+ erase_check_load(mt, i);
// Set the newly erased node. This will produce a different allocated
// node to avoid busy slots.
root_node = mt->ma_root;
- check_insert(mt, set[1], mt);
+ erase_check_insert(mt, 1);
// The root node should be replaced to avoid writing a busy slot.
MT_BUG_ON(mt, root_node == mt->ma_root);
- check_load(mt, set[0], ptr);
+ erase_check_load(mt, 0);
check_load(mt, 5016, NULL);
- check_load(mt, set[1], mt);
+ erase_check_load(mt, 1);
check_load(mt, 5013, NULL);
- check_load(mt, set[2], ptr);
+ erase_check_load(mt, 2);
check_load(mt, 5018, NULL);
- check_load(mt, set[3], mt);
+ erase_check_load(mt, 3);
check_erase(mt, set[2]); // erase 5017 to check append
- check_load(mt, set[0], ptr);
+ erase_check_load(mt, 0);
check_load(mt, 5016, NULL);
- check_load(mt, set[1], mt);
+ erase_check_load(mt, 1);
check_load(mt, 5013, NULL);
check_load(mt, set[2], NULL);
check_load(mt, 5018, NULL);
- check_load(mt, set[3], mt);
+ erase_check_load(mt, 3);
root_node = mt->ma_root;
- check_insert(mt, set[2], ptr);
+ mt_dump(mt);
+ erase_check_insert(mt, 2);
// The root node should be replaced to avoid writing a busy slot.
MT_BUG_ON(mt, root_node == mt->ma_root);
- check_load(mt, set[0], ptr);
+ erase_check_load(mt, 0);
check_load(mt, 5016, NULL);
- check_load(mt, set[1], mt);
+ erase_check_load(mt, 1);
check_load(mt, 5013, NULL);
- check_load(mt, set[2], ptr);
+ erase_check_load(mt, 2);
check_load(mt, 5018, NULL);
- check_load(mt, set[3], mt);
+ erase_check_load(mt, 3);
check_erase(mt, set[2]); // erase 5017 to check append
check_erase(mt, set[0]); // erase 5015 to check append
- check_insert(mt, set[4], ptr); // 1000 < Should NOT split
+ erase_check_insert(mt, 4); // 1000 < Should not split.
check_load(mt, set[0], NULL);
check_load(mt, 5016, NULL);
- check_load(mt, set[1], mt);
+ erase_check_load(mt, 1);
check_load(mt, 5013, NULL);
check_load(mt, set[2], NULL);
check_load(mt, 5018, NULL);
- check_load(mt, set[3], mt);
+ erase_check_load(mt, 3); /* set[3] (25) was never erased; set[4] is checked below */
check_load(mt, 999, NULL);
check_load(mt, 1001, NULL);
- check_load(mt, set[4], ptr);
+ erase_check_load(mt, 4);
// Should be a new node.
MT_BUG_ON(mt, root_node == mt->ma_root);
// Should not have split.
MT_BUG_ON(mt, !mt_is_leaf(mt->ma_root));
- mtree_destroy(mt);
+
+ // Coalesce testing
+ erase_check_insert(mt, 0);
+ erase_check_insert(mt, 2);
+ mt_dump(mt);
+
+ for (int i = 5; i < 25; i++) {
+ erase_check_insert(mt, i);
+ for (int j = i; j >= 0; j--) {
+ erase_check_load(mt, j);
+ }
+ }
+
+ check_erase(mt, set[14]); //6015
+ for (int i = 0; i < 25; i++) {
+ if (i == 14)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+ check_erase(mt, set[16]); //7002
+ for (int i = 0; i < 25; i++) {
+ if (i == 16 || i == 14)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(1);
+ check_erase(mt, set[13]); //6012
+ for (int i = 0; i < 25; i++) {
+ if (i == 16 || i == 14 || i == 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ check_erase(mt, set[15]); //7003
+ for (int i = 0; i < 25; i++) {
+ if (i <= 16 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ check_erase(mt, set[17]); //7008 *should* cause coalesce.
+ for (int i = 0; i < 25; i++) {
+ if (i <= 17 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(1);
+ check_erase(mt, set[18]); //7012
+ mt_dump(mt);
+ for (int i = 0; i < 25; i++) {
+ if (i <= 18 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ check_erase(mt, set[19]); //7015
+ for (int i = 0; i < 25; i++) {
+ if (i <= 19 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+
+ check_erase(mt, set[20]); //8003
+ for (int i = 0; i < 25; i++) {
+ if (i <= 20 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ check_erase(mt, set[21]); //8002
+ for (int i = 0; i < 25; i++) {
+ if (i <= 21 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+
+ mt_set_non_kernel(1);
+ check_erase(mt, set[22]); //8008
+ for (int i = 0; i < 25; i++) {
+ if (i <= 22 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+ for (int i = 23; i < 25; i++) {
+ check_erase(mt, set[i]);
+ mt_dump(mt);
+ }
+ for (int i = 0; i < 25; i++) {
+ if (i >= 13) /* indices 13..24 all erased; loop bound already gives i < 25 */
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ // Shrinking tree test.
+ //
+
+ for (int i = 13; i < ARRAY_SIZE(set); i++)
+ erase_check_insert(mt, i);
+
+ mt_set_non_kernel(99);
+ for (int i = 18; i < ARRAY_SIZE(set); i++) {
+ check_erase(mt, set[i]);
+ for (int j = 0; j < ARRAY_SIZE(set); j++) {
+ if (j < 18 || j > i)
+ erase_check_load(mt, j);
+ else
+ check_load(mt, set[j], NULL);
+ }
+ }
+ mt_set_non_kernel(30);
+ for (int i = 0; i < 18; i++) {
+ check_erase(mt, set[i]);
+ for (int j = 0; j < ARRAY_SIZE(set); j++) {
+ if (j < 18 && j > i)
+ erase_check_load(mt, j);
+ else
+ check_load(mt, set[j], NULL);
+ }
+ }
+ erase_check_insert(mt, 8);
+ erase_check_insert(mt, 9);
+ check_erase(mt, set[8]);
+ mt_dump(mt);
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
}
+ mt_dump(mt);
for (i = 0; i < req_range_cnt; i += 5) {
check_mtree_alloc_rrange(mt,
mtree_init(&tree, 0);
check_erase_testset(&tree);
+ mtree_destroy(&tree);
mtree_init(&tree, 0);
/*