#include <asm/barrier.h>
//#include <linux/mm.h> // for task_size
-#define CONFIG_DEBUG_MAPLE_TREE
#define MA_ROOT_PARENT 1
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
return ma_is_leaf(mte_node_type(entry));
}
-/** Private
+/*
* We also reserve values with the bottom two bits set to '10' which are
* below 4096
*/
{
return mas->node == MAS_ROOT;
}
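+/*
+ * Illustrative test for the reserved range described above (a sketch,
+ * not a helper this patch defines):
+ *
+ *	((unsigned long)entry & 3) == 2 && (unsigned long)entry < 4096
+ *
+ * i.e. the bottom two bits are '10' and the value is below 4096.
+ */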
-static inline bool mas_is_none(struct ma_state *mas)
-{
- return mas->node == MAS_NONE;
-}
-
static inline bool mas_is_start(struct ma_state *mas)
{
return mas->node == MAS_START;
static inline bool mas_searchable(struct ma_state *mas)
{
- if (!mas->node)
- return false;
-
if (mas_is_none(mas))
return false;
return mte_parent_range_enum(parent);
}
-/** Private
+/*
+ * mte_set_parent() - Set the parent node and encode the slot.
*
* Type is encoded in the node->parent
* bit 0: 1 = root, 0 otherwise
case maple_range_64:
case maple_arange_64:
type |= 4;
- /* fallthrough */
+ fallthrough;
case maple_range_32:
type |= 2;
break;
break;
}
- BUG_ON(slot > MAPLE_NODE_SLOTS); // Only 4 bits to use.
val &= ~bitmask; // Remove any old slot number.
val |= (slot << slot_shift); // Set the slot.
val |= type;
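+/*
+ * Decoding sketch (illustrative, not part of this patch): with the
+ * layout above, bit 0 of val is the root flag, the remaining low bits
+ * hold the type set above, and the slot is recovered as
+ * (val & bitmask) >> slot_shift; masking off all of the low bits yields
+ * the parent node pointer again.
+ */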
return _mte_get_pivot(mas->node, slot, type);
}
-/** Private
+/*
* mas_get_safe_pivot() - Return the pivot or the mas->max.
*
* Return: The pivot (including mas->max for the final slot)
static inline void ma_set_pivot(struct maple_node *mn, unsigned char slot,
enum maple_type type, unsigned long val)
{
- BUG_ON(slot >= mt_slots[type]);
+ BUG_ON(slot >= mt_pivots[type]);
switch (type) {
default:
{
return mte_get_rcu_slot(mas->node, slot, mas->tree);
}
-/** Private
+/*
* mte_destroy_walk() - Free the sub-tree from @mn and below.
*
* @mn - the head of the sub-tree to free.
mte_free(mn);
}
-/** Private
- * matadd() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
+/*
+ * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
+ *
+ * Add the @dead_enode to the linked list in @mat.
*
* @mat - the ma_topiary, a linked list of dead nodes.
* @dead_enode - the node to be marked as dead and added to the tail of the list
return;
}
- //* Set the next entry.
mte_to_mat(mat->tail)->next = dead_enode;
mat->tail = dead_enode;
}
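+/*
+ * Typical use (a sketch, assuming the MA_TOPIARY() initialiser used
+ * elsewhere in this series): gather dead nodes while restructuring and
+ * release them in a single pass:
+ *
+ *	MA_TOPIARY(free, mas->tree);
+ *	mat_add(&free, old_enode);
+ *	mat_free(&free, false);
+ */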
-/** Private
+/*
* mat_free() - Free all nodes in a dead list.
*
+ * Free, or destroy walk, every node on a dead list.
+ *
* @mat - the ma_topiary linked list of dead nodes to free.
* @recursive - specifies if this sub-tree is to be freed or just the single
* node.
}
}
-/** Private
+/*
* ma_set_rcu_slot() - Set a node's rcu slot.
*
* @mn - the maple node for the operation
static inline void ma_set_rcu_slot(struct maple_node *mn,
unsigned char slot, enum maple_type type, void *val)
{
+ BUG_ON(slot >= mt_slots[type]);
switch (type) {
default:
break;
case maple_range_64:
case maple_leaf_64:
- BUG_ON(slot >= MAPLE_RANGE64_SLOTS);
rcu_assign_pointer(mn->mr64.slot[slot], val);
break;
case maple_arange_64:
- BUG_ON(slot >= MAPLE_ARANGE64_SLOTS);
rcu_assign_pointer(mn->ma64.slot[slot], val);
break;
}
}
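+/*
+ * Note: rcu_assign_pointer() above is what lets lockless readers observe
+ * a fully initialised entry; it orders the stores initialising the entry
+ * before the store publishing it in the slot.
+ */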
-/** Private
+/*
* mte_set_rcu_slot() - Set an encoded node's rcu slot.
*/
static inline void mte_set_rcu_slot(const struct maple_enode *mn,
{
ma_set_rcu_slot(mte_to_node(mn), slot, mte_node_type(mn), val);
}
-/** Private
+/*
* mas_dup_state() - duplicate the internal state of a ma_state.
*
* @dst - the destination to store the state information
dst->min = src->min;
mas_set_slot(dst, mas_get_slot(src));
}
-/** Private
+/*
* mas_descend() - Descend into the slot stored in the ma_state.
*
* @mas - the maple state.
if (!max || min == ULONG_MAX) {
if (mas->node == a_enode) {
//FIXME: Restart and retry?
- printk("Dead node %p\n", mas_mn(mas));
MT_BUG_ON(mas->tree, mas->node == a_enode);
}
mas->node = a_enode;
ma_free_alloc(node);
mas->alloc = NULL;
}
-/** Private
+/*
* Check if there was an error allocating and do the allocation if necessary
* If there are allocations, then free them.
*/
return mas->alloc;
}
-/** Private
+/*
* Sets up maple state for operations by setting mas->min = 0 and mas->node to
* certain values.
* returns:
return entry;
}
-/** Private
+/*
* mas_data_end() - Find the end of the data (slot). Sets the value of the
* last pivot to @last_piv.
*
{
int slot = 0;
unsigned long piv = mas->min, prev_piv = mas->min;
+
for (; slot < mt_slot_count(mas->node); slot++) {
piv = _mas_get_safe_pivot(mas, slot, type);
if (piv >= mas->max)
static inline unsigned char mas_data_end(const struct ma_state *mas)
{
unsigned long l;
+
return _mas_data_end(mas, mte_node_type(mas->node), &l);
}
-/** Private
+/*
* mas_leaf_max_gap() - Returns the largest gap in a leaf node
*
* @mas - the maple state
return max_gap;
}
-/** Private
+/*
* mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
*/
static inline unsigned long mas_max_gap(struct ma_state *mas)
slot = mte_parent_slot(gaps.node);
goto ascend;
}
-/** Private
+/*
* mas_update_gap() - Update a node's gaps and propagate up if necessary.
*
* @mas - the maple state.
}
-/** Private
+/*
* mas_first_node() - Finds the first node in mas->node and returns the pivot,
* or mas->max if no node is found. Node is returned as mas->node which may be
* MAS_NONE.
mas->node = MAS_NONE;
return mas->max;
}
-/** Private
+/*
* mas_first_entry() - Returns the pivot which points to the entry with the
* lowest index.
*
}
}
-/** Private
+/*
* mas_adopt_children() - Set the parent pointer of all nodes in @parent to
* @parent with the slot encoded.
*
}
}
-/** Private
+/*
* mas_replace() - Replace a maple node in the tree with mas->node. Uses the
* parent encoding to locate the maple node in the tree.
*
}
}
-/** Private
- * mas_check_split_parent() - Check to see if this node has the correct parent
- * set or not.
- * @mas - the maple state
- * @slot - the slot to examine.
+/*
+ * mas_new_child() - Find the new child of a node.
+ * @mas: the maple state
+ * @child: the maple state to store the child.
+ *
*/
-static inline struct maple_enode *mas_check_split_parent(struct ma_state *mas,
- unsigned char slot)
-{
- void *entry = mas_get_rcu_slot(mas, slot);
-
- if (!entry)
- return NULL;
-
- if (mte_parent(entry) == entry) {
- printk("%s: Dead node %p", __func__, entry);
- MT_BUG_ON(mas->tree, 1);
- }
- if (mte_parent(entry) != mas_mn(mas))
- return NULL;
-
- mas_set_slot(mas, slot);
- return entry;
-}
-
-static inline struct maple_enode *mas_find_l_split(struct ma_state *mas)
+static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
{
- unsigned char i, end = mas_data_end(mas);
- struct maple_enode *en = NULL;
+ unsigned char slot, end = mt_slot_count(mas->node);
+ struct maple_enode *entry;
+
- for (i = 0; i <= end; i++) {
- if ((en = mas_check_split_parent(mas, i)))
+ for (slot = mas_get_slot(mas); slot < end; slot++) {
+ entry = mas_get_rcu_slot(mas, slot);
+ if (!entry) // end of node data.
break;
+
+ if (mte_parent(entry) == mas_mn(mas)) {
+ mas_set_slot(mas, slot);
+ mas_dup_state(child, mas);
+ mas_set_slot(mas, slot + 1);
+ mas_descend(child);
+ return true;
+ }
}
- return en;
+ return false;
}
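+/*
+ * Contract sketch: on success, @child has descended into the first new
+ * child at or after the slot stored in @mas, and the slot in @mas has
+ * been advanced past it, so repeated calls enumerate every new child of
+ * the node.
+ */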
-static inline struct maple_enode *mas_find_r_split(struct ma_state *mas)
-{
- unsigned char i = mas_data_end(mas);
- struct maple_enode *en;
-
- do {
- en = mas_check_split_parent(mas, i);
- } while (!en && i--);
- return en;
-}
-/** Private
+/*
* mab_shift_right() - Shift the data in mab right. Note, does not clean out the
* old data or set b_node->b_end.
*
unsigned char shift)
{
unsigned char b_end = b_node->b_end - 1;
+
do {
b_node->pivot[b_end + shift] = b_node->pivot[b_end];
b_node->slot[b_end + shift] = b_node->slot[b_end];
} while (b_end--);
}
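+/*
+ * Example use (from this series): mast_rebalance_from_siblings() shifts
+ * the big node right by the left sibling's width, then copies that
+ * sibling in at offset 0, effectively prepending its data.
+ */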
-/** Private
+/*
* mab_middle_node() - Check if a middle node is needed (unlikely)
*
* @b_node: the maple_big_node that contains the data.
return false;
}
-/** Private
+/*
* mab_no_null_split() - ensure the split doesn't fall on a NULL
*
* @b_node: the maple_big_node with the data
unsigned char split, unsigned char slot_cnt)
{
if (!b_node->slot[split]) {
- if (split < slot_cnt - 1)
+ /*
+ * If the split is less than the max slot and the right side will
+ * still be sufficient, then increment the split on NULL.
+ */
+ if ((split < slot_cnt - 1) &&
+ ((b_node->b_end - split) > (mt_min_slots[b_node->type])))
split++;
else
split--;
return split;
}
-/** Private
+/*
* mab_calc_split() - Calculate the split location and if there needs to be two
* splits.
*
int split = b_node->b_end / 2; // Assume equal split.
unsigned char slot_cnt = mt_slots[b_node->type];
- if (ma_is_leaf(b_node->type) &&
- mab_middle_node(b_node, split, slot_cnt)) {
+ if (mab_middle_node(b_node, split, slot_cnt)) {
split = b_node->b_end / 3;
*mid_split = split * 2;
} else {
*mid_split = 0;
/* Avoid having a range less than the slot count unless it
- * causes one node to be deficient. */
- /* FIXME: Linear allocations will cause either a wasted slot
- * as is, or will cause a deficient node with not enough entries
+ * causes one node to be deficient.
+ * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
*/
while (((b_node->pivot[split] - b_node->min) < slot_cnt - 1) &&
- (split < slot_cnt) &&
- (b_node->b_end - split > mt_min_slots[b_node->type]))
+ (split < slot_cnt - 1) &&
+ (b_node->b_end - split > mt_min_slots[b_node->type] - 1))
split++;
}
return split;
}
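+/*
+ * Worked example (illustrative): with b_end = 9 and 16 slots, no middle
+ * node is needed and split starts at 4; the loop above then advances the
+ * split while the left range would cover fewer than slot_cnt - 1 values,
+ * stopping before the right side would drop below the minimum occupancy.
+ */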
-/** Private
+/*
* mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
* and set @b_node->b_end to the next free slot.
*
unsigned char mab_start)
{
int i, j;
+
for (i = mas_start, j = mab_start; i <= mas_end; i++, j++) {
b_node->slot[j] = mas_get_rcu_slot(mas, i);
- if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree)) {
+ if (!mte_is_leaf(mas->node) && mt_is_alloc(mas->tree))
b_node->gap[j] = mte_get_gap(mas->node, i);
- }
+
if (i < mt_pivot_count(mas->node)) {
b_node->pivot[j] = mas_get_safe_pivot(mas, i);
} else {
}
b_node->b_end = j;
}
-/** Private
+/*
* mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
*
* @b_node: the maple_big_node that has the data
int i, j = 0;
for (i = mab_start; i <= mab_end; i++, j++) {
- if(j && !b_node->pivot[i])
+ if (j && !b_node->pivot[i])
break;
mas->max = b_node->pivot[i];
}
}
-/** Private
+/*
* mas_descend_adopt() - Descend through a sub-tree and adopt children who do
* not have the correct parent set. Follow the nodes whose parent is already
* correct, as they are the new entries which need to be followed to find other
* new nodes.
*/
static inline void mas_descend_adopt(struct ma_state *mas)
{
- struct maple_enode *l_enode, *r_enode;
+ struct ma_state list[3], next[3];
+ int i, n;
- MA_STATE(l_mas, mas->tree, mas->index, mas->last);
- MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ mas_dup_state(&next[0], mas);
+ for (i = 0; i < 3; i++) {
+ mas_dup_state(&list[i], mas);
+ mas_set_slot(&list[i], 0);
+ }
- mas_dup_state(&l_mas, mas);
- mas_dup_state(&r_mas, mas);
- while (!mte_is_leaf(l_mas.node)) {
- if (!(l_enode = mas_find_l_split(&l_mas))) {
- mas_adopt_children(&l_mas, l_mas.node);
- mas_dup_state(&l_mas, &r_mas);
- if (!(l_enode = mas_find_l_split(&l_mas))) {
- mas_adopt_children(&r_mas, r_mas.node);
- break;
- }
- }
+ while (!mte_is_leaf(list[0].node)) {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&list[i]))
+ continue;
- if (!(r_enode = mas_find_r_split(&r_mas))) {
- mas_adopt_children(&r_mas, r_mas.node);
- mas_dup_state(&r_mas, &l_mas);
- r_enode = mas_find_r_split(&r_mas);
+ if (i && list[i-1].node == list[i].node)
+ continue;
+
+ while ((n < 3) && (mas_new_child(&list[i], &next[n])))
+ n++;
+
+ mas_adopt_children(&list[i], list[i].node);
}
- mas_adopt_children(&l_mas, l_mas.node);
- if (r_mas.node != l_mas.node)
- mas_adopt_children(&r_mas, r_mas.node);
+ while (n < 3)
+ next[n++].node = MAS_NONE;
- mas_descend(&l_mas);
- mas_descend(&r_mas);
+ for (i = 0; i < 3; i++) { // descend.
+ mas_dup_state(&list[i], &next[i]);
+ mas_set_slot(&list[i], 0);
+ }
}
}
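+/*
+ * Note (assumed invariant): a split or rebalance creates at most three
+ * new nodes per level, so three maple states are enough to follow every
+ * path that still has children to adopt.
+ */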
-/** Private
+/*
* mas_store_b_node() - Store an @entry into the b_node while also copying the
* data from a maple encoded node.
*
static inline bool mas_node_walk(struct ma_state *mas, enum maple_type type,
unsigned long *range_min, unsigned long *range_max);
-/** Private
+/*
* mas_prev_sibling() - Find the previous node with the same parent.
*
* @mas: the maple state
*/
static inline bool mas_prev_sibling(struct ma_state *mas)
{
- unsigned p_slot = mte_parent_slot(mas->node);
+ unsigned int p_slot = mte_parent_slot(mas->node);
if (mte_is_root(mas->node))
return false;
return true;
}
-/** Private
+/*
* mas_next_sibling() - Find the next node with the same parent.
*
* @mas: the maple state
static inline bool mas_next_sibling(struct ma_state *mas)
{
unsigned char p_end, p_slot = mte_parent_slot(mas->node);
+
MA_STATE(parent, mas->tree, mas->index, mas->last);
if (mte_is_root(mas->node))
return true;
}
-/** Private
+static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
+{
+ if (enode)
+ return enode;
+
+ return ma_enode_ptr(MAS_NONE);
+}
+/*
* mast_topiary() - Add the portions of the tree to the removal list; either to
* be freed or discarded (destroy walk).
*
mat_add(mast->destroy, mas_get_rcu_slot(mast->orig_r, slot));
}
-/** Private
+/*
* mast_rebalance_from_siblings() - Rebalance from nodes with the same parents.
* Check the right side, then the left. Data is copied into the @mast->bn.
*
*/
static inline bool mast_rebalance_from_siblings(struct maple_subtree_state *mast)
{
- unsigned char end;
struct maple_enode *left = mast->orig_l->node;
struct maple_enode *right = mast->orig_r->node;
+ unsigned char b_end = mast->bn->b_end;
+ unsigned char end;
+
if (mas_next_sibling(mast->orig_r)) {
end = mas_data_end(mast->orig_r);
- mas_mab_cp(mast->orig_r, 0, end, mast->bn, mast->bn->b_end);
+ mas_mab_cp(mast->orig_r, 0, end, mast->bn, b_end);
mat_add(mast->free, right);
if (right == left)
mast->orig_l->node = mast->orig_r->node;
return true;
}
if (mas_prev_sibling(mast->orig_l)) {
- unsigned char b_end = mast->bn->b_end;
end = mas_data_end(mast->orig_l);
mab_shift_right(mast->bn, end + 1);
mas_mab_cp(mast->orig_l, 0, end, mast->bn, 0);
static inline void mas_prev_node(struct ma_state *mas, unsigned long limit);
static inline unsigned long mas_next_node(struct ma_state *mas,
unsigned long max);
-/** Private
+/*
* mast_rebalance_from_cousins() - Rebalance from nodes with different parents.
* Check the right side, then the left. Data is copied into the @mast->bn.
*
mas_set_slot(mast->l, mas_get_slot(mast->l) + end + 1);
return true;
}
-/** Private
- * mast_ascend_free() - ascend the original left and right sides and add the
- * previous nodes to the free list. Set the slots to point to the correct location
- * in the new nodes.
- *
+/*
+ * mast_ascend_free() - Add current original maple state nodes to the free list
+ * and ascend.
* @mast: the maple subtree state.
+ *
+ * Ascend the original left and right sides and add the previous nodes to the
+ * free list. Set the slots to point to the correct location in the new nodes.
*/
static inline void
mast_ascend_free(struct maple_subtree_state *mast)
&range_min, &range_max);
}
-/** Private
+/*
* mas_new_ma_node() - Create and return a new maple node. Helper function.
- *
* @mas: the maple state with the allocations.
* @b_node: the maple_big_node with the type encoding.
+ *
+ * Use the node type from the maple_big_node to allocate a new node from the
+ * maple_state. This function exists mainly for code readability.
+ *
* Returns: A new maple encoded node
*/
static inline struct maple_enode
{
return mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)), b_node->type);
}
-/** Private
+/*
* mas_mab_to_node() - Set up right and middle nodes
*
* @mas: the maple state that contains the allocations.
}
-/** Private
+/*
* mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
* pointer.
* @b_node - the big node to add the entry
b_node->pivot[b_node->b_end++] = mas->max;
}
-/** Private
- * mas_set_split_parent() - combine_separate helper function. Sets the parent
+/*
+ * mas_set_split_parent() - mas_spanning_rebalance() helper function. Sets the parent
* of @mas->node to either @left or @right, depending on @slot and @split
*
* @mas - the maple state with the node that needs a parent
struct maple_enode *right,
unsigned char *slot, unsigned char split)
{
- if (!mas->node)
+ if (mas_is_none(mas))
return;
if ((*slot) <= split)
mte_set_parent(mas->node, left, *slot);
- else
+ else if (right)
mte_set_parent(mas->node, right, (*slot) - split - 1);
(*slot)++;
}
-/** Private
+/*
+ * mte_mid_split_check() - Check if the next node passes the mid-split
+ */
+static inline void mte_mid_split_check(struct maple_enode **l,
+ struct maple_enode **r,
+ struct maple_enode *right,
+ unsigned char slot,
+ unsigned char *split,
+ unsigned char mid_split)
+{
+ if (*r == right)
+ return;
+
+ if (slot < mid_split)
+ return;
+
+ *l = *r;
+ *r = right;
+ *split = mid_split;
+}
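+/*
+ * The callers below run this before each of the three parent
+ * assignments, so the (l, r, split) window advances at most once: when
+ * @slot first reaches @mid_split. After that, *r == @right
+ * short-circuits the check.
+ */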
+
+/*
* mast_set_split_parents() - Helper function to set three nodes' parents. Slot
* is taken from @mast->l.
*
*/
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
struct maple_enode *left,
+ struct maple_enode *middle,
struct maple_enode *right,
- unsigned char split)
+ unsigned char split,
+ unsigned char mid_split)
{
- unsigned char slot = mas_get_slot(mast->l);
+ unsigned char slot;
+ struct maple_enode *l = left;
+ struct maple_enode *r = right;
+
+ if (mas_is_none(mast->l))
+ return;
+
+ if (middle)
+ r = middle;
+
+ slot = mas_get_slot(mast->l);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ // Set left parent.
+ mas_set_split_parent(mast->l, l, r, &slot, split);
- mas_set_split_parent(mast->l, left, right, &slot, split);
- mas_set_split_parent(mast->m, left, right, &slot, split);
- mas_set_split_parent(mast->r, left, right, &slot, split);
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ // Set middle parent.
+ mas_set_split_parent(mast->m, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ // Set right parent.
+ mas_set_split_parent(mast->r, l, r, &slot, split);
}
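+/*
+ * Example (illustrative): with a middle node, slots 0..@split take @left
+ * as their parent, slots @split+1..@mid_split take @middle, and the rest
+ * take @right; without a middle node, @split alone partitions the slots
+ * between @left and @right.
+ */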
static inline void mas_wmb_replace(struct ma_state *mas,
struct ma_topiary *free,
struct ma_topiary *destroy)
{
+ /*
+ * Before replacing the tree items, be sure the old data is marked dead
+ * across all CPUs.
+ */
smp_wmb();
// Insert the new data in the tree
} while (!mte_is_root(mast->orig_l->node));
} else if ((mast->orig_l->node != mas->node) &&
(mast->l->depth > mas->tree->ma_height)) {
- mat_add(mast->free, mas->node);
+ mat_add(mast->free, mas->node);
}
mat_add(mast->free, mast->orig_l->node);
unsigned char split,
unsigned char mid_split)
{
- mast->l->node = left;
- mast->m->node = middle;
- mast->r->node = right;
+ mast->l->node = mte_node_or_none(left);
+ mast->m->node = mte_node_or_none(middle);
+ mast->r->node = mte_node_or_none(right);
mast->l->min = mast->orig_l->min;
mast->l->max = mast->bn->pivot[split];
mast->bn->type = mte_node_type(mast->orig_l->node);
}
-/** Private
- *
- * mas_combine_separate() - Follow the tree upwards from @l_mas and @r_mas for
- * @count, or until the root is hit. First @b_node is split into two entries
- * which are inserted into the next iteration of the loop. @b_node is returned
- * populated with the final iteration. @mas is used to obtain allocations.
+/*
*
+ * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
+ * @mas: The starting maple state
+ * @mast: The maple_subtree_state, keeps track of 4 maple states.
+ * @count: The estimated count of iterations needed.
*
- * orig_l_mas keeps track of the nodes that will remain active by using
- * orig_l_mas->index and orig_l_mas->last to account of what has been copied
- * into the new sub-tree. The update of orig_l_mas->last is used in mas_consume
- * to find the slots that will need to be either freed or destroyed.
- * orig_l_mas->depth keeps track of the height of the new sub-tree in case the
- * sub-tree becomes the full tree.
+ * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
+ * is hit. First @b_node is split into two entries which are inserted into the
+ * next iteration of the loop. @b_node is returned populated with the final
+ * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
+ * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
+ * to account for what has been copied into the new sub-tree. The update of
+ * orig_l_mas->last is used in mas_consume to find the slots that will need to
+ * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
+ * the new sub-tree in case the sub-tree becomes the full tree.
*
* Returns the number of elements in b_node during the last loop.
*/
-static inline int mas_combine_separate(struct ma_state *mas,
- struct maple_subtree_state *mast,
- unsigned char count)
+static inline int mas_spanning_rebalance(struct ma_state *mas,
+ struct maple_subtree_state *mast,
+ unsigned char count)
{
unsigned char split, mid_split;
unsigned char slot = 0;
mast->r = &r_mas;
mast->free = &free;
mast->destroy = &destroy;
+ l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
- /* MA_START doesn't work here */
- l_mas.node = r_mas.node = m_mas.node = NULL;
-
+ MT_BUG_ON(mas->tree, mast->orig_l->depth != mast->orig_r->depth);
+ mast->orig_l->depth = 0;
mast_topiary(mast);
- while(count--) {
+ while (count--) {
mast_setup_bnode_for_split(mast);
split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
&mid_split);
- mast_set_split_parents(mast, left, right, split);
+ mast_set_split_parents(mast, left, middle, right, split,
+ mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
/* Copy data from next level in the tree to mast->bn from next iteration */
mast_topiary(mast);
mast->orig_l->last = mast->orig_l->max;
- if(mast_sufficient(mast))
+ if (mast_sufficient(mast))
continue;
// Try to get enough data for the next iteration.
if (!count)
count++;
}
-
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
mte_node_type(mast->orig_l->node));
mast->orig_l->depth++;
- mab_mas_cp(mast->bn, 0, mast->bn->b_end, &l_mas);
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas);
mte_set_parent(left, l_mas.node, slot);
if (middle)
mte_set_parent(middle, l_mas.node, ++slot);
return mast->bn->b_end;
}
+/*
+ * mas_rebalance() - Rebalance a given node.
+ *
+ * @mas: The maple state
+ * @b_node: The big maple node.
+ *
+ * Rebalance two nodes into a single node or two new nodes that are sufficient.
+ * Continue upwards until tree is sufficient.
+ *
+ * Returns the number of elements in b_node during the last loop.
+ */
static inline int mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
char empty_cnt = mas->full_cnt * -1;
struct maple_subtree_state mast;
+ unsigned char shift, b_end = ++b_node->b_end;
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
mas_dup_state(&l_mas, mas);
mas_dup_state(&r_mas, mas);
- b_node->b_end++;
if (mas_next_sibling(&r_mas)) {
- mas_mab_cp(&r_mas, 0, mas_data_end(&r_mas), b_node,
- b_node->b_end);
+ mas_mab_cp(&r_mas, 0, mas_data_end(&r_mas), b_node, b_end);
r_mas.last = r_mas.index = r_mas.max;
} else {
- unsigned char shift, b_end = b_node->b_end;
mas_prev_sibling(&l_mas);
shift = mas_data_end(&l_mas) + 1;
mab_shift_right(b_node, shift);
l_mas.index = l_mas.last = l_mas.min;
}
- return mas_combine_separate(mas, &mast, empty_cnt);
+ return mas_spanning_rebalance(mas, &mast, empty_cnt);
}
static inline bool mas_split_final_node(struct maple_subtree_state *mast,
if ((b_node->b_end < mt_min_slot_cnt(mas->node)) &&
(!mte_is_root(mas->node)) &&
- (mas->tree->ma_height > 1) )
+ (mas->tree->ma_height > 1))
return mas_rebalance(mas, b_node);
if (b_node->b_end >= mt_slot_count(mas->node))
if (mt_is_alloc(mas->tree)) {
//FIXME: arch_get_mmap_end? mas->index = TASK_SIZE / PAGE_SIZE - 1;
unsigned long mmap_end = 0x2000000000000UL;
+
if (mas->index < mmap_end - 1)
- mte_set_pivot(mas->node, slot++, mmap_end - 1);
+ mte_set_pivot(mas->node, slot++, mmap_end - 1);
mte_set_rcu_slot(mas->node, slot, XA_ZERO_ENTRY);
mte_set_pivot(mas->node, slot++, mt_max[type]);
}
void *contents, bool overwrite)
{
int ret = 1;
+
if (xa_is_node(mas->tree->ma_root))
return 0;
mas_set_err(mas, -EEXIST);
return 0;
}
-/** Private
+/*
*
* mas_is_span_() - Set span_enode if there is no value already and the
* entry being written spans this node's slot or touches the end of this slot and
return false;
if (!mte_is_leaf(mas->node)) { // Internal nodes.
- if ( mas->last < piv) // Fits in the slot.
+ if (mas->last < piv) // Fits in the slot.
return false;
- if (entry && piv == mas->last) // Writes to the end of the child node, but has a value.
+ if (entry && piv == mas->last) // Writes a value to the end of the child node
return false;
} else { // A leaf node.
if (mas->last < mas->max) // Fits in the node, but may span slots.
else
mas->full_cnt--;
}
-/** Private
+/*
*
* mas_wr_walk(): Walk the tree for a write. Tracks extra information which
* is used in special cases of a write.
return ret;
}
-static inline unsigned char mas_extend_null( struct ma_state *l_mas,
+static inline unsigned char mas_extend_null(struct ma_state *l_mas,
struct ma_state *r_mas)
{
unsigned char l_slot = mas_get_slot(l_mas);
return r_slot;
}
/*
- * Private
+ *
* __mas_walk(): Locates a value and sets the mas->node and slot accordingly.
* range_min and range_max are set to the range which the entry is valid.
* Returns true if mas->node is a leaf.
next = mas_get_rcu_slot(mas, mas_get_slot(mas));
// Traverse.
+ mas->depth++;
mas->max = *range_max;
mas->min = *range_min;
if (unlikely(mt_is_empty(next)))
}
return ret;
}
-/** Private
+/*
*
* mas_spanning_store() - Create a subtree with the store operation completed
* and new nodes where necessary, then place the sub-tree in the actual tree.
else
node_cnt = 1 + -1 * mas->full_cnt * 2; // For rebalance upwards.
- /* Node rebalancing may occur due to a store, so there may be two new
- * entries per level plus a new root. */
+ /* Node rebalancing may occur due to this store, so there may be two new
+ * entries per level plus a new root.
+ */
node_cnt += 1 + mas->tree->ma_height * 2;
mas_node_cnt(mas, node_cnt);
if (mas_is_err(mas))
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
+ // FIXME: Is this needed?
+#if 0
mas_dup_state(&l_mas, mas);
mas->last = mas->index;
mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max);
mas->index = mas->last = l_mas.last;
mas_node_walk(mas, mte_node_type(mas->node), &range_min, &range_max);
-
mas_dup_state(mas, &l_mas);
+#endif
+
// Set up right side.
mas_dup_state(&r_mas, mas);
- r_mas.last++;
+ r_mas.depth = mas->depth;
+ if (r_mas.last + 1) // Avoid overflow.
+ r_mas.last++;
+
r_mas.index = r_mas.last;
mas_set_slot(&r_mas, 0);
__mas_walk(&r_mas, &range_min, &range_max);
// Set up left side.
mas_dup_state(&l_mas, mas);
+ l_mas.depth = mas->depth;
mas_set_slot(&l_mas, 0);
__mas_walk(&l_mas, &range_min, &range_max);
+ MT_BUG_ON(mas->tree, l_mas.depth != r_mas.depth);
+
if (!entry) {
mas_extend_null(&l_mas, &r_mas);
mas->index = l_mas.index;
count = mas_cnt_positive(mas) + mas->tree->ma_height - mas->depth + 1;
// Combine l_mas and r_mas and split them up evenly again.
- l_mas.depth = 0;
- return mas_combine_separate(mas, &mast, count);
+ return mas_spanning_rebalance(mas, &mast, count);
}
static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)
int ret = 0;
if (mas_start(mas) || (mas_is_none(mas) || mas->node == MAS_ROOT))
- ret = ma_root_ptr(mas, entry, content, overwrite);
+ ret = ma_root_ptr(mas, entry, content, overwrite);
if (mas_is_err(mas))
return NULL;
slot = mas_get_slot(mas);
slot_cnt = mt_slot_count(mas->node);
content = mas_get_rcu_slot(mas, slot);
- if (!overwrite && ((mas->last > r_max) || content )) {
+ if (!overwrite && ((mas->last > r_max) || content)) {
mas_set_err(mas, -EEXIST);
goto exists;
}
}
static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
-/** Private
+/*
* mas_prev_node() - Find the prev non-null entry at the same level in the
* tree. The prev value will be mas->node[mas_get_slot(mas)] or MAS_NONE.
*/
static inline void mas_prev_node(struct ma_state *mas, unsigned long limit)
{
- int level;
+ unsigned long pivot, start_piv, last_pivot, min;
int slot = mas_get_slot(mas);
- unsigned long start_piv;
+ struct maple_enode *mn;
+ int level;
+
start_piv = mas_get_safe_pivot(mas, slot);
restart_prev_node:
goto no_entry;
while (1) {
- unsigned long min;
slot = mte_parent_slot(mas->node);
mas_ascend(mas);
level++;
slot--;
do {
- struct maple_enode *mn;
- unsigned long last_pivot;
- unsigned long pivot = mas_get_safe_pivot(mas, slot);
+ pivot = mas_get_safe_pivot(mas, slot);
min = mas_get_safe_lower_bound(mas, slot);
if (pivot < limit)
if (level == 1) {
mas_set_slot(mas, slot);
mas->node = mn;
- if (mas_dead_node(mas, start_piv)) {
+ if (mas_dead_node(mas, start_piv))
goto restart_next_node;
- }
+
return pivot;
}
}
-/** Private
+/*
* mas_prev_nentry() - Find the previous node entry.
*/
static inline bool mas_prev_nentry(struct ma_state *mas, unsigned long limit,
mas_set_slot(mas, slot);
return true;
}
-/** Private
+/*
* mas_next_nentry() - Next node entry. Set the @mas slot to the next valid
* entry and range_start to the start value for that entry. If there is no
* entry, returns false.
mas_set_slot(mas, slot);
return true;
}
-/** Private
+/*
*
* mas_last_entry() - Returns the pivot which points to the entry with the
* highest index.
* @mas slot is set to the entry location.
* @limit is the minimum index to check.
*
*/
-static inline void* mas_last_entry(struct ma_state *mas,
+static inline void *mas_last_entry(struct ma_state *mas,
unsigned long limit)
{
unsigned long prev_min, prev_max, range_start = 0;
unsigned char slot = 1;
+ void *entry;
if (mas_start(mas) || mas_is_none(mas))
return NULL;
while (range_start < limit) {
mas_set_slot(mas, slot);
if (!mas_next_nentry(mas, limit, &range_start)) {
- void *entry = mas_get_rcu_slot(mas, slot - 1);
+ entry = mas_get_rcu_slot(mas, slot - 1);
if (mte_is_leaf(mas->node)) {
mas->index = range_start - 1;
mas->index = mte_get_pivot(mas->node, slot - 1);
return entry;
}
+
mas->max = prev_max;
mas->min = prev_min;
mas->node = entry;
return NULL;
}
-/** Private
+/*
*
* __mas_next() - Set the @mas->node to the next entry and the range_start to
* the beginning value for the entry. Does not check beyond @limit.
void *entry = NULL;
unsigned long index = mas->index;
unsigned char slot = mas_get_slot(mas);
+
mas_set_slot(mas, slot + 1);
retry:
return entry;
}
-/** Private
+/*
*
* _mas_prev() - Find the previous entry from the current ma state.
* @mas: the current maple state (must have a valid slot)
*/
-static inline void* _mas_prev(struct ma_state *mas, unsigned long limit)
+static inline void *_mas_prev(struct ma_state *mas, unsigned long limit)
{
unsigned long max = mas->max;
unsigned char slot;
mas_set_slot(mas, mt_slot_count(mas->node));
}
- if (mas_is_none(mas))
+ if (mas_is_none(mas)) {
+ mas->index = 0;
return NULL;
+ }
mas->last = max;
slot = mas_get_slot(mas);
void *mas_prev(struct ma_state *mas, unsigned long min)
{
void *entry;
- if (mas->node && !mas_searchable(mas))
+
+ if (!mas->index) // Nothing comes before 0.
return NULL;
- if (!mas->node)
+ if (mas_is_none(mas))
mas->node = MAS_START;
+ if (!mas_searchable(mas))
+ return NULL;
+
if (mas_is_start(mas)) {
mas_start(mas);
return mas_last_entry(mas, ULONG_MAX);
switch (type) {
default:
slot = mas_get_slot(mas);
+ fallthrough;
case maple_leaf_64:
min = mas_get_safe_lower_bound(mas, slot);
for (; slot <= pivot_cnt; slot++) {
return found;
}
-/** Private
+/*
* _mas_range_walk(): A walk that supports returning the range in which an
* index is located.
*
return true;
}
/**
* mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
*
* Some users need to pause a walk and drop the lock they're holding in
mas->last = gap_max;
mas->index = mas->last - size + 1;
}
-static void _mas_empty_or_single_unmapped_area(struct ma_state *mas,
+static void _mas_empty_or_single_empty_area(struct ma_state *mas,
unsigned long min, unsigned long max, unsigned long size,
bool fwd)
{
unsigned long start = 0;
+
if (!mas_is_none(mas))
start++; // mas_is_ptr
mas->index = max;
}
-static inline int _mas_get_unmapped_area(struct ma_state *mas,
- unsigned long min, unsigned long max, unsigned long size, bool
- forward)
+static inline int _mas_get_empty_area(struct ma_state *mas,
+ unsigned long min, unsigned long max,
+ unsigned long size, bool forward)
{
mas_start(mas);
max--; // Convert to inclusive.
// Empty set.
if (mas_is_none(mas) || mas_is_ptr(mas)) {
- _mas_empty_or_single_unmapped_area(mas, min, max, size, forward);
+ _mas_empty_or_single_empty_area(mas, min, max, size, forward);
return 0;
}
return 0;
}
-int mas_get_unmapped_area(struct ma_state *mas, unsigned long min,
+int mas_get_empty_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size)
{
- return _mas_get_unmapped_area(mas, min, max, size, true);
+ return _mas_get_empty_area(mas, min, max, size, true);
}
-int mas_get_unmapped_area_rev(struct ma_state *mas, unsigned long min,
+int mas_get_empty_area_rev(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size)
{
- return _mas_get_unmapped_area(mas, min, max, size, false);
+ return _mas_get_empty_area(mas, min, max, size, false);
}
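+/*
+ * Hypothetical usage of the renamed interface (tree setup and error
+ * handling elided; use() stands in for the caller's code):
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *	if (!mas_get_empty_area(&mas, 0x1000, 0x10000, 0x100))
+ *		use(mas.index);	// start of a 0x100 long empty range
+ */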
-/** Private
+/*
* mas_alloc() - Allocate a range.
*
* Given a size, a minimum starting point (mas->index), a maximum (mas->last),
{
unsigned char slot = MAPLE_NODE_SLOTS;
unsigned long min;
- mas_start(mas);
+ mas_start(mas);
if (mas_is_none(mas) || mas_is_ptr(mas)) {
mas_root_expand(mas, entry);
if (mas_is_err(mas))
}
mas_awalk(mas, size); // Must be walking a tree.
-
if (mas_is_err(mas))
return xa_err(mas->node);
no_gap:
return -EBUSY;
}
-/** Private
+/*
* mas_rev_alloc() - Reverse allocate a range.
*
* Given a size, a minimum value (mas->index), a maximum starting point
unsigned char slot = MAPLE_NODE_SLOTS;
int ret = 0;
- ret = _mas_get_unmapped_area(mas, min, max, size, false);
+ ret = _mas_get_empty_area(mas, min, max, size, false);
if (ret)
return ret;
return -EBUSY;
}
-/**
+/*
*
* Must hold rcu_read_lock or the write lock.
*
return mas_range_load(mas, &range_min, &range_max);
}
-/** Private
+/*
*
* _mas_next() - Finds the next entry, sets index to the start of the range.
*
void *entry = NULL;
unsigned long range_max;
- if (mas->node && !mas_searchable(mas))
+ if (!mas_searchable(mas))
return NULL;
- if (!mas->node || mas_is_start(mas)) {// First run.
+ if (mas_is_start(mas)) {// First run.
*range_start = 0;
mas_start(mas);
entry = mas_range_load(mas, range_start, &range_max);
return __mas_next(mas, limit, range_start);
}
-/**
+/*
* mas_find: If mas->node == MAS_START, find the first
* non-NULL entry >= mas->index.
* Otherwise, find the first non-NULL entry > mas->index
return entry;
}
-void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) {
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
+{
return _mt_find(mt, index, max, true);
}
EXPORT_SYMBOL(mt_find);
return _mas_next(mas, max, &index);
}
EXPORT_SYMBOL_GPL(mas_next);
-/** Private
+/*
* mas_erase() - Find the range in which index resides and erase the entire
* range.
*
unsigned long max, gfp_t gfp)
{
int ret = 0;
- MA_STATE(mas, mt, min, max - size);
+ MA_STATE(mas, mt, min, max - size);
if (!mt_is_alloc(mt))
return -EINVAL;
unsigned long max, gfp_t gfp)
{
int ret = 0;
- MA_STATE(mas, mt, min, max - size);
+ MA_STATE(mas, mt, min, max - size);
if (!mt_is_alloc(mt))
return -EINVAL;
#ifdef CONFIG_DEBUG_MAPLE_TREE
unsigned int maple_tree_tests_run;
-unsigned int maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
+unsigned int maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
#ifndef __KERNEL__
return kmem_cache_get_alloc(maple_node_cache);
}
#define MA_PTR "%p"
-#else
+#else // __KERNEL__ is defined.
#define MA_PTR "%px"
#endif
// Tree validations
if (i < (MAPLE_RANGE64_SLOTS - 1))
last = node->pivot[i];
- else if (node->slot[i] == NULL && max != mt_max[mte_node_type(entry)])
+ else if (!node->slot[i] && max != mt_max[mte_node_type(entry)])
break;
if (last == 0 && i > 0)
break;
if (i < (MAPLE_ARANGE64_SLOTS - 1))
last = node->pivot[i];
- else if (node->slot[i] == NULL)
+ else if (!node->slot[i])
break;
if (last == 0 && i > 0)
break;
mt_dump_node(entry, 0, mt_max[mte_node_type(entry)], 0);
}
-/**
+/*
* Calculate the maximum gap in a node and check if that's what is reported in
* the parent (unless root).
*/
gap += p_end - p_start + 1;
} else {
void *entry = mas_get_rcu_slot(mas, i);
+
gap = mte_get_gap(mte, i);
if (mt_is_empty(entry)) {
if (gap != p_end - p_start + 1) {
}
}
}
-void mas_validate_child_slot(struct ma_state *mas) {
+void mas_validate_child_slot(struct ma_state *mas)
+{
enum maple_type type = mte_node_type(mas->node);
struct maple_enode *child;
unsigned char i;
}
}
}
-/**
+/*
* Validate all pivots are within mas->min and mas->max.
*/
void mas_validate_limits(struct ma_state *mas)
mt_dump(mas->tree);
MT_BUG_ON(mas->tree, piv < mas->min);
}
- if ((piv > mas->max)) {
+ if (piv > mas->max) {
pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i,
piv, mas->max);
mt_dump(mas->tree);
mas_set_slot(mas, mte_parent_slot(mas->node));
mas_next_node(mas, max);
- if (mas->node != MAS_NONE)
+ if (!mas_is_none(mas))
return;
if (mte_is_root(mn))
mas->max = p_max;
mas->min = p_min;
}
-/**
+/*
* validate a maple tree by checking:
* 1. The limits (pivots are within mas->min to mas->max)
* 2. The gap is correctly set in the parents
void mt_validate(struct maple_tree *mt)
{
unsigned char end;
- MA_STATE(mas, mt, 0, 0);
-
+ MA_STATE(mas, mt, 0, 0);
+
rcu_read_lock();
mas_start(&mas);
mas_first_entry(&mas, ULONG_MAX);
while (mas.node != MAS_NONE) {
if (!mte_is_root(mas.node)) {
end = mas_data_end(&mas);
- if (end < mt_min_slot_cnt(mas.node)) {
+ if ((end < mt_min_slot_cnt(mas.node)) &&
+ (mas.max != ULONG_MAX)) {
pr_err("Invalid size %u of "MA_PTR"\n", end,
mas_mn(&mas));
MT_BUG_ON(mas.tree, 1);
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
for (int i = 1; i < 128; i++) {
int j;
+
mas_node_cnt(&mas, i); // Request
mas_nomem(&mas, GFP_KERNEL); // Fill request
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // check request filled
}
for (int i = 1; i < 128; i++) {
int j;
- MA_STATE(mas2, mt, 0, 0);
+ MA_STATE(mas2, mt, 0, 0);
+
mas_node_cnt(&mas, i); // Request
mas_nomem(&mas, GFP_KERNEL); // Fill request
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // check request filled
for (j = 1; j <= i; j++) { // Move the allocations to mas2
mn = mas_next_alloc(&mas); // get the next node.
MT_BUG_ON(mt, mn == NULL);
- mas_push_node(&mas2, (struct maple_enode*)mn);
+ mas_push_node(&mas2, (struct maple_enode *)mn);
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != j);
}
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
unsigned long huge = 8000UL * 1000 * 1000;
check_insert(mt, huge, (void *) huge);
- check_insert(mt, 0, xa_mk_value(0) );
+ check_insert(mt, 0, xa_mk_value(0));
check_lb_not_empty(mt);
}
MT_BUG_ON(mt, entry != entry2);
MT_BUG_ON(mt, index != mas.index);
MT_BUG_ON(mt, last != mas.last);
+
+ mas.node = MAS_NONE;
+ mas.index = ULONG_MAX;
+ mas.last = ULONG_MAX;
+ entry2 = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != entry2);
+
+ mas_reset(&mas);
+ mas.index = 0;
+ mas.last = 0;
+ MT_BUG_ON(mt, !!mas_prev(&mas, 0));
+
mas_unlock(&mas);
mtree_destroy(mt);
}
{
unsigned long i, j;
void *entry;
- MA_STATE(mas, mt, 0, 0);
+ MA_STATE(mas, mt, 0, 0);
+
rcu_read_lock();
mas_for_each(&mas, entry, ULONG_MAX)
MT_BUG_ON(mt, true);
rcu_read_lock();
mas_for_each(&mas, entry, ULONG_MAX) {
if (xa_is_zero(entry))
- continue;
+ continue;
+
MT_BUG_ON(mt, entry != xa_mk_value(j));
j++;
}
for (int i = 5; i < 25; i++) {
erase_check_insert(mt, i);
- for (int j = i; j >= 0; j--) {
+ for (int j = i; j >= 0; j--)
erase_check_load(mt, j);
- }
}
erase_check_erase(mt, 14); //6015
else
erase_check_load(mt, i);
}
- for (int i = 23; i < 25; i++) {
+ for (int i = 23; i < 25; i++)
erase_check_erase(mt, i);
- }
+
for (int i = 0; i < 25; i++) {
if (i <= 25 && i >= 13)
check_load(mt, set[i], NULL);
#define SNULL 2
#define ERASE 3
#define ec_type_str(x) \
- ( ((x) == STORE) ? \
+ (((x) == STORE) ? \
"STORE" : \
(((x) == SNULL) ? \
"SNULL" : "ERASE") \
int check = 0;
void *foo;
unsigned long addr = 0;
- MA_STATE(mas, mt, 0, 0);
void *s_entry = NULL, *e_entry = NULL;
unsigned long retry = 0;
+ MA_STATE(mas, mt, 0, 0);
+
for (int i = 0; i < size; i += 3) {
unsigned long s_min, s_max;
unsigned long e_min, e_max;
void *value = NULL;
+
MA_STATE(mas_start, mt, set[i+1], set[i+1]);
MA_STATE(mas_end, mt, set[i+2], set[i+2]);
mt_set_non_kernel(127);
if ((s_min == e_min) && (s_max == e_max)) {
if (!entry_cnt)
entry_cnt++;
else if (!mt_is_empty(s_entry)) {
- if (e_max > mas_end.last) {
+ if (e_max > mas_end.last)
entry_cnt++;
- }
- if (s_min < mas_start.index) {
+
+ if (s_min < mas_start.index)
entry_cnt++;
- }
} else {
entry_cnt++;
}
s_entry = mas_next(&mas_start,
set[i+2]);
while (!mas_is_none(&mas_start) &&
- (mas_start.last != e_max) ) {
+ (mas_start.last != e_max)) {
BUG_ON(retry > 50); // stop infinite retry on testing.
if (xa_is_zero(s_entry)) {
retry++;
erase_check_store_range(mt, set, i + 1, value);
break;
case ERASE:
+ if (!s_entry)
+ break;
check_erase(mt, set[i+1], xa_mk_value(set[i+1]));
entry_cnt--;
break;
STORE, 140590386819072, 140590386823167,
STORE, 140590386823168, 140590386827263,
SNULL, 140590376591359, 140590376595455,
-/*
-STORE, 140590376587264, 140590376591359,
-STORE, 140590376591360, 140590376595455,
-*/
};
unsigned long set21[] = {
STORE, 93874710941696, 93874711363583,
ERASE, 140612707803136, 140612716191743,
ERASE, 140613504712704, 140613504716799,
ERASE, 140613504716800, 140613513105407,
-#if 0
-ERASE, 140613546643456, 140613546647551,
-ERASE, 140613546647552, 140613555036159,
-ERASE, 140613387280384, 140613387284479,
-ERASE, 140613387284480, 140613395673087,
-ERASE, 140613538250752, 140613538254847,
-ERASE, 140613538254848, 140613546643455,
-ERASE, 140612699406336, 140612699410431,
-ERASE, 140612699410432, 140612707799039,
-#endif
};
unsigned long set39[] = {
int cnt = 0;
void *ptr = NULL;
+
MA_STATE(mas, mt, 0, 0);
mt_set_non_kernel(3);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set11, ARRAY_SIZE(set11));
rcu_barrier();
- mas_get_unmapped_area_rev(&mas, 12288, 140014592737280, 0x2000);
+ mas_get_empty_area_rev(&mas, 12288, 140014592737280, 0x2000);
MT_BUG_ON(mt, mas.index != 140014592565248);
mtree_destroy(mt);
check_erase2_testset(mt, set13, ARRAY_SIZE(set13));
mtree_erase(mt, 140373516443648);
rcu_read_lock();
- mas_get_unmapped_area_rev(&mas, 0, 140373518663680, 4096);
+ mas_get_empty_area_rev(&mas, 0, 140373518663680, 4096);
rcu_read_unlock();
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set16, ARRAY_SIZE(set16));
rcu_barrier();
- mas_get_unmapped_area_rev(&mas, 4096, 139921865637888, 0x6000);
+ mas_get_empty_area_rev(&mas, 4096, 139921865637888, 0x6000);
MT_BUG_ON(mt, mas.index != 139921865523200);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set17, ARRAY_SIZE(set17));
rcu_barrier();
- mas_get_unmapped_area_rev(&mas, 4096, 139953197334528, 0x1000);
+ mas_get_empty_area_rev(&mas, 4096, 139953197334528, 0x1000);
MT_BUG_ON(mt, mas.index != 139953197318144);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set18, ARRAY_SIZE(set18));
rcu_barrier();
- mas_get_unmapped_area_rev(&mas, 4096, 140222972858368, 2215936);
+ mas_get_empty_area_rev(&mas, 4096, 140222972858368, 2215936);
MT_BUG_ON(mt, mas.index != 140222966259712);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set26, ARRAY_SIZE(set26));
rcu_barrier();
- mas_get_unmapped_area_rev(&mas, 4096, 140109042671616, 409600);
+ mas_get_empty_area_rev(&mas, 4096, 140109042671616, 409600);
MT_BUG_ON(mt, mas.index != 140109040549888);
mt_set_non_kernel(0);
mt_validate(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set28, ARRAY_SIZE(set28));
rcu_barrier();
- mas_get_unmapped_area_rev(&mas, 4096, 139918413357056, 4190208);
- mas.index = (mas.index + 2093056 - 0) &(~2093056); // align_mast = 2093056 offset = 0
+ mas_get_empty_area_rev(&mas, 4096, 139918413357056, 4190208);
+ mas.index = (mas.index + 2093056 - 0) & (~2093056); // align_mask = 2093056 offset = 0
MT_BUG_ON(mt, mas.index != 139918401601536);
mt_set_non_kernel(0);
mt_validate(mt);
mt_validate(mt);
mtree_destroy(mt);
-//mmap: unmapped_area_topdown: ffff88821c9cb600 Gap was found: mt 140582827569152 gap_end 140582869532672
-//mmap: window was 140583656296448 - 4096 size 134217728
-//mmap: mas.min 94133881868288 max 140582961786879 mas.last 140582961786879
-//mmap: mas.index 140582827569152 align mask 0 offset 0
-//mmap: rb_find_vma find on 140582827569152 => ffff88821c5bad00 (ffff88821c5bad00)
+/* mmap: empty_area_topdown: ffff88821c9cb600 Gap was found:
+ * mt 140582827569152 gap_end 140582869532672
+ * mmap: window was 140583656296448 - 4096 size 134217728
+ * mmap: mas.min 94133881868288 max 140582961786879 mas.last 140582961786879
+ * mmap: mas.index 140582827569152 align mask 0 offset 0
+ * mmap: rb_find_vma find on 140582827569152 => ffff88821c5bad00 (ffff88821c5bad00)
+ */
// move gap failed due to an entirely empty node.
mt_set_non_kernel(99);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set33, ARRAY_SIZE(set33));
rcu_barrier();
- mas_get_unmapped_area_rev(&mas, 4096, 140583656296448, 134217728);
+ mas_get_empty_area_rev(&mas, 4096, 140583656296448, 134217728);
MT_BUG_ON(mt, mas.index != 140582869532672);
mt_set_non_kernel(0);
mt_validate(mt);
rcu_barrier();
mt_validate(mt);
mtree_destroy(mt);
-
-
-
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)
min, holes[i+1]>>12, holes[i+2]>>12,
holes[i] >> 12);
#endif
- MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, min,
+ MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, min,
holes[i+1] >> 12,
holes[i+2] >> 12));
#if DEBUG_REV_RANGE
- printk("Found %lu %lu\n", mas.index, mas.last);
- printk("gap %lu %lu\n", (holes[i] >> 12),
+ pr_debug("Found %lu %lu\n", mas.index, mas.last);
+ pr_debug("gap %lu %lu\n", (holes[i] >> 12),
(holes[i+1] >> 12));
#endif
MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
};
int i, range_cnt = ARRAY_SIZE(range);
int req_range_cnt = ARRAY_SIZE(req_range);
+ unsigned long min = 0x565234af2000;
+
for (i = 0; i < range_cnt; i += 2) {
#define DEBUG_ALLOC_RANGE 0
MA_STATE(mas, mt, 0, 0);
- unsigned long min = 0x565234af2000;
- for (i = 0; i < ARRAY_SIZE(holes); i+= 3) {
+
+ for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
#if DEBUG_ALLOC_RANGE
- pr_debug("\tGet unmapped %lu-%lu size %lu\n", min >> 12,
+ pr_debug("\tGet empty %lu-%lu size %lu\n", min >> 12,
holes[i+1] >> 12, holes[i+2] >> 12);
#endif
- MT_BUG_ON(mt, mas_get_unmapped_area(&mas, min >> 12,
+ MT_BUG_ON(mt, mas_get_empty_area(&mas, min >> 12,
holes[i+1] >> 12,
holes[i+2] >> 12));
MT_BUG_ON(mt, mas.index != holes[i] >> 12);
static noinline void check_ranges(struct maple_tree *mt)
{
- int i;
+ int i, val, val2;
unsigned long r[] = {
10, 15,
20, 25,
mt_set_non_kernel(50);
for (i = 0; i <= 500; i++) {
- int val = i*5;
- int val2 = (i+1)*5;
+ val = i*5;
+ val2 = (i+1)*5;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 2400, 2400, xa_mk_value(2400), 0);
mt_set_non_kernel(50);
for (i = 0; i <= 500; i++) {
- int val = i*5;
- int val2 = (i+1)*5;
+ val = i*5;
+ val2 = (i+1)*5;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 2422, 2422, xa_mk_value(2422), 0);
mtree_init(mt, MAPLE_ALLOC_RANGE);
mt_set_non_kernel(50);
for (i = 0; i <= 50; i++) {
- int val = i*10;
- int val2 = (i+1)*10;
+ val = i*10;
+ val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 161, 161, xa_mk_value(161), 0);
mtree_init(mt, MAPLE_ALLOC_RANGE);
for (i = 0; i <= 500; i++) {
- int val = i*10;
- int val2 = (i+1)*10;
+ val = i*10;
+ val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 4600, 4959, xa_mk_value(1), 0);
mt_validate(mt);
mtree_destroy(mt);
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ for (i = 0; i <= 500; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 4811, 4811, xa_mk_value(4811), 0);
+ check_store_range(mt, 4812, 4812, xa_mk_value(4812), 0);
+ check_store_range(mt, 4861, 4861, xa_mk_value(4861), 0);
+ check_store_range(mt, 4862, 4862, xa_mk_value(4862), 0);
+ check_store_range(mt, 4842, 4849, NULL, 0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ for (i = 0; i <= 200; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ check_store_range(mt, 1655, 1655, xa_mk_value(1655), 0);
+ check_store_range(mt, 1656, 1656, xa_mk_value(1656), 0);
+ check_store_range(mt, 1666, 1666, xa_mk_value(1666), 0);
+
+ check_store_range(mt, 1705, 1705, xa_mk_value(1705), 0);
+ check_store_range(mt, 1706, 1706, xa_mk_value(1706), 0);
+ check_store_range(mt, 1716, 1716, xa_mk_value(1716), 0);
+
+ check_store_range(mt, 1755, 1755, xa_mk_value(1755), 0);
+ check_store_range(mt, 1756, 1756, xa_mk_value(1756), 0);
+
+ check_store_range(mt, 1805, 1806, xa_mk_value(1805), 0);
+ check_store_range(mt, 1806, 1806, xa_mk_value(1806), 0);
+
+ check_store_range(mt, 1855, 1855, xa_mk_value(1855), 0);
+ check_store_range(mt, 1856, 1856, xa_mk_value(1856), 0);
+ check_store_range(mt, 1866, 1866, xa_mk_value(1866), 0);
+ // Cause a 3 child split.
+ check_store_range(mt, 1792, 1799, NULL, 0);
+ mt_validate(mt);
+ mtree_destroy(mt);
}
static noinline void check_next_entry(struct maple_tree *mt)
{
void *entry = NULL;
unsigned long limit = 30, i = 0;
+
MT_BUG_ON(mt, !mtree_empty(mt));
MA_STATE(mas, mt, i, i);
check_seq(mt, limit, false);
rcu_read_lock();
- for (;i <= limit + 1; i++) {
+ for ( ; i <= limit + 1; i++) {
entry = mas_next(&mas, limit);
if (i > limit)
MT_BUG_ON(mt, entry != NULL);
{
unsigned long index = 16;
void *value;
+
MA_STATE(mas, mt, index, index);
MT_BUG_ON(mt, !mtree_empty(mt));
*/
mt_set_non_kernel(1);
mas_reset(&mas);
- MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 50, 100, 2));
+ MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 50, 100, 2));
MT_BUG_ON(mt, mas.index != index + 1);
rcu_read_unlock();
* 50 for size 3.
*/
mas_reset(&mas);
- MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 20, 50, 3));
+ MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 20, 50, 3));
MT_BUG_ON(mt, mas.index != 38);
rcu_read_unlock();
mas_reset(&mas);
rcu_read_lock();
- MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 76, 81, 2));
+ MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 76, 81, 2));
MT_BUG_ON(mt, mas.index != 79);
mt_validate(mt);
rcu_read_unlock();
mtree_test_erase(mt, 81);
mas_reset(&mas);
rcu_read_lock();
- MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 76, 85, 4));
+ MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 76, 85, 4));
rcu_read_unlock();
MT_BUG_ON(mt, mas.index != 78);
mt_validate(mt);
mt_set_non_kernel(2);
mas_reset(&mas);
rcu_read_lock();
- MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, 1700, 1800, 2));
+ MT_BUG_ON(mt, mas_get_empty_area_rev(&mas, 1700, 1800, 2));
MT_BUG_ON(mt, mas.index != 1791);
rcu_read_unlock();
mt_validate(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_seq(mt, 400, false);
mtree_test_store_range(mt, 376, 391, NULL);
- mt_dump(mt);
mt_set_non_kernel(0);
mtree_destroy(mt);