* Matthew Wilcox <willy@infradead.org>
*/
+#define CONFIG_DEBUG_MAPLE_TREE
+
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
{
return (struct maple_node *)((unsigned long)entry & ~127);
}
+static inline struct maple_node *mas_mn(const struct ma_state *mas)
+{
+ return mte_to_node(mas->node);
+}
static void mte_free(struct maple_enode *enode)
{
}
mte_set_pivot(mas->node, slot, val);
}
-static inline struct maple_enode *__mte_get_rcu_slot(
+static inline struct maple_enode *_mte_get_rcu_slot(
const struct maple_enode *mn, unsigned char slot,
enum maple_type type)
{
}
}
-static inline struct maple_enode *_mte_get_rcu_slot(
- const struct maple_enode *mn, unsigned char slot,
- enum maple_type type)
-{
- struct maple_enode *p = __mte_get_rcu_slot(mn, slot, type);
-
- if (mte_dead_node(mn))
- return MAS_START;
- return p;
-}
-
static inline struct maple_enode *mte_get_rcu_slot(const struct maple_enode *mn,
unsigned char slot)
{
}
/* Private
- * Returns the last slot that contains data.
+ * mas_data_end() - Find the last slot that contains data.  Sets @last_piv
+ * to the value of the last pivot and @coalesce to the number of slots that
+ * can be removed by coalescing.
*/
static inline unsigned char mas_data_end(const struct ma_state *mas,
const enum maple_type type, unsigned long *last_piv,
unsigned char *coalesce)
{
- unsigned char data_end = 0;
- unsigned long prev_piv = mas->max;
struct maple_enode *mn = mas->node;
+ unsigned long piv = mas->min, prev_piv = mas->min;
+ unsigned char slot;
+ bool counted_null = false;
+
*coalesce = 0;
+ for (slot = 0; slot < mt_slot_count(mn); slot++) {
+ void *entry;
- prev_piv = _mas_get_safe_pivot(mas, 0, type);
- for (data_end = 1; data_end < mt_slot_count(mn); data_end++) {
- *last_piv = _mas_get_safe_pivot(mas, data_end, type);
- if (*last_piv == 0) {
+ piv = _mas_get_safe_pivot(mas, slot, type);
+ if (!piv && slot) { // Past the end of data.
+ slot--;
*last_piv = prev_piv;
- return data_end - 1;
+ return slot;
}
- if (prev_piv == *last_piv)
- (*coalesce)++;
- else if (mt_will_coalesce(_mte_get_rcu_slot(mn, data_end, type)))
- (*coalesce)++;
+ entry = _mte_get_rcu_slot(mn, slot, type);
+ if (mt_will_coalesce(entry)) {
+ if (piv == prev_piv)
+ (*coalesce)++;
+ } else if (!entry) {
+ if (counted_null)
+ (*coalesce)++;
+ counted_null = true;
+ } else {
+ counted_null = false;
+ }
- if (*last_piv == mas->max)
- return data_end;
+ if (piv == mas->max)
+ break;
- prev_piv = *last_piv;
+ prev_piv = piv;
}
- return data_end;
+
+ *last_piv = piv;
+ return slot;
}
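+
+ /* Example (illustrative, values assumed): a leaf with pivots
+ * {5, 10, 0, ...} and entries in slots 0 and 1 yields:
+ *
+ *	unsigned long last_piv;
+ *	unsigned char coalesce;
+ *	unsigned char end = mas_data_end(mas, mte_node_type(mas->node),
+ *					 &last_piv, &coalesce);
+ *	// end == 1, last_piv == 10, coalesce == 0.
+ */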
/** Private
static inline int ma_hard_data(unsigned long end,
unsigned long coalesce)
{
+ if (end < coalesce)
+ return 0;
return end - coalesce;
}
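+
+ /* Example (illustrative): end == 5 with coalesce == 2 leaves 3 slots of
+ * hard data; if coalesce exceeds end, the node holds no hard data and 0
+ * is returned.
+ */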
unsigned long min = mas->min;
unsigned long max = min;
enum maple_type type = mte_node_type(mas->node);
- unsigned long pivot_cnt = mt_pivots[type];
+ unsigned long slot_cnt = mt_slots[type];
unsigned long half = mt_slots[type] / 2;
unsigned long last_pivot;
unsigned char coalesce;
if (mte_get_pivot(mas->node, half) > mas->index)
- return half;
- return half + 1;
+ return half - 1;
+
+ return half;
if (mte_is_root(mas->node))
return half;
if (i >= half)
return i;
- if (data_end < pivot_cnt)
- max = mte_get_pivot(mas->node, data_end);
+ if (data_end < slot_cnt)
+ max = mte_get_pivot(mas->node, data_end - 1);
else
max = mas->max;
pstart = mas->min;
for (i = 0; i < mt_slots[mt]; i++) {
pend = mas_get_safe_pivot(mas, i);
+ if (!pend && i)
+ pend = mas->max;
gap = pend - pstart + 1;
if (!mt_is_empty(mte_get_rcu_slot(mas->node, i)))
max_gap = gap;
next:
- if (pend == mas->max)
+ if (pend >= mas->max)
break;
pstart = pend + 1;
{
unsigned char pslot;
unsigned long p_gap, max_gap = 0;
+ unsigned long max, min;
+ struct maple_enode *mn;
if (!mte_is_leaf(mas->node))
if (mte_is_root(mas->node))
return;
+ mn = mas->node;
+ max = mas->max;
+ min = mas->min;
/* Get the largest gap in this leaf and check it against the reported
* largest.
*/
if (p_gap != max_gap)
mas_parent_gap(mas, pslot, max_gap);
+
+ mas->max = max;
+ mas->min = min;
+ mas->node = mn;
}
cp->src_start = sloc;
}
static inline int mas_split_data(struct ma_state *mas, struct maple_enode *left,
- struct maple_enode *right, unsigned char split)
+ struct maple_enode *right, unsigned char split,
+ unsigned long r_max)
{
MA_CP(cp, mas->node, left, 0, split);
cp.dst = right;
cp.dst_start = 0;
mas_copy(mas, &cp);
+ if (cp.dst_start + split >= mt_pivot_count(cp.dst)) {
+ unsigned char slot = cp.dst_start;
+
+ if (mt_is_empty(mte_get_rcu_slot(right, slot)))
+ slot--;
+ mte_set_pivot(right, slot, r_max);
+ }
}
return split;
}
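+
+ /* Sketch of the flow above: slots 0..split are copied into @left, the
+ * remainder into @right, and if the copy consumed the final slot of
+ * @right (which carries no pivot of its own), the last real pivot is
+ * fixed up to @r_max, the maximum @right is responsible for.
+ */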
*/
static inline void _mas_replace(struct ma_state *mas, bool free, bool push)
{
- struct maple_node *mn = mte_to_node(mas->node);
+ struct maple_node *mn = mas_mn(mas);
struct maple_enode *parent = NULL;
struct maple_enode *prev;
unsigned char slot = 0;
struct maple_enode *left = NULL, *right = NULL;
enum maple_type ptype; // parent type.
unsigned long pivot;
- unsigned long p_max = 0;
+ unsigned long r_max, p_max = ULONG_MAX;
if (mte_is_root(mas->node)) {
old_parent = full;
// calculate the split location.
split = mas_calc_split(mas, &left, &right);
+ r_max = mas->max;
+ if (p_slot == p_end) // Right will be the end of the parent node.
+ r_max = p_max;
// the node type for the children types.
// Node types must be set to copy data into them.
- mas_split_data(mas, left, right, split);
+ mas_split_data(mas, left, right, split, r_max);
if (right) {
pivot = mte_get_pivot(left, split);
if (!pivot) // dense node
mte_link(left, new_parent, p_slot, pivot, ptype);
if (right) {
- unsigned long r_max = mas->max;
-
- if (mt_node_max(right) < mas->max)
+ if (mt_node_max(right) < r_max)
r_max = pivot + mt_node_max(right);
mte_link(right, new_parent, p_slot + 1, r_max, ptype);
}
unsigned char slot_cnt = mt_slots[this_type] - 1;
unsigned char pivot_cnt = mt_pivots[this_type];
unsigned char coalesce;
- unsigned char slot = mas_get_slot(mas);
+ unsigned char data_slot, slot = mas_get_slot(mas);
bool spans_range = false;
bool append = false;
+ bool split = false;
bool null_entry = false;
int old_end = mas_data_end(mas, this_type, &last_piv, &coalesce);
int new_end = old_end;
- int ret = 1;
+ int ret = 0;
if (mas->last > mas->max) {
BUG_ON(!active);
if (!overwrite) {
void *entry;
- if (spans_range)
- goto busy;
+ if (spans_range) {
+ mas_set_err(mas, -ERANGE);
+ return 0;
+ }
entry = mte_get_rcu_slot(mas->node, slot);
- if (!mt_is_empty(entry))
- goto busy;
+ if (!mt_is_empty(entry)) {
+ mas_set_err(mas, -EBUSY);
+ return 0;
+ }
}
- if (last_piv == mas->max)
- slot_cnt++;
-
- if (slot > old_end)
+ if (slot >= old_end)
append = true;
- if (mas->index && min != mas->index)
- null_entry = true;
-
if (mas->index == min && mas->last == max) {
mte_set_rcu_slot(mas->node, slot, entry);
ret = 1;
/* Calculate how many new pivots we will need. */
// Possible pivot before the new range.
- if (mas->index > min)
- ret++;
-
- // Possible pivot after the new range.
- if (mas->last < max)
- ret++;
+ data_slot = slot;
+ if (mas->index > min) {
+ if (!slot) { // storing to slot 0 means we need the null.
+ ret++;
+ } else if (append) { // Appending will need a null.
+ ret++;
+ } else { // A starting null is only needed if one isn't
+ // already there.
+ struct maple_enode *slot_val;
- // Detect duplicate null entries and don't copy them.
- if (null_entry) {
- struct maple_enode *slot_val;
- slot_val = mte_get_rcu_slot(mas->node, slot + 1);
- if (mt_will_coalesce(slot_val)) {
- if (ret)
- ret--;
- }
- if (slot > 0) {
- slot_val = mte_get_rcu_slot(mas->node, slot - 1);
- if (mt_is_empty(slot_val)) {
- slot--;
- if (ret)
- ret--;
- }
+ slot_val = mte_get_rcu_slot(mas->node, slot);
+ if (!mt_is_empty(slot_val))
+ ret++;
}
+
+ null_entry = true;
+ }
+ data_slot += ret;
+
+ // A pivot after the new range should exist, but make sure we
+ // don't overflow to the last slot, which implies a pivot.
+ if (mas->last < max) {
+ unsigned char max_slots = old_end + ret - coalesce + 2;
+
+ if (max_slots >= slot_cnt)
+ ret++;
}
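+
+ /* Worked example (hypothetical values): storing index = 5, last = 10
+ * into a slot spanning min = 0, max = 20 needs a leading NULL pivot
+ * for 0-4 (counted via ret above) and, when the node is near capacity,
+ * an extra slot for the trailing 11-20 range.
+ */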
+
/* Check for splitting */
- new_end += ret - coalesce;
- if (new_end > slot_cnt) {
+ new_end += ret - coalesce + 1;
+ if (new_end == slot_cnt && mas->max == mt_node_max(mas->node))
+ split = true; // Need a NULL for the maximum range.
+ else if (new_end > slot_cnt)
+ split = true; // There is not enough space.
+
+ if (split) {
/* Not enough space in the node */
unsigned char split = mas_split(mas, slot, active);
+
if (mas_is_err(mas))
return 0;
mas_partial_copy(mas, slot);
if (mas_is_err(mas))
return 0;
+
old_end = slot;
}
mas_update_gap(mas);
return ret;
-
-busy:
- mas_set_err(mas, -EBUSY);
- return 0;
}
+
static inline int _mas_insert(struct ma_state *mas, void *entry,
unsigned char slot)
{
mas_set_slot(mas, slot);
return _mas_add(mas, entry, false, true);
}
-static inline int _ma_store(struct ma_state *mas, void *entry,
- unsigned char slot)
-{
- mas_set_slot(mas, slot);
- return _mas_add(mas, entry, true, true);
-}
static inline void mas_root_expand(struct ma_state *mas, void *entry)
{
/* Private
*
- * _mas_prev() - Find the previous entry from the current mas state.
+ * _mas_prev() - Find the previous entry from the current ma_state.
* @mas the current maple state (must have a valid slot)
*/
static inline void* _mas_prev(struct ma_state *mas, unsigned long limit)
* Try to allocate 1-2 nodes, depending on if the right node will be
* completely consumed.
*
- * returns 0 on success or the number of slots that were not filled.
+ * returns 0 on failure or the number of slots that were not filled.
*
*/
/*
MA_CP(cp, this_enode, NULL, 0, end);
- if (coalesce)
- coalesce--;
-
l_slot_cnt = ma_hard_data(end, coalesce);
if (mte_is_root(this_enode))
return mas_coalesce_node(mas, end, coalesce, true);
mas->max = this_max;
mas->min = this_min;
mas_set_slot(mas, this_p_slot);
- if (l_slot_cnt <= 1) {
+ if (end < coalesce) {
enum maple_type p_type =
mas_parent_enum(mas, this_enode);
struct maple_enode *eparent =
r_end = mas_data_end(mas, mte_node_type(r_enode), &l_piv, &r_coalesce);
r_slot_cnt = ma_hard_data(r_end, r_coalesce);
- // Add 1 for slot 0 on the right.
- all_slots = r_slot_cnt + 1 + l_slot_cnt;
+ if (r_end < r_coalesce) {
+ all_slots = l_slot_cnt;
+ new_type = mte_node_type(this_enode);
+ l_piv = r_max;
+ mas->min = this_min;
+ goto right_empty;
+ }
+ // Add 1 for slot 0 of each node.
+ all_slots = r_slot_cnt + 2 + l_slot_cnt;
// check if left ends in NULL and right starts in NULL.
if ((mt_will_coalesce(mte_get_rcu_slot(this_enode, end)) ||
mas_node_cnt(mas, node_cnt);
if (mas_is_err(mas))
- return ret;
+ return 0;
// Restore the left node (this_enode)
mas->node = this_enode;
mas->node = r_enode;
mas->max = r_max;
mas->min = r_min;
+right_empty:
if (all_slots <= mt_slots[new_type] - 1) {
// Right was entirely consumed.
void *entry = XA_SKIP_ENTRY;
entry = NULL;
ret = mt_slots[new_type] - all_slots;
+ if (!ret)
+ ret = 1;
r_p_slot = mte_parent_slot(r_enode);
mas_encoded_parent(mas);
mte_set_rcu_slot(mas->node, r_p_slot, entry);
+ if (xa_is_skip(entry) &&
+ r_p_slot < mt_pivot_count(mas->node))
+ mte_set_pivot(mas->node, r_p_slot, l_piv);
mte_free(r_enode);
p_coalesce = true;
goto right_done;
r_p_slot = mte_parent_slot(r_enode);
mas_replace(mas);
mas_encoded_parent(mas);
- mte_set_pivot(mas->node, r_p_slot, r_piv);
+ if (r_p_slot < mt_pivot_count(mas->node) - 1)
+ mte_set_pivot(mas->node, r_p_slot, r_piv);
right_done:
- while (r_p_slot-- > this_p_slot) {
+ // Set the parent pivots to l_piv for all skipped slots.
+ while (r_p_slot-- > this_p_slot)
mte_set_pivot(mas->node, r_p_slot, l_piv);
- }
/* If there is a freed node, then mas->node must point to the parent
* which contained the freed node so it can also be checked for coalescing.
*/
static inline void mas_coalesce_root(struct ma_state *mas)
{
- unsigned char end, coalesce, hard_data;
- struct maple_enode *this_enode;
- enum maple_type this_type;
+ struct maple_enode *this_enode = mas->node;
+ enum maple_type this_type = mte_node_type(this_enode);
unsigned long piv;
unsigned long min, max;
+ unsigned char coalesce, hard_data;
+ unsigned char end = mas_data_end(mas, this_type, &piv, &coalesce);
- this_enode = mas->node;
- this_type = mte_node_type(this_enode);
- end = mas_data_end(mas, this_type, &piv, &coalesce);
- hard_data = ma_hard_data(end, coalesce);
- if (end == 1 && coalesce == 1) {
- min = mas->min;
- max = mas->max;
- mas_first_node(mas, ULONG_MAX);
- if (mas_is_none(mas)) {
- // Allocation failures
-
- mas->tree->ma_root = NULL;
- mte_free(this_enode);
- return;
- }
- mas->node = this_enode;
- mas->min = min;
- mas->max = max;
- }
-
- if ((end <= coalesce)) {
- if(!mte_is_leaf(this_enode)) {
- // Remove level in tree.
- mas_set_slot(mas, 0);
- mas_first_node(mas, ULONG_MAX);
-
- mte_to_node(mas->node)->parent =
- mte_to_node(this_enode)->parent;
- mas->node = mte_mk_root(mas->node);
- mas_replace(mas);
- return;
- }
+ MA_CP(cp, mas->node, NULL, 0, end);
- mas->tree->ma_root = NULL;
- mte_free(this_enode);
+ hard_data = ma_hard_data(end, coalesce);
+ if (hard_data > mt_min_slots[this_type] - 1)
return;
- }
- if (mte_is_leaf(this_enode) && hard_data == 1) {
- void *entry;
+ /* Check for a root node that can be reduced to a single entry:
+ * 1. the entire range (0-oo) points to one node,
+ * 2. every slot coalesces (slot count == coalesce),
+ * 3. one entry and one null.
+ */
+ if (!hard_data ||
+ (end + 1 == coalesce) ||
+ (end == 1 && !mte_get_rcu_slot(this_enode, 1))) {
+ unsigned long piv;
+
+ min = mas->min;
+ max = mas->max;
mas_set_slot(mas, 0);
- if (!mas_first_entry(mas, ULONG_MAX)) {
- entry = mte_get_rcu_slot(mas->node,
- mas_get_slot(mas));
- if (((unsigned long) (entry) & 3) != 2) {
+ piv = mas_first_node(mas, ULONG_MAX);
+ if (mte_is_leaf(this_enode)) {
+ if (!piv) {
+ void *entry = mte_get_rcu_slot(this_enode,
+ mas_get_slot(mas));
rcu_assign_pointer(mas->tree->ma_root,
entry);
mte_free(this_enode);
return;
}
+ // Coalesce the node.
+ mas->min = min;
+ mas->max = max;
+ mas->node = this_enode;
+ goto coalesce;
+ } else if (mas_is_none(mas)) {
+ /* allocation failed to create a leaf for this empty
+ * node.
+ */
+ rcu_assign_pointer(mas->tree->ma_root, NULL);
+ mte_free(this_enode);
+ return;
}
- }
- if (hard_data < mt_min_slots[this_type] - 1) {
- MA_CP(cp, mas->node, NULL, 0, end);
+ /* it's not a leaf, remove a level from the tree. */
+ goto remove_level;
+ } else if (hard_data < mt_min_slots[this_type] - 1)
+ goto coalesce; // Compact the node.
+
+ return;
+
+coalesce:
mas_copy(mas, &cp);
if (mas_is_err(mas))
return;
mas->node = cp.dst;
- mte_to_node(mas->node)->parent =
- mte_to_node(this_enode)->parent;
+remove_level:
+ mas_mn(mas)->parent = mte_to_node(this_enode)->parent;
mas->node = mte_mk_root(mas->node);
mas_replace(mas);
- }
}
/** Private
if (!mt_is_empty(entry))
goto check_start;
- if (!end || end + 1 <= coalesce) {
+ if (end <= coalesce) {
mas_coalesce_empty(mas, eparent, p_slot, p_type);
check_parent = true;
}
if (check_parent)
slot = 0;
- while (slot > 0) {
+ while (--slot > 0) {
if (!mt_is_empty(mte_get_rcu_slot(this_enode, slot)))
break;
- slot--;
}
mas->node = eparent;
{
enum maple_type type;
unsigned long max, min;
- unsigned char pivot_cnt, i;
+ unsigned char i;
bool found = false;
type = mte_node_type(mas->node);
switch (type) {
case maple_leaf_64:
- pivot_cnt = mt_pivots[type];
- if (i >= pivot_cnt - 1)
- max = mas->max;
- else
- max = _mte_get_pivot(mas->node, i, type);
+ max = _mas_get_safe_pivot(mas, i, type);
do {
unsigned long this_gap = 0;
unsigned char slot;
MA_STATE(mas, mt, *index, *index);
+ if (!start && !(*index))
+ return NULL;
+
rcu_read_lock();
leaf = _mas_range_walk(&mas, &range_start, &range_end);
slot = mas_get_slot(&mas);
entry = mte_get_rcu_slot(mas.node, slot);
mas.last = range_end;
-
if (mt_is_empty(entry) || xa_is_zero(entry))
entry = NULL;
entry = NULL;
}
+ rcu_read_unlock();
+
if (entry)
*index = mas.last + 1;
- rcu_read_unlock();
return entry;
}
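+
+ /* Example, mirroring the tests: find the first entry at or above
+ * @index (here 17) with an index no higher than 512; on success
+ * @index is advanced past the returned entry.
+ *
+ *	unsigned long index = 17;
+ *	void *entry = mt_find(mt, &index, 512, true);
+ */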
if (mas_is_ptr(mas))
return;
+ if (mas_is_err(mas))
+ return;
slot = mas_data_end(mas, mte_node_type(mas->node), &last_piv, &coalesce);
mas_set_slot(mas, slot);
* no gap found. (return, slot == MAPLE_NODE_SLOTS)
* found the gap. (return, slot != MAPLE_NODE_SLOTS)
*/
- while (!_mas_rev_awalk(mas, size) && !mas_is_err(mas)) {
+ while (!mas_is_err(mas) && !_mas_rev_awalk(mas, size)) {
if (last == mas->node)
mas_rewind_node(mas);
else
}
/* Do the add */
- return _mas_add(mas, entry, overwrite, active);
+ ret = _mas_add(mas, entry, overwrite, active);
+ if (mas_is_err(mas) && xa_err(mas->node) == -ERANGE)
+ mas_set_err(mas, -EEXIST);
+
+ return ret;
exists:
mas_set_err(mas, -EEXIST);
static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot,
unsigned long size, unsigned long *index)
{
+ unsigned char pslot = mte_parent_slot(mas->node);
+ struct maple_enode *mn = mas->node;
/* mas->index is the start address for the search
* which may no longer be needed.
* mas->last is the end address for the search
*index = mas->index;
mas->last = mas->index + size - 1;
+
+ /* The mas->max and mas->min used to calculate index and last
+ * may throw off the gap calculation, so refresh the ma_state
+ * from the parent here.
+ */
+ mas_encoded_parent(mas);
+ mas->max = mas_get_safe_pivot(mas, pslot);
+ if (pslot)
+ mas->min = mas_get_safe_pivot(mas, pslot - 1) + 1;
+
+ mas->node = mn;
_mas_insert(mas, entry, slot);
return 0;
}
// The start of the window can only be within these values.
mas->index = min;
- mas->last = max - size + 1;
+ mas->last = max - size;
if (forward)
mas_awalk(mas, size);
return xa_err(mas->node);
if (mas_get_slot(mas) == MAPLE_NODE_SLOTS)
- goto no_gap;
+ return -EBUSY;
if (forward)
mas_set_fwd_index(mas, size);
mas_set_rev_index(mas, size);
return 0;
-
-no_gap:
- return -EBUSY;
-
-
}
+
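+ /* Sketch (values hypothetical): search for a 0x1000-long gap between
+ * 0x1000 and 0x2000; on success 0 is returned and mas.index holds the
+ * start of the gap.
+ *
+ *	MA_STATE(mas, mt, 0, 0);
+ *	if (!mas_get_unmapped_area(&mas, 0x1000, 0x2000, 0x1000))
+ *		start = mas.index;
+ */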
int mas_get_unmapped_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size)
{
mtree_lock(mas.tree);
retry:
+ mas_set_slot(&mas, 0);
mas.index = min;
mas.last = max - size;
ret = mas_alloc(&mas, entry, size, startp);
}
EXPORT_SYMBOL(mtree_destroy);
-#define CONFIG_DEBUG_MAPLE_TREE
#ifdef CONFIG_DEBUG_MAPLE_TREE
+
void mt_dump_node(void *entry, unsigned long min, unsigned long max,
unsigned int depth);
void mt_dump_range(unsigned long min, unsigned long max, unsigned int depth)
for(i = 0; i < mt_slot_count(mte); i++) {
p_end = mas_get_safe_pivot(mas, i);
- if (!p_end && i != 0)
- break;
+ if (!p_end && i)
+ p_end = mas->max;
if (mte_is_leaf(mte)) {
gap = p_end - p_start + 1;
p_slot = mte_parent_slot(mas->node);
p_mn = mte_parent(mte);
- BUG_ON(_ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte))
- != max_gap);
+ MT_BUG_ON(mas->tree, max_gap > mas->max);
+ MT_BUG_ON(mas->tree,
+ _ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) !=
+ max_gap);
}
/**
#include <linux/maple_tree.h>
#include <linux/module.h>
-static unsigned int tests_run;
-static unsigned int tests_passed;
-
-#ifndef MAPLE_TREE_DEBUG
-# ifdef __KERNEL__
-void mt_dump(const struct maple_tree *mt) { }
-# endif
-#undef MT_BUG_ON
-#define MT_BUG_ON(tree, x) do { \
- tests_run++; \
- if (x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, x); \
- mt_dump(tree); \
- pr_info("Pass: %u Run:%u\n", tests_passed, tests_run); \
- dump_stack(); \
- } else { \
- tests_passed++; \
- } \
-} while (0)
-#endif
#define MTREE_ALLOC_MAX 0x2000000000000Ul
static
int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp)
val = 1;
}
+ val = 0;
+ max = 0;
+ index = 0;
+ MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+ mt_for_each(mt, entry, index, ULONG_MAX) {
+ if (val == 4398046511104)
+ MT_BUG_ON(mt, entry !=
+ xa_mk_value(ULONG_MAX & LONG_MAX));
+ else
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ val <<= 2;
+ if (val == 64) // Skip zero entry.
+ val <<= 2;
+ // For zero check.
+ if (!val)
+ val = 1;
+ max++;
+ MT_BUG_ON(mt, max > 25);
+ }
+ mtree_erase_index(mt, ULONG_MAX);
+
mas_reset(&mas);
index = 17;
entry = mt_find(mt, &index, 512, true);
erase_check_load(mt, i);
}
- mt_set_non_kernel(1);
erase_check_erase(mt, 18); //7012
for (int i = 0; i < 25; i++) {
if (i <= 18 && i >= 13)
erase_check_load(mt, i);
}
+ mt_set_non_kernel(2);
erase_check_erase(mt, 19); //7015
for (int i = 0; i < 25; i++) {
if (i <= 19 && i >= 13)
// Shrinking tree test.
//
- for (int i = 13; i < ARRAY_SIZE(set); i++)
+ for (int i = 13; i < ARRAY_SIZE(set); i++) {
erase_check_insert(mt, i);
+ }
mt_set_non_kernel(99);
for (int i = 18; i < ARRAY_SIZE(set); i++) {
check_load(mt, set[j], NULL);
}
}
- mt_set_non_kernel(30);
+ mt_set_non_kernel(35);
for (int i = 0; i < 18; i++) {
erase_check_erase(mt, i);
for (int j = 0; j < ARRAY_SIZE(set); j++) {
erase_check_insert(mt, 8);
erase_check_insert(mt, 9);
erase_check_erase(mt, 8);
-
-
}
#define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, a[i],\
- a[i + 1], ptr)
+ a[i + 1] - 1, ptr)
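+// The erase2 test sets appear to record mmap-style [start, end) ranges,
+// hence the "- 1" when converting to the tree's inclusive last index.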
#define STORE 1
#define ERASE 2
static noinline void check_erase2_testset(struct maple_tree *mt,
void *foo;
unsigned long addr = 0;
- mt_dump(mt);
- for (int i = 0; i < size; i+=3){
+ for (int i = 0; i < size; i += 3) {
switch(set[i]) {
- case STORE:
- if (!mtree_test_insert_range(mt, set[i + 1],
- set[i + 2], &set))
- entry_cnt++;
- else
- erase_check_store_range(mt, set, i + 1, &set);
- break;
- case ERASE:
- check_erase(mt, set[i+1], &set);
- entry_cnt--;
+ case STORE:
+ if (!mtree_test_insert_range(mt, set[i + 1],
+ set[i + 2] - 1, &set))
+ entry_cnt++;
+ else
+ erase_check_store_range(mt, set, i + 1, &set);
+ break;
+ case ERASE:
+ check_erase(mt, set[i+1], &set);
+ entry_cnt--;
+ break;
+ }
+
+ check = 0;
+ addr = 0;
+ mt_for_each(mt, foo, addr, ULONG_MAX) {
+ check++;
+ if (check > entry_cnt)
break;
}
- mt_dump(mt);
- }
- mt_for_each(mt, foo, addr, ULONG_MAX) {
- printk("addr %lu -> %p\n", addr, foo);
- check++;
+ MT_BUG_ON(mt, check != entry_cnt);
}
-
- printk("test %d != %d\n", check, entry_cnt);
- MT_BUG_ON(mt, check != entry_cnt);
}
STORE, 140277094821888, 140277094825984,
STORE, 140735933906944, 140735933911040,
};
+ unsigned long set3[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735790264320, 140737488351231,
+ERASE, 140735790264320, 140737488351231,
+STORE, 140735790264320, 140735790268415,
+STORE, 94016597282816, 94016597454847,
+ERASE, 94016597282816, 94016597454847,
+STORE, 94016597282816, 94016597299199,
+STORE, 94016597299200, 94016597454847,
+ERASE, 94016597299200, 94016597454847,
+STORE, 94016597299200, 94016597401599,
+STORE, 94016597401600, 94016597442559,
+STORE, 94016597442560, 94016597454847,
+STORE, 140496959283200, 140496959455231,
+ERASE, 140496959283200, 140496959455231,
+STORE, 140496959283200, 140496959287295,
+STORE, 140496959287296, 140496959455231,
+ERASE, 140496959287296, 140496959455231,
+STORE, 140496959287296, 140496959410175,
+STORE, 140496959410176, 140496959442943,
+STORE, 140496959442944, 140496959451135,
+STORE, 140496959451136, 140496959455231,
+STORE, 140735791718400, 140735791722495,
+STORE, 140735791706112, 140735791718399,
+STORE, 47135835713536, 47135835721727,
+STORE, 47135835721728, 47135835729919,
+STORE, 47135835729920, 47135835893759,
+ERASE, 47135835729920, 47135835893759,
+STORE, 47135835729920, 47135835742207,
+STORE, 47135835742208, 47135835893759,
+STORE, 47135835840512, 47135835893759,
+STORE, 47135835742208, 47135835840511,
+ERASE, 47135835742208, 47135835840511,
+STORE, 47135835742208, 47135835840511,
+STORE, 47135835885568, 47135835893759,
+STORE, 47135835840512, 47135835885567,
+ERASE, 47135835840512, 47135835885567,
+STORE, 47135835840512, 47135835893759,
+ERASE, 47135835840512, 47135835893759,
+STORE, 47135835840512, 47135835885567,
+STORE, 47135835885568, 47135835893759,
+ };
+
check_erase2_testset(mt, set, ARRAY_SIZE(set));
mtree_destroy(mt);
+
mtree_init(mt, 0);
check_erase2_testset(mt, set2, ARRAY_SIZE(set2));
start = 140735933894656;
- printk("mt_find check %lu-%lu\n", start, 140735933906943UL);
- MT_BUG_ON(mt, mt_find(mt, &start, 140735933906943UL, true));
+ MT_BUG_ON(mt, !!mt_find(mt, &start, 140735933906943UL, true));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(2);
+ mtree_init(mt, 0);
+ check_erase2_testset(mt, set3, ARRAY_SIZE(set3));
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)
};
unsigned long holes[] = {
- // Start of hole, end of hole, size of hole (+1)
+ /* Note: start of hole is INCLUSIVE
+ * end of hole is EXCLUSIVE
+ * (opposite of the above table.)
+ * Start of hole, end of hole, size of hole (+1)
+ */
0x565234afb000, 0x565234afc000, 0x1000,
0x565234afe000, 0x565235def000, 0x12F1000,
0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
};
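+
+	/* Note: the checks below shift by 12, so the first hole
+	 * 0x565234afb000-0x565234afc000 is tested as page range
+	 * 0x565234afb-0x565234afc with a size of one page.
+	 */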
- MT_BUG_ON(mt, !mtree_empty(mt));
-
int i, range_cnt = ARRAY_SIZE(range);
int req_range_cnt = ARRAY_SIZE(req_range);
unsigned long min = 0;
for (i = 0; i < ARRAY_SIZE(holes); i+= 3) {
MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, min,
- holes[i+1],
- holes[i+2]));
- MT_BUG_ON(mt, mas.index != holes[i + 1] - holes[i+2] + 1);
- min = holes[i+1];
+ holes[i+1] >> 12,
+ holes[i+2] >> 12));
+ MT_BUG_ON(mt, mas.index != holes[i] >> 12);
+ min = holes[i+1] >> 12;
mas_reset(&mas);
}
int i, range_cnt = ARRAY_SIZE(range);
int req_range_cnt = ARRAY_SIZE(req_range);
- MT_BUG_ON(mt, !mtree_empty(mt));
-
for (i = 0; i < range_cnt; i += 2) {
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
MT_BUG_ON(mt, !mtree_empty(mt));
check_insert_range(mt, r[0], r[1], xa_mk_value(r[0]), 0);
check_insert_range(mt, r[2], r[3], xa_mk_value(r[2]), 0);
- check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EBUSY);
+ check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EEXIST);
// Store
check_store_range(mt, r[4], r[5], xa_mk_value(r[4]), 0);
check_store_range(mt, r[6], r[7], xa_mk_value(r[6]), 0);
mtree_init(&tree, 0);
check_new_node(&tree);
+ mtree_destroy(&tree);
+ mtree_init(&tree, 0);
+ check_erase2_sets(&tree);
+ mtree_destroy(&tree);
/* Test ranges (store and insert) */
mtree_init(&tree, 0);
check_ranges(&tree);
+ mtree_destroy(&tree);
mtree_init(&tree, MAPLE_ALLOC_RANGE);
check_alloc_range(&tree);
+ mtree_destroy(&tree);
mtree_init(&tree, MAPLE_ALLOC_RANGE);
check_alloc_rev_range(&tree);
+ mtree_destroy(&tree);
mtree_init(&tree, 0);