#include <asm/barrier.h>
#define MA_ROOT_PARENT 1
-#define ma_parent_ptr(x) ((struct maple_pnode*)(x))
-#define ma_mnode_ptr(x) ((struct maple_node*)(x))
-#define ma_enode_ptr(x) ((struct maple_enode*)(x))
+#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
+#define ma_mnode_ptr(x) ((struct maple_node *)(x))
+#define ma_enode_ptr(x) ((struct maple_enode *)(x))
/**
* mas_for_each() - Iterate over a range of the maple tree.
static void mt_free_rcu(struct rcu_head *head)
{
struct maple_node *node = container_of(head, struct maple_node, rcu);
+
kmem_cache_free(maple_node_cache, node);
}
if (!ma_is_root(mas->node)) {
parent = (unsigned long) mt_to_node(node)->parent;
slot_shift = mt_parent_shift(parent);
- parent &= (1 << slot_shift ) - 1;
+ parent &= (1 << slot_shift) - 1;
}
if (mt_is_alloc(mas->tree))
static inline bool mt_dead_node(const struct maple_enode *enode)
{
struct maple_node *parent, *node = mt_to_node(enode);
+
parent = mt_parent(enode);
return (parent == node);
}
{
int ret = 1;
int slot = 0;
+
while (slot < MAPLE_NODE_SLOTS) {
if (!node->slot[slot])
return ret;
unsigned char slot)
{
enum maple_type type = mt_node_type(mas->node);
+
if (slot >= mt_pivots[type])
return mas->max;
return _ma_get_pivot(mas->node, slot, type);
(&mt_to_node(mn)->mr32)->pivot[slot] = val;
break;
}
- return;
}
static inline void ma_cp_pivot(struct maple_enode *dst,
unsigned char dloc, struct maple_enode *src, unsigned long sloc)
unsigned char gap, unsigned long val)
{
enum maple_type type = mt_node_type(mn);
+
switch (type) {
default:
break;
} else {
unsigned char slot = cnt - 2;
struct maple_node *zero = mn;
+
if (cnt - 1 >= MAPLE_NODE_SLOTS) {
slot /= MAPLE_NODE_SLOTS;
slot--;
node->slot[cnt] = reuse;
} else {
struct maple_node *smn;
+
cnt--;
smn = node->slot[cnt/15];
smn->slot[cnt % 15] = reuse;
mas->node = MAS_ROOT;
ma_set_slot(mas, MAPLE_NODE_SLOTS);
return NULL;
- } else {
- struct maple_enode *root = mt_safe_root(
- mas->tree->ma_root);
- mas->max = mt_node_max(root);
- return root;
}
+ struct maple_enode *root = mt_safe_root(
+ mas->tree->ma_root);
+ mas->max = mt_node_max(root);
+ return root;
}
return mas->node;
}
prev_piv = *last_piv;
}
- if (!_ma_get_rcu_slot(mn, data_end, type)) {
+ if (!_ma_get_rcu_slot(mn, data_end, type))
data_end--;
- } else {
+ else
*last_piv = mas->max;
- }
-
return data_end;
}
unsigned long last_pivot;
unsigned char data_end = ma_data_end(mas, type, &last_pivot);
- *left = (struct maple_enode*)ma_next_alloc(mas);
+ *left = (struct maple_enode *)ma_next_alloc(mas);
*left = mt_mk_node(ma_mnode_ptr(*left), type);
- *right = (struct maple_enode*)ma_next_alloc(mas);
+ *right = (struct maple_enode *)ma_next_alloc(mas);
*right = mt_mk_node(ma_mnode_ptr(*right), type);
if (ma_is_root(mas->node))
if (ma_is_root(mas->node)) {
i = half;
- *left = (struct maple_enode*)ma_next_alloc(mas);
- *right = (struct maple_enode*)ma_next_alloc(mas);
+ *left = (struct maple_enode *)ma_next_alloc(mas);
+ *right = (struct maple_enode *)ma_next_alloc(mas);
goto even_split;
}
- *left = (struct maple_enode*)ma_next_alloc(mas);
+ *left = (struct maple_enode *)ma_next_alloc(mas);
for (i = 0; i < data_end; i++) {
max = ma_get_pivot(mas->node, i);
if ((max - min) > 15) {
if (i >= data_end) {
*left = mt_mk_node(ma_mnode_ptr(*left), maple_dense);
if (mas->last >= mas->min + mt_max[type]) {
- *right = (struct maple_enode*)ma_next_alloc(mas);
+ *right = (struct maple_enode *)ma_next_alloc(mas);
*right = mt_mk_node(ma_mnode_ptr(*right), type);
}
if (!i)
return i;
}
- *right = (struct maple_enode*)ma_next_alloc(mas);
+ *right = (struct maple_enode *)ma_next_alloc(mas);
if (i >= half) {
*left = mt_mk_node(ma_mnode_ptr(*left), maple_dense);
*right = mt_mk_node(ma_mnode_ptr(*right), type);
unsigned long min, piv;
void *ptr;
- if (!sloc) {
+ if (!sloc)
min = mas->min;
- }
- else {
+ else
min = ma_get_pivot(cp->src, sloc - 1) + 1;
- }
piv = min;
-
while (sloc <= cp->src_end) {
unsigned long end;
unsigned char slot, end_slot;
slot = piv - min;
end_slot = end - min;
- for ( ; slot <= end_slot; slot++) {
+ for (; slot <= end_slot; slot++)
ma_set_rcu_slot(cp->dst, slot, ptr);
- }
piv = end + 1;
sloc++;
{
unsigned long max_gap = 0;
unsigned char i;
+
for (i = 0; i < mt_slot_count(mas->node); i++) {
unsigned long gap;
+
gap = ma_get_gap(mas->node, i);
- if ( gap > max_gap) {
+ if (gap > max_gap) {
*slot = i;
max_gap = gap;
}
}
+
return max_gap;
}
static inline void ma_parent_gap(struct ma_state *mas, unsigned char slot,
max_gap = ma_max_gap(mas, &max_slot);
- if (slot == max_slot || max_gap < new) {
- unsigned char pslot = mt_parent_slot(mas->node);
- ma_parent_gap(mas, pslot, new);
- }
+ if (slot == max_slot || max_gap < new)
+ ma_parent_gap(mas, mt_parent_slot(mas->node), new);
}
/* Private
*/
// First non-null entry in mas->node
-static inline unsigned long
-mas_first_node (struct ma_state *mas, unsigned long max)
+static inline unsigned long mas_first_node(struct ma_state *mas,
+ unsigned long max)
{
unsigned char slot = ma_get_slot(mas) - 1;
unsigned char count = mt_slot_count(mas->node);
- while(++slot < count) {
+ while (++slot < count) {
struct maple_enode *mn;
unsigned long pivot;
return mas->max;
}
-static inline unsigned long
-mas_first_entry (struct ma_state *mas, unsigned long max)
+static inline unsigned long mas_first_entry(struct ma_state *mas,
+ unsigned long max)
{
unsigned long pivot;
- while (1)
- {
+ while (1) {
pivot = mas_first_node(mas, max);
if (mas->node == MAS_NONE)
return pivot;
}
-static inline void mas_prev_entry (struct ma_state *mas) {
-}
void ma_destroy_walk(struct maple_enode *mn)
{
struct maple_enode *node;
mt_free(mt_to_node(mn));
}
-static inline void ma_copy (struct ma_state *mas, struct ma_cp *cp)
+static inline void ma_copy(struct ma_state *mas, struct ma_cp *cp)
{
unsigned char sloc = cp->src_start; // src location
unsigned char dloc = cp->dst_start; // dst location
prev = mas->tree->ma_root;
} else {
enum maple_type ptype = mt_parent_enum(mas, mas->node);
+
parent = mt_mk_node(mt_parent(mas->node), ptype);
slot = mt_parent_slot(mas->node);
prev = ma_get_rcu_slot(parent, slot);
mt_to_node(cp.dst)->parent = mt_to_node(cp.src)->parent;
mas->node = cp.dst;
- return;
}
static inline void ma_gap_link(struct ma_state *mas, struct maple_enode *parent,
if (ma_is_root(mas->node)) {
old_parent = full;
- if(mt_is_alloc(mas->tree))
+ if (mt_is_alloc(mas->tree))
ptype = maple_arange_64;
else
ptype = maple_range_64;
p_slot = 0;
} else {
unsigned long last_pivot;
+
p_slot = mt_parent_slot(mas->node);
ma_encoded_parent(mas);
old_parent = mas->node;
// Copy the parents information
if (!ma_is_root(full)) {
unsigned long c_max = mas->max;
+
mas->max = p_max;
// Copy the parent data except p_slot, which will later be
// replaced.
pivot = mas->min + split - 1;
} else {
pivot = mt_node_max(left);
-
if (!p_slot || mt_node_type(left) == maple_dense)
pivot += mas->min;
else
if (mt_is_alloc(mas->tree)) {
if (right) {
unsigned long min = mas->min;
+
mas->node = right;
ma_gap_link(mas, new_parent, p_slot + 1, mas->max);
mas->min = min;
return split;
}
-static inline enum maple_type ma_ptype_leaf(struct ma_state *mas) {
+static inline enum maple_type ma_ptype_leaf(struct ma_state *mas)
+{
enum maple_type pt = mt_node_type(mas->node);
switch (pt) {
{
struct maple_enode *sibling;
unsigned char sibling_slot = slot;
- enum maple_type stype, mt = ma_ptype_leaf(mas);;
+ enum maple_type stype, mt = ma_ptype_leaf(mas);
if (slot > 0)
sibling_slot -= 1;
// FIXME: Check entire range, not what we would insert this time.
if (!overwrite) {
- do
+ do {
if (_ma_get_rcu_slot(mas->node, min++, this_type))
return 0;
- while (min < max);
+ } while (min < max);
}
- do
+ do {
ma_update_rcu_slot(mas->node, min++, entry);
- while (min < max);
+ } while (min < max);
if (max != mas->last - mas->min) {
mas->index = mas->min + max + 1;
}
/* Dense node type */
if (!pivot_cnt) {
- ret = _ma_add_dense(mas, entry, slot, this_type, overwrite, active);
+ ret = _ma_add_dense(mas, entry, slot, this_type, overwrite,
+ active);
if (!ret)
return ret;
goto update_gap;
}
/* Check for splitting */
new_end += ret;
- if (new_end > slot_cnt ) {
+ if (new_end > slot_cnt) {
/* Not enough space in the node */
unsigned char split = ma_split(mas, slot, active);
+
if (mas_is_err(mas))
return 0;
/* Write NULL entry */
ma_set_rcu_slot(mas->node, --slot, NULL);
if (append)
- wmb();
+ wmb(); /* NULL needs to be written before pivot. */
ma_set_pivot(mas->node, slot, mas->index - 1);
if (mt_is_alloc(mas->tree)) {
unsigned long gap = mas->min;
+
if (slot > 0)
gap = ma_get_pivot(mas->node, slot - 1);
gap = (mas->index - 1) - gap;
/* Write the entry */
ma_set_rcu_slot(mas->node, slot, entry);
if (old_end == new_end + 1) // Append.
- wmb();
+ wmb(); /* Entry needs to be visible prior to pivot. */
if (slot < pivot_cnt)
ma_set_pivot(mas->node, slot++, mas->last);
* Find the prev non-null entry at the same level in the tree. The prev value
* will be mas->node[ma_get_slot(mas)] or MAS_NONE.
*/
-static inline void mas_prev_node (struct ma_state *mas, unsigned long min)
+static inline void mas_prev_node(struct ma_state *mas, unsigned long min)
{
int level;
unsigned char slot;
level--;
mas->node = mn;
slot = ma_data_end(mas, mt_node_type(mn), &last_pivot);
- } while(slot-- > 0);
+ } while (slot-- > 0);
ascend:
if (ma_is_root(mas->node))
no_entry:
mas->node = MAS_NONE;
- return;
}
/*
* will be mas->node[ma_get_slot(mas)] or MAS_NONE.
*
*/
-static inline unsigned long
-mas_next_node (struct ma_state *mas, unsigned long max)
+static inline unsigned long mas_next_node(struct ma_state *mas,
+ unsigned long max)
{
int level;
unsigned long start_piv;
}
// Next node entry or return 0 on none.
-static inline bool
-mas_next_nentry (struct ma_state *mas, unsigned long max, unsigned long *piv)
+static inline bool mas_next_nentry(struct ma_state *mas, unsigned long max,
+ unsigned long *piv)
{
unsigned long pivot = mas->min;
unsigned char slot = ma_get_slot(mas);
return false;
}
-static inline unsigned long
-mas_next_entry (struct ma_state *mas, unsigned long max)
+static inline unsigned long mas_next_entry(struct ma_state *mas,
+ unsigned long max)
{
unsigned long pivot = max;
else {
while (mas->node != MAS_NONE) {
unsigned char p_slot;
+
if (mas_next_nentry(mas, max, &pivot))
break;
}
return pivot;
}
-static inline unsigned long
-mas_safe_next_entry (struct ma_state *mas, unsigned long max)
+static inline unsigned long mas_safe_next_entry(struct ma_state *mas,
+ unsigned long max)
{
unsigned long piv;
+
retry:
piv = mas_next_entry(mas, max);
if (mas->node == MAS_NONE)
do {
unsigned long this_gap;
+
if (!i)
min = mas->min;
else
goto ascend;
max = min - 1;
- if (mas->index > max ) {
+ if (mas->index > max) {
mas_set_err(mas, -EBUSY);
return false;
}
if (!ma_is_leaf(type)) { //descend
struct maple_enode *next;
+
next = _ma_get_rcu_slot(mas->node, i, type);
mas->min = min;
mas->max = max;
i = ma_get_slot(mas);
for (; i < pivot_cnt; i++) {
unsigned long this_gap;
+
pivot = _ma_get_pivot(mas->node, i, type);
if (i && !pivot)
goto ascend;
if (!ma_is_leaf(type)) { //descend
struct maple_enode *next;
+
next = _ma_get_rcu_slot(mas->node, i, type);
mas->min = min;
mas->max = max;
// Linear node.
i = mas->index - mas->min;
goto done;
- break;
}
next = _ma_get_rcu_slot(mas->node, i, type);
{
if (!mt_dead_node(mas->node))
return 0;
-
+
mas->index = index;
_mas_walk(mas);
return 1;
if (mas->node == MAS_START) {
_mas_walk(mas);
- do {} while(mas_dead_node(mas, mas->index));
+ do {} while (mas_dead_node(mas, mas->index));
slot = ma_get_slot(mas);
long l_node_cnt = -1, r_node_cnt = 0;
unsigned int r_slot_cnt = 0, slot_cnt = 0;
long node_cnt = 0, nodes = 0;
- MA_STATE(r_mas, mas->tree, mas->last + 1, mas->last + 1);
void *entry;
struct maple_enode *r_node = NULL, *last = NULL;
unsigned char r_slot = 0, slot;
-
-
+ MA_STATE(r_mas, mas->tree, mas->last + 1, mas->last + 1);
slot_cnt = 1 + ma_get_slot(mas);
ma_set_slot(mas, mt_parent_slot(mas->node));
while (mas->node != MAS_NONE) {
if (ma_get_slot(&r_mas) != MAPLE_NODE_SLOTS) {
unsigned long piv;
- r_slot_cnt += ma_data_end(&r_mas, mt_node_type(r_mas.node), &piv);
+
+ r_slot_cnt += ma_data_end(&r_mas, mt_node_type(r_mas.node),
+ &piv);
if (piv != ULONG_MAX) {
unsigned char p_slot = mt_parent_slot(r_mas.node);
+
r_node = r_mas.node;
r_slot = ma_get_slot(&r_mas);
r_slot_cnt -= r_slot;
// Create a new tree.
DEFINE_MTREE(new_tree);
+
MA_STATE(new_mas, &new_tree, 0, 0);
new_mas.alloc = mas->alloc;
else
last = mas->node;
}
- return;
}
static inline bool mas_skip_node(struct ma_state *mas);
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
else
last = mas->node;
}
- return;
}
static inline int ma_root_ptr(struct ma_state *mas, void *entry,
retry:
leaf = _mas_walk(mas);
slot = ma_get_slot(mas);
-
// Clean this node
if (mas_coalesce(mas, 0)) {
if (mas_is_err(mas))
goto exists;
}
-
/* Do the add */
return _ma_add(mas, entry, overwrite, active);
static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot,
unsigned long size, unsigned long *index)
{
- //mas->index is the start address for the search, which may be done with?
- //mas->last is the end address for the search
+ /*
+  * mas->index is the start address for the search,
+  * which may no longer be needed.
+  * mas->last is the end address for the search.
+  */
*index = mas->index;
mas->last = mas->index + size - 1;
}
/* Walk down and set all the previous pivots with NULLs to piv_val */
- while(--slot >= 0 && ma_get_rcu_slot(mas->node, slot) == NULL) {
+ while (--slot >= 0 && ma_get_rcu_slot(mas->node, slot) == NULL) {
ma_set_pivot(mas->node, slot, piv_val);
ret++;
}
- mas_coalesce(mas, ++slot); /* The error on allocation failure can be ignored */
+ /* The error on allocation failure can be ignored */
+ mas_coalesce(mas, ++slot);
return ret;
}
mt->ma_flags = ma_flags;
rcu_assign_pointer(mt->ma_root, NULL);
}
-
EXPORT_SYMBOL(mtree_init);
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
void *entry;
+
MA_STATE(mas, mt, index, index);
rcu_read_lock();
entry = mas_walk(&mas);
return entry;
}
EXPORT_SYMBOL(mtree_load);
+
int mtree_store_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
int mtree_next(struct maple_tree *mt, unsigned long index, unsigned long *next)
{
int ret = -ENOENT;
+
MA_STATE(mas, mt, index, index);
rcu_read_lock();
//mas_walk_next(&mas);
mtree_lock(mt);
destroyed = mt->ma_root;
-
-
- if (xa_is_node(destroyed)) {
+ if (xa_is_node(destroyed))
ma_destroy_walk(destroyed);
- }
+
mt->ma_flags = 0;
rcu_assign_pointer(mt->ma_root, NULL);
mtree_unlock(mt);
void mt_dump_range(unsigned long min, unsigned long max, unsigned int depth)
{
static const char spaces[] = " ";
+
if (min == max)
pr_info("%.*s%lu: ", depth * 2, spaces, min);
else
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", node, last, max, i);
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
break;
}
first = last + 1;
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", node, last, max, i);
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
break;
}
first = last + 1;
{
int ret = -EINVAL;
unsigned long i;
+
ret = mtree_test_store_range(mt, start, end, ptr);
printk("ret %d expected %d\n", ret, expected);
MT_BUG_ON(mt, ret != expected);
if (ret)
return;
- for (i = start; i <= end; i++) {
+ for (i = start; i <= end; i++)
check_load(mt, i, ptr);
- }
}
static noinline void check_insert_range(struct maple_tree *mt,
{
int ret = -EINVAL;
unsigned long i;
+
ret = mtree_test_insert_range(mt, start, end, ptr);
MT_BUG_ON(mt, ret != expected);
if (ret)
return;
- for (i = start; i <= end; i++) {
+ for (i = start; i <= end; i++)
check_load(mt, i, ptr);
- }
}
static noinline void check_insert(struct maple_tree *mt, unsigned long index,
void *ptr)
{
int ret = -EINVAL;
+
ret = mtree_test_insert(mt, index, ptr);
MT_BUG_ON(mt, ret != 0);
}
static noinline void check_erase(struct maple_tree *mt, unsigned long index)
{
int ret = -EINVAL;
+
ret = mtree_test_erase(mt, index);
MT_BUG_ON(mt, ret < 0);
}
unsigned long index, void *ptr)
{
int ret = -EINVAL;
+
ret = mtree_test_insert(mt, index, ptr);
MT_BUG_ON(mt, ret != -EEXIST);
}
struct maple_node *mn, *smn;
int cnt = 0;
+
MA_STATE(mas, mt, 0, 0);
/* Try allocating 3 nodes */
mtree_lock(mt);
- mas_node_cnt(&mas, 3); // request 3 nodes to be allocated.
- MT_BUG_ON(mt, ma_get_alloc_req(&mas) != 3); // Allocation request of 3.
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); // Allocate failed.
+ // request 3 nodes to be allocated.
+ mas_node_cnt(&mas, 3);
+ // Allocation request of 3.
+ MT_BUG_ON(mt, ma_get_alloc_req(&mas) != 3);
+ // Allocate failed.
+ MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
MT_BUG_ON(mt, ma_get_alloc_cnt(&mas) != 3);
/* Try allocating 1 node, then 2 more */
mtree_lock(mt);
- ma_set_alloc_req(&mas, 1); // Set allocation request to 1.
- MT_BUG_ON(mt, ma_get_alloc_req(&mas) != 1); // Check Allocation request of 1.
+ // Set allocation request to 1.
+ ma_set_alloc_req(&mas, 1);
+ // Check Allocation request of 1.
+ MT_BUG_ON(mt, ma_get_alloc_req(&mas) != 1);
mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); // Validate allocation request.
- mn = ma_next_alloc(&mas); // Eat the requested node.
+ // Validate allocation request.
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ // Eat the requested node.
+ mn = ma_next_alloc(&mas);
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, mn->slot[0] != NULL);
MT_BUG_ON(mt, ma_get_alloc_cnt(&mas) != 0);
mt_free(mn);
- mas_node_cnt(&mas, 3); // Allocate 3 nodes, will fail.
- mas_nomem(&mas, GFP_KERNEL); // Drop the lock and allocate 3 nodes.
- MT_BUG_ON(mt, ma_get_alloc_cnt(&mas) != 3); // Ensure 3 are allocated.
- MT_BUG_ON(mt, ma_get_alloc_req(&mas) != 0); // Allocation request of 0.
+ // Allocate 3 nodes, will fail.
+ mas_node_cnt(&mas, 3);
+ // Drop the lock and allocate 3 nodes.
+ mas_nomem(&mas, GFP_KERNEL);
+ // Ensure 3 are allocated.
+ MT_BUG_ON(mt, ma_get_alloc_cnt(&mas) != 3);
+ // Allocation request of 0.
+ MT_BUG_ON(mt, ma_get_alloc_req(&mas) != 0);
mn = mas.alloc;
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, mn->slot[0] == NULL);
MT_BUG_ON(mt, mn->slot[1] == NULL);
- MT_BUG_ON(mt, ma_get_alloc_cnt(&mas) != 3); // Ensure we counted 3.
- mas_nomem(&mas, GFP_KERNEL); // Free.
+ // Ensure we counted 3.
+ MT_BUG_ON(mt, ma_get_alloc_cnt(&mas) != 3);
+ // Free.
+ mas_nomem(&mas, GFP_KERNEL);
- printk("Testing max allocation\n");
- mas_node_cnt(&mas, 127); // Set allocation request to 127.
- mas_nomem(&mas, GFP_KERNEL); // Drop the lock and allocate 3 nodes.
+ // Set allocation request to 127.
+ mas_node_cnt(&mas, 127);
+ // Drop the lock and allocate 3 nodes.
+ mas_nomem(&mas, GFP_KERNEL);
mn = ma_get_alloc(&mas);
MT_BUG_ON(mt, mn == NULL);
cnt++;
- for(int i = 0; i < 7; i++) {
+ for (int i = 0; i < 7; i++) {
smn = mn->slot[i];
MT_BUG_ON(mt, smn == NULL);
cnt++;
for (int j = 0; j < 15; j++) {
- printk("Checking %d %p[%d] [%d]\n", cnt, smn, i, j);
MT_BUG_ON(mt, smn->slot[j] == NULL);
cnt++;
}
for (int j = 0; j < 6; j++) {
MT_BUG_ON(mt, smn->slot[j] == NULL);
cnt++;
-
}
- for(int i = 8; i < 15; i++) {
+ for (int i = 8; i < 15; i++) {
MT_BUG_ON(mt, mn->slot[i] == NULL);
cnt++;
}
- printk("Checked %d\n", cnt);
MT_BUG_ON(mt, ma_get_alloc_cnt(&mas) != 127);
mas_nomem(&mas, GFP_KERNEL); // Free.
for (i = 0; i <= max; i++) {
MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
- for (j = 0; j <= i; j++) {
+ for (j = 0; j <= i; j++)
check_index_load(mt, j);
- }
+
check_load(mt, i + 1, NULL);
}
if (verbose) {
i = huge;
while (i > 4096) {
- check_insert(mt, i, (void*) i);
+ check_insert(mt, i, (void *) i);
for (j = huge; j >= i; j /= 2) {
check_load(mt, j-1, NULL);
- check_load(mt, j, (void*) j);
+ check_load(mt, j, (void *) j);
check_load(mt, j+1, NULL);
}
i /= 2;
i = 4096;
while (i < huge) {
- check_insert(mt, i, (void*) i);
+ check_insert(mt, i, (void *) i);
for (j = i; j >= huge; j *= 2) {
check_load(mt, j-1, NULL);
- check_load(mt, j, (void*) j);
+ check_load(mt, j, (void *) j);
check_load(mt, j+1, NULL);
}
i *= 2;
static noinline void check_mid_split(struct maple_tree *mt)
{
unsigned long huge = 8000UL * 1000 * 1000;
- check_insert(mt, huge, (void*) huge);
- check_insert(mt, 0, (void*) 0);
+
+ check_insert(mt, huge, (void *) huge);
+ check_insert(mt, 0, (void *) 0);
check_lb_not_empty(mt);
}
unsigned long max;
unsigned long index = 0;
void *entry;
+
MA_STATE(mas, mt, 0, 0);
// Insert 0.
mas_reset(&mas);
mas.index = val;
- while ( (entry = mas_find(&mas, 268435456)) != NULL) {
+ while ((entry = mas_find(&mas, 268435456)) != NULL) {
if (val != 64)
MT_BUG_ON(mt, xa_mk_value(val) != entry);
else
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)
{
- // cat /proc/self/maps|awk '{print $1}'| awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ /*
+  * Generated by:
+  * cat /proc/self/maps | awk '{print $1}' |
+  * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+  */
+
unsigned long range[] = {
// Inclusive , Exclusive.
0x565234af2000, 0x565234af4000,
0x565234af9000, // Min
0x7fff58791000, // Max
0x1000, // Size
- 0x7fff5878d << 12, // First rev hole in our data of size 0x1000
+ 0x7fff5878d << 12, // First rev hole of size 0x1000
0, // Return value success.
0x0, // Min
0x0, // Min
0x0, // Max
0x1000, // Size
- 0x0, // First rev hole in our data of size 0x1000
+ 0x0, // First rev hole of size 0x1000
0, // Return value success.
0x0, // Min
0x7F36D510A << 12, // Max
0x4000, // Size
- 0x7F36D5107 << 12, // First rev hole in our data of size 0x4000
+ 0x7F36D5107 << 12, // First rev hole of size 0x4000
0, // Return value success.
// Ascend test.
MT_BUG_ON(mt, !mtree_empty(mt));
- int i, range_cnt = sizeof(range)/sizeof(range[0]);
- int req_range_cnt = sizeof(req_range)/sizeof(req_range[0]);
+ int i, range_cnt = ARRAY_SIZE(range);
+ int req_range_cnt = ARRAY_SIZE(req_range);
- for (i = 0; i < range_cnt; i+=2) {
+ for (i = 0; i < range_cnt; i += 2) {
// Inclusive , Inclusive (with the -1)
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
}
printk("Starting reverse range allocation tests.\n");
- for (i = 0; i < req_range_cnt; i+=5) {
+ for (i = 0; i < req_range_cnt; i += 5) {
mt_dump(mt);
printk("%s %d: alloc %ld in range %ld - %ld\n",
__func__, i,
static noinline void check_alloc_range(struct maple_tree *mt)
{
- // cat /proc/self/maps|awk '{print $1}'| awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ /*
+  * Generated by:
+  * cat /proc/self/maps | awk '{print $1}' |
+  * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+  */
+
unsigned long range[] = {
// Inclusive , Exclusive.
0x565234af2000, 0x565234af4000,
-EBUSY, // Return value failed.
// Test filling entire gap.
- 34148798623<< 12, // Min
+ 34148798623 << 12, // Min
0x7fff587AF000, // Max
0x10000, // Size
34148798632 << 12, // Expected location
34359052178 << 12, // Expected location
-EBUSY, // Return failure.
};
- int i, range_cnt = sizeof(range)/sizeof(range[0]);
- int req_range_cnt = sizeof(req_range)/sizeof(req_range[0]);
+ int i, range_cnt = ARRAY_SIZE(range);
+ int req_range_cnt = ARRAY_SIZE(req_range);
MT_BUG_ON(mt, !mtree_empty(mt));
- for (i = 0; i < range_cnt; i+=2) {
+ for (i = 0; i < range_cnt; i += 2) {
mt_dump(mt);
printk("Starting check insert %d\n", i);
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
}
- for (i = 0; i < req_range_cnt; i+=5) {
+ for (i = 0; i < req_range_cnt; i += 5) {
mt_dump(mt);
printk("%s %d: alloc %ld in range %ld - %ld\n",
__func__, i,
mtree_init(&tree, 0);
check_next_entry(&tree);
-
/* Test ranges (store and insert) */
mtree_init(&tree, 0);