{
struct maple_node *mn, *smn;
- int i, j, total, full_slots, cnt = 0;
+ int i, j, total, full_slots, count = 0;
MA_STATE(mas, mt, 0, 0);
/* Try allocating 3 nodes */
mtree_lock(mt);
// Request 3 nodes to be allocated.
- mas_node_cnt(&mas, 3);
+ mas_node_count(&mas, 3);
// Allocation request of 3.
MT_BUG_ON(mt, mas_alloc_req(&mas) != 3);
// Allocation failed.
MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 3);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
mn = mas_get_alloc(&mas);
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, mn->slot[0] == NULL);
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, mn->slot[0] != NULL);
MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 0);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
ma_free_rcu(mn);
mas.node = MAS_START;
mas_nomem(&mas, GFP_KERNEL);
// Allocate 3 nodes; this will fail.
- mas_node_cnt(&mas, 3);
+ mas_node_count(&mas, 3);
// Drop the lock and allocate 3 nodes.
mas_nomem(&mas, GFP_KERNEL);
// Ensure 3 are allocated.
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 3);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
// Allocation request of 0.
MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
MT_BUG_ON(mt, mn->slot[0] == NULL);
MT_BUG_ON(mt, mn->slot[1] == NULL);
// Ensure we counted 3.
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 3);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 3);
// Free.
mas_nomem(&mas, GFP_KERNEL);
// Set allocation request to 127.
total = 127;
full_slots = (total - MAPLE_NODE_SLOTS) / MAPLE_NODE_SLOTS;
- mas_node_cnt(&mas, total);
+ mas_node_count(&mas, total);
// Drop the lock and allocate 127 nodes.
mas_nomem(&mas, GFP_KERNEL);
mn = mas_get_alloc(&mas);
MT_BUG_ON(mt, mn == NULL);
- cnt++;
+ count++;
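// Walk the allocations: each slot of the first node holds another node, and the first full_slots of those hold further nodes in their own slots.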
for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
j = 0;
smn = mn->slot[i];
MT_BUG_ON(mt, smn == NULL);
- cnt++;
+ count++;
while ((i < full_slots) && (j < MAPLE_NODE_SLOTS)) {
MT_BUG_ON(mt, smn->slot[j] == NULL);
- cnt++;
+ count++;
j++;
}
}
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 127);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 127);
mas_nomem(&mas, GFP_KERNEL); // Free.
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 0);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
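// For every request size from 1 to 127: request, fill, verify the count, then free each node.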
for (i = 1; i < 128; i++) {
- mas_node_cnt(&mas, i); // Request
+ mas_node_count(&mas, i); // Request
mas_nomem(&mas, GFP_KERNEL); // Fill request
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != i); // check request filled
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != i); // check request filled
for (j = i; j > 0; j--) { // Free the requests
mn = mas_next_alloc(&mas); // get the next node.
MT_BUG_ON(mt, mn == NULL);
ma_free_rcu(mn);
}
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 0);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
}
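// As above, but push each allocation onto a second state (mas2) before freeing.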
for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
MA_STATE(mas2, mt, 0, 0);
- mas_node_cnt(&mas, i); // Request
+ mas_node_count(&mas, i); // Request
mas_nomem(&mas, GFP_KERNEL); // Fill request
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != i); // check request filled
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != i); // check request filled
for (j = 1; j <= i; j++) { // Move the allocations to mas2
mn = mas_next_alloc(&mas); // get the next node.
MT_BUG_ON(mt, mn == NULL);
mas_push_node(&mas2, (struct maple_enode *)mn);
- MT_BUG_ON(mt, mas_alloc_cnt(&mas2) != j);
+ MT_BUG_ON(mt, mas_alloc_count(&mas2) != j);
}
- MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 0);
- MT_BUG_ON(mt, mas_alloc_cnt(&mas2) != i);
+ MT_BUG_ON(mt, mas_alloc_count(&mas) != 0);
+ MT_BUG_ON(mt, mas_alloc_count(&mas2) != i);
for (j = i; j > 0; j--) { // Free the requests
- MT_BUG_ON(mt, mas_alloc_cnt(&mas2) != j);
+ MT_BUG_ON(mt, mas_alloc_count(&mas2) != j);
mn = mas_next_alloc(&mas2); // get the next node.
MT_BUG_ON(mt, mn == NULL);
ma_free_rcu(mn);
}
- MT_BUG_ON(mt, mas_alloc_cnt(&mas2) != 0);
+ MT_BUG_ON(mt, mas_alloc_count(&mas2) != 0);
}
mtree_unlock(mt);
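// Debug verbosity for the erase2 tests; higher values dump the tree and print extra state.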
#define check_erase2_debug 0
void *mas_next(struct ma_state *mas, unsigned long max);
// Calculate the overwritten entries.
-int mas_ce2_over_cnt(struct ma_state *mas_start, struct ma_state *mas_end,
+int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end,
void *s_entry, unsigned long s_min,
void *e_entry, unsigned long e_max,
unsigned long *set, int i, bool null_entry)
{
- int cnt = 0, span = 0;
+ int count = 0, span = 0;
unsigned long retry = 0;
void *entry;
retry++;
continue;
}
- cnt++;
+ count++;
span++;
entry = mas_next(mas_start, mas_end->last);
}
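// Adjust the count for entries that are only partially overwritten at the boundaries of the span.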
if (null_entry) {
// Check splitting end.
if (e_entry && (e_max > mas_end->last))
- cnt--;
+ count--;
// Check overwrite of entire start.
if (s_entry && (s_min == mas_start->index))
- cnt++;
+ count++;
} else { // !null_entry (store)
bool esplit = e_max > mas_end->last;
bool ssplit = s_min != mas_start->index;
if (s_entry && e_entry) {
if (esplit && ssplit)
- cnt--;
+ count--;
else if (ssplit)
- cnt--;
+ count--;
else if (esplit) {
if (span)
- cnt--;
+ count--;
}
} else if (s_entry && !e_entry) {
if (ssplit)
- cnt--;
+ count--;
} else if (!s_entry && e_entry) {
if (esplit)
- cnt--;
- cnt--;
+ count--;
+ count--;
} else {
- cnt--;
+ count--;
}
}
- return cnt;
+ return count;
}
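// Apply the scripted operations in set[] and track the expected number of entries in the tree.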
static noinline void check_erase2_testset(struct maple_tree *mt,
unsigned long *set, unsigned long size)
{
- int entry_cnt = 0;
+ int entry_count = 0;
int check = 0;
void *foo;
unsigned long addr = 0;
switch (set[i]) {
case SNULL:
if ((s_min == set[i+1]) && (s_max == set[i+2])) {
- entry_cnt--;
+ entry_count--;
} else if ((s_min != set[i+1]) && (s_max != set[i+2])) {
- entry_cnt++;
+ entry_count++;
} else if ((mas_start.node != mas_end.node) ||
(mas_offset(&mas_start) != mas_offset(&mas_end))) {
- entry_cnt -=
- mas_ce2_over_cnt(&mas_start, &mas_end,
+ entry_count -=
+ mas_ce2_over_count(&mas_start, &mas_end,
s_entry, s_min,
e_entry, e_max, set, i,
true);
case STORE:
value = xa_mk_value(set[i + 1]);
if (mas_offset(&mas_start) > mt_slot_count(mas_start.node)) {
- entry_cnt++; // appending an entry.
+ entry_count++; // appending an entry.
} else if ((s_min == e_min) && (s_max == e_max)) {
- if (!entry_cnt)
- entry_cnt++;
+ if (!entry_count)
+ entry_count++;
else if (s_entry) {
if (e_max > mas_end.last)
- entry_cnt++;
+ entry_count++;
if (s_min < mas_start.index)
- entry_cnt++;
+ entry_count++;
} else {
- entry_cnt++;
+ entry_count++;
}
} else {
- entry_cnt -=
- mas_ce2_over_cnt(&mas_start, &mas_end,
+ entry_count -=
+ mas_ce2_over_count(&mas_start, &mas_end,
s_entry, s_min,
e_entry, e_max, set, i,
false);
if (!s_entry)
break;
check_erase(mt, set[i+1], xa_mk_value(set[i+1]));
- entry_cnt--;
+ entry_count--;
break;
}
mt_validate(mt);
- if (entry_cnt)
+ if (entry_count)
MT_BUG_ON(mt, !mt_height(mt));
#if check_erase2_debug > 1
mt_dump(mt);
pr_err("mt: %lu -> %p\n", addr+1, foo);
#endif
check++;
- if (check > entry_cnt)
+ if (check > entry_count)
break;
}
#if check_erase2_debug > 2
- pr_err("mt_for_each %d and cnt %d\n", check, entry_cnt);
+ pr_err("mt_for_each %d and count %d\n", check, entry_count);
#endif
- MT_BUG_ON(mt, check != entry_cnt);
+ MT_BUG_ON(mt, check != entry_count);
check = 0;
addr = 0;
pr_err("mas: %lu -> %p\n", mas.index, foo);
#endif
check++;
- if (check > entry_cnt)
+ if (check > entry_count)
break;
}
rcu_read_unlock();
#if check_erase2_debug > 2
- pr_err("mas_for_each %d and cnt %d\n", check, entry_cnt);
+ pr_err("mas_for_each %d and count %d\n", check, entry_count);
mt_validate(mt);
#endif
- MT_BUG_ON(mt, check != entry_cnt);
+ MT_BUG_ON(mt, check != entry_count);
MT_BUG_ON(mt, mtree_load(mas.tree, 0) != NULL);
}
*/
};
- int cnt = 0;
+ int count = 0;
void *ptr = NULL;
MA_STATE(mas, mt, 0, 0);
mas_reset(&mas);
mas.tree = mt;
- cnt = 0;
+ count = 0;
mas.index = 0;
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set12, ARRAY_SIZE(set12));
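// Count the live (non-zero) entries left after replaying set12.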
mas_for_each(&mas, entry, ULONG_MAX) {
if (xa_is_zero(entry))
continue;
- BUG_ON(cnt > 12);
- cnt++;
+ BUG_ON(count > 12);
+ count++;
}
mtree_destroy(mt);
};
- int i, range_cnt = ARRAY_SIZE(range);
- int req_range_cnt = ARRAY_SIZE(req_range);
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
unsigned long min = 0;
MA_STATE(mas, mt, 0, 0);
mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
GFP_KERNEL);
#define DEBUG_REV_RANGE 0
- for (i = 0; i < range_cnt; i += 2) {
+ for (i = 0; i < range_count; i += 2) {
/* Inclusive, Inclusive (with the -1) */
#if DEBUG_REV_RANGE
mas_reset(&mas);
}
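// Each reverse allocation request is 5 values: range start, range end, size, expected location, expected return.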
- for (i = 0; i < req_range_cnt; i += 5) {
+ for (i = 0; i < req_range_count; i += 5) {
#if DEBUG_REV_RANGE
pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n",
req_range[i] >> 12,
34359052178 << 12, // Expected location
-EBUSY, // Return failure.
};
- int i, range_cnt = ARRAY_SIZE(range);
- int req_range_cnt = ARRAY_SIZE(req_range);
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
unsigned long min = 0x565234af2000;
mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
GFP_KERNEL);
- for (i = 0; i < range_cnt; i += 2) {
+ for (i = 0; i < range_count; i += 2) {
#define DEBUG_ALLOC_RANGE 0
#if DEBUG_ALLOC_RANGE
pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
min = holes[i+1];
mas_reset(&mas);
}
- for (i = 0; i < req_range_cnt; i += 5) {
+ for (i = 0; i < req_range_count; i += 5) {
#if DEBUG_ALLOC_RANGE
pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n",
i/5, req_range[i] >> 12, req_range[i + 1] >> 12,
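// Count DFS preorder steps over trees populated with a 0..max sequence.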
static void check_dfs_preorder(struct maple_tree *mt)
{
- int count = 0;
+ unsigned long count = 0, max = 1000;
MA_STATE(mas, mt, 0, 0);
- check_seq(mt, 1000, false);
+ check_seq(mt, max, false);
do {
count++;
mas_dfs_preorder(&mas);
} while(!mas_is_none(&mas));
// 68 + MAS_START = 69
- printk("count %u\n", count);
+ //printk("count %lu\n", count);
MT_BUG_ON(mt, count != 69);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
mas_reset(&mas);
count = 0;
- check_seq(mt, 1000, false);
+ check_seq(mt, max, false);
do {
count++;
mas_dfs_preorder(&mas);
} while(!mas_is_none(&mas));
- printk("count %u\n", count);
+ //printk("count %lu\n", count);
// 71 + MAS_START = 72
MT_BUG_ON(mt, count != 72);
mtree_destroy(mt);