mn = mas_get_alloc(ms);
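+ // ms->alloc is the first allocated node; its slots 0-14 hold up to 15
+ // more nodes, and once those are full each of those nodes holds up to
+ // 15 further allocations (the cnt / 15 and cnt % 15 indexing below).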
if (cnt == 1) {
ms->alloc = NULL;
- } else {
- unsigned char slot = cnt - 2;
- struct maple_node *zero = mn;
-
- if (cnt - 1 >= MAPLE_NODE_SLOTS) {
- slot /= MAPLE_NODE_SLOTS;
- slot--;
- }
- smn = ma_mnode_ptr(mn->slot[slot]);
- if (cnt - 1 >= MAPLE_NODE_SLOTS) {
- slot = ((cnt - 1) % MAPLE_NODE_SLOTS);
- zero = smn;
- smn = ma_mnode_ptr(smn->slot[slot]);
- }
- zero->slot[slot] = NULL;
- mn = smn;
+ } else if (cnt <= 16) {
+ cnt -= 2;
+ smn = mn->slot[cnt];
+ mn->slot[cnt] = NULL;
+ mn = smn;
+ } else if (cnt > 16) {
+ cnt -= 2;
+ smn = mn->slot[(cnt / 15) - 1];
+ mn = smn->slot[cnt % 15];
+ smn->slot[cnt % 15] = NULL;
}
return mn;
cnt = mas_get_alloc_cnt(mas);
if (cnt == 0) {
mas->alloc = reuse;
- } else if (cnt <= 16) {
+ } else if (cnt <= 15) {
cnt--;
node->slot[cnt] = reuse;
} else {
struct maple_node *smn;
cnt--;
- smn = node->slot[cnt/15];
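+ // Nodes beyond the first 16 spill into the nodes already held in the
+ // top level: with 16 allocated the new node lands in slot[0]->slot[0],
+ // with 30 allocated it lands in slot[0]->slot[14].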
+ smn = node->slot[(cnt/15) - 1];
smn->slot[cnt % 15] = reuse;
}
- if (cnt != mas_get_alloc_cnt(mas) + 1)
- BUG_ON(0);
}
static inline void mas_node_node(struct ma_state *ms, gfp_t gfp)
{
if (p_end - coalesce >= mt_slots[ptype] - 1) {
/* Must split the parent */
split = mas_split(mas, p_slot, active);
if (mas_is_err(mas))
return 0;
if (split < p_slot)
- p_slot -= split + 1;
+ p_slot -= split;
// Split will return the parent.
old_parent = mas->node;
mas_set_slot(mas, p_slot);
}
mas_node_cnt(mas, 3);
- if (mas_is_err(mas)) {
+ if (mas_is_err(mas))
return 0;
- }
// Allocations.
new_parent = mt_mk_node(mas_next_alloc(mas), ptype);
r_max = p_max;
// the node type for the children types.
// Node types must be set to copy data into them.
- mas_split_data(mas, left, right, split, r_max);
+ mas_split_data(mas, left, right, split - 1, r_max);
if (right) {
- pivot = mte_get_pivot(left, split);
+ pivot = mte_get_pivot(left, split - 1);
if (!pivot) // dense node
- pivot = mas->min + split - 1;
+ pivot = mas->min + split - 2;
} else {
pivot = mt_node_max(left);
if (!p_slot || mte_node_type(left) == maple_dense)
bool append = false;
bool split = false;
bool null_entry = false;
+ bool reuse_null_end = false;
int old_end = mas_data_end(mas, this_type, &last_piv, &coalesce);
int new_end = old_end;
int ret = 0;
// Possible pivot before the new range.
data_slot = slot;
if (mas->index > min) {
if (!slot) { // storing to slot 0 means we need the null.
ret++;
} else if (append) { // Appending will need a null.
ret++;
} else { // a starting null is only needed if there isn't one
// there.
struct maple_enode *slot_val;
slot_val = mte_get_rcu_slot(mas->node, slot);
ret++;
}
+ // If there is a null at the end already, then it can be reused.
+ if (last_piv > mas->last && !mte_get_rcu_slot(mas->node, old_end))
+ reuse_null_end = true;
+
/* Check for splitting */
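+ // One slot for the new entry itself, plus the extra slots counted in
+ // ret, minus anything recovered by coalescing.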
new_end += ret - coalesce + 1;
- if (new_end == slot_cnt && mas->max == mt_node_max(mas->node))
+ if (new_end == slot_cnt && mas->max == mt_node_max(mas->node) &&
+ !reuse_null_end)
split = true; // Need a NULL for the maximum range.
else if (new_end > slot_cnt)
split = true; // There is not enough space.
if (mas_is_err(mas))
return 0;
- split++;
if (split <= slot)
slot = slot - split;
static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
static inline void mas_next_slot(struct ma_state *mas, unsigned long max)
- __must_hold(ms->tree->lock)
+ __must_hold(mas->tree->lock)
{
unsigned char slot;
while (1) {
slot = mte_parent_slot(mas->node);
walk_again:
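+ // The root has no parent, so there is nothing further up to walk.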
+ if (mte_is_root(mas->node))
+ goto no_entry;
+
mas_encoded_parent(mas);
if (mas->max > max)
goto no_entry;
} else if (slot < mt_slot_count(mas->node)) {
slot++;
goto walk_down;
+ } else if (mte_is_root(mas->node)) {
+ goto no_entry;
} else {
goto walk_again;
}
unsigned long *range_start)
{
void *entry = NULL;
+ unsigned long range_max;
if (mas->node && !mas_searchable(mas))
return NULL;
- if (!mas->node || mas_is_start(mas)) { // First run.
- unsigned long range_max;
-
+ if (!mas->node || mas_is_start(mas)) // First run.
entry = mas_range_load(mas, range_start, &range_max);
- }
if (entry)
return entry;
pivot = _mas_get_safe_pivot(mas, i, type);
/* End of data in this leaf */
- if (i && !pivot)
- break;
+ if (i && !pivot) {
+ if (min > mas->max)
+ break;
+ pivot = mas->max;
+ }
/* Not within lower bounds */
if (mas->index > pivot)
max = pivot;
break;
}
+
min = pivot + 1;
}
goto done;
mas->node = next;
+ mas_set_slot(mas, 0);
}
done:
mas_set_slot(mas, i);
node_cnt = 1; // Root node.
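+ // Account for every level of the tree; e.g. nodes == 16 reserves
+ // 1 + 16 + 4 + 1 = 22 nodes.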
while (nodes) {
node_cnt += nodes;
- nodes /= 8;
+ nodes /= 4;
}
+
// FIXME: When splitting & reusing we need an extra node.
mas_node_cnt(mas, node_cnt + 1);
if (mas_is_err(mas))
mas_set_slot(mas, mte_parent_slot(mas->node));
mas_next_node(mas, mas->index);
slot = 0;
- } while (mas->node != MAS_NONE && mas->min < mas->index);
+ } while (!mas_is_none(mas) && mas->min < mas->index);
// Insert the new value.
new_mas.index = mas->index;
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 127);
mas_nomem(&mas, GFP_KERNEL); // Free.
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
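+ // Request 1..127 nodes, check each request is filled, then pop and
+ // free every node.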
+ for (int i = 1; i < 128; i++) {
+ int j;
+ mas_node_cnt(&mas, i); // Request
+ mas_nomem(&mas, GFP_KERNEL); // Fill request
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // check request filled
+ for (j = i; j > 0; j--) { // Free the requests
+ mn = mas_next_alloc(&mas); // get the next node.
+ MT_BUG_ON(mt, mn == NULL);
+ ma_free(mn);
+ }
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
+ }
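+ // As above, but hand each allocated node to a second ma_state with
+ // mas_push_node() and check both allocation counts along the way.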
+ for (int i = 1; i < 128; i++) {
+ int j;
+ MA_STATE(mas2, mt, 0, 0);
+
+ mas_node_cnt(&mas, i); // Request
+ mas_nomem(&mas, GFP_KERNEL); // Fill request
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != i); // check request filled
+ for (j = 1; j <= i; j++) { // Move the allocations to mas2
+ mn = mas_next_alloc(&mas); // get the next node.
+ MT_BUG_ON(mt, mn == NULL);
+ mas_push_node(&mas2, (struct maple_enode *)mn);
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != j);
+ }
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != i);
+
+ for (j = i; j > 0; j--) { // Free the requests
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != j);
+ mn = mas_next_alloc(&mas2); // get the next node.
+ MT_BUG_ON(mt, mn == NULL);
+ ma_free(mn);
+ }
+ MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != 0);
+ }
mtree_unlock(mt);
mtree_destroy(mt);
unsigned long huge = 8000UL * 1000 * 1000;
check_insert(mt, huge, (void *) huge);
- check_insert(mt, 0, (void *) 0);
+ check_insert(mt, 0, xa_mk_value(0));
check_lb_not_empty(mt);
}
check_load(mt, 5018, NULL);
erase_check_load(mt, 3);
+ mt_set_non_kernel(1);
erase_check_erase(mt, 2); // erase 5017 to check append
erase_check_erase(mt, 0); // erase 5015 to check append
erase_check_insert(mt, 4); // 1000 < Should not split.
switch(set[i]) {
case STORE:
if (!mtree_test_insert_range(mt, set[i + 1],
- set[i + 2] - 1, &set))
+ set[i + 2] - 1,
+ xa_mk_value(set[i+1])))
entry_cnt++;
else
- erase_check_store_range(mt, set, i + 1, &set);
+ erase_check_store_range(mt, set, i + 1,
+ xa_mk_value(set[i+1]));
break;
case ERASE:
- check_erase(mt, set[i+1], &set);
+ check_erase(mt, set[i+1], xa_mk_value(set[i+1]));
entry_cnt--;
break;
}
// These tests were pulled from kvm tests.
static noinline void check_erase2_sets(struct maple_tree *mt)
{
+ void *entry;
+ MA_STATE(mas, mt, 0, 0);
unsigned long start = 0;
unsigned long set[] = {
STORE, 140737488347136, 140737488351231,
ERASE, 47135835840512, 47135835893759,
STORE, 47135835840512, 47135835885567,
STORE, 47135835885568, 47135835893759,
+ };
+
+ unsigned long set4[] = {
+STORE, 140737488347136, 140737488351232,
+STORE, 140728251703296, 140737488351232,
+ERASE, 140728251703296, 140737488351232,
+STORE, 140728251703296, 140728251707392,
+STORE, 94668429205504, 94668429377536,
+ERASE, 94668429205504, 94668429377536,
+STORE, 94668429205504, 94668429221888,
+STORE, 94668429221888, 94668429377536,
+ERASE, 94668429221888, 94668429377536,
+STORE, 94668429221888, 94668429324288,
+STORE, 94668429324288, 94668429365248,
+STORE, 94668429365248, 94668429377536,
+STORE, 47646523273216, 47646523445248,
+ERASE, 47646523273216, 47646523445248,
+STORE, 47646523273216, 47646523277312,
+STORE, 47646523277312, 47646523445248,
+ERASE, 47646523277312, 47646523445248,
+STORE, 47646523277312, 47646523400192,
};
+
+ mt_set_non_kernel(3);
check_erase2_testset(mt, set, ARRAY_SIZE(set));
mtree_destroy(mt);
mt_set_non_kernel(2);
mtree_init(mt, 0);
check_erase2_testset(mt, set3, ARRAY_SIZE(set3));
+ mtree_destroy(mt);
+ mtree_init(mt, 0);
+ check_erase2_testset(mt, set4, ARRAY_SIZE(set4));
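+ // Walk the whole tree under RCU, skipping retry entries, to check that
+ // iteration still works after the stores and erases above.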
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (mas_retry(&mas, entry))
+ continue;
+ }
+ rcu_read_unlock();
+ mtree_destroy(mt);
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)
{