[maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
#if defined(NODE256)
- [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 2,
+ [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
#else
[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2),
#endif
while (offset < mt_slots[type]) {
piv = _mas_safe_pivot(mas, pivots, offset, type);
- if (piv >= mas->max)
+ if ((!piv && offset) || piv >= mas->max)
break;
offset++;
}
trace_mas_rebalance(mas);
- mas_node_cnt(mas, 1 + empty_cnt * 2);
+ mas_node_cnt(mas, 1 + empty_cnt * 3);
if (mas_is_err(mas))
return 0;
{
struct maple_enode *new_node;
- if ((b_node->b_end < mt_min_slot_cnt(mas->node)) &&
- (!mte_is_root(mas->node)) &&
- (mas_mt_height(mas) > 1))
+ if ((b_node->b_end < mt_min_slots[b_node->type]) &&
+ (!mte_is_root(mas->node)) && (mas_mt_height(mas) > 1))
return mas_rebalance(mas, b_node);
- if (b_node->b_end >= mt_slot_count(mas->node)) {
- if (mas_is_err(mas))
- return 0;
-
+ if (b_node->b_end >= mt_slots[b_node->type])
return mas_split(mas, b_node);
- }
if (mas_reuse_node(mas, b_node, end))
goto reused_node;
*
*/
static inline bool mas_is_span_wr(struct ma_state *mas, unsigned long piv,
- void *entry)
+ enum maple_type type, void *entry)
{
if (mas->span_enode) // Already a spanning store.
return true;
/* Writing ULONG_MAX is not a spanning write regardless of the value
* being written as long as the range fits in the node.
*/
- if (mas->last == ULONG_MAX &&
- mas->min <= mas->index &&
- mas->last == mas->max)
+ if ((mas->last == ULONG_MAX) && (mas->last == mas->max))
return false;
- if (!mte_is_leaf(mas->node)) { // Internal nodes.
- if (mas->last < piv) // Fits in the slot.
- return false;
-
- if (entry && piv == mas->last) // Writes a value to the end of the child node
- return false;
- } else { // A leaf node.
+ if (ma_is_leaf(type)) {
+ trace_mas_is_span_wr(mas, piv, entry);
if (mas->last < mas->max) // Fits in the node, but may span slots.
return false;
-
- if (entry && mas->last == mas->max) // Writes to the end of the node but not null.
+ if (entry && (mas->last == mas->max)) // Writes to the end of the node but not null.
+ return false;
+ } else {
+ if (entry && piv == mas->last)
return false;
}
+ trace_mas_is_span_wr(mas, piv, entry);
mas->span_enode = mas->node;
return true;
static inline bool mas_node_walk(struct ma_state *mas, enum maple_type type,
unsigned long *range_min, unsigned long *range_max)
{
- unsigned char i;
- unsigned long min = mas->min, pivot = 0;
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+ unsigned long min = mas->min, pivot = 0;
+ unsigned char i;
bool ret = true;
+ if (ma_is_dense(type)) {
+ // Linear node.
+ // What if mas->index != mas->last?
+ pivot = min = mas->index;
+ i = mas->index = mas->min;
+ goto dense;
+ }
- switch (type) {
- default:
- for (i = mas_offset(mas); i < mt_slots[type]; i++) {
- pivot = _mas_safe_pivot(mas, pivots, i, type);
+ for (i = mas_offset(mas); i < mt_slots[type]; i++) {
+ pivot = _mas_safe_pivot(mas, pivots, i, type);
- if (!pivot && i) {
- if (mas->max < mas->index) {
- i = MAPLE_NODE_SLOTS;
- ret = false;
- }
- pivot = mas->max;
- break;
+ if (!pivot && i) {
+ if (mas->max < mas->index) {
+ i = MAPLE_NODE_SLOTS;
+ ret = false;
}
-
- if (mas->index <= pivot)
- break;
- min = pivot + 1;
+ pivot = mas->max;
+ break;
}
- break;
- case maple_dense:
- // Linear node.
- // What if mas->index != mas->last?
- pivot = min = mas->index;
- i = mas->index = mas->min;
- break;
+ if (mas->index <= pivot)
+ break;
+
+ min = pivot + 1;
}
+dense:
if (ret) {
*range_min = min;
*range_max = pivot;
unsigned long *range_max, void *entry)
{
enum maple_type type;
- struct maple_enode *next;
unsigned char end;
bool ret = false;
if (unlikely(!mas_node_walk(mas, type, range_min, range_max)))
return false;
- if (mas_is_span_wr(mas, *range_max, entry))
+ if (mas_is_span_wr(mas, *range_max, type, entry))
return ma_is_leaf(type);
if (ma_is_leaf(type))
return true;
end = mas_data_end(mas);
- if (end <= mt_min_slots[type])
+ if (end < mt_min_slots[type])
mas_cnt_empty(mas);
- else if (end >= mt_slots[type] - 2)
+ else if (end >= mt_slots[type] - 1)
mas_cnt_full(mas);
else
mas->full_cnt = 0;
- next = mas_get_slot(mas, mas_offset(mas));
// Traverse.
mas->max = *range_max;
mas->min = *range_min;
- if (unlikely(!next))
- return false;
-
- mas->node = next;
+ mas->node = mas_get_slot(mas, mas_offset(mas));
mas_set_offset(mas, 0);
}
return ret;
- /* Node rebalancing may occur due to this store, so there may be two new
+ /* Node rebalancing may occur due to this store, so there may be three new
* entries per level plus a new root.
*/
- node_cnt += 1 + mas_mt_height(mas) * 2;
+ node_cnt += 1 + mas_mt_height(mas) * 3;
mas_node_cnt(mas, node_cnt);
if (mas_is_err(mas))
return 0;
void *entry;
MA_STATE(mas, mt, index, index);
+ trace_mtree_load(&mas);
rcu_read_lock();
entry = mas_load(&mas);
rcu_read_unlock();
{
MA_STATE(mas, mt, index, last);
+ trace_mtree_store_range(&mas, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
void *entry = NULL;
MA_STATE(mas, mt, index, index);
+ trace_mtree_erase(&mas);
mtree_lock(mt);
entry = mas_erase(&mas);
mas->min = p_min;
}
+void mt_validate_nulls(struct maple_tree *mt)
+{
+ void *entry, *last = (void *)1;
+ unsigned char end, offset = 0;
+ void **slots;
+ MA_STATE(mas, mt, 0, 0);
+
+ mas_start(&mas);
+ if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
+ return;
+
+ while (!mte_is_leaf(mas.node))
+ mas_descend(&mas);
+
+ slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
+ end = mas_data_end(&mas);
+ do {
+ entry = slots[offset];
+ if (!last && !entry) {
+ printk("Sequential nulls end at %p[%u]\n",
+ mas_mn(&mas), offset);
+ }
+ MT_BUG_ON(mt, !last && !entry);
+ last = entry;
+ if (offset == end) {
+ mas_next_node(&mas, ULONG_MAX);
+ if (mas_is_none(&mas))
+ return;
+ offset = 0;
+ end = mas_data_end(&mas);
+ slots = ma_slots(mte_to_node(mas.node),
+ mte_node_type(mas.node));
+ } else {
+ offset++;
+ }
+
+ } while (!mas_is_none(&mas));
+}
/*
* validate a maple tree by checking:
* 1. The limits (pivots are within mas->min to mas->max)
rcu_read_lock();
mas_start(&mas);
mas_first_entry(&mas, ULONG_MAX);
- while (mas.node != MAS_NONE) {
+ while (!mas_is_none(&mas)) {
if (!mte_is_root(mas.node)) {
end = mas_data_end(&mas);
if ((end < mt_min_slot_cnt(mas.node)) &&
mas_validate_gaps(&mas);
mas_dfs_postorder(&mas, ULONG_MAX);
}
+ mt_validate_nulls(mt);
rcu_read_unlock();
}
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
- // FIXME: After changing mas_wr_walk to actually count full nodes, this
- // testcase and the one below should cause a height increment.
mtree_init(mt, MAPLE_ALLOC_RANGE);
-// for (i = 0; i <= 1590; i++) {
- for (i = 0; i <= 1420; i++) {
+ for (i = 0; i <= 1590; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
}
// Cause a 3 child split all the way up the tree.
- check_store_range(mt, 9755, 9759, NULL, 0);
- MT_BUG_ON(mt, mt_height(mt) >= 4);
- //MT_BUG_ON(mt, mt_height(mt) < 4);
+ check_store_range(mt, 15519, 15519, NULL, 0);
+
+ MT_BUG_ON(mt, mt_height(mt) < 4);
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
- for (i = 0; i <= 1420; i++) {
+ for (i = 0; i <= 1590; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
}
// triple split across multiple levels.
- check_store_range(mt, 9595, 9599, NULL, 0);
- MT_BUG_ON(mt, mt_height(mt) >= 4);
- //MT_BUG_ON(mt, mt_height(mt) < 4);
+ check_store_range(mt, 9115, 9121, NULL, 0);
+ MT_BUG_ON(mt, mt_height(mt) != 4);
}
static noinline void check_next_entry(struct maple_tree *mt)
check_rev_seq(&tree, 1000, true);
mtree_destroy(&tree);
-
check_lower_bound_split(&tree);
check_upper_bound_split(&tree);
check_mid_split(&tree);