pivots = ma_pivots(a_node, a_type);
a_enode = mt_mk_node(a_node, a_type);
- if (unlikely(p_enode == a_enode)) {
- // Dead node.
- pr_err("Failed on node %p (%p)\n", mas_mn(mas),
- a_enode);
- //FIXME: Restart and retry if the lock is held.
- MT_BUG_ON(mas->tree, p_enode == a_enode);
- }
+ if (unlikely(p_enode == a_enode))
+ return; // Dead node must be handled at a higher level.
if (!set_min && a_slot) {
set_min = true;
mas->max = max;
mas->min = min;
+ return;
}
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
mas_parent_gap(mas, pslot, max_gap);
}
-/*
- * mas_first_entry() - Go the first leaf and find the first entry.
- *
- * @mas: the maple state.
- * @limit: the maximum index to check.
- * Returns: The start of the range.
- */
-static inline void *mas_first_entry(struct ma_state *mas,
- unsigned long limit, unsigned long *r_start)
-{
- unsigned long max;
- unsigned long range_start;
- unsigned char offset;
- unsigned long *pivots;
- struct maple_node *mn;
- void **slots;
- enum maple_type mt;
- void *entry = NULL;
-
- range_start = mas->min;
- max = mas->max;
- while (likely(!mte_is_leaf(mas->node))) {
- mn = mas_mn(mas);
- mt = mte_node_type(mas->node);
- slots = ma_slots(mn, mt);
- pivots = ma_pivots(mn, mt);
- max = pivots[0];
- mas->node = mas_slot(mas, slots, 0);
- }
-
- mas->max = max;
- mn = mas_mn(mas);
- mt = mte_node_type(mas->node);
- slots = ma_slots(mn, mt);
- /* 0 or 1 must be set */
- offset = 0;
- if (range_start > limit)
- goto none;
-
- entry = mas_slot(mas, slots, offset);
- if(likely(entry))
- goto done;
-
- pivots = ma_pivots(mn, mt);
- range_start = pivots[0] + 1;
-
- if (range_start > limit)
- goto none;
-
- entry = mas_slot(mas, slots, offset);
- if(likely(entry))
- goto done;
-
-none:
- mas->node = MAS_NONE;
-done:
- mas->offset = offset;
- *r_start = range_start;
- return entry;
-}
-
/*
* mas_adopt_children() - Set the parent pointer of all nodes in @parent to
* @parent with the slot encoded.
}
/*
- * mas_next_sibling() - Find the next node with the same parent.
+ * mas_next_sibling() - Find the next node with the same parent.
*
* @mas: the maple state
* Returns true if there is a next sibling, false otherwise.
return true;
next = mas_slot(mas, ma_slots(mas_mn(mas), type), mas->offset);
+ if (unlikely(mte_dead_node(mas->node)))
+ return false;
+
if (unlikely(!next))
return false;
return content;
}
-static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
/*
* mas_prev_node() - Find the prev non-null entry at the same level in the
* tree. The prev value will be mas->node[mas->offset] or MAS_NONE.
*/
static inline void mas_prev_node(struct ma_state *mas, unsigned long limit)
{
- unsigned long start_piv;
enum maple_type mt;
int offset, level;
void **slots;
if (mte_is_root(mas->node))
goto no_entry;
- start_piv = mas->index;
-restart_prev_node:
level = 0;
do {
if (mte_is_root(mas->node))
// Walk up.
offset = mte_parent_slot(mas->node);
mas_ascend(mas);
+ if (unlikely(mte_dead_node(mas->node)))
+ return;
+
level++;
- if (mas_dead_node(mas, start_piv))
- goto restart_prev_node;
} while (!offset);
offset--;
goto no_entry;
while (level > 1) {
- level--;
mas->node = mas_slot(mas, slots, offset);
- if (mas_dead_node(mas, start_piv))
- goto restart_prev_node;
+ if (unlikely(mte_dead_node(mas->node)))
+ return;
+
+ level--;
mt = mte_node_type(mas->node);
node = mas_mn(mas);
slots = ma_slots(node, mt);
}
}
- mas->offset = offset;
mas->node = mas_slot(mas, slots, offset);
+ if (unlikely(mte_dead_node(mas->node)))
+ return;
+
+ mas->offset = offset;
if (offset)
mas->min = pivots[offset - 1] + 1;
- if (mas_dead_node(mas, start_piv))
- goto restart_prev_node;
return;
no_entry:
mas->node = MAS_NONE;
+ return;
+
}
/*
static inline unsigned long mas_next_node(struct ma_state *mas,
unsigned long max)
{
- unsigned long start_piv, min, pivot;
+ unsigned long min, pivot;
unsigned long *pivots;
struct maple_node *node;
int level = 0;
goto no_entry;
// Save the location in case of dead node.
- start_piv = mas->index;
-
-restart_next_node:
level = 0;
do {
if (mte_is_root(mas->node))
if (min > max)
goto no_entry;
mas_ascend(mas);
- if (unlikely(mas_dead_node(mas, start_piv)))
- goto restart_next_node;
+ if (unlikely(mte_dead_node(mas->node)))
+ return mas->max;
level++;
end = mas_data_end(mas);
pivot = _mas_safe_pivot(mas, pivots, ++offset, mt);
// Descend, if necessary.
while (unlikely(level > 1)) {
- level--;
mas->node = mas_slot(mas, slots, offset);
+ if (unlikely(mte_dead_node(mas->node)))
+ return mas->max;
+
+ level--;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
slots = ma_slots(node, mt);
}
mas->node = mas_slot(mas, slots, offset);
- if (unlikely(mas_dead_node(mas, start_piv)))
- goto restart_next_node;
+ if (unlikely(mte_dead_node(mas->node)))
+ return mas->max;
+
mas->min = min;
mas->max = pivot;
return mas->max;
return entry;
}
+/*
+ * _mas_walk(): A walk that supports returning the range in which an
+ * index is located.
+ *
+ * @mas: The maple state; mas->index is the index to walk to.
+ * @range_min: Set to the minimum of the range containing mas->index.
+ * @range_max: Set to the maximum of the range containing mas->index.
+ * Returns: true if the walk landed on an entry location (or the tree holds
+ * a single root pointer at index 0), false otherwise.  On failure
+ * mas->offset is set to MAPLE_NODE_SLOTS.
+ */
+static inline bool _mas_walk(struct ma_state *mas, unsigned long *range_min,
+		unsigned long *range_max)
+{
+
+	void *entry;
+	bool ret;
+
+retry:
+	ret = false;
+	entry = mas_start(mas);
+	if (entry)	// Entry found without walking (see mas_start()).
+		return true;
+
+	if (mas_is_none(mas))	// Empty tree.
+		goto not_found;
+
+	if (mas_is_ptr(mas)) {	// Single pointer stored at index 0.
+		*range_min = *range_max = 0;
+		if (!mas->index)
+			return true;
+
+		goto not_found;
+	}
+
+	ret = __mas_walk(mas, range_min, range_max);
+
+	// If the walk raced with a writer and ended on a dead node,
+	// restart the whole walk from the top of the tree.
+	if (unlikely(mte_dead_node(mas->node))) {
+		mas->node = MAS_START;
+		goto retry;
+	}
+
+	return ret;
+
+not_found:
+	mas->offset = MAPLE_NODE_SLOTS;
+	return false;
+}
+
+/*
+ * mas_dead_node() - Check if the maple state is pointing to a dead node.
+ *
+ * @mas: The maple state
+ * @index: The index to restore in @mas.
+ * Return 1 if the node was dead and @mas was restarted and re-walked to
+ * @index, 0 otherwise.
+ */
+static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
+{
+	unsigned long range_max, range_min;
+
+	if (unlikely(!mas_searchable(mas)))
+		return 0;
+
+	if (likely(!mte_dead_node(mas->node)))
+		return 0;
+
+	// Dead node: restore the caller's index and re-walk from the root
+	// so the caller can retry from a valid location.
+	mas->index = index;
+	mas->node = MAS_START;
+	_mas_walk(mas, &range_min, &range_max);
+	return 1;
+}
+
+/*
+ * mas_first_entry() - Go to the first leaf and find the first entry.
+ *
+ * @mas: the maple state.
+ * @limit: the maximum index to check.
+ * @r_start: set to the start of the range of the returned entry.
+ * Returns: The first entry at or under @limit, or NULL if none was found.
+ */
+static inline void *mas_first_entry(struct ma_state *mas,
+		unsigned long limit, unsigned long *r_start)
+{
+	unsigned long max;
+	unsigned long range_start;
+	unsigned char offset;
+	unsigned long *pivots;
+	struct maple_node *mn;
+	void **slots;
+	enum maple_type mt;
+	void *entry = NULL;
+
+	range_start = mas->min;
+	max = mas->max;
+restart:
+	// Descend along slot 0 to the left-most leaf.
+	while (likely(!mte_is_leaf(mas->node))) {
+		mn = mas_mn(mas);
+		mt = mte_node_type(mas->node);
+		slots = ma_slots(mn, mt);
+		pivots = ma_pivots(mn, mt);
+		max = pivots[0];
+		mas->node = mas_slot(mas, slots, 0);
+		// A racing writer may have killed this node; restart.
+		if (unlikely(mas_dead_node(mas, range_start)))
+			goto restart;
+	}
+
+	mas->max = max;
+	mn = mas_mn(mas);
+	mt = mte_node_type(mas->node);
+	slots = ma_slots(mn, mt);
+	/* 0 or 1 must be set */
+	offset = 0;
+	if (range_start > limit)
+		goto none;
+
+	entry = mas_slot(mas, slots, offset);
+	if (likely(entry))
+		goto done;
+
+	// Slot 0 was empty; the next range starts after the first pivot.
+	pivots = ma_pivots(mn, mt);
+	range_start = pivots[0] + 1;
+
+	if (range_start > limit)
+		goto none;
+
+	// Check slot 1 this time instead of re-reading slot 0.
+	offset = 1;
+	entry = mas_slot(mas, slots, offset);
+	if (likely(entry))
+		goto done;
+
+none:
+	mas->node = MAS_NONE;
+done:
+	mas->offset = offset;
+	*r_start = range_start;
+	return entry;
+}
/*
*
* __mas_next() Set the @mas->node to the next entry and the range_start to
void *entry = NULL;
struct maple_enode *prev_node = mas->node;
unsigned char offset = mas->offset;
- unsigned long index = mas->index;
+ unsigned long last = mas->last;
enum maple_type mt = mte_node_type(mas->node);
unsigned long r_start;
- mas->offset++;
retry:
+ mas->offset++;
if (unlikely(mas->offset >= mt_slots[mt]))
goto next_node;
break;
if (likely(entry)) {
- if (unlikely(mas_dead_node(mas, index)))
+ if (unlikely(mas_dead_node(mas, last)))
goto retry;
mas->index = r_start;
prev_node = mas->node;
offset = mas->offset;
mas_next_node(mas, limit);
+ if (unlikely(mas_dead_node(mas, last)))
+ goto retry;
+
mas->offset = 0;
mt = mte_node_type(mas->node);
}
{
void *entry;
+ unsigned long index = mas->index;
+
+retry:
while (likely(!mas_is_none(mas))) {
entry = mas_prev_nentry(mas, limit);
if (likely(entry))
return entry;
mas_prev_node(mas, limit);
+ if (unlikely(mas_dead_node(mas, index)))
+ goto retry;
+
mas->offset = mt_slot_count(mas->node);
}
return true;
}
- //descend
+ // descend, only happens under lock.
mas->node = mas_slot(mas, slots, offset);
mas->min = min;
mas->max = max;
return found;
}
-/*
- * _mas_walk(): A walk that supports returning the range in which an
- * index is located.
- *
- */
-static inline bool _mas_walk(struct ma_state *mas, unsigned long *range_min,
- unsigned long *range_max)
-{
-
- void *entry = mas_start(mas);
-
- if (entry)
- return true;
-
- if (mas_is_none(mas))
- goto not_found;
-
- if (mas_is_ptr(mas)) {
- *range_min = *range_max = 0;
- if (!mas->index)
- return true;
-
- goto not_found;
- }
-
- return __mas_walk(mas, range_min, range_max);
-
-not_found:
- mas->offset = MAPLE_NODE_SLOTS;
- return false;
-}
-
-
-static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
-{
- unsigned long range_max, range_min;
-
- if (!mas_searchable(mas))
- return 0;
-
- if (!mte_dead_node(mas->node))
- return 0;
-
- mas->index = index;
- mas->node = MAS_START;
- _mas_walk(mas, &range_min, &range_max);
- return 1;
-}
-
void *mas_walk(struct ma_state *mas)
{
unsigned long range_min, range_max;
unsigned long index = mas->index;
+ void *entry;
if (mas_is_none(mas))
mas->node = MAS_START;
_mas_walk(mas, &range_min, &range_max);
retry:
- if (mas_dead_node(mas, index))
- goto retry;
+ entry = NULL;
+ if (mas->offset != MAPLE_NODE_SLOTS)
+ entry = mas_get_slot(mas, mas->offset);
- if (mas->offset == MAPLE_NODE_SLOTS)
- return NULL; // Not found.
+ if (unlikely(mas_dead_node(mas, index)))
+ goto retry;
mas->index = range_min;
mas->last = range_max;
- return mas_get_slot(mas, mas->offset);
+ return entry;
}
static inline bool mas_search_cont(struct ma_state *mas, unsigned long index,
unsigned long *range_min, unsigned long *range_max)
{
unsigned long index = mas->index;
+ void *entry;
if (mas_is_none(mas))
mas->node = MAS_START;
if (unlikely(mas->node == MAS_ROOT))
return mas_root(mas);
retry:
- if (mas_dead_node(mas, index))
- goto retry;
-
- if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
- return NULL; // Not found.
+ entry = NULL;
+ if (likely(mas->offset != MAPLE_NODE_SLOTS))
+ entry = mas_get_slot(mas, mas->offset);
- return mas_get_slot(mas, mas->offset);
+ if (unlikely(mas_dead_node(mas, index)))
+ goto retry;
+ return entry;
}
void *mas_load(struct ma_state *mas)
void *mas_find(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
+ bool first = false;
+
+ if (mas_is_start(mas))
+ first = true;
- while (mas_search_cont(mas, mas->index, max, entry))
+retry:
+ while (mas_search_cont(mas, mas->index, max, entry)) {
entry = _mas_next(mas, max);
+ if (unlikely(mas_dead_node(mas, mas->index))) {
+ if (first)
+ mas->node = MAS_START;
+
+ goto retry;
+ }
+ first = false;
+ }
+
return entry;
}
EXPORT_SYMBOL_GPL(mas_find);
i /= 2;
}
mtree_destroy(mt);
-
}
static noinline void check_lower_bound_split(struct maple_tree *mt)
)
#define check_erase2_debug 0
void *mas_next(struct ma_state *mas, unsigned long max);
+
// Calculate the overwritten entries.
int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end,
void *s_entry, unsigned long s_min,
MT_BUG_ON(mt, mas.last != 0);
}
+
+/* Shared description of one RCU reader/writer test run. */
+struct rcu_test_struct {
+	struct maple_tree *mt;		// the maple tree
+	int count;			// Number of times to check value(s)
+	unsigned long index;		// The first index to check
+	void *entry1;			// The first entry value
+	void *entry2;			// The second entry value
+	unsigned long range_start;	// Start of the range walked by rcu_loop()
+	unsigned long range_end;	// End of the range walked by rcu_loop()
+
+	unsigned int seen_count;	// Number of threads that have seen the new value
+	unsigned long last;		// The end of the range to write.
+
+};
+
+/*
+ * rcu_val() - Reader thread body: repeatedly load test->index and verify
+ * the entry transitions from entry1 to entry2 at most once, and never back.
+ *
+ * @ptr: a struct rcu_test_struct describing the test.
+ * Returns: NULL (pthread start routine).
+ */
+static void *rcu_val(void *ptr)
+{
+	struct rcu_test_struct *test = (struct rcu_test_struct*)ptr;
+	unsigned long count = test->count;
+	bool updated = false;
+	void *entry;
+
+	rcu_register_thread();
+
+	while (count--) {
+		usleep(200);
+		entry = mtree_load(test->mt, test->index);
+		if (!updated) {
+			// Until the writer's store is observed only entry1 is
+			// valid; the first sight of any other value must be
+			// entry2 and marks the transition.
+			if (entry != test->entry1) {
+				MT_BUG_ON(test->mt, entry != test->entry2);
+				uatomic_inc(&test->seen_count);
+				updated = true;
+				continue;
+			}
+			MT_BUG_ON(test->mt, entry != test->entry1);
+			continue;
+		}
+
+		// After the update, the old value must never reappear.
+		MT_BUG_ON(test->mt, entry != test->entry2);
+	}
+
+	rcu_unregister_thread();
+	return NULL;
+}
+
+/*
+ * rcu_loop() - Reader thread body: repeatedly walk [range_start, range_end]
+ * with mas_for_each() under rcu_read_lock() and verify every entry,
+ * including the one index the writer updates from entry1 to entry2.
+ *
+ * @ptr: a struct rcu_test_struct describing the test.
+ * Returns: NULL (pthread start routine).
+ */
+static void *rcu_loop(void *ptr)
+{
+	struct rcu_test_struct *test = (struct rcu_test_struct*)ptr;
+	unsigned long count = test->count;
+	void *entry, *expected;
+	bool updated = false;
+	MA_STATE(mas, test->mt, test->range_start, test->range_start);
+
+	rcu_register_thread();
+
+	while (count--) {
+		usleep(100);
+		rcu_read_lock();
+		mas_for_each(&mas, entry, test->range_end) {
+			// check_rcu() stored xa_mk_value(i) at i*10..i*10+5,
+			// so an untouched range starting at index holds
+			// xa_mk_value(index / 10); index 0 holds value 0.
+			expected = xa_mk_value(
+					mas.index ? mas.index / 10 : mas.index);
+
+			if (mas.index == test->index)
+			{
+				// The updated index may hold the old or the
+				// new value; count the first time this thread
+				// sees the new one.
+				if (entry != test->entry1) {
+					MT_BUG_ON(test->mt,
+							entry != test->entry2);
+					if (!updated)
+						uatomic_inc(&test->seen_count);
+
+					updated = true;
+					continue;
+				}
+			}
+			MT_BUG_ON(test->mt, entry != expected);
+		}
+		rcu_read_unlock();
+		mas_set(&mas, test->range_start);	// Restart the walk.
+	}
+
+	rcu_unregister_thread();
+	return NULL;
+}
+
+/*
+ * run_check_rcu() - Spawn the reader threads, perform the test store, and
+ * verify at least one reader observed the value change.
+ *
+ * @mt: The maple tree under test (switched into RCU mode here).
+ * @vals: Test description: index/last define the store range, entry2 is
+ * the value written, seen_count is incremented by the readers.
+ */
+static noinline void run_check_rcu(struct maple_tree *mt,
+		struct rcu_test_struct *vals)
+{
+
+	int i;
+	void *(*function)(void*);
+	pthread_t readers[20];
+
+	mt_set_in_rcu(mt);
+	MT_BUG_ON(mt, !mt_in_rcu(mt));
+
+	// Alternate between the range-walking reader and the single-index
+	// reader so both access patterns race with the store below.
+	for (i = 0; i < ARRAY_SIZE(readers); i++) {
+		if (i % 2)
+			function = rcu_loop;
+		else
+			function = rcu_val;
+
+		if (pthread_create(&readers[i], NULL, *function, vals)) {
+			perror("creating reader thread");
+			exit(1);
+		}
+	}
+
+	usleep(5); // small yield to ensure all threads are at least started.
+	mtree_store_range(mt, vals->index, vals->last, vals->entry2,
+			GFP_KERNEL);
+	while (i--)
+		pthread_join(readers[i], NULL);
+
+	// Make sure the test caught at least one update.
+	MT_BUG_ON(mt, !vals->seen_count);
+}
+
+/*
+ * check_rcu() - Populate a tree with 1001 ranges (value i at i*10..i*10+5)
+ * and run the RCU reader/writer race test twice: once with a multi-slot
+ * store, once with a spanning store.
+ *
+ * @mt: An initialized, empty maple tree; destroyed and re-initialized
+ * between the two runs.
+ */
+static noinline void check_rcu(struct maple_tree *mt)
+{
+	unsigned long i, nr_entries = 1000;
+	struct rcu_test_struct vals;
+
+	rcu_register_thread();
+	for (i = 0; i <= nr_entries; i++)
+		mtree_store_range(mt, i*10, i*10 + 5,
+				xa_mk_value(i), GFP_KERNEL);
+	// Store across several slots.
+	vals.count = 1000;
+	vals.mt = mt;
+	vals.index = 8650;
+	vals.last = 8666;
+	vals.entry1 = xa_mk_value(865);		// Old value at index 8650.
+	vals.entry2 = xa_mk_value(8650);	// New value the writer stores.
+	vals.range_start = 0;
+	vals.range_end = ULONG_MAX;
+	vals.seen_count = 0;
+
+	run_check_rcu(mt, &vals);
+	mtree_destroy(mt);
+
+	mtree_init(mt, MAPLE_ALLOC_RANGE);
+	for (i = 0; i <= nr_entries; i++)
+		mtree_store_range(mt, i*10, i*10 + 5,
+				xa_mk_value(i), GFP_KERNEL);
+
+// 4390-4395: value 439 (0x1b7) [0x36f]
+	// Store across several slots.
+	// Spanning store.
+	vals.count = 10000;
+	vals.mt = mt;
+	vals.index = 4390;
+	vals.last = 4398;
+	// entry1 must be the pre-existing value and entry2 the value the
+	// writer stores, matching the first run above; otherwise readers
+	// "see" the update on their very first load and the transition is
+	// never actually exercised.
+	vals.entry1 = xa_mk_value(439);		// Old value at index 4390.
+	vals.entry2 = xa_mk_value(4390);	// New value the writer stores.
+	vals.seen_count = 0;
+	vals.range_start = 4316;
+	vals.range_end = 5035;
+	run_check_rcu(mt, &vals);
+	rcu_unregister_thread();
+}
+
static DEFINE_MTREE(tree);
static int maple_tree_seed(void)
{
#if defined(BENCH)
skip:
#endif
+
+
+ mtree_init(&tree, MAPLE_ALLOC_RANGE);
+ check_rcu(&tree);
+ mtree_destroy(&tree);
+
rcu_barrier();
pr_info("maple_tree: %u of %u tests passed\n", maple_tree_tests_passed,
maple_tree_tests_run);