if (unlikely(!max && offset))
goto max;
- offset++;
min = max + 1;
- while (offset < count) {
+ while (++offset < count) {
max = wr_mas->pivots[offset];
if (index <= max)
goto done;
-
- if (unlikely(!max))
+ else if (unlikely(!max))
break;
min = max + 1;
- offset++;
}
max:
return false;
}
- trace_ma_write(__func__, mas, piv, entry);
+ trace_ma_write(__func__, wr_mas->mas, piv, entry);
return true;
}
{
unsigned long *pivots;
unsigned char count;
- unsigned long min, max;
+ unsigned long prev, max;
unsigned char offset;
unsigned long index;
return;
}
- offset = 0;
- min = mas->min;
pivots = ma_pivots(node, type);
- max = pivots[offset];
+ max = pivots[0];
if (unlikely(ma_dead_node(node)))
return;
- count = mt_pivots[type];
+ offset = 0;
+ prev = mas->min;
index = mas->index;
if (unlikely(index <= max))
- goto done;
+ goto offset_zero;
- offset++;
- min = max + 1;
- while (offset < count) {
+ count = mt_pivots[type];
+ while (++offset < count) {
+ prev = max;
max = pivots[offset];
if (unlikely(ma_dead_node(node)))
return;
if (index <= max)
- goto done;
-
- if (unlikely(!max))
- break;
-
- min = max + 1;
- offset++;
+ goto offset_found;
+ else if (unlikely(!max))
+ goto mas_max;
}
+ prev = max;
+mas_max:
max = mas->max;
-done:
+offset_found:
+ prev++;
+offset_zero:
mas->offset = offset;
if (ma_is_leaf(type)) {
*range_max = max;
- *range_min = min;
+ *range_min = prev;
} else {
mas->max = max;
- mas->min = min;
+ mas->min = prev;
}
}
/*
- * mas_lookup_walk() - Internal quick lookup that does not keep maple state up
+ * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
* to date.
*
* @mas: The maple state.
*
+ * Note: Leaves @mas in an undesirable state.
* Return: The entry for @mas->index or %NULL on dead node.
*/
-static inline void *mas_lookup_walk(struct ma_state *mas)
+static inline void *mtree_lookup_walk(struct ma_state *mas)
{
unsigned long *pivots;
unsigned char offset;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
- end = ma_data_end(node, type, pivots, max);
if (unlikely(ma_dead_node(node)))
goto dead_node;
- while((offset < end) &&
- (pivots[offset] < mas->index))
+ if (pivots[offset] >= mas->index)
+ goto next;
+
+ end = ma_data_end(node, type, pivots, max);
+ do {
offset++;
+ } while ((offset < end) && (pivots[offset] < mas->index));
- if (likely(offset > mt_pivots[type]) && pivots[offset])
+ if (likely(offset > end))
max = pivots[offset];
+next:
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
if (unlikely(ma_dead_node(node)))
return NULL;
}
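For context on the calling convention described in the comment above: mtree_lookup_walk() returns NULL both when no entry exists and when a dead node was hit, so the caller must hold the RCU read lock, test the maple state, and restart the walk if the state was reset. The sketch below mirrors the mtree_load() hunk later in this patch; the wrapper name example_fast_lookup() is illustrative only and not part of the change.

	/* Sketch only: how a caller is expected to drive the fast lookup path. */
	static void *example_fast_lookup(struct maple_tree *mt, unsigned long index)
	{
		MA_STATE(mas, mt, index, index);	/* stack-local maple state */
		void *entry;

		rcu_read_lock();
	retry:
		entry = mas_start(&mas);		/* descend from the root */
		if (unlikely(mas_is_none(&mas)))	/* empty tree */
			goto unlock;

		if (unlikely(mas_is_ptr(&mas))) {	/* single entry stored at index 0 */
			if (index)
				entry = NULL;
			goto unlock;
		}

		entry = mtree_lookup_walk(&mas);	/* fast walk; NULL on dead node */
		if (!entry && unlikely(mas_is_start(&mas)))
			goto retry;			/* node died under us; restart */
	unlock:
		rcu_read_unlock();
		return entry;
	}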
-/*
- * mt_lookup() - Fast value lookup.
- *
- */
-void *mt_lookup(struct maple_tree *mt, unsigned long index)
-{
- MA_STATE(mas, mt, index, index);
- void *entry;
-
-retry:
- entry = mas_start(&mas);
- if (unlikely(mas_is_none(&mas)))
- return NULL;
-
- if (unlikely(mas_is_ptr(&mas))) {
- if (!index)
- return entry;
-
- return NULL;
- }
-
- entry = mas_lookup_walk(&mas);
- if (!entry && unlikely(mas_is_start(&mas)))
- goto retry;
-
- return entry;
-}
-EXPORT_SYMBOL_GPL(mt_lookup);
-
/*
* mas_descend_walk(): Locates a value and sets the mas->node and slot
* accordingly. range_min and range_max are set to the range which the entry is
struct ma_state *mas = wr_mas->mas;
struct maple_big_node b_node;
-
/* Direct replacement */
if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
struct ma_wr_state wr_mas;
wr_mas.mas = mas;
- if ( !mas_is_start(mas)) {
+ if (!mas_is_start(mas)) {
if (mas_is_none(mas))
mas_reset(mas);
else {
*/
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
+ MA_STATE(mas, mt, index, index);
void *entry;
- unsigned long range_max, range_min;
- MA_STATE(mas, mt, index, index);
trace_ma_read(__func__, &mas);
rcu_read_lock();
- entry = mas_range_load(&mas, &range_min, &range_max);
+retry:
+ entry = mas_start(&mas);
+ if (unlikely(mas_is_none(&mas)))
+ goto unlock;
+
+ if (unlikely(mas_is_ptr(&mas))) {
+ if (index)
+ entry = NULL;
+
+ goto unlock;
+ }
+
+ entry = mtree_lookup_walk(&mas);
+ if (!entry && unlikely(mas_is_start(&mas)))
+ goto retry;
+unlock:
rcu_read_unlock();
if (xa_is_zero(entry))
return NULL;
{
unsigned long range_start = 0, range_end = 0;
void *entry = NULL;
- bool leaf;
#ifdef CONFIG_DEBUG_MAPLE_TREE
unsigned long copy = *index;
#endif
return NULL;
rcu_read_lock();
- leaf = mas_tree_walk(&mas, &range_start, &range_end);
- if (leaf == true && mas.offset != MAPLE_NODE_SLOTS)
+ if (mas_tree_walk(&mas, &range_start, &range_end)) {
+ if (unlikely(mas_is_ptr(&mas)) && !(*index))
+ return mas_root(&mas);
entry = mas_get_slot(&mas, mas.offset);
+ }
mas.last = range_end;
if (entry && !xa_is_zero(entry)) {
{
void *ret = mtree_test_load(mt, index);
- if (ret != mt_lookup(mt, index)) {
- pr_err("lookup of %lu %p expected %p\n", index, mt_lookup(mt, index), ret);
- }
- MT_BUG_ON(mt, ret != mt_lookup(mt, index));
-
if (ret != ptr)
pr_err("Load %lu returned %p expect %p\n", index, ret, ptr);
MT_BUG_ON(mt, ret != ptr);
#if defined(BENCH_WALK)
static noinline void bench_walk(struct maple_tree *mt)
{
- int i, max = 2500, count = 250000000;
+ int i, max = 2500, count = 500000000;
MA_STATE(mas, mt, 1470, 1470);
for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
for (i = 0; i < count; i++) {
+#if 0
+ mtree_load(mt, 1470);
+#else
mas_walk(&mas);
+#endif
mas_reset(&mas);
}