static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char piv)
{
- if (!piv)
+ if (unlikely(!piv))
return mas->min;
return pivots[piv - 1] + 1;
mas->depth++;
mas_node_walk(mas, type, range_min, range_max);
- if (ma_is_leaf(type)) // Leaf.
+ if (unlikely(ma_is_leaf(type)))
return true;
next = mas_get_slot(mas, mas->offset);
if (!next)
return false;
- // Traverse.
+ // Descend.
mas->max = *range_max;
mas->min = *range_min;
mas->node = next;
{
enum maple_type type = mte_node_type(mas->node);
struct maple_node *node = mas_mn(mas);
- unsigned long *pivots, *gaps;
+ unsigned long *pivots, *gaps = NULL;
void **slots;
unsigned long gap, max, min;
if (mas->last < min)
continue;
+
if (mas->index > max) {
mas_set_err(mas, -EBUSY);
return false;
goto not_found;
if (mas_is_ptr(mas)) {
- *range_min = 0;
- *range_max = 0;
+ *range_min = *range_max = 0;
if (!mas->index)
return true;
#define CONFIG_DEBUG_MAPLE_TREE
//#define BENCH_SLOT_STORE
//#define BENCH_NODE_STORE
+//#define BENCH_AWALK
//#define BENCH_WALK
//#define BENCH_FORK
static
}
#endif
+#if defined(BENCH_AWALK)
+static noinline void bench_awalk(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 30000000;
+ MA_STATE(mas, mt, 1470, 1470);
+
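+ /* Populate [0, 2500) with six-index occupied ranges, leaving a four-wide gap after each. */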
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
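+ /* Clear 1470-1475 so exactly one gap below 2000 is wide enough for the search below. */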
+ mtree_store_range(mt, 1470, 1475, NULL, GFP_KERNEL);
+
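+ /* Hot loop: one reverse empty-area search per iteration; mas_reset() restarts the walk from the root. */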
+ for (i = 0; i < count; i++) {
+ mas_get_empty_area_rev(&mas, 0, 2000, 10);
+ mas_reset(&mas);
+ }
+}
+#endif
#if defined(BENCH_WALK)
static noinline void bench_walk(struct maple_tree *mt)
{
mtree_destroy(&tree);
goto skip;
#endif
+#if defined(BENCH_AWALK)
+#define BENCH
+ mtree_init(&tree, MAPLE_ALLOC_RANGE);
+ bench_awalk(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
#if defined(BENCH_WALK)
#define BENCH
mtree_init(&tree, MAPLE_ALLOC_RANGE);
/*
* mas was called for the prev vma, and that may not be the correct
- * location for the vma being inserted.
+ * location for the vma being inserted, but it is before that location
+ * and so the call to vma_mas_link()->vma_mas_store()->mas_store_gfp()
+ * will detect the write as a spanning store and reset mas if necessary.
*/
- if (mas.max < addr)
- mas_reset(&mas);
mas_set(&mas, addr);
mas_walk(&mas);
vma_mas_link(mm, vma, &mas, prev);