#include <linux/export.h>
#include <linux/slab.h>
#include <asm/barrier.h>
-#include <linux/mm.h> // for task_size
#define MA_ROOT_PARENT 1
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
unsigned char slot)
{
enum maple_type type = mte_node_type(mas->node);
+
return _mas_get_safe_pivot(mas, slot, type);
}
static inline void mas_descend(struct ma_state *mas)
{
unsigned char slot = mas_get_slot(mas);
+
if (slot)
mas->min = mas_get_safe_pivot(mas, slot - 1) + 1;
mas->max = mas_get_safe_pivot(mas, slot);
mas->max = max;
mas->min = min;
mas->node = p_enode;
- return;
}
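/*
 * Editor's sketch (hypothetical pivots, not part of the patch): descending
 * into slot 2 of a node with pivots {10, 20, 30} narrows the range to
 * min = pivot[1] + 1 = 21 and max = pivot[2] = 30; slot 0 keeps the
 * parent's minimum, which is why mas_descend() only adjusts min when
 * slot > 0.
 */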
static inline void mas_set_safe_pivot(struct ma_state *mas, unsigned char slot,
if (cnt == 1) {
ms->alloc = NULL;
} else if (cnt <= 16) {
- cnt-=2;
- smn = mn->slot[cnt];
- mn->slot[cnt] = NULL;
- mn = smn;
+ cnt -= 2;
+ smn = mn->slot[cnt];
+ mn->slot[cnt] = NULL;
+ mn = smn;
} else if (cnt > 16) {
- cnt-=2;
+ cnt -= 2;
smn = mn->slot[(cnt / 15) - 1];
mn = smn->slot[(cnt % 15)];
smn->slot[cnt % 15] = NULL;
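/*
 * Editor's note (indexing inferred from the branches above): the first
 * 16 preallocated nodes hang directly off mn->slot[]; beyond that, each
 * overflow node carries 15 more, so entry cnt is reached through
 * mn->slot[(cnt / 15) - 1] and then slot[cnt % 15] within that node.
 */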
mas_set_alloc_req(ms, req);
list_failed:
- if (req > 0) {
+ if (req > 0)
mas_set_err(ms, -ENOMEM);
- }
}
// Free the allocations.
entry = _mte_get_rcu_slot(mn, slot, type);
if (mt_will_coalesce(entry)) {
- if (piv == prev_piv || !slot) {
+ if (piv == prev_piv || !slot)
(*coalesce)++;
- }
- //counted_null = true;
+
} else if (entry == NULL) {
- if (counted_null) {
+ if (counted_null)
(*coalesce)++;
- }
+
counted_null++;
} else {
counted_null = 0;
}
}
- } else
+ }
return half;
mas_set_slot(mas, data_end + 1);
mas_append_entry(mas, entry);
} else {
- MA_STATE(cp, mas->tree, mas->index, mas->last);
- mas_dup_state(&cp, mas);
-
unsigned char slot = mas_get_slot(mas);
unsigned char end_slot = slot;
unsigned long src_max = mas->max;
unsigned long piv, prev_piv = mas->min - 1;
void *existing_entry = NULL;
+ MA_STATE(cp, mas->tree, mas->index, mas->last);
+ mas_dup_state(&cp, mas);
+
if (slot)
prev_piv = mte_get_pivot(mas->node, slot - 1);
p_slot = mte_parent_slot(mas->node);
do {
+ MA_STATE(tmp, mas->tree, mas->index, mas->last);
+
mas_set_slot(mas, p_slot); // for mas_next_node.
mas_set_slot(&p_mas, p_slot); // for pivot changes in parent.
- MA_STATE(tmp, mas->tree, mas->index, mas->last);
mas_dup_state(&r_mas, mas); // point to the start node.
while (!mas_is_none(&r_mas) && r_mas.max <= r_mas.last) {
mas_dup_state(&tmp, &r_mas);
if (entry)
goto next_slot;
- this_gap = max - mas->index + 1;
+ this_gap = max - min + 1;
if (this_gap >= size) {
/* within range and large enough */
mas->max = max;
+ mas->min = min;
found = true;
break;
}
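/*
 * Editor's note: this_gap now measures the whole gap inclusively, e.g.
 * [min, max] = [10, 19] gives 19 - 10 + 1 = 10 slots.  The old
 * "max - mas->index + 1" shrank the gap whenever the requested index sat
 * above min; clamping the start is handled when the index is fixed up
 * afterwards (see the block below).
 */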
{
unsigned long min = mas->min;
unsigned char slot = mas_get_slot(mas);
+ // At this point, mas->node points to the right node and we have a
+ // slot that has a sufficient gap.
if (slot)
min = mte_get_pivot(mas->node, slot - 1) + 1;
+ mas->min = min;
+ mas->max = mas_get_safe_pivot(mas, slot);
+
if (mas->index < min)
mas->index = min;
mas->last = mas->index + size - 1;
}
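/*
 * Editor's example (hypothetical values): with a gap of
 * [min, max] = [100, 199], a requested index of 90 and size 10, the
 * start is clamped up to 100 and mas->last becomes 100 + 10 - 1 = 109.
 */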
void mas_set_rev_index(struct ma_state *mas, unsigned long size)
{
- // At this point, mas->node points to the right node and we have a
- // slot that has a sufficient gap.
+ unsigned long gap_max = mas->max;
+ unsigned long range_max = mas->last; // Includes subtracted size.
+
+ // rev_awalk has set mas->min and mas->max to the gap values.
// If the maximum is outside the window we are searching, then use the
// last location in the search.
- if (mas->max > mas->last)
- mas->index = mas->last;
- else
- mas->index = mas->max - size + 1;
-
- mas->last = mas->index + size - 1;
+	// mas->max and mas->min are the range of the gap.
+ // mas->index and mas->last are currently set to the search range.
+
+ // Trim the upper limit to the max.
+ if (gap_max > range_max)
+ gap_max = range_max;
+
+ mas->last = gap_max;
+ mas->index = mas->last - size + 1;
}
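/*
 * Editor's sketch (not part of the patch): the reverse-index arithmetic
 * above, reduced to plain unsigned longs.  The function name and all
 * values are hypothetical.
 */
static inline unsigned long example_rev_index(unsigned long gap_max,
					      unsigned long range_max,
					      unsigned long size)
{
	/* Trim the gap's upper end to the search window. */
	if (gap_max > range_max)
		gap_max = range_max;

	/*
	 * The allocation ends at gap_max and spans size slots, so e.g.
	 * gap_max = 0x9fff and size = 0x1000 yield an index of 0x9000.
	 */
	return gap_max - size + 1;	/* becomes mas->index */
}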
static inline int _mas_get_unmapped_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size, bool forward)
// The start of the window can only be within these values.
mas->index = min;
- mas->last = max - size;
+ mas->last = max;
if (forward)
mas_awalk(mas, size);
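/*
 * Editor's reading: the window is now seeded with the caller's raw
 * bounds (mas->index = min, mas->last = max) and the walk itself proves
 * that a gap of "size" fits; pre-shrinking the limit to max - size
 * could reject a gap that ends flush against max.
 */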
EXPORT_SYMBOL(mtree_destroy);
#ifdef CONFIG_DEBUG_MAPLE_TREE
+#ifndef __KERNEL__
+extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
+void mt_set_non_kernel(unsigned int val)
+{
+ kmem_cache_set_non_kernel(maple_node_cache, val);
+}
+
+extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
+unsigned long mt_get_alloc_size(void)
+{
+ return kmem_cache_get_alloc(maple_node_cache);
+}
+#define MA_PTR "%p"
+#else
+#define MA_PTR "%px"
+#endif
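+/*
+ * Editor's note: in-kernel, "%px" prints the raw pointer value, which
+ * these debug dumps rely on; plain "%p" has printed hashed pointers
+ * since v4.15.  The userspace test build has no "%px", so it falls
+ * back to "%p".
+ */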
+// Tree validations
void mt_dump_node(void *entry, unsigned long min, unsigned long max,
unsigned int depth);
void mt_dump_range(unsigned long min, unsigned long max, unsigned int depth)
mt_dump_range(min, max, depth);
if (xa_is_value(entry))
- pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
+ pr_cont("value %ld (0x%lx) ["MA_PTR"]\n", xa_to_value(entry),
xa_to_value(entry), entry);
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
else if (xa_is_retry(entry))
pr_cont("retry (%ld)\n", xa_to_internal(entry));
else if (mt_is_reserved(entry))
- pr_cont("UNKNOWN ENTRY (%p)\n", entry);
+ pr_cont("UNKNOWN ENTRY ("MA_PTR")\n", entry);
else
- pr_cont("%p\n", entry);
+		pr_cont(MA_PTR"\n", entry);
}
pr_cont(" contents: ");
for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
- pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
- pr_cont("%p\n", node->slot[i]);
+		pr_cont(MA_PTR" %lu ", node->slot[i], node->pivot[i]);
+	pr_cont(MA_PTR"\n", node->slot[i]);
for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
unsigned long last = max;
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ pr_err("node "MA_PTR" last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
break;
}
pr_cont("%lu ", node->gap[i]);
pr_cont("| ");
for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
- pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
- pr_cont("%p\n", node->slot[i]);
+ pr_cont(MA_PTR" %lu ", node->slot[i], node->pivot[i]);
+ pr_cont(MA_PTR"\n", node->slot[i]);
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
unsigned long last = max;
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ pr_err("node "MA_PTR" last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
break;
}
mt_dump_range(min, max, depth);
- pr_cont("node %p depth %d type %d parent %p", node, depth, type,
+ pr_cont("node "MA_PTR" depth %d type %d parent "MA_PTR, node, depth, type,
node ? node->parent : NULL);
switch (type) {
case maple_dense:
{
void *entry = mt->ma_root;
- pr_info("maple_tree(%p) flags %X, root %p\n",
+ pr_info("maple_tree("MA_PTR") flags %X, root "MA_PTR"\n",
mt, mt->ma_flags, entry);
if (!xa_is_node(entry))
mt_dump_entry(entry, 0, 0, 0);
else if (entry)
mt_dump_node(entry, 0, mt_max[mte_node_type(entry)], 0);
}
-#ifndef __KERNEL__
-extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
-void mt_set_non_kernel(unsigned int val)
-{
- kmem_cache_set_non_kernel(maple_node_cache, val);
-}
-
-extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
-unsigned long mt_get_alloc_size(void)
-{
- return kmem_cache_get_alloc(maple_node_cache);
-}
-#endif
-// Tree validations
/**
* Calculate the maximum gap in a node and check if that's what is reported in
if (xa_is_retry(entry))
pr_err("retry\n");
- pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
+ pr_err(MA_PTR"[%u] -> "MA_PTR" %lu != %lu - %lu + 1\n",
mas_mn(mas), i,
mte_get_rcu_slot(mas->node, i),
gap, p_end, p_start);
}
} else {
if (gap >= p_end - p_start + 1) {
- pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
+ pr_err(MA_PTR"[%u] %lu >= %lu - %lu + 1 (%lu)\n",
mas_mn(mas), i, gap, p_end, p_start,
p_end - p_start + 1);
MT_BUG_ON(mas->tree,
p_mn = mte_parent(mte);
MT_BUG_ON(mas->tree, max_gap > mas->max);
if (ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) != max_gap)
- pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
+ pr_err("gap "MA_PTR"[%u] != %lu\n", p_mn, p_slot, max_gap);
MT_BUG_ON(mas->tree,
ma_get_gap(p_mn, p_slot, mas_parent_enum(mas, mte)) !=
MT_BUG_ON(mas->tree,
ma_get_rcu_slot(parent, i, p_type) != mas->node);
else if (ma_get_rcu_slot(parent, i, p_type) == mas->node) {
- pr_err("parent contains invalid child at %p[%u] %p\n",
- parent, i, mas_mn(mas));
+ pr_err("parent contains invalid child at "MA_PTR"[%u] "
+ MA_PTR"\n", parent, i, mas_mn(mas));
MT_BUG_ON(mas->tree,
ma_get_rcu_slot(parent, i, p_type) == mas->node);
}
if (!mt_will_coalesce(entry)) {
if (piv < mas->min)
mt_dump(mas->tree);
- pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
+ pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i,
piv, mas->min);
MT_BUG_ON(mas->tree, piv < mas->min);
}
}
if (piv > mas->max) {
- pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, piv,
+ pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i, piv,
mas->max);
MT_BUG_ON(mas->tree, piv > mas->max);
}
ERASE, 47906195480576, 47906195480576,
STORE, 94641242615808, 94641242750975,
};
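+	/*
+	 * Editor's note (format inferred from check_erase2_testset()):
+	 * each record is an opcode followed by the first and last byte
+	 * of the affected range, replaying a captured mapping trace.
+	 */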
+ unsigned long set11[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140732658499584, 140737488351231,
+ERASE, 140732658499584, 140732658499584,
+STORE, 140732658499584, 140732658503679,
+STORE, 94029856579584, 94029856751615,
+ERASE, 94029856579584, 94029856579584,
+STORE, 94029856579584, 94029856595967,
+STORE, 94029856595968, 94029856751615,
+ERASE, 94029856595968, 94029856595968,
+STORE, 94029856595968, 94029856698367,
+STORE, 94029856698368, 94029856739327,
+STORE, 94029856739328, 94029856751615,
+STORE, 140014592573440, 140014592745471,
+ERASE, 140014592573440, 140014592573440,
+STORE, 140014592573440, 140014592577535,
+STORE, 140014592577536, 140014592745471,
+ERASE, 140014592577536, 140014592577536,
+STORE, 140014592577536, 140014592700415,
+STORE, 140014592700416, 140014592733183,
+STORE, 140014592733184, 140014592741375,
+STORE, 140014592741376, 140014592745471,
+STORE, 140732658565120, 140732658569215,
+STORE, 140732658552832, 140732658565119,
+ };
+
mt_set_non_kernel(3);
check_erase2_testset(mt, set, ARRAY_SIZE(set));
check_erase2_testset(mt, set10, ARRAY_SIZE(set10));
rcu_barrier();
mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ check_erase2_testset(mt, set11, ARRAY_SIZE(set11));
+ rcu_barrier();
+ mas_get_unmapped_area_rev(&mas, 12288, 140014592737280, 0x2000);
+ MT_BUG_ON(mt, mas.index != 140014592565248);
+ mtree_destroy(mt);
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)
for (i = 0; i < range_cnt; i += 2) {
/* Inclusive, Inclusive (with the -1) */
- pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
+ /*
+ pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12,
(range[i + 1] >> 12) - 1);
+ */
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
mt_validate(mt);
}
for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
+ /*
+ pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n",
+ min, holes[i+1]>>12, holes[i+2]>>12,
+ holes[i] >> 12);
+ */
MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, min,
holes[i+1] >> 12,
holes[i+2] >> 12));
- MT_BUG_ON(mt, mas.index != holes[i] >> 12);
+ MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
+ MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12));
min = holes[i+1] >> 12;
mas_reset(&mas);
}
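/*
 * Editor's example (hypothetical numbers): for a hole ending just below
 * 0x9000 and a request of size 0x1000, the reverse search must sit
 * flush against the top of the hole, so mas.last + 1 == 0x9000 and
 * mas.index == 0x9000 - 0x1000 = 0x8000, exactly what the two
 * assertions above encode.
 */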
for (i = 0; i < req_range_cnt; i += 5) {
- pr_debug("\tRequest between %lu-%lu size %lu\n",
+ /*
+ pr_debug("\tRequest between %lu-%lu size %lu, should get %lu\n",
req_range[i] >> 12,
(req_range[i + 1] >> 12) - 1,
- req_range[i+2] >> 12);
+ req_range[i+2] >> 12,
+ req_range[i+3] >> 12);
+ */
check_mtree_alloc_rrange(mt,
req_range[i] >> 12, // start
req_range[i+1] >> 12, // end
int req_range_cnt = ARRAY_SIZE(req_range);
for (i = 0; i < range_cnt; i += 2) {
+ /*
pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
(range[i + 1] >> 12) - 1);
+ */
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
mt_validate(mt);
}
MA_STATE(mas, mt, 0, 0);
unsigned long min = 0x565234af2000;
for (i = 0; i < ARRAY_SIZE(holes); i+= 3) {
MT_BUG_ON(mt, mas_get_unmapped_area(&mas, min >> 12,
holes[i+1] >> 12,
holes[i+2] >> 12));