maple_tree: Simplify the mt_for_each() interface for external users

mas_get_unmapped_area_rev() was not getting the right answer.  Fix it
and clean up the condensed and unused interfaces internal to the maple
tree.  Fix the associated tests as well.
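An illustrative use of the simplified external call (the tree pointer
and bounds below are hypothetical, not from this patch): callers drop
the 'start' flag and pass only the tree, a cursor, and an upper bound.

	unsigned long index = 0;
	void *entry;

	/* First entry at or above index, searching up to ULONG_MAX. */
	entry = mt_find(mt, &index, ULONG_MAX);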
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
mt_init_flags(mt, 0);
}
-void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
bool start);
/**
* mt_for_each - Searches for an entry starting at index until max.
*
*/
#define mt_for_each(tree, entry, index, max) \
- for (entry = mt_find(tree, &index, max, true); \
- entry; entry = mt_find(tree, &index, max, false))
+ for (entry = _mt_find(tree, &index, max, true); \
+ entry; entry = _mt_find(tree, &index, max, false))
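A short usage sketch of the iterator (hypothetical tree and range):
the macro seeds the walk with _mt_find(..., true) and advances with
_mt_find(..., false), so the 'start' flag never leaks to callers.

	unsigned long index = 0;
	void *entry;

	mt_for_each(tree, entry, index, 100) {
		/* Visits every non-empty range in [0, 100]; the
		 * cursor 'index' advances past each entry found.
		 */
		pr_debug("entry %p found, cursor now %lu\n", entry, index);
	}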
#ifdef CONFIG_DEBUG_MAPLE_TREE
static unsigned int tests_run;
static unsigned int tests_passed;
-#ifdef CONFIG_DEBUG_MAPLE_TREE
void mt_dump(const struct maple_tree *mt);
#define MT_BUG_ON(tree, x) do { \
tests_run++; \
} while (0)
#else
#define MT_BUG_ON(tree, x) BUG_ON(x)
-#endif
-
#endif /* CONFIG_DEBUG_MAPLE_TREE */
#endif /*_LINUX_MAPLE_TREE_H */
* Matthew Wilcox <willy@infradead.org>
*/
-#define CONFIG_DEBUG_MAPLE_TREE
-
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <asm/barrier.h>
//#include <linux/mm.h> // for task_size
+#define CONFIG_DEBUG_MAPLE_TREE
+void mt_dump(const struct maple_tree *mt);
#define MA_ROOT_PARENT 1
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
static inline unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
enum maple_type mt = mte_node_type(mas->node);
+ unsigned long pstart, pend;
+ unsigned long prev_gap = 0;
unsigned long max_gap = 0;
unsigned long gap = 0;
- unsigned long pstart, pend;
- int i;
void *entry = NULL;
+ int i;
if (ma_is_dense(mt)) {
for (i = 0; i < mt_slot_count(mas->node); i++) {
gap = pend - pstart + 1;
entry = mte_get_rcu_slot(mas->node, i);
- if (!mt_is_empty(entry) || xa_is_retry(entry))
+ if (!mt_is_empty(entry) || xa_is_retry(entry)) {
+ prev_gap = 0;
goto next;
+ }
- if (gap > max_gap)
- max_gap = gap;
+ prev_gap += gap;
+ if (prev_gap > max_gap)
+ max_gap = prev_gap;
next:
if (pend >= mas->max)
{
enum maple_type type;
unsigned long max, min;
- unsigned char i;
+ unsigned char i, start;
bool found = false;
+ unsigned long this_gap = 0;
type = mte_node_type(mas->node);
i = mas_get_slot(mas);
max = _mas_get_safe_pivot(mas, i, type);
do {
- unsigned long this_gap = 0;
void *entry = NULL;
if (!i)
/* check if this slot is full */
entry = _mte_get_rcu_slot(mas->node, i, type);
- if (entry && !xa_is_deleted(entry))
+ if (entry && !xa_is_deleted(entry)) {
+ this_gap = 0;
goto next_slot;
+ }
+
+ if (!this_gap)
+ start = i;
- this_gap = max - min + 1;
+ this_gap += max - min + 1;
if (this_gap >= size) {
/* within range and large enough */
- mas->max = max;
+			if (mas->last - min + 1 < size) {
+				printk("Skip: %lu - %lu < %lu\n",
+						mas->last, min, size);
+				/* The gap is large enough and within
+				 * range, but size does not fit
+				 * between the start of the gap and
+				 * the upper search limit (mas->last).
+				 */
+				goto next_slot;
+			}
mas->min = min;
+ mas->max = min + this_gap - 1;
+ i = start;
found = true;
break;
}
default:
do {
- unsigned long this_gap;
-
if (!i)
min = mas->min;
else
* Note: Does not return the zero entry.
* returns an entry.
*/
-void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
+void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
bool start)
{
unsigned long range_start = 0, range_end = 0;
return entry;
}
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
+{
+	return _mt_find(mt, index, max, true);
+}
EXPORT_SYMBOL(mt_find);
static inline int mas_build_replacement(struct ma_state *mas, void *new_entry,
}
void mas_set_rev_index(struct ma_state *mas, unsigned long size)
{
- unsigned long gap_max = mas->max;
- unsigned long range_max = mas->last; // Includes subtracted size.
+ unsigned long gap_max = mas->max; // in-tree gap.
+ unsigned long range_max = mas->last; // range window we are searching in.
// rev_awalk has set mas->min and mas->max to the gap values.
// If the maximum is outside the window we are searching, then use the
mas->last = gap_max;
mas->index = mas->last - size + 1;
return;
+}
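A worked example of the clamping above (values illustrative, not from
the patch): rev_awalk leaves the gap bounds in mas->min and mas->max,
and mas->last holds the end of the caller's search window.

	/*
	 * gap found:   mas->min = 100, mas->max = 200
	 * window end:  mas->last = 150, size = 10
	 * result:      mas->last = 150, mas->index = 141
	 *
	 * If the window ends past the gap, the gap maximum wins:
	 * mas->last = 200, mas->index = 191.
	 */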
+static void _mas_empty_or_single_unmapped_area(struct ma_state *mas,
+ unsigned long min, unsigned long max, unsigned long size,
+ bool fwd)
+{
+	unsigned long start = 0;
+
+	if (!mas_is_none(mas))
+		start++; // mas_is_ptr: the single entry occupies index 0.
+
+ if (start < min)
+ start = min;
+
+ if (fwd) {
+ mas->index = start;
+ mas->last = start + size - 1;
+ return;
+ }
+
+	mas->index = max - size + 1;
+	mas->last = max;
}
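A sketch of the placement the new helper performs for an empty or
single-entry tree, with illustrative values (min = 0, max = 0xffff,
size = 0x1000):

	/*
	 * forward:  index = 0 (or 1 when a single entry occupies
	 *           index 0), last = index + size - 1
	 * reverse:  index = max - size + 1 = 0xf000, last = max = 0xffff
	 */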
static inline int _mas_get_unmapped_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size, bool forward)
{
mas_start(mas);
+ max--; // Convert to inclusive.
// Empty set.
- if (mas_is_none(mas)) {
- mas->index = 0;
- mas->last = size - 1;
- return 0;
- }
- // There is already a single value.
- if (mas_is_ptr(mas)) {
- mas->index = 1;
- mas->last = size;
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ _mas_empty_or_single_unmapped_area(mas, min, max, size, forward);
return 0;
}
*
* Returns: 0 on success, -EBUSY otherwise.
*/
-static inline int mas_rev_alloc(struct ma_state *mas, void *entry,
+static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
+ unsigned long max, void *entry,
unsigned long size, unsigned long *index)
{
unsigned char slot = MAPLE_NODE_SLOTS;
+ int ret = 0;
- mas_start(mas);
-
- if (mas_is_none(mas)) {
- slot = 0;
- goto empty_tree;
- }
-
- if (mas_is_ptr(mas)) {
- mas_root_expand(mas, entry);
- if (!mas->index)
- return mte_get_pivot(mas->node, 0);
- return mte_get_pivot(mas->node, 1);
- }
-
- mas_rev_awalk(mas, size);
- // cannot be empty tree.
+ ret = _mas_get_unmapped_area(mas, min, max, size, false);
+ if (ret)
+ return ret;
if (mas_is_err(mas))
return xa_err(mas->node);
if (slot == MAPLE_NODE_SLOTS)
goto no_gap;
- // At this point, mas->node points to the right node and we have a
- // slot that has a sufficient gap.
- // If the maximum is outside the window we are searching, then use the
- // last location in the search.
- if (mas->max > mas->last)
- mas->index = mas->last;
- else
- mas->index = mas->max - size + 1;
-
- mas->last = mas->index + size - 1;
-
-empty_tree:
return mas_fill_gap(mas, entry, slot, size, index);
no_gap:
/* Interface */
void __init maple_tree_init(void)
{
- maple_node_cache = kmem_cache_create("maple node",
+ maple_node_cache = kmem_cache_create("maple_node",
sizeof(struct maple_node), sizeof(struct maple_node),
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, NULL);
}
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
- if (min > max)
+ if (min >= max)
return -EINVAL;
if (max < size - 1)
mtree_lock(mas.tree);
retry:
- mas.index = min;
- mas.last = max - size + 1;
- ret = mas_rev_alloc(&mas, entry, size, startp);
+ ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
if (mas_nomem(&mas, gfp))
goto retry;
p_end = mas->max;
if (mte_is_leaf(mte)) {
- gap = p_end - p_start + 1;
- if (!mt_is_empty(mte_get_rcu_slot(mas->node, i)))
+ if (!mt_is_empty(mte_get_rcu_slot(mas->node, i))) {
+ gap = 0;
goto not_empty;
+ }
+ gap += p_end - p_start + 1;
} else {
void *entry = mte_get_rcu_slot(mas->node, i);
gap = mte_get_gap(mte, i);
gap != p_end - p_start + 1);
}
} else {
- if (gap >= p_end - p_start + 1) {
+ if (gap > p_end - p_start + 1) {
-			pr_err(MA_PTR"[%u] %lu >= %lu - %lu + 1 (%lu)\n",
+			pr_err(MA_PTR"[%u] %lu > %lu - %lu + 1 (%lu)\n",
mas_mn(mas), i, gap, p_end, p_start,
p_end - p_start + 1);
MT_BUG_ON(mas->tree,
- gap >= p_end - p_start + 1);
+ gap > p_end - p_start + 1);
}
}
}
ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
GFP_KERNEL);
+ printk("ret = %d result %lu\n", ret, result);
MT_BUG_ON(mt, ret != eret);
if (ret)
return;
mas_reset(&mas);
index = 17;
- entry = mt_find(mt, &index, 512, true);
+ entry = mt_find(mt, &index, 512);
MT_BUG_ON(mt, xa_mk_value(256) != entry);
mas_reset(&mas);
index = 17;
- entry = mt_find(mt, &index, 20, true);
+ entry = mt_find(mt, &index, 20);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, index != mas.index);
MT_BUG_ON(mt, last != mas.last);
mas_unlock(&mas);
-
-
-
mtree_destroy(mt);
}
STORE, 140014592741375, 140014592741375, // contrived
STORE, 140014592733184, 140014592741376, // creates first entry retry.
};
+ unsigned long set13[] = {
+STORE, 140373516247040, 140373516251136,//: ffffa2e7b0e10d80
+STORE, 140373516251136, 140373516255232,//: ffffa2e7b1195d80
+STORE, 140373516255232, 140373516443648,//: ffffa2e7b0e109c0
+STORE, 140373516443648, 140373516587008,//: ffffa2e7b05fecc0
+//STORE, 140373516587008, 140373516963839,//: 0000000000000000
+STORE, 140373516963840, 140373518647296,//: ffffa2e7bfbdcc00
+STORE, 140373518647296, 140373518663680,//: ffffa2e7bf5d59c0
+STORE, 140373518663680, 140373518684160,//: deleted (257)
+//STORE, 140373518684160, 140373518680063,//: 0000000000000000
+STORE, 140373518680064, 140373518684160,//: ffffa2e7b0e1cb40
+STORE, 140373518684160, 140373518688256,//: ffffa2e7b05fec00
+STORE, 140373518688256, 140373518692352,//: ffffa2e7bfbdcd80
+STORE, 140373518692352, 140373518696448,//: ffffa2e7b0749e40
+ };
int cnt = 0;
-
mt_set_non_kernel(3);
check_erase2_testset(mt, set, ARRAY_SIZE(set));
mtree_destroy(mt);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set2, ARRAY_SIZE(set2));
start = 140735933894656;
- MT_BUG_ON(mt, !!mt_find(mt, &start, 140735933906943UL, true));
+ MT_BUG_ON(mt, !!mt_find(mt, &start, 140735933906943UL));
mtree_destroy(mt);
mt_set_non_kernel(2);
mtree_init(mt, MAPLE_ALLOC_RANGE);
check_erase2_testset(mt, set12, ARRAY_SIZE(set12));
rcu_barrier();
- mt_dump(mt);
mas_for_each(&mas, entry, ULONG_MAX) {
- if (mas_retry(&mas, entry)) {
- printk("retry\n\n\n");
+ if (mas_retry(&mas, entry))
continue;
- }
BUG_ON(cnt > 12);
cnt++;
- printk("%d %lu-%lu %p\n", cnt, mas.index, mas.last, entry);
}
- exit(0);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mtree_init(mt, MAPLE_ALLOC_RANGE);
+ check_erase2_testset(mt, set13, ARRAY_SIZE(set13));
+ mtree_erase(mt, 140373516443648);
+ rcu_read_lock();
+ mas_get_unmapped_area_rev(&mas, 0, 140373518663680, 4096);
+ rcu_read_unlock();
+ printk("index = %lu - %lu\n", mas.index, mas.last);
+ mt_dump(mt);
mtree_destroy(mt);
}
0x0, // Min
0x565234AF1 << 12, // Max
0x3000, // Size
- 0x565234AEF << 12, // hole - 3.
+ 0x565234AEE << 12, // max - 3.
0, // Return value success.
0x0, // Min
- 0x0, // Max
+ -1, // Max
0x1000, // Size
- 0x0, // First rev hole of size 0x1000
+ 562949953421311 << 12,// First rev hole of size 0x1000
0, // Return value success.
0x0, // Min
0x7F36D510A << 12, // Max
0x4000, // Size
- 0x7F36D5107 << 12, // First rev hole of size 0x4000
+ 0x7F36D5106 << 12, // First rev hole of size 0x4000
0, // Return value success.
// Ascend test.
for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
- /*
pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n",
min, holes[i+1]>>12, holes[i+2]>>12,
holes[i] >> 12);
- */
MT_BUG_ON(mt, mas_get_unmapped_area_rev(&mas, min,
holes[i+1] >> 12,
holes[i+2] >> 12));
-// printk("Found %lu %lu\n", mas.index, mas.last);
-// printk("gap %lu %lu\n", (holes[i] >> 12),
-// (holes[i+1] >> 12));
+ printk("Found %lu %lu\n", mas.index, mas.last);
+ printk("gap %lu %lu\n", (holes[i] >> 12),
+ (holes[i+1] >> 12));
MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12));
min = holes[i+1] >> 12;
}
for (i = 0; i < req_range_cnt; i += 5) {
- /*
pr_debug("\tRequest between %lu-%lu size %lu, should get %lu\n",
req_range[i] >> 12,
(req_range[i + 1] >> 12) - 1,
req_range[i+2] >> 12,
req_range[i+3] >> 12);
- */
check_mtree_alloc_rrange(mt,
req_range[i] >> 12, // start
req_range[i+1] >> 12, // end
mt_validate(mt);
}
+ mtree_erase(mt, 34148798727); // create a deleted range.
+ check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
+ 34148798725, 0, mt);
+
mtree_destroy(mt);
}
}
return bug ? -1 : i;
}
+#define CONFIG_DEBUG_MAPLE_TREE
#if defined(CONFIG_DEBUG_MAPLE_TREE)
extern void mt_dump(const struct maple_tree *mt);
}
static void __vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma)
{
+ //printk("%s: mt_mod %p: ERASE, %lu, %lu,\n", __func__, mm, vma->vm_start,
+ // vma->vm_end - 1);
mtree_erase(&mm->mm_mt, vma->vm_start);
}
static void __vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
{
+ //printk("%s: mt_mod %p: STORE, %lu, %lu,\n", __func__, mm, vma->vm_start,
+ // vma->vm_end - 1);
mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1, vma,
GFP_KERNEL);
}
return -ENOMEM;
rcu_read_lock();
- if (mas_get_unmapped_area_rev(&mas, info->low_limit,
- info->high_limit - 1, length))
+ if (mas_get_unmapped_area_rev(&mas, info->low_limit, info->high_limit,
+ length))
return -ENOMEM;
rcu_read_unlock();
- gap = ((mas.index) & ~info->align_mask) + info->align_offset;
+ gap = mas.index;
+	// XXX: Not clear why this alignment fixup is still needed.
+	if (mas.max > info->high_limit)
+		gap = (gap & ~info->align_mask) + info->align_offset;
/*
* Adjust search limits by the desired length.
* See implementation comment at top of unmapped_area().
VM_BUG_ON(gap_end < gap_start);
if (gap != gap_end) {
- pr_err("Gap was found: mt %lu gap_end %lu\n", gap, gap_end);
+ pr_err("%s: Gap was found: mt %lu gap_end %lu\n", __func__, gap,
+ gap_end);
+		pr_err("window was %lu - %lu size %lu\n", info->low_limit,
+			info->high_limit, length);
+ pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min, mas.max,
+ mas.last);
+ pr_err("mas.index %lu align %lu offset %lu\n", mas.index,
+ info->align_offset, info->align_mask);
mt_dump(&mm->mm_mt);
}
VM_BUG_ON(gap != gap_end);
if (likely(vma))
return vma;
- vma = mt_find(&mm->mm_mt, &addr, ULONG_MAX, 0);
+ vma = mt_find(&mm->mm_mt, &addr, ULONG_MAX);
if (vma)
vmacache_update(addr, vma);
&prev, &rb_link, &rb_parent))
return -ENOMEM;
- if ((overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1, true)) != NULL) {
+ if ((overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1)) != NULL) {
pr_err("Found vma ending at %lu\n", start - 1);
pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap,
overlap->vm_start, overlap->vm_end - 1);
if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
return NULL; /* should never get here */
- if (mt_find(&mm->mm_mt, &index, addr+len - 1, true))
+ if (mt_find(&mm->mm_mt, &index, addr+len - 1))
BUG();
new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),