return ((unsigned long)entry >> 3) & 15;
}
+/* ma_is_leaf() - Check if a maple node type is a leaf type. */
+static inline bool ma_is_leaf(const enum maple_type type)
+{
+	return type < maple_range_16;
+}
+
+/* mt_is_leaf() - Check if an encoded maple node is a leaf. */
static inline bool mt_is_leaf(const struct maple_enode *entry)
{
-	return mt_node_type(entry) < maple_range_16;
+	return ma_is_leaf(mt_node_type(entry));
}
static inline enum maple_type mt_node_hole(const void *entry)
pstart = mas->min;
for (int i = 0; i < mt_slots[mt]; i++) {
- bool empty = true;
if (i < mt_pivots[mt])
pend = ma_get_pivot(mas->node, i);
else
pend = mas->max;
+ if (i && !pend)
+ break;
+
if (pend) {
gap = pend - pstart;
if (ma_get_rcu_slot(mas->node, i))
- empty = false;
+ goto next;
} else {
gap = mas->max - pstart;
}
if (!pstart)
gap++;
- if (empty && (gap > max_gap))
+ if (gap > max_gap)
max_gap = gap;
+next:
if (pend == mas->max)
break;
return max_gap;
}
-static inline unsigned long ma_max_gap(struct ma_state *mas, unsigned char *slot)
+static inline unsigned long ma_max_gap(struct ma_state *mas,
+ unsigned char *slot)
{
unsigned long max_gap = 0;
unsigned char i;
}
+/* _mas_rev_awalk() - Examine one node, highest slot first, for a gap of
+ * at least @size overlapping [mas->index, mas->last].
+ *
+ * Walks the slots from the current slot (ma_get_slot()) down to 0.
+ * Returns true when a suitable gap (or an empty non-leaf slot) is found,
+ * false to continue the walk.  Sets the mas error state to -EBUSY when
+ * the search range is exhausted.  The next slot to examine (or the slot
+ * of the gap) is stored with ma_set_slot().
+ */
+static inline bool _mas_rev_awalk(struct ma_state *mas, unsigned long size)
+{
+	enum maple_type type;
+	unsigned long max, min;
+	unsigned char pivot_cnt, i;
+	bool found = false;
+
+	type = mt_node_type(mas->node);
+	i = ma_get_slot(mas);
+
+	min = mas->min;
+	max = mas->max;
+
+	switch (type) {
+	case maple_leaf_64:
+		/* NOTE(review): mt_max[] is the slot count, so the last
+		 * slot (i >= pivot_cnt - 1) has no pivot of its own and
+		 * uses the node maximum instead.
+		 */
+		pivot_cnt = mt_max[type];
+		if (i >= pivot_cnt - 1)
+			max = mas->max;
+		else
+			max = _ma_get_pivot(mas->node, i, type);
+
+		do {
+			unsigned long this_gap = 0;
+			void *entry = NULL;
+
+			/* [min, max] is the range covered by slot i. */
+			if (!i)
+				min = mas->min;
+			else
+				min = _ma_get_pivot(mas->node, i - 1,
+						type) + 1;
+
+			/* last is below this range */
+			if (mas->last < min)
+				goto next_slot;
+
+			/* index is above this range.*/
+			if (mas->index > max) {
+				mas_set_err(mas, -EBUSY);
+				return false;
+			}
+
+			/* check if this slot is full */
+			entry = _ma_get_rcu_slot(mas->node, i, type);
+			if (entry)
+				goto next_slot;
+
+			/* Usable space from index to the top of this slot. */
+			this_gap = max - mas->index + 1;
+			if (this_gap >= size) {
+				/* within range and large enough */
+				mas->max = max;
+				found = true;
+				break;
+			}
+next_slot:
+			if (!i)
+				goto ascend;
+
+			max = min - 1;
+		} while (i--);
+		break;
+	default:
+
+		do {
+			unsigned long this_gap;
+			if (!i)
+				min = mas->min;
+			else
+				min = _ma_get_pivot(mas->node, i - 1,
+						type) + 1;
+
+
+			/* last is too little for this range */
+			if (mas->last < min)
+				goto next;
+
+
+			/* index is too large for this range */
+			if (mas->index > max) {
+				mas_set_err(mas, -EBUSY);
+				return false;
+			}
+
+			/* Internal allocation-range nodes track the largest
+			 * gap beneath each slot, so the child need not be
+			 * scanned to reject it.
+			 */
+			this_gap = ma_get_gap(mas->node, i);
+			/* Not big enough */
+			if (size > this_gap)
+				goto next;
+
+			/* Candidate found; descend into slot i below. */
+			break;
+
+next:
+			/* Not found in this node.*/
+			if (!i)
+				goto ascend;
+
+			max = min - 1;
+			if (mas->index > max ) {
+				mas_set_err(mas, -EBUSY);
+				return false;
+			}
+		} while (i--);
+		break;
+
+	case maple_dense:
+		// FIXME: find a line of nulls...
+		i = mas->index - mas->min;
+		found = true;
+		break;
+	}
+
+
+	if (!ma_is_leaf(type)) { //descend
+		struct maple_enode *next;
+		next = _ma_get_rcu_slot(mas->node, i, type);
+		mas->min = min;
+		mas->max = max;
+		if (next) {
+			mas->node = next;
+			/* Resume from the last occupied slot of the child. */
+			i = ma_data_end(mas->node, mt_node_type(next), &max);
+		} else {
+			found = true; // this is a non-leaf hole.
+		}
+	}
+
+	ma_set_slot(mas, i);
+	return found;
+ascend:
+	/* Out of slots here; the caller rewinds to the parent.  At the
+	 * root the entire range has been searched.
+	 */
+	if (ma_is_root(mas->node))
+		mas_set_err(mas, -EBUSY);
+
+	ma_set_slot(mas, i);
+	return found;
+}
+
static inline bool _mas_awalk(struct ma_state *mas, unsigned long size)
{
enum maple_type type;
goto next;
entry = _ma_get_rcu_slot(mas->node, i, type);
- if (!entry) {
- this_gap = pivot - mas->index;
- if (!this_gap) // No entry, pivot = index.
- this_gap = 1;
- }
+ if (entry)
+ goto next;
+
+ this_gap = pivot - mas->index;
+ if (!this_gap) // No entry, pivot = index.
+ this_gap = 1;
/* Does not fit in this gap or node */
if (mas->last < pivot - this_gap)
}
- if (type >= maple_range_16) { //descend
+ if (!ma_is_leaf(type)) { //descend
struct maple_enode *next;
next = _ma_get_rcu_slot(mas->node, i, type);
mas->min = min;
type = mt_node_type(mas->node);
pivot_cnt = mt_pivots[type];
- if (type < maple_range_16) // Leaf.
+ if (ma_is_leaf(type)) // Leaf.
ret = true;
switch (type) {
ma_set_slot(mas, i);
return ret;
}
+static inline bool mas_rewind_node(struct ma_state *mas);
+/* mas_rev_awalk() - Reverse allocation walk: search the tree, highest
+ * address first, for a gap of @size.
+ *
+ * Repeatedly calls _mas_rev_awalk(); when the walk fails to advance
+ * (the same node is seen twice in a row) it rewinds to the previous
+ * slot of the parent via mas_rewind_node().  Terminates on success or
+ * when the mas error state is set.
+ */
+static inline void mas_rev_awalk(struct ma_state *mas, unsigned long size)
+{
+	struct maple_enode *last = NULL;
+	unsigned long last_piv;
+	unsigned char slot;
+
+	/* Begin at the last occupied slot of the root. */
+	mas->node = mas_start(mas);
+	slot = ma_data_end(mas->node, mt_node_type(mas->node), &last_piv);
+	ma_set_slot(mas, slot);
+
+
+	/* There are 4 options:
+	 * go to child (descend)
+	 * go back to parent (ascend)
+	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
+	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+	 */
+	while (!_mas_rev_awalk(mas, size) && !mas_is_err(mas)) {
+		if (last == mas->node)
+			mas_rewind_node(mas);
+		else
+			last = mas->node;
+	}
+	return;
+}
static inline bool mas_skip_node(struct ma_state *mas);
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
-
{
struct maple_enode *last = NULL;
//mas->index is the start address for the search, which may be done with?
//mas->last is the end address for the search
- unsigned long min = mas->min;
- if (slot)
- min = ma_get_pivot(mas->node, slot - 1) + 1;
-
- if (mas->index < min)
- mas->index = min;
-
*index = mas->index;
mas->last = mas->index + size - 1;
_ma_insert(mas, entry, slot);
unsigned long size, unsigned long *index)
{
unsigned char slot = MAPLE_NODE_SLOTS;
+ unsigned long min;
mas->node = mas_start(mas);
if (!xa_is_node(rcu_dereference(mas->tree->ma_root))) {
return xa_err(mas->node);
#endif
+ min = mas->min;
+ if (slot)
+ min = ma_get_pivot(mas->node, slot - 1) + 1;
+
+ if (mas->index < min)
+ mas->index = min;
+
+ return mas_fill_gap(mas, entry, slot, size, index);
+
+no_gap:
+ return -EBUSY;
+}
+/* ma_rev_alloc() - Find the highest gap of @size in the tree and store
+ * @entry there; the chosen start address is written to *index by
+ * mas_fill_gap().
+ *
+ * Return: 0 on success or a negative error from the walk.
+ */
+static inline int ma_rev_alloc(struct ma_state *mas, void *entry,
+		unsigned long size, unsigned long *index)
+{
+	unsigned char slot = MAPLE_NODE_SLOTS;
+
+	mas->node = mas_start(mas);
+	/* Empty tree: expand the root and report the resulting pivot. */
+	if (!xa_is_node(rcu_dereference(mas->tree->ma_root))) {
+		ma_root_expand(mas, entry);
+		if (!mas->index)
+			return ma_get_pivot(mas->node, 0);
+		return ma_get_pivot(mas->node, 1);
+	}
+
+	mas_rev_awalk(mas, size);
+	if (mas_is_err(mas))
+		return xa_err(mas->node);
+
+	slot = ma_get_slot(mas);
+	if (slot == MAPLE_NODE_SLOTS)
+		goto no_gap;
+
+	// At this point, mas->node points to the right node and we have a
+	// slot that has a sufficient gap.
+#if 0
+	//FIXME: coalesce is an odd beast
+	mas_coalesce(mas);
+	if (mas_is_err(mas))
+		return xa_err(mas->node);
+#endif
+	/* Allocate as high as possible: min(mas->max, mas->last). */
+	if (mas->max < mas->last)
+		mas->index = mas->max;
+	else
+		mas->index = mas->last;
+
+	return mas_fill_gap(mas, entry, slot, size, index);
+	/* NOTE(review): the shared no_gap tail below returns 'entry' (a
+	 * void *) from an int-returning function -- presumably this should
+	 * be -EBUSY as in ma_alloc(); confirm.
+	 */
no_gap:
	return entry;
}
+/* mas_rewind_node() - Back the reverse walk up one slot, ascending to
+ * the parent whenever slot 0 is reached.
+ *
+ * Return: false (with -EBUSY set) when rewinding past slot 0 of the
+ * root; otherwise decrements the slot, refreshes the mas limits for the
+ * new slot, and returns true.
+ */
+static inline bool mas_rewind_node(struct ma_state *mas)
+{
+	unsigned char slot;
+
+	do {
+		if (ma_is_root(mas->node)) {
+			slot = ma_get_slot(mas);
+			if (!slot) {
+				/* Already at slot 0 of the root. */
+				mas_set_err(mas, -EBUSY);
+				return false;
+			}
+		} else {
+			slot = mt_parent_slot(mas->node);
+			ma_encoded_parent(mas);
+		}
+	} while (!slot);
+
+	ma_set_slot(mas, --slot);
+	mas_update_limits(mas, slot, mt_node_type(mas->node));
+	return true;
+}
/* Skip this slot in the parent. */
static inline bool mas_skip_node(struct ma_state *mas)
{
mtree_unlock(mas.tree);
return ret;
}
+/**
+ * mtree_alloc_rrange() - Allocate @size in the highest gap between @min
+ * and @max and store @entry there.
+ * @mt: The maple tree.
+ * @startp: Set to the start of the allocated range on success.
+ * @entry: The entry to store.
+ * @size: The size of the allocation, in units of the index.
+ * @min: The minimum index (inclusive).
+ * @max: The maximum index (inclusive).
+ * @gfp: Allocation flags.
+ *
+ * Return: 0 on success, -EINVAL on invalid arguments, -EBUSY when no
+ * gap of @size exists in [@min, @max], or a walk/allocation error.
+ */
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+		void *entry, unsigned long size, unsigned long min,
+		unsigned long max, gfp_t gfp)
+{
+	int ret = 0;
+
+	if (!mt_is_alloc(mt))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(mt_is_reserved(entry)))
+		return -EINVAL;
+
+	if (min > max)
+		return -EINVAL;
+
+	/* Reject size == 0 before using it: "max < size - 1" would
+	 * otherwise rely on the unsigned wrap of size - 1 to ULONG_MAX.
+	 */
+	if (!size)
+		return -EINVAL;
+
+	if (max < size - 1)
+		return -EINVAL;
+
+	MA_STATE(mas, mt, min, max - size);
+
+	mtree_lock(mas.tree);
+retry:
+	/* Search the whole tree; the last valid start is max - size + 1. */
+	mas.index = min;
+	mas.last = max - size + 1;
+	mas.min = 0;
+	mas.max = mt_max[maple_arange_64];
+	ret = ma_rev_alloc(&mas, entry, size, startp);
+	if (mas_nomem(&mas, gfp))
+		goto retry;
+
+	mtree_unlock(mas.tree);
+	return ret;
+}
int mtree_next(struct maple_tree *mt, unsigned long index, unsigned long *next)
{
MT_BUG_ON(mt, result != expected);
}
+
+/* check_mtree_alloc_rrange() - Exercise mtree_alloc_rrange() and verify
+ * both the return code and the allocated start address.
+ */
+static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
+		unsigned long start, unsigned long end, unsigned long size,
+		unsigned long expected, int eret, void *ptr)
+{
+	/* Seed with a wrong value so a missing store is detected. */
+	unsigned long result = expected + 1;
+	int ret;
+
+	ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
+			GFP_KERNEL);
+	MT_BUG_ON(mt, ret != eret);
+	if (!ret)
+		MT_BUG_ON(mt, result != expected);
+}
static noinline void check_load(struct maple_tree *mt, unsigned long index,
void *ptr)
{
check_insert(mt, 0, (void*) 0);
check_lb_not_empty(mt);
}
+/* check_alloc_rev_range() - Populate the tree with a /proc/self/maps
+ * style layout (shifted down 12 bits) and verify reverse range
+ * allocations against known holes.
+ */
+static noinline void check_alloc_rev_range(struct maple_tree *mt)
+{
+	// cat /proc/self/maps|awk '{print $1}'| awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+	unsigned long range[] = {
+	// Inclusive , Exclusive.
+		0x565234af2000, 0x565234af4000,
+		0x565234af4000, 0x565234af9000,
+		0x565234af9000, 0x565234afb000,
+		0x565234afc000, 0x565234afd000,
+		0x565234afd000, 0x565234afe000,
+		0x565235def000, 0x565235e10000,
+		0x7f36d4bfd000, 0x7f36d4ee2000,
+		0x7f36d4ee2000, 0x7f36d4f04000,
+		0x7f36d4f04000, 0x7f36d504c000,
+		0x7f36d504c000, 0x7f36d5098000,
+		0x7f36d5098000, 0x7f36d5099000,
+		0x7f36d5099000, 0x7f36d509d000,
+		0x7f36d509d000, 0x7f36d509f000,
+		0x7f36d509f000, 0x7f36d50a5000,
+		0x7f36d50b9000, 0x7f36d50db000,
+		0x7f36d50db000, 0x7f36d50dc000,
+		0x7f36d50dc000, 0x7f36d50fa000,
+		0x7f36d50fa000, 0x7f36d5102000,
+		0x7f36d5102000, 0x7f36d5103000,
+		0x7f36d5103000, 0x7f36d5104000,
+		0x7f36d5104000, 0x7f36d5105000,
+		0x7fff5876b000, 0x7fff5878d000,
+		0x7fff5878e000, 0x7fff58791000,
+		0x7fff58791000, 0x7fff58793000,
+	};
+
+	/* req_range consists of 5 values.
+	 * 1. min index
+	 * 2. max index
+	 * 3. size
+	 * 4. number that should be returned.
+	 * 5. return value
+	 */
+	unsigned long req_range[] = {
+		0x565234af9000, // Min
+		0x7fff58791000, // Max
+		0x1000, // Size
+		0x7fff5878d << 12, // First rev hole in our data of size 0x1000
+		0, // Return value success.
+
+		0x0, // Min
+		0x565234AF1 << 12, // Max
+		0x3000, // Size
+		0x565234AEF << 12, // hole - 3.
+		0, // Return value success.
+
+		0x0, // Min
+		0x0, // Max
+		0x1000, // Size
+		0x0, // First rev hole in our data of size 0x1000
+		0, // Return value success.
+
+		0x0, // Min
+		0x7F36D510A << 12, // Max
+		0x4000, // Size
+		0x7F36D5107 << 12, // First rev hole in our data of size 0x4000
+		0, // Return value success.
+
+		// Ascend test.
+		0x0,
+		34148798629 << 12,
+		19 << 12,
+		34148797436 << 12,
+		0x0,
+
+		// Too big test.
+		0x0,
+		18446744073709551615UL,
+		23171648240 << 12,
+		0x0,
+		-EBUSY,
+
+	};
+	int i, range_cnt = sizeof(range)/sizeof(range[0]);
+	int req_range_cnt = sizeof(req_range)/sizeof(req_range[0]);
+
+	for (i = 0; i < range_cnt; i+=2) {
+		// Inclusive , Inclusive (with the -1)
+		check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
+				xa_mk_value(range[i] >> 12));
+	}
+
+	printk("Starting reverse range allocation tests.\n");
+	for (i = 0; i < req_range_cnt; i+=5) {
+		mt_dump(mt);
+		printk("%d: alloc %ld in range %ld - %ld\n", i,
+				req_range[i+2] >> 12,
+				req_range[i] >> 12,
+				req_range[i+1] >> 12);
+		if (req_range[i+4])
+			printk("\t Should fail\n");
+		else
+			printk ("\tShould be starting at %ld\n",
+					req_range[i+3] >> 12);
+		// NOTE(review): the stored value is the request minimum,
+		// not the expected address -- confirm against
+		// check_alloc_range()'s convention.
+		check_mtree_alloc_rrange(mt,
+				req_range[i] >> 12, // start
+				req_range[i+1] >> 12, // end
+				req_range[i+2] >> 12, // size
+				req_range[i+3] >> 12, // expected address
+				req_range[i+4], // expected return
+				xa_mk_value(req_range[i] >> 12)); // pointer
+		mt_dump(mt);
+		printk("Done\n\n");
+	}
+
+	mtree_destroy(mt);
+}
static noinline void check_alloc_range(struct maple_tree *mt)
{
};
int i, range_cnt = sizeof(range)/sizeof(range[0]);
int req_range_cnt = sizeof(req_range)/sizeof(req_range[0]);
- unsigned long last = 0;
- printk("range cnt = %d\n", range_cnt);
- for (i = 0; i < range_cnt; i+=2) {
- printk("hole: %lu\n", (range[i] >> 12) - last);
- last = range[i+1] >> 12;
- printk("Decode table %d: %lx %lx -> %lu - %lu ", i,
- range[i], range[i+1],
- range[i] >> 12, range[i+1] >> 12);
- }
- printk("last hole: %lu\n", (MTREE_ALLOC_MAX>>12) - last);
- printk("i = count, inclusive start - inclusive end\n");
for (i = 0; i < range_cnt; i+=2) {
- printk("i = %d, %ld-%ld\n", i, range[i] >> 12, (range[i + 1] >> 12) - 1);
- // Inclusive , Inclusive (with the -1)
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12));
- mt_dump(mt);
- printk("Done\n\n");
}
for (i = 0; i < req_range_cnt; i+=5) {
mtree_init(&tree, MAPLE_ALLOC_RANGE);
check_alloc_range(&tree);
+ mtree_init(&tree, MAPLE_ALLOC_RANGE);
+ check_alloc_rev_range(&tree);
+
mtree_init(&tree, 0);
check_load(&tree, set[0], NULL); // See if 5015 -> NULL