if (slot < mt_pivots[type])
ms->max = _ma_get_pivot(ms->node, slot, type);
}
+
static inline void ma_encoded_parent(struct ma_state *mas)
{
void *parent, *gparent;
mas->node = mt_mk_node(gparent, mt_parent_enum(mas, parent));
ma_set_slot(mas, slot);
mas_update_limits(mas, slot, mt_parent_enum(mas, parent));
- //mas->node = ma_get_rcu_slot(mas->node, slot);
mas->node = ma_get_rcu_slot(mas->node, slot);
return;
}
pstart = mas->min;
for (int i = 0; i < mt_slots[mt]; i++) {
- if (i < mt_pivots[mt])
+ if (i < mt_pivots[mt]) {
pend = ma_get_pivot(mas->node, i);
- else
+ if (!pend)
+ break;
+ } else {
pend = mas->max;
+ }
-	gap = (pend ? pend : mas->max) - pstart;
+	gap = pend - pstart;
if ((gap > max_gap) && !ma_get_rcu_slot(mas->node, i))
{
unsigned long max_gap = 0;
- if (ma_is_root(mas->node)) {
- ma_set_gap(mas->node, slot, new);
+ if (ma_is_root(mas->node))
return;
- }
ma_encoded_parent(mas);
max_gap = ma_max_gap(mas);
static inline void mt_replace(struct ma_state *mas)
{
struct maple_node *mn = mt_to_node(mas->node);
- struct maple_node *parent;
+ struct maple_enode *parent = NULL;
struct maple_enode *prev;
unsigned char slot = 0;
prev = mas->tree->ma_root;
} else {
enum maple_type ptype = mt_parent_enum(mas, mas->node);
- parent = mt_parent(mas->node);
+ parent = mt_mk_node(mt_parent(mas->node), ptype);
slot = mt_parent_slot(mas->node);
- prev = ma_get_rcu_slot(mt_mk_node(parent, ptype), slot);
+ prev = ma_get_rcu_slot(parent, slot);
}
if (mt_to_node(prev) == mn)
((unsigned long)mas->tree | MA_ROOT_PARENT));
rcu_assign_pointer(mas->tree->ma_root, mt_mk_root(mas->node));
} else {
- rcu_assign_pointer(parent->slot[slot], mas->node);
+ ma_update_rcu_slot(parent, slot, mas->node);
}
mt_free(mt_to_node(prev));
min = ma_get_pivot(mas->node, slot - 1) + 1;
/* Figure out how many slots are needed for the entry. */
- if (max != mas->last)
+ if (max > mas->last)
n_end++;
if (mas->index && min != mas->index)
return 0;
mt = ma_determine_type(mas, min, slot);
-
leaf = mt_mk_node(ma_next_alloc(mas), mt);
mt_set_parent(leaf, mas->node, slot);
mas->node = leaf;
/* Not appending */
/* Creates a new node and copies until slot (inclusively) */
mas_partial_copy(mas, slot);
+
if (mas_is_err(mas))
return 0;
o_end = slot;
}
-
-
if (mas->index && min != mas->index) {
/* When writing a NULL entry, the order must be reversed to
* ensure readers don't get incorrect data on appends
}
/* Skip possible duplicate entry that contains a NULL */
- if (o_end != n_end && ma_get_pivot(p_mn, o_end) == mas->last)
+ if (o_end != n_end && ma_get_pivot(p_mn, o_end) <= mas->last)
o_end++;
/* Copy remainder of node if this isn't an append */
return ret;
}
+
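+/*
+ * Private: a single step of the allocation walk.  Search the current node
+ * for a gap of at least @size that overlaps [mas->index, mas->last],
+ * descending into a child or recording the slot when one is found.
+ * Returns true when a gap (or an unoccupied non-leaf slot) is found and
+ * false to make the caller ascend.
+ */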
+static inline bool _mas_awalk(struct ma_state *mas, unsigned long size)
+{
+ enum maple_type type;
+ unsigned long pivot, max, min;
+ unsigned char pivot_cnt, i;
+ bool leaf = false, found = false;
+
+ min = mas->min;
+ max = mas->max;
+
+ type = mt_node_type(mas->node);
+ pivot_cnt = mt_pivots[type];
+ if (type < maple_range_16)
+ leaf = true;
+
+ switch (type) {
+ case maple_leaf_64:
+		for (i = 0; i <= pivot_cnt; i++) {
+			unsigned long this_gap = 0;
+			void *entry = NULL;
+
+			/* The last slot has no pivot; it runs to mas->max. */
+			if (i == pivot_cnt)
+				pivot = max;
+			else
+				pivot = _ma_get_pivot(mas->node, i, type);
+
+			/* Track the previous pivot even when skipping. */
+			if (mas->index > pivot) {
+				min = pivot;
+				continue;
+			}
+
+			entry = _ma_get_rcu_slot(mas->node, i, type);
+			if (!entry)
+				this_gap = pivot - min;
+			min = pivot;
+ if (!this_gap)
+ continue;
+			/* The gap starts beyond mas->last; later gaps are
+			 * higher still, so stop searching this node.
+			 */
+			if (mas->last < pivot - this_gap)
+				goto ascend;
+ if (this_gap >= size) {
+ found = true;
+ break;
+ }
+ }
+ break;
+	default:
+		/* Internal node: use the stored per-slot gaps. */
+		pivot = 0;
+ i = ma_get_slot(mas);
+ for (; i < pivot_cnt; i++) {
+ unsigned long this_gap;
+ pivot = _ma_get_pivot(mas->node, i, type);
+			if (i != 0 && pivot == 0) // Remaining slots unused.
+				goto ascend;
+
+ this_gap = ma_get_gap(mas->node, i);
+ if (mas->last < pivot - this_gap)
+ goto ascend;
+
+ if (size <= this_gap) {
+ if (mas->index <= pivot) {
+ max = pivot;
+ break;
+ }
+ }
+ min = pivot + 1;
+ }
+
+		// Pivots exhausted; check whether the final slot can fit.
+		if ((i == pivot_cnt) && (mas->index > pivot)) {
+			if (size > ma_get_gap(mas->node, i))
+				goto ascend;
+		}
+		break;
+
+ case maple_dense:
+ // FIXME: find a line of nulls...
+ i = mas->index - mas->min;
+ found = true;
+ break;
+ }
+
+	if (!leaf) { // Descend.
+ mas->node = _ma_get_rcu_slot(mas->node, i, type);
+ mas->min = min;
+ mas->max = max;
+ if (!mas->node)
+ found = true; // this is a non-leaf hole.
+ }
+ ma_set_slot(mas, i);
+ return found;
+
+ascend:
+ return false;
+}
/*
* Private: Returns true if mas->node is a leaf
*/
type = mt_node_type(mas->node);
pivot_cnt = mt_pivots[type];
+ if (type < maple_range_16) // Leaf.
+ ret = true;
+
switch (type) {
default:
for (i = 0; i < pivot_cnt; i++) {
if ((i == pivot_cnt - 1) && (mas->index > pivot))
i++;
- if (type < maple_range_16) { // Leaf.
- ret = true;
+ if (ret)
goto done;
- }
break;
case maple_dense:
// Linear node.
i = mas->index - mas->min;
- ret = true;
goto done;
break;
}
ma_set_slot(mas, i);
return ret;
}
+static inline void mas_walk_next(struct ma_state *mas);
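+
+/*
+ * Private: walk down from the root looking for a gap of @size.  On return
+ * the slot is MAPLE_NODE_SLOTS when no gap was found; otherwise mas->node
+ * and the slot identify the gap.
+ */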
+static inline void mas_awalk(struct ma_state *mas, unsigned long size)
+{
+ struct maple_enode *last = NULL;
+
+ mas->node = mas_start(mas);
+ ma_set_slot(mas, 0);
+
+ /* There are 4 options:
+ * go to child (descend)
+ * go back to parent (ascend)
+ * no gap found. (return, slot == MAPLE_NODE_SLOTS)
+ * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+ */
+	while (!_mas_awalk(mas, size)) {
+ if (last == mas->node) // Ascend.
+ mas_walk_next(mas);
+
+ last = mas->node;
+ }
+}
static inline void ma_insert(struct ma_state *mas, void *entry)
{
unsigned char slot = MAPLE_NODE_SLOTS;
mas_set_err(mas, -EEXIST);
return;
}
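+
+/*
+ * Private: write @entry over @size units of the gap found at @slot,
+ * starting at the lowest fitting index, and report that start address
+ * through @index.
+ */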
+static int mas_fill_gap(struct ma_state *mas, void *entry, unsigned char slot,
+ unsigned long size, unsigned long *index)
+{
+	unsigned long min = mas->min;
+
+	/* mas->index is the start address for the search and mas->last is
+	 * the end address; the start cannot precede this slot's minimum.
+	 */
+	if (slot)
+		min = ma_get_pivot(mas->node, slot - 1) + 1;
+
+ if (mas->index < min)
+ mas->index = min;
+
+ *index = mas->index;
+ mas->last = mas->index + size - 1;
+ _ma_insert(mas, entry, slot);
+ return 0;
+}
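+
+/*
+ * Private: allocate a range of @size.  An empty tree is handled by
+ * expanding the root; otherwise walk for a suitable gap and fill it with
+ * @entry.  Returns 0 on success or -EBUSY when no large-enough gap exists.
+ */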
+static inline int ma_alloc(struct ma_state *mas, void *entry,
+ unsigned long size, unsigned long *index)
+{
+ unsigned char slot = MAPLE_NODE_SLOTS;
+
+ mas->node = mas_start(mas);
+	if (!xa_is_node(rcu_dereference(mas->tree->ma_root))) {
+		/* Empty tree: the gap trivially starts at mas->index. */
+		mas->last = mas->index + size - 1;
+		ma_root_expand(mas, entry);
+		if (mas_is_err(mas))
+			return xa_err(mas->node);
+
+		*index = mas->index;
+		return 0;
+	}
+
+ mas_awalk(mas, size);
+ slot = ma_get_slot(mas);
+ if (slot == MAPLE_NODE_SLOTS)
+ goto no_gap;
+
+ // At this point, mas->node points to the right node and we have a
+ // slot that has a sufficient gap.
+#if 0
+ //FIXME: coalesce is an odd beast
+ mas_coalesce(mas);
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+#endif
+
+ return mas_fill_gap(mas, entry, slot, size, index);
+
+no_gap:
+ return -EBUSY;
+}
/*
* Private
return entry;
}
-unsigned long mas_walk_next(struct ma_state *mas)
+static inline void mas_walk_next(struct ma_state *mas)
{
- return 0;
+ unsigned char slot;
+
+	/* Ascend until a node with a next slot is found. */
+	do {
+		slot = mt_parent_slot(mas->node);
+		ma_encoded_parent(mas);
+	} while (slot >= mt_slot_count(mas->node) - 1);
+
+ slot++;
+ ma_set_slot(mas, slot);
+ mas_update_limits(mas, slot, mt_node_type(mas->node));
+ mas->node = ma_get_rcu_slot(mas->node, slot);
}
/* Private
* Find an entry and erase the entire range
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
- int ret = 0;
-
MA_STATE(ms, mt, first, last);
if (WARN_ON_ONCE(mt_is_reserved(entry)))
if (mas_is_err(&ms))
return xa_err(ms.node);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(mtree_insert_range);
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
}
EXPORT_SYMBOL(mtree_insert);
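+/*
+ * mtree_alloc_range() - Find a gap of @size with its start in
+ * [@min, @max - @size], store @entry over the range and report the chosen
+ * start through @startp.  Returns 0 on success, -EBUSY when no gap is
+ * large enough, or -EINVAL on bad parameters.
+ *
+ * Illustrative (hypothetical) caller, requesting 16 units below 1000:
+ *
+ *	unsigned long start;
+ *
+ *	if (!mtree_alloc_range(mt, &start, xa_mk_value(0), 16, 0, 1000,
+ *			GFP_KERNEL))
+ *		pr_info("allocated [%lu, %lu]\n", start, start + 15);
+ */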
-int mtree_alloc_range(struct maple_tree *mt, void *startp,
- void *ptr, unsigned long size, unsigned long min,
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp)
{
int ret = 0;
+	MA_STATE(mas, mt, min, max - size);
+
+	if (!mt_is_alloc(mt))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(mt_is_reserved(entry)))
+		return -EINVAL;
+
+	if (min > max)
+		return -EINVAL;
+
+	if (max < size)
+		return -EINVAL;
+
+	if (!size)
+		return -EINVAL;
+
+ mtree_lock(mas.tree);
+retry:
+	/* Reset the search window for each attempt. */
+	mas.index = min;
+	mas.last = max - size;
+	mas.min = 0;
+	mas.max = ULONG_MAX;
+ ret = ma_alloc(&mas, entry, size, startp);
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ mtree_unlock(mas.tree);
return ret;
}
int ret = -ENOENT;
MA_STATE(mas, mt, index, index);
rcu_read_lock();
- *next = mas_walk_next(&mas);
+ mas_walk_next(&mas);
rcu_read_unlock();
- if (*next)
+ if (mas.node)
return 0;
return ret;
return mtree_erase(mt, index);
}
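+/*
+ * Request a range allocation and verify both the return code and the
+ * resulting start address against the expected values.
+ */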
+static noinline void check_mtree_alloc_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, unsigned long size,
+ unsigned long expected, int eret, void *ptr)
+{
+	unsigned long result = expected + 1;
+	int ret;
+
+	ret = mtree_alloc_range(mt, &result, ptr, size, start, end,
+			GFP_KERNEL);
+ MT_BUG_ON(mt, ret != eret);
+ MT_BUG_ON(mt, result != expected);
+}
static noinline void check_load(struct maple_tree *mt, unsigned long index,
void *ptr)
{
0x7f36d5105000, 0x7fff5876b000, 0x7fff5878d000,
0x7fff5878e000, 0x7fff58791000, 0x7fff58791000,
0x7fff58793000,};
- int i, range_cnt = 48;
+
+
+	/* req_range consists of 5 values per request:
+	 * 1. min index
+	 * 2. max index
+	 * 3. size
+	 * 4. expected start address of the allocated range
+	 * 5. expected return value
+	 */
+	unsigned long req_range[] = {
+		0x565234af9000, // Min
+		0x7fff58791000, // Max
+		0x1000,         // Size
+		0x565234afb000, // First hole in our data of size 0x1000.
+		0,              // Return value success.
+
+		0x0,            // Min
+		0x7fff58791000, // Max
+		0x1000,         // Size
+		0x0,            // First hole in our data of size 0x1000.
+		0,              // Return value success.
+	};
+	int i, range_cnt = ARRAY_SIZE(range);
+	int req_range_cnt = ARRAY_SIZE(req_range);
for (i = 0; i < range_cnt/2; i+=2) {
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12));
}
+	for (i = 0; i < req_range_cnt; i += 5) {
+ check_mtree_alloc_range(mt,
+ req_range[i] >> 12, // start
+ req_range[i+1] >> 12, // end
+ req_range[i+2] >> 12, // size
+ req_range[i+3] >> 12, // expected address
+				req_range[i+4], // expected return
+ xa_mk_value(req_range[i] >> 12)); // pointer
+ }
mtree_destroy(mt);
}
mtree_init(&tree, MAPLE_ALLOC_RANGE);
check_alloc_range(&tree);
mtree_init(&tree, 0);