struct maple_enode *next; /* Overlaps the pivot */
};
-struct maple_node {
- union {
- struct {
- struct maple_pnode *parent;
- void __rcu *slot[MAPLE_NODE_SLOTS];
- };
- struct {
- void *pad;
- struct rcu_head rcu;
- };
- struct maple_range_64 mr64;
- struct maple_arange_64 ma64;
- struct maple_range_32 mr32;
- struct maple_range_16 mr16;
- struct maple_sparse_64 ms64;
- struct maple_sparse_32 ms32;
- struct maple_sparse_21 ms21;
- struct maple_sparse_16 ms16;
- struct maple_sparse_9 ms9;
- struct maple_sparse_6 ms6;
- };
-};
-
enum maple_type {
maple_dense,
maple_sparse_6,
maple_arange_64,
};
+
/* Flags:
* MAPLE_ALLOC_RANGE - This tree is used to store allocation ranges. Use
* alloc range types (MAPLE_ARANGE_*)
void __rcu *ma_root;
};
+
#define MTREE_INIT(name, flags) { \
.ma_lock = __SPIN_LOCK_UNLOCKED(name.ma_lock), \
.ma_flags = flags, \
#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ enum maple_type type;
+ struct maple_tree mt;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_range_32 mr32;
+ struct maple_range_16 mr16;
+ struct maple_sparse_64 ms64;
+ struct maple_sparse_32 ms32;
+ struct maple_sparse_21 ms21;
+ struct maple_sparse_16 ms16;
+ struct maple_sparse_9 ms9;
+ struct maple_sparse_6 ms6;
+ };
+};
+
struct ma_topiary {
struct maple_enode *head;
struct maple_enode *tail;
{
return ma_set_pivot(mte_to_node(mn), slot, mte_node_type(mn), val);
}
+
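+/*
+ * ma_get_slots() - Return the slot array of a node for a given node type.
+ * @mn: The maple node
+ * @type: The type of the node @mn
+ */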
+static inline void __rcu **ma_get_slots(struct maple_node *mn,
+ enum maple_type type)
+{
+ switch (type) {
+ default:
+ case maple_arange_64:
+ return mn->ma64.slot;
+ case maple_range_64:
+ case maple_leaf_64:
+ return mn->mr64.slot;
+ case maple_dense:
+ return mn->slot;
+ case maple_sparse_6:
+ return mn->ms6.slot;
+ case maple_sparse_9:
+ return mn->ms9.slot;
+ case maple_sparse_16:
+ return mn->ms16.slot;
+ case maple_sparse_21:
+ return mn->ms21.slot;
+ case maple_sparse_32:
+ return mn->ms32.slot;
+ case maple_sparse_64:
+ return mn->ms64.slot;
+ case maple_range_16:
+ case maple_leaf_16:
+ return mn->mr16.slot;
+ case maple_range_32:
+ case maple_leaf_32:
+ return mn->mr32.slot;
+ }
+}
static inline struct maple_enode *ma_get_rcu_slot(
const struct maple_node *mn, unsigned char slot,
enum maple_type type, struct maple_tree *mtree)
{
-
switch (type) {
case maple_range_64:
case maple_leaf_64:
{
ma_set_rcu_slot(mte_to_node(mn), slot, mte_node_type(mn), val);
}
-/*
- * mte_destroy_walk() - Free the sub-tree from @mn and below.
- *
- * @mn - the head of the sub-tree to free.
- * @mtree - the maple tree that includes @mn (for type)
- */
-void _mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree,
- bool rcu)
-{
- struct maple_enode *end_child;
- unsigned char slot_cnt = mt_slot_count(mn);
- int end;
-
- if (mte_is_leaf(mn))
- return;
-
- for (end = 0; end < slot_cnt; end++) {
- end_child = mte_get_rcu_slot(mn, end, mtree);
- if (!end_child)
- break;
-
- if (!mte_is_leaf(end_child))
- _mte_destroy_walk(end_child, mtree, rcu);
-
- if (rcu)
- ma_free_rcu(mte_to_node(end_child));
- else
- mte_set_rcu_slot(mn, end, mte_to_node(end_child));
- }
- if (!rcu) {
- void **slot_array;
- struct maple_node *node = mte_to_node(mn);
- switch (mte_node_type(mn)) {
- default:
- case maple_arange_64:
- slot_array = node->ma64.slot;
- break;
- case maple_range_64:
- slot_array = node->mr64.slot;
- break;
- case maple_range_32:
- slot_array = node->mr32.slot;
- break;
- case maple_range_16:
- slot_array = node->mr16.slot;
- break;
- }
- kmem_cache_free_bulk(maple_node_cache, end, slot_array);
- }
-}
-void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree,
- bool rcu)
-{
- struct maple_node *node = mte_to_node(mn);
- _mte_destroy_walk(mn, mtree, rcu);
- rcu ? ma_free_rcu(node) : kmem_cache_free(maple_node_cache, node);
-}
/*
* mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
*
mte_to_mat(mat->tail)->next = dead_enode;
mat->tail = dead_enode;
}
+
+void mte_destroy_walk(struct maple_enode *enode, struct maple_tree *mt);
+
/*
* mat_free() - Free all nodes in a dead list.
*
while (mat->head) {
next = mte_to_mat(mat->head)->next;
if (recursive)
- mte_destroy_walk(mat->head, mat->mtree, true);
+ mte_destroy_walk(mat->head, mat->mtree);
else
mte_free(mat->head);
mat->head = next;
}
}
+
/*
* mas_dup_state() - duplicate the internal state of a ma_state.
*
mas->node = mas_dup_node(oldmas, mas);
mte_to_node(mas->node)->parent = ma_parent_ptr(
- ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ ((unsigned long)mas->tree | MA_ROOT_PARENT));
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
dup_children:
mas_dup_children(mas, node_cnt);
if (mas_is_err(mas))
return;
+
mas_adopt_children(mas, mas->node);
}
}
mtree_unlock(mas->tree);
}
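+
+/*
+ * mas_dead_leaves() - Mark all children of this node as dead.
+ * @mas: The maple state
+ * @slots: The slot array of the node
+ *
+ * Each child is marked dead by pointing its parent at itself, and each slot
+ * entry is converted from an encoded node to a plain node pointer so that the
+ * array can be handed to kmem_cache_free_bulk().
+ *
+ * Return: The number of children marked dead.
+ */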
+static inline unsigned char mas_dead_leaves(struct ma_state *mas, void **slots)
+{
+ struct maple_node *node;
+	int slot = 0;
+
+ for (; slot < mt_slot_count(mas->node); slot++) {
+ if (!slots[slot])
+ break;
+ node = mte_to_node(ma_enode_ptr(slots[slot]));
+ node->parent = ma_parent_ptr(node);
+ slots[slot] = (void *)node;
+ }
+ return slot;
+}
+
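+/*
+ * mas_destroy_descend() - Walk down the first slots until reaching a node
+ * whose first slot holds a leaf, updating @mas along the way.
+ * @mas: The maple state
+ *
+ * Return: The slot array of the node where the descent stopped.
+ */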
+void **mas_destroy_descend(struct ma_state *mas)
+{
+ void **slots = ma_get_slots(mte_to_node(mas->node),
+ mte_node_type(mas->node));
+ while (!mte_is_leaf(slots[0])) {
+ mas->node = slots[0];
+ slots = ma_get_slots(mte_to_node(mas->node),
+ mte_node_type(mas->node));
+ }
+ return slots;
+}
+
+/*
+ * mt_destroy_walk() - Free the sub-tree below the dead node containing @head.
+ * @head: The rcu_head embedded in the maple node at the top of the sub-tree.
+ */
+void mt_destroy_walk(struct rcu_head *head)
+{
+ unsigned char end, slot = 0;
+ void **slots;
+ struct maple_node *node = container_of(head, struct maple_node, rcu);
+ struct maple_enode *start;
+ MA_STATE(mas, &node->mt, 0, 0);
+
+ if (ma_is_leaf(node->type))
+ goto free_leaf;
+
+ start = mt_mk_node(node, node->type);
+ mas.node = start;
+ slots = mas_destroy_descend(&mas);
+
+ while (!mas_is_none(&mas)) {
+ enum maple_type type;
+
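+		/* Mark this node's children dead and free them in bulk. */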
+ end = mas_dead_leaves(&mas, slots);
+ kmem_cache_free_bulk(maple_node_cache, end, slots);
+ if (mas.node == start)
+ break;
+
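+		/* Ascend; this node is freed later with its siblings. */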
+ type = mas_parent_enum(&mas, mas.node);
+ slot = mte_parent_slot(mas.node);
+ mas.node = mt_mk_node(mte_parent(mas.node), type);
+ slots = ma_get_slots(mte_to_node(mas.node), type);
+
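+		/* No more siblings: free this level on the next pass. */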
+ if ((slot == mt_slots[type] - 1) || !slots[slot + 1])
+ continue;
+
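+		/* Descend into the next sibling's sub-tree. */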
+ mas.node = slots[++slot];
+ slots = mas_destroy_descend(&mas);
+ }
+
+free_leaf:
+ kmem_cache_free(maple_node_cache, node);
+}
+
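+/*
+ * mte_destroy_walk() - Mark @enode dead and free the sub-tree below it once
+ * an RCU grace period has passed.
+ * @enode: The encoded node at the head of the sub-tree to free
+ * @mt: The maple tree containing @enode (its flags are copied for the walk)
+ */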
+void mte_destroy_walk(struct maple_enode *enode, struct maple_tree *mt)
+{
+ struct maple_node *node = mte_to_node(enode);
+
+ node->type = mte_node_type(enode);
+ node->mt.ma_flags = mt->ma_flags;
+ mte_set_node_dead(enode);
+ call_rcu(&node->rcu, mt_destroy_walk);
+}
/* Interface */
void __init maple_tree_init(void)
{
void mtree_direct_destroy(struct maple_tree *mt)
{
mtree_lock(mt);
- if (xa_is_node(mt->ma_root))
- mte_destroy_walk(mt->ma_root, mt, false);
+ if (xa_is_node(mt->ma_root)) {
+ struct maple_node *node = mte_to_node(mt->ma_root);
+
+ node->type = mte_node_type(mt->ma_root);
+ node->mt.ma_flags = mt->ma_flags;
+ mte_set_node_dead(mt->ma_root);
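+		/* Walk and free immediately instead of deferring to RCU. */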
+ mt_destroy_walk(&node->rcu);
+ }
mt->ma_flags = 0;
mt->ma_height = 0;
{
mtree_lock(mt);
if (xa_is_node(mt->ma_root))
- mte_destroy_walk(mt->ma_root, mt, true);
+ mte_destroy_walk(mt->ma_root, mt);
mt->ma_flags = 0;
mt->ma_height = 0;
}
}
}
+
void mas_validate_child_slot(struct ma_state *mas)
{
enum maple_type type = mte_node_type(mas->node);
}
}
}
+
/*
* Validate all pivots are within mas->min and mas->max.
*/