kmem_cache_free(maple_node_cache, node);
}
-static void ma_free(struct maple_node *node)
+static void ma_free_rcu(struct maple_node *node)
{
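+ /* Point the parent at the node itself so it reads as dead until the RCU callback frees it. */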
node->parent = ma_parent_ptr(node);
call_rcu(&node->rcu, mt_free_rcu);
}
+
static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
return ((unsigned long)entry >> 3) & 15;
}
static inline void mte_free(struct maple_enode *enode)
{
- ma_free(mte_to_node(enode));
+ ma_free_rcu(mte_to_node(enode));
}
static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
* @mn - the head of the sub-tree to free.
* @mtree - the maple tree that includes @mn (for type)
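+ * @rcu - free each node through an RCU callback (true) or immediately (false)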
*/
-void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree)
+void _mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree,
+ bool rcu)
{
- struct maple_enode *node;
- unsigned int type = mte_node_type(mn);
+ struct maple_enode *end_child;
+ /* Decoded children queued for the bulk free; slot_cnt never exceeds MAPLE_NODE_SLOTS. */
+ void *slots[MAPLE_NODE_SLOTS];
unsigned char slot_cnt = mt_slot_count(mn);
- int i;
+ int end;
- switch (type) {
- case maple_range_16:
- case maple_range_32:
- case maple_range_64:
- case maple_arange_64:
- for (i = 0; i < slot_cnt; i++) {
- node = mte_get_rcu_slot(mn, i, mtree);
- if (node)
- mte_destroy_walk(node, mtree);
+ if (mte_is_leaf(mn))
+ return;
+
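+ /* Walk each occupied slot: recurse into subtrees, then free or queue the child. */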
+ for (end = 0; end < slot_cnt; end++) {
+ end_child = mte_get_rcu_slot(mn, end, mtree);
+ if (!end_child) {
+ end--;
+ break;
}
- break;
- default:
- break;
- }
- mte_free(mn);
+ if (!mte_is_leaf(end_child))
+ _mte_destroy_walk(end_child, mtree, rcu);
+
+ if (rcu)
+ ma_free_rcu(mte_to_node(end_child));
+ else
+ slots[end] = mte_to_node(end_child);
+ }
+ if (!rcu)
+ kmem_cache_free_bulk(maple_node_cache,
+ min_t(int, end + 1, slot_cnt), slots);
+}
+void mte_destroy_walk(struct maple_enode *mn, struct maple_tree *mtree,
+ bool rcu)
+{
+ _mte_destroy_walk(mn, mtree, rcu);
+ if (rcu)
+ ma_free_rcu(mte_to_node(mn));
+ else
+ kmem_cache_free(maple_node_cache, mte_to_node(mn));
}
/*
* mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
while (mat->head) {
next = mte_to_mat(mat->head)->next;
if (recursive)
- mte_destroy_walk(mat->head, mat->mtree);
+ mte_destroy_walk(mat->head, mat->mtree, true);
else
mte_free(mat->head);
mat->head = next;
}
// Free the allocations.
-static inline void ma_free_alloc(struct maple_node *node)
+static inline void ma_free_rcu_alloc(struct maple_node *node)
{
int alloc = 0;
while (alloc < MAPLE_NODE_SLOTS && node->slot[alloc]) {
if (ma_mnode_ptr(node->slot[alloc])->slot[0])
- ma_free_alloc(node->slot[alloc]);
+ ma_free_rcu_alloc(node->slot[alloc]);
else
kfree(node->slot[alloc]);
alloc++;
struct maple_node *node = mas_get_alloc(mas);
if (node)
- ma_free_alloc(node);
+ ma_free_rcu_alloc(node);
mas->alloc = NULL;
}
/*
}
EXPORT_SYMBOL(mtree_erase);
-void mtree_destroy(struct maple_tree *mt)
+void mtree_direct_destroy(struct maple_tree *mt)
{
- struct maple_enode *destroyed;
+ mtree_lock(mt);
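+ /* Immediate freeing: only safe when no readers can still access the tree. */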
+ if (xa_is_node(mt->ma_root))
+ mte_destroy_walk(mt->ma_root, mt, false);
+
+ mt->ma_flags = 0;
+ mt->ma_height = 0;
+ rcu_assign_pointer(mt->ma_root, NULL);
+ mtree_unlock(mt);
+}
+/*
+ * mtree_direct_destroy() is not exported: it frees nodes immediately, which
+ * is unsafe while RCU readers may still reference the tree.
+ */
+void mtree_destroy(struct maple_tree *mt)
+{
mtree_lock(mt);
- destroyed = mt->ma_root;
- if (xa_is_node(destroyed))
- mte_destroy_walk(destroyed, mt);
+ if (xa_is_node(mt->ma_root))
+ mte_destroy_walk(mt->ma_root, mt, true);
mt->ma_flags = 0;
mt->ma_height = 0;
MT_BUG_ON(mt, mn->slot[1] != NULL);
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
- ma_free(mn);
+ ma_free_rcu(mn);
// Allocate 3 nodes, will fail.
mas_node_cnt(&mas, 3);
// Drop the lock and allocate 3 nodes.
for (j = i; j > 0; j--) { //Free the requests
mn = mas_next_alloc(&mas); // get the next node.
MT_BUG_ON(mt, mn == NULL);
- ma_free(mn);
+ ma_free_rcu(mn);
}
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
}
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != j);
mn = mas_next_alloc(&mas2); // get the next node.
MT_BUG_ON(mt, mn == NULL);
- ma_free(mn);
+ ma_free_rcu(mn);
}
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas2) != 0);
}