mat->tail = dead_enode;
}
-void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
+static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
/*
* mat_free() - Free all nodes in a dead list.
*
memset(reuse, 0, sizeof(*reuse));
count = mas_allocated(mas);
- if (count && (head->node_count < MAPLE_NODE_SLOTS - 1)) {
+ if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
if (head->slot[0])
head->node_count++;
head->slot[head->node_count] = reuse;
return;
mas_set_alloc_req(mas, 0);
- if (!allocated || mas->alloc->node_count == MAPLE_NODE_SLOTS - 2) {
+ if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
node = (struct maple_alloc *)mt_alloc_one(gfp);
if (!node)
goto nomem;
mas_push_node(mas, used);
}
-// Free the allocations.
-void mas_empty_alloc(struct ma_state *mas)
-{
- struct maple_alloc *node;
-
- while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
- node = mas->alloc;
- mas->alloc = mas->alloc->slot[0];
- if (node->node_count > 0)
- mt_free_bulk(node->node_count, (void**)&node->slot[1]);
- kmem_cache_free(maple_node_cache, node);
- }
- mas->alloc = NULL;
-}
-
-/*
- * Check if there was an error allocating and do the allocation if necessary
- * If there are allocations, then free them.
- */
-bool mas_nomem(struct ma_state *mas, gfp_t gfp)
- __must_hold(mas->tree->lock)
-{
- if (mas->node != MA_ERROR(-ENOMEM)) {
- mas_empty_alloc(mas);
- return false;
- }
-
- if (gfpflags_allow_blocking(gfp)) {
- mtree_unlock(mas->tree);
- mas_alloc_nodes(mas, gfp);
- mtree_lock(mas->tree);
- } else {
- mas_alloc_nodes(mas, gfp);
- }
-
- if (!mas_allocated(mas))
- return false;
-
- mas->node = MAS_START;
- return true;
-}
-
static void mas_node_count(struct ma_state *mas, int count)
{
unsigned long allocated = mas_allocated(mas);
}
}
-int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
-{
- int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
- struct maple_enode *enode = mas->node;
- int nr_nodes;
- int ret;
-
- nr_entries++; // For trailing null.
-
- if (!mt_is_alloc(mas->tree))
- nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
-
- // Leaves
- nr_nodes = DIV_ROUND_UP(nr_entries, MAPLE_RANGE64_SLOTS - 1);
- // Internal nodes.
- nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
- mas_node_count(mas, nr_nodes);
-
- if (!mas_is_err(mas))
- return 0;
-
- ret = xa_err(mas->node);
- mas->node = enode;
- return ret;
-
-}
/*
* Sets up maple state for operations by setting mas->min = 0 & mas->node to
* certain values.
unsigned char offset = 0;
void **slots;
+
if (mte_is_root(mas->node)) {
prev = mas_root_locked(mas);
} else {
return mas_spanning_rebalance(mas, &mast, empty_count);
}
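+/*
+ * mas_destroy_rebalance() - Rebalance the final node of a bulk insert that
+ * stored fewer entries than expected and left the node underfull.
+ * @mas: The maple state
+ * @mas_end: The offset of the last piece of data in the node.
+ *
+ * Called from mas_destroy() to fix up an invalid final node.
+ */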
+static inline void mas_destroy_rebalance(struct ma_state *mas,
+ unsigned char mas_end)
+{
+ struct maple_big_node b_node;
+
+ /* Slow path. */
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ b_node.type = mte_node_type(mas->node);
+ mas_mab_cp(mas, 0, mas_end, &b_node, 0);
+ b_node.b_end = mas_end;
+ mas_rebalance(mas, &b_node);
+}
+
static inline bool _mas_split_final_node(struct maple_subtree_state *mast,
struct ma_state *mas, int height)
{
return content;
}
-void *mas_store(struct ma_state *mas, void *entry)
-{
- if (mas->index <= mas->last)
- return _mas_store(mas, entry, true);
-
- mas_set_err(mas, -EINVAL);
- return NULL;
-
-}
-
static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
/*
* mas_prev_node() - Find the prev non-null entry at the same level in the
}
/*
- * mte_destroy_walk() - Free the sub-tree from @mn and below.
+ * mt_destroy_walk() - Free the node and all nodes in this sub-tree.
*
- * @mn - the head of the (sub-)tree to free.
+ * Walk all nodes from the start node and free them all, in bulk where possible.
+ *
+ * @head: The rcu_head of the starting node.
*/
-void mt_destroy_walk(struct rcu_head *head)
+static void mt_destroy_walk(struct rcu_head *head)
{
unsigned char end, offset = 0;
void **slots;
ma_free_rcu(node);
}
-void mte_destroy_walk(struct maple_enode *enode, struct maple_tree *mt)
+/*
+ * mte_destroy_walk() - Free the sub-tree from @enode and below.
+ *
+ * @enode: the encoded maple node (maple_enode) to start the walk from.
+ * @mt: the maple tree; needed for the node types.
+ */
+static inline void mte_destroy_walk(struct maple_enode *enode,
+ struct maple_tree *mt)
{
struct maple_node *node = mte_to_node(enode);
}
/* Interface */
+
void __init maple_tree_init(void)
{
maple_node_cache = kmem_cache_create("maple_node",
}
EXPORT_SYMBOL(mtree_destroy);
+/*
+ * mas_store() - Store an @entry.
+ * @mas: The maple state.
+ * @entry: The entry to store.
+ *
+ * The @mas->index and @mas->last are used to set the range for the @entry.
+ * Note: The @mas should have pre-allocated nodes to ensure there is memory to
+ * store the entry.  Please see mas_entry_count()/mas_destroy() for more details.
+ */
+void *mas_store(struct ma_state *mas, void *entry)
+{
+ if (mas->index <= mas->last)
+ return _mas_store(mas, entry, true);
+
+ mas_set_err(mas, -EINVAL);
+ return NULL;
+}
+
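+/*
+ * Illustrative use of mas_store() under the tree lock.  The tree, range,
+ * entry and ret below are hypothetical, and a prior mas_entry_count() call is
+ * assumed to have pre-allocated the needed nodes:
+ *
+ *	MA_STATE(mas, &my_tree, 5, 10);
+ *
+ *	mtree_lock(&my_tree);
+ *	mas_store(&mas, my_entry);		// store my_entry over [5, 10]
+ *	if (mas_is_err(&mas))
+ *		ret = xa_err(mas.node);
+ *	mtree_unlock(&my_tree);
+ */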
+/*
+ * mas_entry_count() - Set the expected number of entries that will be inserted.
+ *
+ * @mas: The maple state
+ * @nr_entries: The number of expected entries.
+ *
+ * This will attempt to pre-allocate enough nodes to store the expected number
+ * of entries. The allocations will occur using the bulk allocator interface
+ * for speed. Please call mas_destroy() on the @mas after inserting the entries
+ * to ensure any unused nodes are freed.
+ */
+int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
+{
+ int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
+ struct maple_enode *enode = mas->node;
+ int nr_nodes;
+ int ret;
+
+	// Worst case is a gap between each entry plus a trailing null, which
+	// needs 2 * nr_entries + 1 slots.  max() avoids overflow; if the
+	// estimate ends up too low, it just means allocation can happen during
+	// insertion of entries.
+ nr_nodes = max(nr_entries, nr_entries * 2 + 1);
+
+ if (!mt_is_alloc(mas->tree))
+ nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
+
+ // Leaves
+	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 1);
+ // Internal nodes.
+ nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+ mas_node_count(mas, nr_nodes);
+
+ if (!mas_is_err(mas))
+ return 0;
+
+ ret = xa_err(mas->node);
+ mas->node = enode;
+ return ret;
+}
+
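+/*
+ * Illustrative bulk-insert pattern.  The tree, nr, first and entries[] are
+ * hypothetical, and error handling is abbreviated:
+ *
+ *	MA_STATE(mas, &my_tree, 0, 0);
+ *
+ *	mtree_lock(&my_tree);
+ *	if (!mas_entry_count(&mas, nr)) {	// pre-allocate for nr entries
+ *		for (i = 0; i < nr; i++) {
+ *			mas.index = mas.last = first + i;
+ *			mas_store(&mas, entries[i]);
+ *		}
+ *	}
+ *	mas_destroy(&mas);			// free any unused pre-allocations
+ *	mtree_unlock(&my_tree);
+ */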
+/*
+ * mas_destroy() - destroy a maple state.
+ * @mas: The maple state
+ *
+ * Frees any allocated nodes associated with this maple state.  If the maple
+ * state was used for a bulk insert (see mas_entry_count()), an underfull
+ * final node is also rebalanced before the unused nodes are freed.
+ */
+void mas_destroy(struct ma_state *mas)
+{
+ struct maple_alloc *node;
+
+ // When using mas_for_each() to insert an expected number of elements,
+ // it is possible that the number inserted is less than the expected
+ // number. To fix an invalid final node, a check is performed here to
+ // rebalance the previous node with the final node.
+ if ((mas->max == ULONG_MAX) && !mas_is_err(mas) && !mas_is_start(mas) &&
+ mas_searchable(mas) && !mte_is_root(mas->node)) {
+ unsigned char end = mas_data_end(mas);
+
+		if (end < mt_min_slot_count(mas->node))
+			mas_destroy_rebalance(mas, end);
+ }
+
+ while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
+ node = mas->alloc;
+ mas->alloc = mas->alloc->slot[0];
+ if (node->node_count > 0)
+ mt_free_bulk(node->node_count, (void**)&node->slot[1]);
+ kmem_cache_free(maple_node_cache, node);
+ }
+ mas->alloc = NULL;
+}
+
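+/*
+ * Illustrative error-path teardown (mas and ret are hypothetical): even when
+ * fewer entries than expected were stored, or the operation failed, calling
+ * mas_destroy() releases the unused pre-allocated nodes:
+ *
+ *	if (mas_is_err(&mas))
+ *		ret = xa_err(mas.node);
+ *	mas_destroy(&mas);
+ */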
+/*
+ * mas_nomem() - Check if there was an error allocating and, if so, retry the
+ * allocation; otherwise free any unused allocations.  Returns true if the
+ * allocation succeeded and the caller should retry the operation.
+ */
+bool mas_nomem(struct ma_state *mas, gfp_t gfp)
+ __must_hold(mas->tree->lock)
+{
+ if (mas->node != MA_ERROR(-ENOMEM)) {
+ mas_destroy(mas);
+ return false;
+ }
+
+ if (gfpflags_allow_blocking(gfp)) {
+ mtree_unlock(mas->tree);
+ mas_alloc_nodes(mas, gfp);
+ mtree_lock(mas->tree);
+ } else {
+ mas_alloc_nodes(mas, gfp);
+ }
+
+ if (!mas_allocated(mas))
+ return false;
+
+ mas->node = MAS_START;
+ return true;
+}
+
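+/*
+ * Illustrative retry loop built on mas_nomem().  The tree, index and entry
+ * are hypothetical; note that mas_nomem() may drop the tree lock to allocate
+ * when the gfp flags allow blocking:
+ *
+ *	MA_STATE(mas, &my_tree, index, index);
+ *
+ *	mtree_lock(&my_tree);
+ *	do {
+ *		mas_store(&mas, my_entry);
+ *	} while (mas_nomem(&mas, GFP_KERNEL));
+ *	mtree_unlock(&my_tree);
+ */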
#ifdef CONFIG_DEBUG_MAPLE_TREE
unsigned int maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);