From a8eaf0fe4569209858b5807a6c16fd1761f23c09 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Mon, 26 Oct 2020 18:23:36 -0400
Subject: [PATCH] maple_tree: Fix mas_push_node() overflow

mas_push_node() was overflowing the maple_alloc slot array and writing
into the next node: the bound check used MAPLE_NODE_SLOTS - 1, but
slot[] only holds MAPLE_NODE_SLOTS - 1 pointers, so the post-increment
index could land one past the last valid entry.  Add a
MAPLE_ALLOC_SLOTS define for the array size to clean up the alloc slot
calculations.

Also rework the bulk allocation interface: move mas_store(),
mas_entry_count() and mas_nomem() to the interface section of the file,
rename mas_empty_alloc() to mas_destroy(), and teach mas_destroy() to
rebalance a final node left undersized when fewer entries than expected
were inserted.

Signed-off-by: Liam R. Howlett
---
 include/linux/maple_tree.h |   8 +-
 lib/maple_tree.c           | 233 +++++++++++++++++++++++------------
 2 files changed, 152 insertions(+), 89 deletions(-)

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 4675e59c4ab7..926150c1d5c5 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -96,11 +96,12 @@ struct maple_arange_64 {
 	unsigned long gap[MAPLE_ARANGE64_SLOTS];
 };
 
+#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)
 struct maple_alloc {
 	unsigned long total;
 	unsigned char node_count;
 	unsigned int request_count;
-	struct maple_alloc *slot[MAPLE_NODE_SLOTS - 1];
+	struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
 };
 
 struct maple_topiary {
@@ -255,6 +256,8 @@ void *mas_find(struct ma_state *mas, unsigned long max);
 bool mas_nomem(struct ma_state *mas, gfp_t gfp);
 void mas_pause(struct ma_state *mas);
 void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_entry_count(struct ma_state *mas, unsigned long nr_entries);
 
 void *mas_prev(struct ma_state *mas, unsigned long min);
 void *mas_next(struct ma_state *mas, unsigned long max);
@@ -404,9 +407,6 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
 	mtree_unlock(mt);
 }
 
-int mas_entry_count(struct ma_state *mas, unsigned long nr_leaves);
-void mas_empty_alloc(struct ma_state *mas);
-
 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
 void *_mt_find(struct maple_tree *mt, unsigned long *index,
 		unsigned long max, bool start);
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b5f98d1cae80..a0e66b15ff07 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -694,7 +694,7 @@ static inline void mat_add(struct ma_topiary *mat,
 	mat->tail = dead_enode;
 }
 
-void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
+static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
 /*
  * mat_free() - Free all nodes in a dead list.
  *
@@ -870,7 +870,7 @@ static inline void mas_push_node(struct ma_state *mas, struct maple_enode *used)
 	memset(reuse, 0, sizeof(*reuse));
 	count = mas_allocated(mas);
 
-	if (count && (head->node_count < MAPLE_NODE_SLOTS - 1)) {
+	if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
 		if (head->slot[0])
 			head->node_count++;
 		head->slot[head->node_count] = reuse;
@@ -905,7 +905,7 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
 		return;
 
 	mas_set_alloc_req(mas, 0);
-	if (!allocated || mas->alloc->node_count == MAPLE_NODE_SLOTS - 2) {
+	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
 		node = (struct maple_alloc *)mt_alloc_one(gfp);
 		if (!node)
 			goto nomem;
@@ -963,48 +963,6 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
 		mas_push_node(mas, used);
 }
 
-// Free the allocations.
-void mas_empty_alloc(struct ma_state *mas)
-{
-	struct maple_alloc *node;
-
-	while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
-		node = mas->alloc;
-		mas->alloc = mas->alloc->slot[0];
-		if (node->node_count > 0)
-			mt_free_bulk(node->node_count, (void**)&node->slot[1]);
-		kmem_cache_free(maple_node_cache, node);
-	}
-	mas->alloc = NULL;
-}
-
-/*
- * Check if there was an error allocating and do the allocation if necessary
- * If there are allocations, then free them.
- */
-bool mas_nomem(struct ma_state *mas, gfp_t gfp)
-	__must_hold(mas->tree->lock)
-{
-	if (mas->node != MA_ERROR(-ENOMEM)) {
-		mas_empty_alloc(mas);
-		return false;
-	}
-
-	if (gfpflags_allow_blocking(gfp)) {
-		mtree_unlock(mas->tree);
-		mas_alloc_nodes(mas, gfp);
-		mtree_lock(mas->tree);
-	} else {
-		mas_alloc_nodes(mas, gfp);
-	}
-
-	if (!mas_allocated(mas))
-		return false;
-
-	mas->node = MAS_START;
-	return true;
-}
-
 static void mas_node_count(struct ma_state *mas, int count)
 {
 	unsigned long allocated = mas_allocated(mas);
@@ -1015,32 +973,6 @@ static void mas_node_count(struct ma_state *mas, int count)
 	}
 }
 
-int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
-{
-	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
-	struct maple_enode *enode = mas->node;
-	int nr_nodes;
-	int ret;
-
-	nr_entries++; // For trailing null.
-
-	if (!mt_is_alloc(mas->tree))
-		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
-
-	// Leaves
-	nr_nodes = DIV_ROUND_UP(nr_entries, MAPLE_RANGE64_SLOTS - 1);
-	// Internal nodes.
-	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
-	mas_node_count(mas, nr_nodes);
-
-	if (!mas_is_err(mas))
-		return 0;
-
-	ret = xa_err(mas->node);
-	mas->node = enode;
-	return ret;
-
-}
 /*
  * Sets up maple state for operations by setting mas->min = 0 & mas->node to
  * certain values.
@@ -1357,6 +1289,7 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
 	unsigned char offset = 0;
 	void **slots;
 
+
 	if (mte_is_root(mas->node)) {
 		prev = mas_root_locked(mas);
 	} else {
@@ -2444,6 +2377,19 @@ static inline int mas_rebalance(struct ma_state *mas,
 	return mas_spanning_rebalance(mas, &mast, empty_count);
 }
 
+static inline void mas_destroy_rebalance(struct ma_state *mas,
+		unsigned char mas_end)
+{
+	struct maple_big_node b_node;
+
+	/* Slow path. */
+	memset(&b_node, 0, sizeof(struct maple_big_node));
+	b_node.type = mte_node_type(mas->node);
+	mas_mab_cp(mas, 0, mas_end, &b_node, 0);
+	b_node.b_end = mas_end;
+	mas_rebalance(mas, &b_node);
+}
+
 static inline bool _mas_split_final_node(struct maple_subtree_state *mast,
 		struct ma_state *mas, int height)
 {
@@ -3305,16 +3251,6 @@ spanning_store:
 	return content;
 }
 
-void *mas_store(struct ma_state *mas, void *entry)
-{
-	if (mas->index <= mas->last)
-		return _mas_store(mas, entry, true);
-
-	mas_set_err(mas, -EINVAL);
-	return NULL;
-
-}
-
 static inline int mas_dead_node(struct ma_state *mas, unsigned long index);
 /*
  * mas_prev_node() - Find the prev non-null entry at the same level in the
@@ -4538,11 +4474,13 @@ static inline void **mas_destroy_descend(struct ma_state *mas)
 }
 
 /*
- * mte_destroy_walk() - Free the sub-tree from @mn and below.
+ * mt_destroy_walk() - Free the node and all nodes in this sub-tree.
  *
- * @mn - the head of the (sub-)tree to free.
+ * Walk all nodes from the start node and bulk free all of the nodes.
+ *
+ * @head: The rcu_head of the starting node.
 */
-void mt_destroy_walk(struct rcu_head *head)
+static void mt_destroy_walk(struct rcu_head *head)
 {
 	unsigned char end, offset = 0;
 	void **slots;
@@ -4582,7 +4520,14 @@ free_leaf:
 	ma_free_rcu(node);
 }
 
-void mte_destroy_walk(struct maple_enode *enode, struct maple_tree *mt)
+/*
+ * mte_destroy_walk() - Free the sub-tree from @enode and below.
+ *
+ * @enode - the encoded maple node (maple_enode) to start from.
+ * @mt - the tree to free - needed for node types.
+ */
+static inline void mte_destroy_walk(struct maple_enode *enode,
+		struct maple_tree *mt)
 {
 	struct maple_node *node = mte_to_node(enode);
 
@@ -4596,6 +4541,7 @@ void mte_destroy_walk(struct maple_enode *enode, struct maple_tree *mt)
 }
 
 /* Interface */
+
 void __init maple_tree_init(void)
 {
 	maple_node_cache = kmem_cache_create("maple_node",
@@ -4785,6 +4731,123 @@ void mtree_destroy(struct maple_tree *mt)
 }
 EXPORT_SYMBOL(mtree_destroy);
 
+/*
+ * mas_store() - Store an @entry.
+ * @mas: The maple state.
+ * @entry: The entry to store.
+ *
+ * The @mas->index and @mas->last are used to set the range for the @entry.
+ * Note: The @mas should have pre-allocated entries to ensure there is memory
+ * to store the entry.  Please see mas_entry_count()/mas_destroy() for details.
+ */
+void *mas_store(struct ma_state *mas, void *entry)
+{
+	if (mas->index <= mas->last)
+		return _mas_store(mas, entry, true);
+
+	mas_set_err(mas, -EINVAL);
+	return NULL;
+}
+
+/*
+ * mas_entry_count() - Set the expected number of entries that will be inserted.
+ *
+ * @mas: The maple state
+ * @nr_entries: The number of expected entries.
+ *
+ * This will attempt to pre-allocate enough nodes to store the expected number
+ * of entries.  The allocations will occur using the bulk allocator interface
+ * for speed.  Please call mas_destroy() on the @mas after inserting the
+ * entries to ensure any unused nodes are freed.
+ */
+int mas_entry_count(struct ma_state *mas, unsigned long nr_entries)
+{
+	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
+	struct maple_enode *enode = mas->node;
+	int nr_nodes;
+	int ret;
+
+	// Avoid overflow, assume a gap between each entry and a trailing null.
+	// If this is wrong, it just means allocation can happen during
+	// insertion of entries.
+	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
+
+	if (!mt_is_alloc(mas->tree))
+		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
+
+	// Leaves
+	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 1);
+	// Internal nodes.
+	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+	mas_node_count(mas, nr_nodes);
+
+	if (!mas_is_err(mas))
+		return 0;
+
+	ret = xa_err(mas->node);
+	mas->node = enode;
+	return ret;
+}
+
+/*
+ * mas_destroy() - destroy a maple state.
+ * @mas: The maple state
+ *
+ * Frees any allocated nodes associated with this maple state.
+ */
+void mas_destroy(struct ma_state *mas)
+{
+	struct maple_alloc *node;
+
+	// When using mas_for_each() to insert an expected number of elements,
+	// it is possible that the number inserted is less than the expected
+	// number.  To fix an invalid final node, a check is performed here to
+	// rebalance the final node with the previous node if needed.
+	if ((mas->max == ULONG_MAX) && !mas_is_err(mas) && !mas_is_start(mas) &&
+	    mas_searchable(mas) && !mte_is_root(mas->node)) {
+		unsigned char end = mas_data_end(mas);
+
+		if (end < mt_min_slot_count(mas->node))
+			mas_destroy_rebalance(mas, end);
+	}
+
+	while (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) {
+		node = mas->alloc;
+		mas->alloc = mas->alloc->slot[0];
+		if (node->node_count > 0)
+			mt_free_bulk(node->node_count, (void **)&node->slot[1]);
+		kmem_cache_free(maple_node_cache, node);
+	}
+	mas->alloc = NULL;
+}
+
+/*
+ * Check if there was an error allocating and do the allocation if necessary.
+ * If there was no error, then any remaining allocations are freed instead.
+ */
+bool mas_nomem(struct ma_state *mas, gfp_t gfp)
+	__must_hold(mas->tree->lock)
+{
+	if (mas->node != MA_ERROR(-ENOMEM)) {
+		mas_destroy(mas);
+		return false;
+	}
+
+	if (gfpflags_allow_blocking(gfp)) {
+		mtree_unlock(mas->tree);
+		mas_alloc_nodes(mas, gfp);
+		mtree_lock(mas->tree);
+	} else {
+		mas_alloc_nodes(mas, gfp);
+	}
+
+	if (!mas_allocated(mas))
+		return false;
+
+	mas->node = MAS_START;
+	return true;
+}
+
 #ifdef CONFIG_DEBUG_MAPLE_TREE
 unsigned int maple_tree_tests_run;
 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
-- 
2.50.1
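
A note on the bound fix in mas_push_node(): slot[] holds
MAPLE_ALLOC_SLOTS == MAPLE_NODE_SLOTS - 1 pointers, and the push writes
slot[node_count] after a possible increment, so the old check admitted one
index too many.  The arithmetic as a compilable userspace sketch; the
MAPLE_NODE_SLOTS value here is illustrative only, not the kernel's:

#include <assert.h>

#define MAPLE_NODE_SLOTS	16			/* illustrative value only */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)	/* length of slot[] */

int main(void)
{
	/* node_count == MAPLE_ALLOC_SLOTS - 1 (here 14) already points at
	 * the last valid index of slot[0..14]. */
	unsigned char node_count = MAPLE_ALLOC_SLOTS - 1;

	/* The old bound admitted node_count == 14: the push incremented it
	 * to 15 and wrote slot[15], one element past the end of the array. */
	assert(node_count < MAPLE_NODE_SLOTS - 1);

	/* The new bound rejects 14, capping the post-increment index at the
	 * last valid slot. */
	assert(!(node_count < MAPLE_ALLOC_SLOTS - 1));
	return 0;
}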
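
Usage sketch for the bulk-insert interface documented above
(mas_entry_count(), mas_store(), mas_destroy()): pre-allocate for the
expected entry count, store under the tree lock, then destroy the state to
free unused nodes and rebalance a short final node.  This is a minimal,
hypothetical example; example_bulk_load() and the src array are not from
the patch or any in-tree caller:

#include <linux/maple_tree.h>

int example_bulk_load(struct maple_tree *mt, void **src, unsigned long nr)
{
	unsigned long i;
	int ret;

	MA_STATE(mas, mt, 0, 0);

	mtree_lock(mt);
	/* Pre-allocate nodes for the expected number of entries using the
	 * bulk allocator; returns an errno on allocation failure. */
	ret = mas_entry_count(&mas, nr);
	if (ret)
		goto unlock;

	for (i = 0; i < nr; i++) {
		/* Store each entry over the range [i, i]; the pre-allocated
		 * nodes mean the stores should not need further memory. */
		mas.index = i;
		mas.last = i;
		mas_store(&mas, src[i]);
	}

unlock:
	/* Free any unused pre-allocated nodes; this also rebalances an
	 * undersized final node after a short bulk insert. */
	mas_destroy(&mas);
	mtree_unlock(mt);
	return ret;
}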