From 20999d851e5d8fea408b63fd3873999481c996b5 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Wed, 5 Dec 2018 11:25:09 -0500
Subject: [PATCH] maple_tree: Store errors in ms->node

Remove the GFP_KERNEL from the allocation in maple_state_node() so this
path always fails in the userspace test framework.  This tests the
error-handling path.

Signed-off-by: Matthew Wilcox
---
 lib/maple_tree.c | 71 ++++++++++++++++++++++++++----------------------
 1 file changed, 39 insertions(+), 32 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d7b605d609b9..bdece8c955d3 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -6,6 +6,17 @@
 #include <linux/maple_tree.h>
 #include <linux/xarray.h>
 
+#define MA_ERROR(err)	((struct maple_node *)(((unsigned long)err << 2) | 2UL))
+
+static inline void mas_set_err(struct maple_state *mas, long err)
+{
+	mas->node = MA_ERROR(err);
+}
+
+static inline bool mas_is_err(struct maple_state *mas)
+{
+	return xa_is_err(mas->node);
+}
 
 /* Things we will probably eventually need */
 #if 0
@@ -119,7 +130,7 @@ static void maple_new_node(struct maple_state *ms, int count, gfp_t gfp)
 		return;
 
 	mn = ma_get_alloc(ms);
-	if(!mn)
+	if (!mn)
 		mn = kcalloc(count, sizeof(*mn), gfp);
 	if (!mn)
 		goto list_failed;
@@ -140,12 +151,16 @@ kzalloc_failed:
 		kfree(mn->slot[i]);
 	kfree(mn);
 list_failed:
-	return;
+	ma_set_alloc_cnt(ms, count);
+	mas_set_err(ms, -ENOMEM);
 }
 
 static bool __maple_nomem(struct maple_state *ms, gfp_t gfp)
 	__must_hold(ms->tree->lock)
 {
+	if (ms->node != MA_ERROR(-ENOMEM))
+		return false;
+
 	if (gfpflags_allow_blocking(gfp)) {
 		spin_unlock(&ms->tree->lock);
 		maple_new_node(ms, ma_get_alloc_cnt(ms), gfp);
@@ -209,6 +224,9 @@ static void *mas_start(struct maple_state *ms)
 {
 	void *entry;
 
+	if (mas_is_err(ms))
+		return NULL;
+
 	if (ms->node == MAS_START) {
 		entry = ms->tree->root;
 		if (!xa_is_node(entry)) {
@@ -232,7 +250,7 @@ static void *mas_start(struct maple_state *ms)
  * ms->node will point to the node containing the entry.
  *
  *
- * */
+ */
 static void *_maple_walk(struct maple_state *ms)
 {
 	void *entry = mas_start(ms);
@@ -302,8 +320,7 @@ static struct maple_node *maple_state_node(struct maple_state *ms, int count)
 	if (allocated < count) {
 		int diff = count - allocated;
 		//ma_set_alloc_cnt(ms, needed);
-		maple_new_node(ms, diff, GFP_KERNEL | GFP_NOWAIT |
-				__GFP_NOWARN | __GFP_ZERO);
+		maple_new_node(ms, diff, GFP_NOWAIT | __GFP_NOWARN);
 	}
 	return ms->alloc;
 }
@@ -312,14 +329,14 @@
 /*
  * Private
  * Handle if root is just a single pointer
  */
-static int _maple_root_expand(struct maple_state *ms)
+static void _maple_root_expand(struct maple_state *ms)
 {
 	void *r_entry = ms->tree->root;  // root entry
 	struct maple_node *mn;
 
 	maple_state_node(ms, 1);
-	if (!ms->alloc)
-		return -ENOMEM;
+	if (mas_is_err(ms))
+		return;
 	mn = ma_next_alloc(ms);
 	ms->node = mn;
@@ -340,8 +357,8 @@
 
 	/* swap the new root into the tree */
 	rcu_assign_pointer(ms->tree->root, ma_mk_node(mn));
-	return 0;
 }
+
 /*
  * Private
  *
@@ -484,7 +501,7 @@ void maple_link_node(struct maple_state *ms,
  * Split a node and set ms->node to the correct one for an insert for
  * ms->index to ms->end.
  */
-static int _maple_node_split(struct maple_state *ms)
+static void _maple_node_split(struct maple_state *ms)
 {
 	struct maple_node *lmn, *rmn;
 	/* Assume node64, as node4 cannot be split. */
@@ -504,15 +521,14 @@ static int _maple_node_split(struct maple_state *ms)
 
 	/* Allocate new mn64 and split */
 	maple_state_node(ms, alloc_cnt);
-	if (!ms->alloc)
-		return -ENOMEM;
+	if (mas_is_err(ms))
+		return;
 
 	lmn = ma_next_alloc(ms);
 	rmn = ma_next_alloc(ms);
 
 	/* copy 1/2 data from the end to the new node. */
 	maple_split_data_64(ms, lmn, rmn, offset);
 	maple_link_node(ms, lmn, rmn);
-	return 0;
 }
 
@@ -580,6 +596,8 @@ static inline int _maple_insert_64(struct maple_state *ms, void *entry)
  */
 static inline int _maple_insert(struct maple_state *ms, void *entry)
 {
+	if (mas_is_err(ms))
+		return 0;
 	if (!xa_is_node(ms->tree->root)) {
 		BUG_ON(ms->end > 0);
 		if (ms->tree->root == NULL) {
@@ -618,14 +636,9 @@ static void *_maple_setup_insert(struct maple_state *ms)
 	void *entry = mas_start(ms);
 
 	if (!xa_is_node(entry)) {
-		int err;
-
 		if (ms->end == 0)
 			return entry;
 
-		err = _maple_root_expand(ms);
-		if (err == -ENOMEM)
-			return MAS_START;
-
+		_maple_root_expand(ms);
 		return NULL;
 	}
@@ -639,13 +652,9 @@
 			continue;
 		}
 		if (_maple_node_is_full(ms)) {
-			int ret = _maple_node_split(ms);
-			/*
-			 * If the allocation fails, we need to drop the lock
-			 * and restart the walk from root.
-			 */
-			if (ret == -ENOMEM)
-				return MAS_START;
+			_maple_node_split(ms);
+			if (mas_is_err(ms))
+				return NULL;
 		}
 
 		entry = _maple_walk_64(ms, ms->index);
@@ -721,20 +730,18 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long start,
 	spin_lock(&ms.tree->lock);
retry:
 	walked = _maple_setup_insert(&ms);
-	if (walked == MAS_START)
-		goto retry;
 	if (walked != NULL)
 		goto already_exists;
 
 	ret = _maple_insert(&ms, entry);
-	if (ret == -ENOMEM) {
-		if (__maple_nomem(&ms, gfp))
-			goto retry;
-	}
+	if (__maple_nomem(&ms, gfp))
+		goto retry;
 
already_exists:
 	spin_unlock(&ms.tree->lock);
+	if (mas_is_err(&ms))
+		return xa_err(ms.node);
 	return ret;
 }
-- 
2.50.1
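
Note on the encoding, for readers unfamiliar with the XArray convention
the patch borrows: MA_ERROR() shifts the (negative) errno left by two
bits and sets bit 1, producing a value that can never alias a real
struct maple_node pointer, since node pointers are at least 4-byte
aligned and so always have their bottom two bits clear.  xa_is_err()
recognises such tagged entries and xa_err() recovers the errno with an
arithmetic right shift.  The standalone sketch below demonstrates that
round trip in userspace; sketch_is_err() and sketch_err() are
hypothetical stand-ins for the kernel's xa_is_err()/xa_err(), and the
4095 bound mirrors the kernel's MAX_ERRNO.

#include <stdio.h>
#include <errno.h>

/* Tag an errno so it cannot collide with an aligned node pointer:
 * shift left two bits and set bit 1 (pointers have bits 0-1 clear). */
#define MA_ERROR(err) \
	((void *)(((unsigned long)(err) << 2) | 2UL))

#define MAX_ERRNO	4095	/* mirrors the kernel's definition */

/* An error entry has bit 1 set and encodes one of the top MAX_ERRNO
 * values, i.e. it compares >= MA_ERROR(-MAX_ERRNO) as an integer. */
static int sketch_is_err(const void *entry)
{
	return ((unsigned long)entry & 3UL) == 2UL &&
	       (unsigned long)entry >= (unsigned long)MA_ERROR(-MAX_ERRNO);
}

/* Undo the encoding: the arithmetic right shift sign-extends the
 * negative errno back out of the tagged value. */
static int sketch_err(const void *entry)
{
	return (long)entry >> 2;
}

int main(void)
{
	void *node = MA_ERROR(-ENOMEM);	/* what mas_set_err() stores */

	if (sketch_is_err(node))
		printf("stored error: %d\n", sketch_err(node));	/* -12 */
	return 0;
}

This aliasing-free encoding is what lets mtree_insert_range() in the
patch simply return xa_err(ms.node) after dropping the lock, instead of
threading -ENOMEM return values through every helper.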