From 69d9334cba6669a3fd8653d715416c0bd3139dc0 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Thu, 6 Dec 2018 16:27:36 -0500
Subject: [PATCH] maple_tree: Rework allocating new nodes to use the slots in
 ms->alloc

Allocate node 0 into ms->alloc, encode the allocation count in the low
bits of the pointer, and store nodes 1-2 in slots 0-1 of node 0.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 include/linux/maple_tree.h |   2 +-
 lib/maple_tree.c           | 171 +++++++++++++++++++------------------
 2 files changed, 90 insertions(+), 83 deletions(-)

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index ae71158ff09f..99e8725adf48 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -124,7 +124,7 @@ struct maple_state {
 	unsigned long min;	/* The smallest index to be found in %node */
 	unsigned long max;	/* The largest index to be found in %node */
 	unsigned long slot_idx;	/* The slot index from %node */
-	struct maple_node **alloc;	/* Allocated for this operation */
+	struct maple_node *alloc;	/* Allocated for this operation */
 	short flags;		/* Different stuff */
 };
 
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 3135944609ae..446819f102db 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -54,36 +54,40 @@ static inline void *ma_mk_node(const struct maple_node *node)
 	return (void *)((unsigned long)node | 2);
 }
 
-static inline struct maple_node **ma_get_alloc(const struct maple_state *ms)
+static inline void ma_set_alloc_cnt(struct maple_state *ms, int count)
 {
-	return (struct maple_node **)((unsigned long)ms->alloc & ~3);
+	ms->alloc = (struct maple_node *)((unsigned long)ms->alloc & ~3);
+	ms->alloc = (struct maple_node *)((unsigned long)ms->alloc | count);
 }
-static inline void ma_set_alloc_cnt(struct maple_node **alloc, int count)
-{
-	alloc = (struct maple_node **)((unsigned long)alloc | count);
-}
-static inline int ma_req_alloc_cnt(const struct maple_state *ms)
+static inline int ma_get_alloc_cnt(const struct maple_state *ms)
 {
 	return (int)((unsigned long)ms->alloc & 3);
 }
-
-static inline bool ma_alloc_empty(struct maple_node **alloc)
-{
-	if ((unsigned long)alloc & ~2)
-		return false;
-	return true;
-}
-static inline struct maple_node *ma_left_alloc(const struct maple_state *ms)
-{
-	return ma_get_alloc(ms)[0];
-}
-static inline struct maple_node *ma_right_alloc(const struct maple_state *ms)
+static inline struct maple_node *ma_get_alloc(const struct maple_state *ms)
 {
-	return ma_get_alloc(ms)[1];
+	return (struct maple_node *)((unsigned long)ms->alloc & ~3);
 }
-static inline struct maple_node *ma_root_alloc(const struct maple_state *ms)
+static inline struct maple_node *ma_next_alloc(struct maple_state *ms)
 {
-	return ma_get_alloc(ms)[2];
+	int cnt;
+	struct maple_node *mn, *smn;
+
+	if (!ms->alloc)
+		return NULL;
+
+	cnt = ma_get_alloc_cnt(ms);
+	mn = ma_get_alloc(ms);
+	cnt--;
+	if (cnt == 0) {
+		ms->alloc = NULL;
+	} else {
+		smn = (struct maple_node *)mn->map64.slot[cnt - 1];
+		mn->map64.slot[cnt - 1] = NULL;
+		ma_set_alloc_cnt(ms, cnt);
+		mn = smn;
+	}
+
+	return mn;
 }
 
 static inline bool maple_is_root(struct maple_state *ms,
@@ -109,31 +113,37 @@ static inline bool _maple_is_node_4(struct maple_state *ms)
 	return false;
 }
 
-static struct maple_node **_maple_new_node(gfp_t gfp, int count)
+static void maple_new_node(struct maple_state *ms, int count, gfp_t gfp)
 {
-	struct maple_node **list;
-	struct maple_node *mn;
+	struct maple_node *mn, *smn;
 	int i = 0;
 
-	list = kcalloc(count, sizeof(struct maple_node *), gfp);
-	if (!list)
+	if (count == 0)
+		return;
+
+	mn = ma_get_alloc(ms);
+	if (!mn)
+		mn = kcalloc(count, sizeof(*mn), gfp);
+	if (!mn)
 		goto list_failed;
-	for (i = 0; i < count; i++) {
-		mn = kzalloc(sizeof(*mn), gfp);
-		if (!mn)
+	for (i = 0; i < count - 1; i++) {
+		smn = kzalloc(sizeof(*mn), gfp);
+		if (!smn)
 			goto kzalloc_failed;
-		mn->map64.parent = NULL;
-		list[i] = mn;
+		smn->map64.parent = NULL;
+		mn->map64.slot[i] = smn;
 	}
-	return list;
+	ms->alloc = mn;
+	ma_set_alloc_cnt(ms, count);
+	return;
 
 kzalloc_failed:
-	while (i-- > 0)
-		kfree(list[i]);
-	kfree(list);
+	while (--i >= 0)
+		kfree(mn->map64.slot[i]);
+	kfree(mn);
 list_failed:
-	return NULL;
+	return;
 }
 
 static bool __maple_nomem(struct maple_state *ms, gfp_t gfp)
@@ -141,10 +151,10 @@
 {
 	if (gfpflags_allow_blocking(gfp)) {
 		spin_unlock(&ms->tree->lock);
-		ms->alloc = _maple_new_node(gfp, ma_req_alloc_cnt(ms));
+		maple_new_node(ms, ma_get_alloc_cnt(ms), gfp);
 		spin_lock(&ms->tree->lock);
 	} else {
-		ms->alloc = _maple_new_node(gfp, ma_req_alloc_cnt(ms));
+		maple_new_node(ms, ma_get_alloc_cnt(ms), gfp);
 	}
 	ms->node = MAS_START;
 	if (!ma_get_alloc(ms))
@@ -286,23 +296,19 @@ static inline bool _maple_node_is_full(struct maple_state *ms)
 /*
  * Private
  */
-static struct maple_node **_maple_state_node(struct maple_state *ms, int count)
+static struct maple_node *maple_state_node(struct maple_state *ms, int count)
 {
-	struct maple_node **alt_mn;
-	int allocated = ma_req_alloc_cnt(ms);
+	int allocated = ma_get_alloc_cnt(ms);
 
 	BUG_ON(count > 3);
 
-	if (allocated != 0) {
-		if (allocated != count)
-			return NULL; // FIXME: Busy, retry.
-		alt_mn = ms->alloc;
-	} else {
-		alt_mn = _maple_new_node(GFP_KERNEL | GFP_NOWAIT |
-					 __GFP_NOWARN | __GFP_ZERO, count);
-		ma_set_alloc_cnt(alt_mn, count);
+	if (allocated < count) {
+		int diff = count - allocated;
+
+		maple_new_node(ms, diff, GFP_KERNEL | GFP_NOWAIT |
+			       __GFP_NOWARN | __GFP_ZERO);
 	}
 
-	return alt_mn;
+	return ms->alloc;
 }
 
 /*
@@ -312,15 +318,14 @@ static int _maple_root_expand(struct maple_state *ms)
 {
 	void *r_entry = ms->tree->root; // root entry
-	struct maple_node **list, *mn;
+	struct maple_node *mn;
 
-	list = _maple_state_node(ms, 1);
-	if (ma_alloc_empty(list))
+	maple_state_node(ms, 1);
+	if (!ms->alloc)
 		return -ENOMEM;
-	mn = list[0];
-	ms->node = list[0];
-	/* Used the list of allocated nodes. */
-	kfree(list);
+
+	mn = ma_next_alloc(ms);
+	ms->node = mn;
 
 	/*
 	 * Insert the existing entry into the new node
@@ -345,13 +350,14 @@
  *
 * Copy 1/2 the data from ms->node to new_mn.
 */
-static void maple_split_data_64(struct maple_state *ms, int off)
+static void maple_split_data_64(struct maple_state *ms,
+				struct maple_node *lmn,
+				struct maple_node *rmn,
+				int off)
 {
-	struct maple_node *left_mn = ma_left_alloc(ms);
-	struct maple_node *right_mn = ma_right_alloc(ms);
 	struct maple_node_64 *full_mn = &(ms->node->map64);
-	struct maple_node_64 *target = &(left_mn->map64);
+	struct maple_node_64 *target = &(lmn->map64);
 	int i, j;
 
 	/* Copy 0-4 to left_mn 1-5
@@ -359,7 +365,7 @@ static void maple_split_data_64(struct maple_state *ms, int off)
 	 */
 	for (i = 0, j = off; j < MAPLE_NODE64_MAX_SLOT; i++, j++) {
 		if (i == MAPLE_NODE64_MAX_SLOT / 2) {
-			target = &(right_mn->map64);
+			target = &(rmn->map64);
 			i = 0;
 		}
 		if (j < MAPLE_NODE64_MAX_SLOT - 1)
@@ -413,17 +419,17 @@ void maple_shift_64(struct maple_node_64 *mn64, int p_here)
 * Note, currently only links node 64s
 *
 */
-void maple_link_node(struct maple_state *ms)
+void maple_link_node(struct maple_state *ms,
+		struct maple_node *lmn,
+		struct maple_node *rmn)
 {
 	struct maple_node *full_mn = ms->node;
 	struct maple_node_64 *fmn64 = &(full_mn->map64);
-	struct maple_node *left_mn = ma_left_alloc(ms);
-	struct maple_node *right_mn = ma_right_alloc(ms);
-	struct maple_node_64 *lmn64 = &(left_mn->map64);
-	struct maple_node_64 *rmn64 = &(right_mn->map64);
+	struct maple_node_64 *lmn64 = &(lmn->map64);
+	struct maple_node_64 *rmn64 = &(rmn->map64);
 
 	if (maple_is_root(ms, ms->node)) {
-		struct maple_node *root_mn = ma_root_alloc(ms);
+		struct maple_node *root_mn = ma_next_alloc(ms);
 		int l_end = maple_data_end_64(lmn64) - 1;
 		int r_end = maple_data_end_64(rmn64) - 1;
 		int idx = 0;
@@ -434,20 +440,19 @@ void maple_link_node(struct maple_state *ms)
 		 */
 		lmn64->parent = ma_mk_node(root_mn);
 		rmn64->parent = ma_mk_node(root_mn);
-		/* Root will have two entries: left_mn, right_mn.
+		/* Root will have two entries: lmn, rmn.
 		 * The pivot will be the maximum pivot of each.
-		 * pivot 0 will be the lowest pivot of left_mn.
+		 * pivot 0 will be the lowest pivot of lmn.
 		 */
 		root_mn->map64.pivot[idx] = fmn64->pivot[0];
 		idx++;
 		/* Left child */
 		root_mn->map64.pivot[idx] = lmn64->pivot[l_end];
-		RCU_INIT_POINTER(root_mn->map64.slot[idx], ma_mk_node(left_mn));
+		RCU_INIT_POINTER(root_mn->map64.slot[idx], ma_mk_node(lmn));
 		idx++;
 		/* Right child */
 		root_mn->map64.pivot[idx] = rmn64->pivot[r_end];
-		RCU_INIT_POINTER(root_mn->map64.slot[idx],
-				 ma_mk_node(right_mn));
+		RCU_INIT_POINTER(root_mn->map64.slot[idx], ma_mk_node(rmn));
 		/* Swap root of tree */
 		rcu_assign_pointer(ms->tree->root, ma_mk_node(root_mn));
 		ms->node = root_mn;
@@ -470,12 +475,12 @@ void maple_link_node(struct maple_state *ms)
 		rmn64->parent = fmn64->parent;
 		/* Shift the data over */
 		maple_shift_64(target, ms->slot_idx);
-		target->slot[ms->slot_idx + 1] = ma_mk_node(right_mn);
+		target->slot[ms->slot_idx + 1] = ma_mk_node(rmn);
 		target->pivot[ms->slot_idx] = lmn64->pivot[3];
-		target->slot[ms->slot_idx] = ma_mk_node(left_mn);
+		target->slot[ms->slot_idx] = ma_mk_node(lmn);
 	}
 
-	/* Orhpan the full node */
+	/* Orphan the full node */
 	fmn64->parent = full_mn;
 	_maple_free_node(full_mn);
 	kfree(ma_get_alloc(ms));
@@ -487,6 +492,7 @@
 */
 static int _maple_node_split(struct maple_state *ms)
 {
+	struct maple_node *lmn, *rmn;
 	/* Assume node64, as node4 cannot be split.
 	 */
 	int alloc_cnt = 2;
 	int offset = 0;
@@ -496,7 +502,6 @@
 		offset++;
 	}
 
-
 	/*
 	 * FIXME:
 	 * If there is room in the previous or next node
@@ -504,13 +509,15 @@
 	 */
 
 	/* Allocate new mn64 and split */
-	ms->alloc = _maple_state_node(ms, alloc_cnt);
-	if (ma_alloc_empty(ms->alloc))
+	maple_state_node(ms, alloc_cnt);
+	if (!ms->alloc)
 		return -ENOMEM;
+	lmn = ma_next_alloc(ms);
+	rmn = ma_next_alloc(ms);
 
 	/* copy 1/2 data from the end to the new node. */
-	maple_split_data_64(ms, offset);
-	maple_link_node(ms);
+	maple_split_data_64(ms, lmn, rmn, offset);
+	maple_link_node(ms, lmn, rmn);
 
 	return 0;
 }
-- 
2.50.1
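
A note for review, appended after the patch since it is not part of the
change itself: below is a minimal userspace sketch of the ms->alloc
scheme this patch introduces. It is not the kernel code; struct node and
struct state are simplified stand-ins for struct maple_node and struct
maple_state, the helpers mirror ma_set_alloc_cnt(), ma_get_alloc() and
ma_next_alloc(), and it assumes what the patch also relies on: node
allocations are at least 4-byte aligned, so the low two bits of the
pointer are free to carry a count of 0-3.

/*
 * Userspace sketch of the tagged-pointer allocation cache:
 * node 0 is stored in state->alloc with the pending count encoded in
 * the low two bits of the pointer; nodes 1-2 live in node 0's slots.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOT 8

struct node {
	void *slot[MAX_SLOT];
};

struct state {
	struct node *alloc;	/* node 0, low 2 bits hold the count */
};

static struct node *get_alloc(const struct state *st)
{
	return (struct node *)((unsigned long)st->alloc & ~3UL);
}

static int get_alloc_cnt(const struct state *st)
{
	return (int)((unsigned long)st->alloc & 3UL);
}

static void set_alloc_cnt(struct state *st, int count)
{
	st->alloc = (struct node *)(((unsigned long)st->alloc & ~3UL) | count);
}

/* Allocate count nodes: node 0 in st->alloc, the rest in its slots. */
static void new_nodes(struct state *st, int count)
{
	struct node *mn = calloc(1, sizeof(*mn));
	int i;

	if (!mn)
		return;
	for (i = 0; i < count - 1; i++)
		mn->slot[i] = calloc(1, sizeof(*mn));
	st->alloc = mn;
	set_alloc_cnt(st, count);
}

/* Hand out the most recently stashed node; node 0 goes out last. */
static struct node *next_alloc(struct state *st)
{
	struct node *mn, *smn;
	int cnt;

	if (!st->alloc)
		return NULL;

	cnt = get_alloc_cnt(st);
	mn = get_alloc(st);
	if (--cnt == 0) {
		st->alloc = NULL;		/* node 0 itself is consumed */
	} else {
		smn = mn->slot[cnt - 1];	/* take the last stashed node */
		mn->slot[cnt - 1] = NULL;
		set_alloc_cnt(st, cnt);
		mn = smn;
	}
	return mn;
}

int main(void)
{
	struct state st = { NULL };
	struct node *a, *b, *c;

	new_nodes(&st, 3);
	assert(get_alloc_cnt(&st) == 3);
	a = next_alloc(&st);	/* node 2, from slot 1 */
	b = next_alloc(&st);	/* node 1, from slot 0 */
	c = next_alloc(&st);	/* node 0 itself */
	assert(a && b && c && !st.alloc);
	printf("handed out 3 nodes, cache drained\n");
	free(a);
	free(b);
	free(c);
	return 0;
}

Because only two bits carry the count, a state can hold at most three
preallocated nodes, which matches the BUG_ON(count > 3) in
maple_state_node() above.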