From: Liam R. Howlett
Date: Fri, 30 Nov 2018 19:33:01 +0000 (-0500)
Subject: maple_tree: Add support for a single pointer.
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=251663e19f701d1bf9088694577885a95bb40db6;p=users%2Fjedix%2Flinux-maple.git

maple_tree: Add support for a single pointer.

If the range is 0-0 and the tree is empty, insert just a single pointer.
Later inserts cause the root to be a node with 0 pointing to that first
pointer.

Update test code to test if previously inserted data still exists.

Signed-off-by: Liam R. Howlett
---

diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 531d8d26a86e..3eea6082b77f 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef _LINUX_MAPLE_TREE_H
+#ifndef _LINUX_MAPLE_TREE_H
 #define _LINUX_MAPLE_TREE_H
 
 /*
@@ -9,13 +9,12 @@
  *
  */
-/* xarray functions used:
+/*
+ * xarray functions used:
  *
  * xa_is_internal - used to check if this is an internal node.
  * xa_mk_internal - used to mark nodes internal.
  * xa_to_internal - used to extract the actual node address
- *
- *
  */
 #include
 #include
@@ -35,6 +34,7 @@
 #define MAPLE_NODE4_MAX_SLOT	15
 #define MAPLE_NODE4M_MAX_SLOT	14
 
+#define maple_retry(ms)	((ms)->tree)
 struct maple_node;
 
 /* Node 64 looks at all 64 bits of a number to decide pivots */
@@ -95,8 +95,6 @@ struct maple_tree {
 	spinlock_t	lock;
 	unsigned int	flags;
 	struct maple_node __rcu *root;
-	unsigned long	min;
-	unsigned long	max;
 };
 
@@ -113,8 +111,6 @@ struct maple_tree {
 	.lock = __SPIN_LOCK_UNLOCKED(n.lock),	\
 	.flags = 0,				\
 	.root = NULL,				\
-	.min = 0,				\
-	.max = ULONG_MAX - 1,			\
 }
 
 #define MAP_STATE_SPLIT_ON_FULL 1
@@ -140,9 +136,9 @@ struct maple_state {
 	.index = sindex,			\
 	.end = eindex,				\
 	.node = NULL,				\
-	.min = stree->min,			\
-	.max = stree->max,			\
 	.alloc = NULL,				\
+	.min = 0,				\
+	.max = ULONG_MAX,			\
 	.flags = 0,				\
 }
 
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 4ff5c826d020..2429cff63807 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 #include
 #include
 #include
@@ -6,20 +7,23 @@
 /* Things we will probably eventually need */
 #if 0
-/* Private
+/*
+ * Private
  * return next left node
  */
 static void _maple_next_node(struct maple_state *ms)
 {
 }
 
-/* Private
+/*
+ * Private
  * return previous right node
  */
 static void _maple_prev_node(struct maple_state *ms)
 {
 }
 
-/* Private
+/*
+ * Private
  * Merge a given set of nodes into a single node
  */
 static void _maple_merge(struct maple_state *ms, struct maple_node *mn)
@@ -27,7 +31,8 @@ static void _maple_merge(struct maple_state *ms, struct maple_node *mn)
 {
 }
 
-/* Private
+/*
+ * Private
  * Split a ms->node into two.
  * Requires tree to be locked.
  */
@@ -56,10 +61,14 @@ static inline void *_maple_mk_node(const struct maple_node *node)
 	return (void *)((unsigned long)node | 2);
 }
 
-static inline bool _maple_is_node_4(struct maple_state *ms) {
+static inline bool _maple_is_node_4(struct maple_state *ms)
+{
 	/* FIXME: This just means that our node could be contained within a
	 * node_4.  This could happen when shrinking a node_64, for example.
	 */
+	if (ms->node == ms->tree->root)
+		return false;
+
 	if ((ms->max - ms->min) <= (unsigned long)MAPLE_NODE4_MAX_SLOT)
 		return true;
 	return false;
@@ -80,6 +89,21 @@ kmalloc_failed:
 }
 
+static bool __maple_nomem(struct maple_state *ms, gfp_t gfp)
+	__must_hold(ms->tree->lock)
+{
+	if (gfpflags_allow_blocking(gfp)) {
+		spin_unlock(&ms->tree->lock);
+		ms->alloc = _maple_mk_node(_maple_new_node(gfp));
+		spin_lock(&ms->tree->lock);
+	} else {
+		ms->alloc = _maple_mk_node(_maple_new_node(gfp));
+	}
+	if (ms->alloc == NULL)
+		return false;
+	return true;
+}
+
 static void _maple_free_node(struct maple_node *mn)
 {
 	kfree(mn);
@@ -88,6 +112,7 @@ static void _maple_free_node(struct maple_node *mn)
 static void *_maple_walk_4(struct maple_state *ms)
 {
 	struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
+
 	ms->slot_idx = ms->index - ms->min;
 	return rcu_dereference(mn4->slot[ms->slot_idx]);
 }
@@ -111,18 +136,19 @@ static void *_maple_walk_64(struct maple_state *ms, unsigned long val)
 static void _maple_update_limits(struct maple_state *ms)
 {
-	struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+	struct maple_node_64 *mn64;
 	int i = ms->slot_idx;
 
 	if (ms->node == ms->tree->root)
 		return;
 
+	mn64 = &(_maple_to_node(ms->node)->map64);
+
 	if (i > 0)
 		ms->min = mn64->pivot[i - 1];
 
 	if (i < MAPLE_NODE64_MAX_SLOT - 1)
 		ms->max = mn64->pivot[i] - 1;
-
 }
 
 /*
  * Private
@@ -139,10 +165,20 @@ static void *_maple_walk(struct maple_state *ms)
 	void *entry = NULL;
 
 	/* Outside this nodes range, it doesn't exist. */
-	if (ms->min >= ms->index ||
-	    ms->max <= ms->index)
+	if (ms->min > ms->index ||
+	    ms->max < ms->index)
+		return entry;
+
+	if (ms->node == NULL)
 		return entry;
 
+	if (xa_is_internal(ms->node) == false) {
+		if (ms->index == ms->end && ms->index == 0)
+			return ms->node;
+		else
+			return NULL;
+	}
+
 	entry = ms->node;
 	do {
 		_maple_update_limits(ms);
@@ -160,69 +196,122 @@ static inline int _count_node_64(struct maple_state *ms)
 {
 	int i;
+	struct maple_node_64 mn64 = _maple_to_node(ms->node)->map64;
 
 	for (i = 0; i < MAPLE_NODE64_MAX_PIVOT; i++) {
-		if (ms->node->map64.pivot[i] == 0)
+		if (mn64.pivot[i] == 0)
 			break;
 	}
 	/*FIXME: Is this right? */
 	if (i == MAPLE_NODE64_MAX_PIVOT - 1 &&
-	    ms->node->map64.slot[i + 1] != NULL)
+	    mn64.slot[i + 1] != NULL)
 		i++;
 	return i;
 }
 
-/* Private
- *
+/*
+ * Private
  * Check if ms->node has enough room for ms->index to ms->end
- *
  */
 static inline bool _maple_node_is_full(struct maple_state *ms)
 {
+	if (ms->tree->root == NULL && ms->index == 0 && ms->end == 0)
+		return false; // Room for a single range.
+	if (xa_is_internal(ms->tree->root) == false)
+		return true; // Single range is occupied.
+	if (_maple_is_node_4(ms))
+		return false; // Can't get here.
+	if (_count_node_64(ms) == MAPLE_NODE64_MAX_SLOT)
+		return true; // Only full if all the slots are occupied.
 	return false;
 }
 
-/* Private
- *
+/*
+ * Private
+ */
+static struct maple_node *_maple_state_node(struct maple_state *ms)
+{
+	struct maple_node *alt_mn;
+
+	if (ms->alloc != NULL) {
+		alt_mn = _maple_to_node(ms->alloc);
+		ms->alloc = NULL;
+	} else {
+		alt_mn = _maple_new_node(GFP_KERNEL | GFP_NOWAIT |
+					 __GFP_NOWARN | __GFP_ZERO);
+	}
+	return alt_mn;
+}
+
+/*
+ * Private
+ * Handle if root is just a single pointer
+ */
+static int _maple_root_expand(struct maple_state *ms)
+{
+	void *r_entry = ms->tree->root; // root entry
+	struct maple_node *mn;
+
+	/* Don't expand the node if the insert will fail. */
+	if (ms->index == 0 && ms->end == 0)
+		return -EINVAL;
+
+	mn = _maple_state_node(ms);
+	if (mn == NULL)
+		return -ENOMEM;
+
+	ms->node = _maple_mk_node(mn);
+
+	/*
+	 * Insert the existing entry into the new node
+	 * rcu_assign is not necessary as readers are not able to access this
+	 * node.
+	 */
+	mn->map64.slot[0] = r_entry;
+	mn->map64.pivot[0] = 1;
+
+
+	ms->slot_idx = 1;
+
+	/* swap the new root into the tree */
+	rcu_assign_pointer(ms->tree->root, ms->node);
+	return 0;
+}
+/*
+ * Private
  * Split a node and set ms->node to the correct one for an insert for
- * ms->index to ms->end
- *
+ * ms->index to ms->end.
  */
-static void *_maple_node_split(struct maple_state *ms)
+static int _maple_node_split(struct maple_state *ms)
 {
 	/* Assume node64, as node4 cannot be split. */
 	//struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
-	struct maple_node_64 *alt_mn64;
+	//struct maple_node_64 *alt_mn64;
 	struct maple_node *alt_mn;
-
-	/* If there is room in the previous or next node
+	if (xa_is_internal(ms->tree->root) == false)
+		return _maple_root_expand(ms);
+	/*
+	 * FIXME:
+	 * If there is room in the previous or next node
 	 * then rebalance.
 	 * set up maple_state to point to the correct node.
-	 *
 	 */
 
 	/* Allocate new mn64 and split */
-	if (ms->alloc != NULL) {
-		alt_mn64 = &(ms->alloc->map64);
-		ms->alloc = NULL;
-	}
-	else {
-		alt_mn = _maple_new_node(GFP_NOWAIT | __GFP_NOWARN);
-		if (alt_mn == NULL)
-			return ms->tree;
-	}
+	alt_mn = _maple_state_node(ms);
+	if (alt_mn == NULL)
+		return -ENOMEM;
 
 	/* Calculate even split by pivots */
 	/* Set up maple_state for the side of the split we will fall on. */
-	return alt_mn64;
-
+	return 0;
 }
 
 static inline int _maple_insert_4(struct maple_state *ms, void *entry)
 {
 	struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
-
+
 	ms->slot_idx = ms->index - ms->min;
 	rcu_assign_pointer(mn4->slot[ms->slot_idx], entry);
 	return 0;
@@ -230,10 +319,10 @@ static inline int _maple_insert_4(struct maple_state *ms, void *entry)
 
 /* Private
  *
- * Note: If the final slot is in use, this will return
+ * Note: If the final slot is in use, this will return
  * MAPLE_NODE64_MAX_PIVOT - 1
  */
-static inline int _maple_data_end_64(struct maple_node_64 *mn64)
+static inline int _maple_data_end_64(const struct maple_node_64 *mn64)
 {
 	int p_end;
 	for (p_end = 0; p_end < MAPLE_NODE64_MAX_PIVOT; p_end++) {
@@ -275,7 +364,6 @@ static inline int _maple_insert_64(struct maple_state *ms, void *entry)
 		shift++;
 	}
 
-
 	// Sanity check.
 	BUG_ON(p_end + shift >= MAPLE_NODE64_MAX_SLOT);
@@ -284,7 +372,7 @@ static inline int _maple_insert_64(struct maple_state *ms, void *entry)
 	/* Writing new values from the tail forward ensure that a valid entry
 	 * was hit prior to a partial write.
 	 */
-	for (idx = p_end; idx >= p_here+shift; idx--)
+	for (idx = p_end; idx >= p_here; idx--)
 	{
 		mn64->pivot[idx + shift] = mn64->pivot[idx];
 		rcu_assign_pointer(mn64->slot[idx + shift],
@@ -294,39 +382,56 @@ static inline int _maple_insert_64(struct maple_state *ms, void *entry)
 
 	/* We now have made space at p_here to p_here + shift for the new
 	 * data, which needs to be:
-	 * (maybe)  p_here: index - 1 => NULL
+	 * (maybe)  p_here: index - 1 => NULL
 	 * (always) p_here + 1: end + 1 => entry
 	 */
 	idx = p_here + shift;
 	mn64->pivot[idx] = ms->end + 1;
 	rcu_assign_pointer(mn64->slot[idx], entry);
+	idx--;
 
 	if (shift > 0) {
-		idx--;
 		mn64->pivot[idx] = ms->index;
 		rcu_assign_pointer(mn64->slot[idx], NULL);
 	}
-
 	return 0;
 }
 
-/* Private
- *
+/*
+ * Private
  * Must hold the spin lock.
- *
  * When this function is called, ms->node has space and is the correct
  * location.
- *
  */
-int _maple_insert(struct maple_state *ms, void *entry)
+static inline int _maple_insert(struct maple_state *ms, void *entry)
 {
+	if (xa_is_internal(ms->tree->root) == false) {
+		if (ms->tree->root == NULL && ms->index == 0 && ms->end == 0) {
+			ms->tree->root = entry;
+			return 0;
+		} else if (ms->tree->root == NULL) {
+			struct maple_node *mn = _maple_state_node(ms);
+
+			if (mn == NULL)
+				return -ENOMEM;
+
+			ms->node = _maple_mk_node(mn);
+			mn->map64.slot[0] = NULL;
+			mn->map64.pivot[0] = ms->index;
+			mn->map64.slot[1] = entry;
+			mn->map64.pivot[1] = ms->index + 1;
+			rcu_assign_pointer(ms->tree->root, ms->node);
+			return 0;
+		}
+	}
+
 	if(_maple_is_node_4(ms))
 		return _maple_insert_4(ms, entry);
 
 	/* node 64 is all that's left */
 	return _maple_insert_64(ms, entry);
-
 }
 
 /* Private
@@ -347,6 +452,10 @@ static void *_maple_insert_walk(struct maple_state *ms)
 {
 	void *entry = ms->node;
 
+	/* No root node yet */
+	if (entry == NULL)
+		return entry;
+
 	do {
 		ms->node = entry;
 		_maple_update_limits(ms);
@@ -356,12 +465,23 @@ static void *_maple_insert_walk(struct maple_state *ms)
 			continue;
 		}
 		if (_maple_node_is_full(ms)) {
-			_maple_node_split(ms);
-			entry = ms->node;
-			continue;
+			int ret = _maple_node_split(ms);
+
+			/*
+			 * If the allocation fails, we need to drop the lock
+			 * and restart the walk from root.
+			 */
+			if (ret == -ENOMEM)
+				return maple_retry(ms);
+			if (ret == -EINVAL) // FIXME..
+				return maple_retry(ms);
+			return NULL;
 		}
 
 		entry = _maple_walk_64(ms, ms->index);
+		/* Ranges need to return the same entry to ensure they do not
+		 * overlap other ranges.
+		 */
 		if ((ms->end != ms->index) &&
 		    (xa_is_internal(entry) == false)) {
 			int s_idx = ms->slot_idx;
@@ -410,10 +530,7 @@ void mtree_init(struct maple_tree *mt)
 {
 	spin_lock_init(&mt->lock);
 	mt->flags = 0;
-	mt->root = _maple_mk_node(_maple_new_node(GFP_KERNEL | __GFP_ZERO));
-
-	mt->min = 0;
-	mt->max = ULONG_MAX;
+	mt->root = NULL;
 }
 EXPORT_SYMBOL(maple_init);
 
@@ -426,22 +543,29 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long start,
 	int ret = -EEXIST;
 	void *walked = NULL;
 	MAP_STATE(ms, mt, start, end);
-
+
 	if (WARN_ON_ONCE(xa_is_internal(entry)))
 		return -EINVAL;
 
 	if (start > end)
 		return -EINVAL;
-
+
 	mt->flags |= MAP_STATE_SPLIT_ON_FULL;
 	spin_lock(&ms.tree->lock);
+retry:
 	ms.node = rcu_dereference(mt->root);
-	walked = _maple_insert_walk(&ms);
+	walked = _maple_insert_walk(&ms);
 	if (walked != NULL)
 		goto already_exists;
+	if (walked == maple_retry(&ms))
+		goto retry;
 
 	ret = _maple_insert(&ms, entry);
+	if (ret == -ENOMEM) {
+		if (__maple_nomem(&ms, GFP_KERNEL | __GFP_ZERO)) // Drops lock.
+			goto retry;
+	}
 
 already_exists:
 	spin_unlock(&ms.tree->lock);
@@ -539,13 +663,14 @@ void mn_dump(void *entry, unsigned long min, unsigned long max)
 void mt_dump(const struct maple_tree *mt)
 {
 	void *entry = mt->root;
-	unsigned long min = mt->min;
-	unsigned long max = mt->max;
+	unsigned long min = 0;
+	unsigned long max = ULONG_MAX;
 
-	pr_info("maple_tree(%p) flags %X, root %p, min %ld, max %ld\n",
-		 mt, mt->flags, entry, min, max);
-	if (!xa_is_node(entry))
+
+	if (xa_is_internal(entry) == false)
 		max = 0;
+	pr_info("maple_tree(%p) flags %X, root %p, min %lu, max %lu\n",
+		 mt, mt->flags, entry, min, max);
 	mn_dump(entry, min, max);
 }
 #endif
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 5da4d97c4da9..178b362dc841 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -77,6 +77,7 @@ static noinline void check_load(struct maple_tree *mt, unsigned long index,
 		void *ptr)
 {
 	void *ret = mtree_test_load(mt, index);
+
 	MT_BUG_ON(mt, ret != ptr);
 }
 
@@ -84,19 +85,33 @@ static DEFINE_MAPLE_TREE(tree);
 
 static int maple_tree_seed(void)
 {
-	unsigned long set[] = {15, 14, 17, 25, 1000, 1001,1002,1003,1005};
+	unsigned long set[] = {15, 14, 17, 25, 1000,
+			       1001, 1002, 1003, 1005, 0,
+			       3};
 	unsigned long r[] = {10, 15, 20, 25, 22}; // For range testing
 	void *ptr = &set;
 
 	mtree_init(&tree);
 
-	/* First set of tests try to insert, insert a dup, and load back what
-	 * was inserted.
-	 */
+	check_load(&tree, set[0], NULL);       // See if 15 -> NULL
+
+	check_insert(&tree, set[9], &tree);    // Insert 0
+	check_load(&tree, set[9], &tree);      // See if 0 -> &tree
+	check_load(&tree, set[0], NULL);       // See if 15 -> NULL
+
+	check_insert(&tree, set[10], ptr);     // Insert 3
+	check_load(&tree, set[9], &tree);      // See if 0 -> &tree
+
+	check_load(&tree, set[10], ptr);       // See if 3 -> ptr
+
+	/* Clear out the tree */
+	mtree_destroy(&tree);
+
+	/* Try to insert, insert a dup, and load back what was inserted. */
+	mtree_init(&tree);
 	check_insert(&tree, set[0], &tree);     // Insert 15
 	check_dup_insert(&tree, set[0], &tree); // Insert 15 again
-	check_load(&tree, set[0], &tree);       // See if 15 -> &tree
+	check_load(&tree, set[0], &tree);       // See if 15 -> &tree (test 10)
 
 	/* Second set of tests try to load a value that doesn't exist, inserts
 	 * a second value, then loads the value again
@@ -104,6 +119,7 @@ static int maple_tree_seed(void)
 	check_load(&tree, set[1], NULL);       // See if 14 -> NULL
 	check_insert(&tree, set[1], ptr);      // insert 14 -> ptr
 	check_load(&tree, set[1], ptr);        // See if 14 -> ptr
+	check_load(&tree, set[0], &tree);      // See if 15 -> &tree
 
 	/* Tree currently contains:
 	 * p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil)
@@ -111,6 +127,11 @@ static int maple_tree_seed(void)
 	check_insert(&tree, set[6], ptr);      // insert 1002 -> ptr
 	check_insert(&tree, set[7], &tree);    // insert 1003 -> &tree
 
+	check_load(&tree, set[0], &tree);      // See if 15 -> &tree
+	check_load(&tree, set[1], ptr);        // See if 14 -> ptr
+	check_load(&tree, set[6], ptr);        // See if 1002 -> ptr
+	check_load(&tree, set[7], &tree);      // See if 1003 -> &tree
+
 	/* Clear out tree */
 	mtree_destroy(&tree);
 
@@ -119,6 +140,9 @@ static int maple_tree_seed(void)
 	check_insert(&tree, set[5], ptr);      // insert 1001 -> ptr
 	check_insert(&tree, set[7], &tree);    // insert 1003 -> &tree
 	check_insert(&tree, set[6], ptr);      // insert 1002 -> ptr
+	check_load(&tree, set[5], ptr);        // See if 1001 -> ptr
+	check_load(&tree, set[6], ptr);        // See if 1002 -> ptr
+	check_load(&tree, set[7], &tree);      // See if 1003 -> &tree
 
 	/* Clear out the tree */
 	mtree_destroy(&tree);
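
Editorial note, not part of the commit: the single-pointer root behaviour described in the commit message can be pictured with a short test written in the style of lib/test_maple_tree.c. The sketch below reuses the check_insert()/check_load() helpers, DEFINE_MAPLE_TREE(), mtree_init() and mtree_destroy() that appear in the patch above; the function name, tree name and chosen indices are hypothetical and only illustrate the expected behaviour.

/*
 * Sketch only (assumes the helpers from lib/test_maple_tree.c are in scope).
 * Exercises the new behaviour: an empty tree stores an index-0 entry
 * directly in tree->root, and a later insert expands the root into a node
 * whose slot 0 still maps index 0 to that first pointer.
 */
static DEFINE_MAPLE_TREE(sp_tree);

static noinline void check_single_pointer_root(void)
{
	void *ptr = &sp_tree;

	mtree_init(&sp_tree);

	/* Empty tree + range 0-0: the entry goes straight into root,
	 * without allocating a node.
	 */
	check_insert(&sp_tree, 0, ptr);      // Insert 0 -> ptr
	check_load(&sp_tree, 0, ptr);        // See if 0 -> ptr

	/* A later insert triggers the root expansion path
	 * (_maple_root_expand() in the patch above).
	 */
	check_insert(&sp_tree, 3, &sp_tree); // Insert 3 -> &sp_tree
	check_load(&sp_tree, 0, ptr);        // 0 still -> ptr
	check_load(&sp_tree, 3, &sp_tree);   // See if 3 -> &sp_tree

	mtree_destroy(&sp_tree);
}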