+// SPDX-License-Identifier: GPL-2.0+
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/gfp.h>
/* Things we will probably eventually need */
#if 0
-/* Private
+/*
+ * Private
* return next left node
*/
static void _maple_next_node(struct maple_state *ms)
{
}
-/* Private
+/*
+ * Private
* return previous right node
*/
static void _maple_prev_node(struct maple_state *ms)
{
}
-/* Private
+/*
+ * Private
* Merge a given set of nodes into a single node
*/
static void _maple_merge(struct maple_state *ms, struct maple_node *mn)
{
}
-/* Private
+/*
+ * Private
* Split a ms->node into two.
* Requires tree to be locked.
*/
return (void *)((unsigned long)node | 2);
}
-static inline bool _maple_is_node_4(struct maple_state *ms) {
+static inline bool _maple_is_node_4(struct maple_state *ms)
+{
/* FIXME: This just means that our node could be contained within a
* node_4. This could happen when shrinking a node_64, for example.
*/
+ if (ms->node == ms->tree->root)
+ return false;
+
if ((ms->max - ms->min) <= (unsigned long)MAPLE_NODE4_MAX_SLOT)
return true;
return false;
}
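+/*
+ * Private
+ * Called when an allocation has failed under the lock.  If gfp allows
+ * blocking, drop the lock to allocate a spare node into ms->alloc and
+ * retake it; the caller must then retry, as the tree may have changed
+ * while unlocked.
+ */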
+static bool __maple_nomem(struct maple_state *ms, gfp_t gfp)
+ __must_hold(ms->tree->lock)
+{
+ struct maple_node *mn;
+
+ if (gfpflags_allow_blocking(gfp)) {
+ spin_unlock(&ms->tree->lock);
+ mn = _maple_new_node(gfp);
+ spin_lock(&ms->tree->lock);
+ } else {
+ mn = _maple_new_node(gfp);
+ }
+ /* Check before tagging: _maple_mk_node(NULL) is not NULL. */
+ if (mn == NULL)
+ return false;
+
+ ms->alloc = _maple_mk_node(mn);
+ return true;
+}
+
static void _maple_free_node(struct maple_node *mn)
{
kfree(mn);
}
static void *_maple_walk_4(struct maple_state *ms)
{
struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
+
ms->slot_idx = ms->index - ms->min;
return rcu_dereference(mn4->slot[ms->slot_idx]);
}
static void _maple_update_limits(struct maple_state *ms)
{
- struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+ struct maple_node_64 *mn64;
int i = ms->slot_idx;
if (ms->node == ms->tree->root)
return;
+ mn64 = &(_maple_to_node(ms->node)->map64);
+
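+ /* Slot i covers the range [pivot[i - 1], pivot[i] - 1]. */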
if (i > 0)
ms->min = mn64->pivot[i - 1];
if (i < MAPLE_NODE64_MAX_SLOT - 1)
ms->max = mn64->pivot[i] - 1;
-
}
/*
* Private
void *entry = NULL;
/* Outside this node's range, it doesn't exist. */
- if (ms->min >= ms->index ||
- ms->max <= ms->index)
+ if (ms->min > ms->index ||
+ ms->max < ms->index)
+ return entry;
+
+ if (ms->node == NULL)
return entry;
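+ /* A non-internal root holds a single entry stored at index 0. */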
+ if (xa_is_internal(ms->node) == false) {
+ if (ms->index == ms->end && ms->index == 0)
+ return ms->node;
+ return NULL;
+ }
+
entry = ms->node;
do {
_maple_update_limits(ms);
static inline int _count_node_64(struct maple_state *ms) {
int i;
+ struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+
for (i = 0; i < MAPLE_NODE64_MAX_PIVOT; i++) {
- if (ms->node->map64.pivot[i] == 0)
+ if (mn64->pivot[i] == 0)
break;
}
/*FIXME: Is this right? */
if (i == MAPLE_NODE64_MAX_PIVOT - 1 &&
- ms->node->map64.slot[i + 1] != NULL)
+ mn64->slot[i + 1] != NULL)
i++;
return i;
}
-/* Private
- *
+/*
+ * Private
* Check if ms->node has enough room for ms->index to ms->end
- *
*/
static inline bool _maple_node_is_full(struct maple_state *ms)
{
+ if (ms->tree->root == NULL && ms->index == 0 && ms->end == 0)
+ return false; // Room for a single range.
+ if (xa_is_internal(ms->tree->root) == false)
+ return true; // Single range is occupied.
+ if (_maple_is_node_4(ms))
+ return false; // Can't get here.
+ if (_count_node_64(ms) == MAPLE_NODE64_MAX_SLOT)
+ return true; // Only full if all the slots are occupied.
return false;
}
-/* Private
- *
+/*
+ * Private
+ * Use the preallocated node in ms->alloc if one is attached, otherwise
+ * attempt a non-blocking allocation.
+ */
+static struct maple_node *_maple_state_node(struct maple_state *ms)
+{
+ struct maple_node *alt_mn;
+
+ if (ms->alloc != NULL) {
+ alt_mn = _maple_to_node(ms->alloc);
+ ms->alloc = NULL;
+ } else {
+ /* Called under the tree lock, so the allocation must not block. */
+ alt_mn = _maple_new_node(GFP_NOWAIT | __GFP_NOWARN |
+ __GFP_ZERO);
+ }
+ return alt_mn;
+}
+
+/*
+ * Private
+ * Handle if root is just a single pointer
+ */
+static int _maple_root_expand(struct maple_state *ms)
+{
+ void *r_entry = ms->tree->root; // root entry
+ struct maple_node *mn;
+
+ /* Don't expand the node if the insert will fail. */
+ if (ms->index == 0 && ms->end == 0)
+ return -EINVAL;
+
+ mn = _maple_state_node(ms);
+ if (mn == NULL)
+ return -ENOMEM;
+
+ ms->node = _maple_mk_node(mn);
+
+ /*
+ * Insert the existing entry into the new node.  The old root entry
+ * was stored at index 0, so slot 0 covers the range [0, 0].
+ * rcu_assign_pointer() is not necessary as readers cannot access
+ * this node yet.
+ */
+ mn->map64.slot[0] = r_entry;
+ mn->map64.pivot[0] = 1;
+
+ ms->slot_idx = 1;
+
+ /* swap the new root into the tree */
+ rcu_assign_pointer(ms->tree->root, ms->node);
+ return 0;
+}
+/*
+ * Private
* Split a node and set ms->node to the correct one for an insert for
- * ms->index to ms->end
- *
+ * ms->index to ms->end.
*/
-static void *_maple_node_split(struct maple_state *ms)
+static int _maple_node_split(struct maple_state *ms)
{
/* Assume node64, as node4 cannot be split. */
//struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
- struct maple_node_64 *alt_mn64;
+ //struct maple_node_64 *alt_mn64;
struct maple_node *alt_mn;
-
- /* If there is room in the previous or next node
+ if (xa_is_internal(ms->tree->root) == false)
+ return _maple_root_expand(ms);
+ /*
+ * FIXME:
+ * If there is room in the previous or next node
* then rebalance.
* set up maple_state to point to the correct node.
- *
*/
/* Allocate new mn64 and split */
- if (ms->alloc != NULL) {
- alt_mn64 = &(ms->alloc->map64);
- ms->alloc = NULL;
- }
- else {
- alt_mn = _maple_new_node(GFP_NOWAIT | __GFP_NOWARN);
- if (alt_mn == NULL)
- return ms->tree;
- }
+ alt_mn = _maple_state_node(ms);
+ if (alt_mn == NULL)
+ return -ENOMEM;
/* Calculate even split by pivots */
/* Set up maple_state for the side of the split we will fall on. */
- return alt_mn64;
-
+ return 0;
}
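+/*
+ * Private
+ * node_4 slots map one index each, so the target slot is simply the
+ * offset of ms->index from the node minimum.
+ */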
static inline int _maple_insert_4(struct maple_state *ms, void *entry)
{
struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
-
+
ms->slot_idx = ms->index - ms->min;
rcu_assign_pointer(mn4->slot[ms->slot_idx], entry);
return 0;
}
/* Private
*
- * Note: If the final slot is in use, this will return
+ * Note: If the final slot is in use, this will return
* MAPLE_NODE64_MAX_PIVOT - 1
*/
-static inline int _maple_data_end_64(struct maple_node_64 *mn64)
+static inline int _maple_data_end_64(const struct maple_node_64 *mn64)
{
int p_end;
for (p_end = 0; p_end < MAPLE_NODE64_MAX_PIVOT; p_end++) {
shift++;
}
-
// Sanity check.
BUG_ON(p_end + shift >= MAPLE_NODE64_MAX_SLOT);
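+ /*
+ * Worked example (mirrors the 15 then 14 insert in the test code):
+ * a node holding only 15 -> A has pivot 15,16 over slots nil,A;
+ * inserting 14 -> B shifts the tail up, giving pivot 14,15,16 over
+ * slots nil,B,A, where slot[i] covers [pivot[i - 1], pivot[i] - 1].
+ */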
/* Writing new values from the tail forward ensures that a reader always
 * hits a valid entry before any slot that has not yet been written.
 */
- for (idx = p_end; idx >= p_here+shift; idx--)
- {
+ for (idx = p_end; idx >= p_here; idx--) {
mn64->pivot[idx + shift] = mn64->pivot[idx];
rcu_assign_pointer(mn64->slot[idx + shift],
/* We now have made space at p_here to p_here + shift for the new
* data, which needs to be:
- * (maybe) p_here: index - 1 => NULL
+ * (maybe) p_here: index - 1 => NULL
* (always) p_here + 1: end + 1 => entry
*/
idx = p_here + shift;
mn64->pivot[idx] = ms->end + 1;
rcu_assign_pointer(mn64->slot[idx], entry);
+ idx--;
if (shift > 0) {
- idx--;
mn64->pivot[idx] = ms->index;
rcu_assign_pointer(mn64->slot[idx], NULL);
}
-
return 0;
}
-/* Private
- *
+/*
+ * Private
* Must hold the spin lock.
- *
* When this function is called, ms->node has space and is the correct
* location.
- *
*/
-int _maple_insert(struct maple_state *ms, void *entry)
+static inline int _maple_insert(struct maple_state *ms, void *entry)
{
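+ /*
+ * Handle an empty tree directly: a lone entry at index 0 lives in
+ * tree->root itself; any other first insert allocates a real node
+ * with a NULL slot covering [0, index - 1].
+ */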
+ if (xa_is_internal(ms->tree->root) == false) {
+ if (ms->tree->root == NULL && ms->index == 0 && ms->end == 0) {
+ ms->tree->root = entry;
+ return 0;
+ } else if (ms->tree->root == NULL) {
+ struct maple_node *mn = _maple_state_node(ms);
+
+ if (mn == NULL)
+ return -ENOMEM;
+ ms->node = _maple_mk_node(mn);
+ mn->map64.slot[0] = NULL;
+ mn->map64.pivot[0] = ms->index;
+ mn->map64.slot[1] = entry;
+ mn->map64.pivot[1] = ms->end + 1;
+ rcu_assign_pointer(ms->tree->root, ms->node);
+ return 0;
+ }
+ }
+
if(_maple_is_node_4(ms))
return _maple_insert_4(ms, entry);
/* node 64 is all that's left */
return _maple_insert_64(ms, entry);
-
}
/* Private
{
void *entry = ms->node;
+ /* No root node yet */
+ if (entry == NULL)
+ return entry;
+
do {
ms->node = entry;
_maple_update_limits(ms);
continue;
}
if (_maple_node_is_full(ms)) {
- _maple_node_split(ms);
- entry = ms->node;
- continue;
+ int ret = _maple_node_split(ms);
+
+ /*
+ * If the allocation fails, we need to drop the lock
+ * and restart the walk from root.
+ */
+ if (ret == -ENOMEM)
+ return maple_retry(ms);
+ if (ret == -EINVAL) // FIXME..
+ return maple_retry(ms);
+ return NULL;
}
entry = _maple_walk_64(ms, ms->index);
+ /*
+ * For a range insert, every index from ms->index to ms->end must
+ * resolve to the same entry; otherwise the range would overlap an
+ * existing one.
+ */
if ((ms->end != ms->index) &&
(xa_is_internal(entry) == false)) {
int s_idx = ms->slot_idx;
{
spin_lock_init(&mt->lock);
mt->flags = 0;
- mt->root = _maple_mk_node(_maple_new_node(GFP_KERNEL | __GFP_ZERO));
-
- mt->min = 0;
- mt->max = ULONG_MAX;
+ mt->root = NULL;
}
EXPORT_SYMBOL(maple_init);
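+/*
+ * Usage sketch.  The insert/load entry points are exercised through the
+ * test wrappers below (check_insert(), mtree_test_load()); the names
+ * mtree_insert()/mtree_load() here are illustrative, not final:
+ *
+ * struct maple_tree tree;
+ *
+ * mtree_init(&tree);
+ * mtree_insert(&tree, 15, ptr); // map index 15 to ptr
+ * BUG_ON(mtree_load(&tree, 15) != ptr);
+ * mtree_destroy(&tree);
+ */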
int ret = -EEXIST;
void *walked = NULL;
MAP_STATE(ms, mt, start, end);
-
+
if (WARN_ON_ONCE(xa_is_internal(entry)))
return -EINVAL;
if (start > end)
return -EINVAL;
-
+
mt->flags |= MAP_STATE_SPLIT_ON_FULL;
spin_lock(&ms.tree->lock);
+retry:
ms.node = rcu_dereference(mt->root);
- walked = _maple_insert_walk(&ms);
+ walked = _maple_insert_walk(&ms);
+ if (walked == maple_retry(&ms))
+ goto retry;
if (walked != NULL)
goto already_exists;
ret = _maple_insert(&ms, entry);
+ if (ret == -ENOMEM) {
+ if (__maple_nomem(&ms, GFP_KERNEL | __GFP_ZERO)) // Drops lock.
+ goto retry;
+ }
already_exists:
spin_unlock(&ms.tree->lock);
void mt_dump(const struct maple_tree *mt)
{
void *entry = mt->root;
- unsigned long min = mt->min;
- unsigned long max = mt->max;
+ unsigned long min = 0;
+ unsigned long max = ULONG_MAX;
- pr_info("maple_tree(%p) flags %X, root %p, min %ld, max %ld\n",
- mt, mt->flags, entry, min, max);
- if (!xa_is_node(entry))
+
+ if (xa_is_internal(entry) == false)
max = 0;
+ pr_info("maple_tree(%p) flags %X, root %p, min %lu, max %lu\n",
+ mt, mt->flags, entry, min, max);
mn_dump(entry, min, max);
}
#endif
void *ptr)
{
void *ret = mtree_test_load(mt, index);
+
MT_BUG_ON(mt, ret != ptr);
}
static int maple_tree_seed(void)
{
- unsigned long set[] = {15, 14, 17, 25, 1000, 1001,1002,1003,1005};
+ unsigned long set[] = {15, 14, 17, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 3};
unsigned long r[] = {10, 15, 20, 25, 22}; // For range testing
void *ptr = &set;
mtree_init(&tree);
- /* First set of tests try to insert, insert a dup, and load back what
- * was inserted.
- */
+ check_load(&tree, set[0], NULL); // See if 15 -> NULL
+
+ check_insert(&tree, set[9], &tree); // Insert 0
+ check_load(&tree, set[9], &tree); // See if 0 -> &tree
+ check_load(&tree, set[0], NULL); // See if 15 -> NULL
+
+ check_insert(&tree, set[10], ptr); // Insert 3
+ check_load(&tree, set[9], &tree); // See if 0 -> &tree
+
+ check_load(&tree, set[10], ptr); // See if 3 -> ptr
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+ /* Try to insert, insert a dup, and load back what was inserted. */
+ mtree_init(&tree);
check_insert(&tree, set[0], &tree); // Insert 15
check_dup_insert(&tree, set[0], &tree); // Insert 15 again
- check_load(&tree, set[0], &tree); // See if 15 -> &tree
+ check_load(&tree, set[0], &tree); // See if 15 -> &tree (test 10)
/* Second set of tests try to load a value that doesn't exist, inserts
* a second value, then loads the value again
check_load(&tree, set[1], NULL); // See if 14 -> NULL
check_insert(&tree, set[1], ptr); // insert 14 -> ptr
check_load(&tree, set[1], ptr); // See if 14 -> ptr
+ check_load(&tree, set[0], &tree); // See if 15 -> &tree
/* Tree currently contains:
* p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil)
check_insert(&tree, set[6], ptr); // insert 1002 -> ptr
check_insert(&tree, set[7], &tree); // insert 1003 -> &tree
+ check_load(&tree, set[0], &tree); // See if 15 -> &tree
+ check_load(&tree, set[1], ptr); // See if 14 -> ptr
+ check_load(&tree, set[6], ptr); // See if 1002 -> ptr
+ check_load(&tree, set[7], &tree); // See if 1003 -> &tree
+
/* Clear out tree */
mtree_destroy(&tree);
check_insert(&tree, set[5], ptr); // insert 1001 -> ptr
check_insert(&tree, set[7], &tree); // insert 1003 -> &tree
check_insert(&tree, set[6], ptr); // insert 1002 -> ptr
+ check_load(&tree, set[5], ptr); // See if 1001 -> ptr
+ check_load(&tree, set[6], ptr); // See if 1002 -> ptr
+ check_load(&tree, set[7], &tree); // See if 1003 -> &tree
/* Clear out the tree */
mtree_destroy(&tree);