// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bug.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/gfp.h>
/* Actual implementation. */
-/* Private */
-static inline bool _is_root(struct maple_state *ms)
+/* Private: true while ms->node still holds the initial MAS_START sentinel. */
+static inline bool mas_is_start(struct maple_state *ms)
{
- if (ms->node == ms->tree->root)
- return true;
- return false;
+ return ms->node == MAS_START;
}
-static inline struct maple_node *_maple_to_node(const void *entry)
+/* Convert a tagged internal entry back to its struct maple_node pointer. */
+static inline struct maple_node *ma_to_node(const void *entry)
{
+ /* Only tagged internal-node entries may be converted. */
+ BUG_ON(!xa_is_node(entry));
+ /* The internal tag is bit 1 (value 2) — see ma_mk_node(). */
return (struct maple_node *)((unsigned long)entry - 2);
}
-static inline void *_maple_mk_node(const struct maple_node *node)
+/* Tag a struct maple_node pointer (bit 1) for storage as an internal entry. */
+static inline void *ma_mk_node(const struct maple_node *node)
{
+ /* A plain node pointer must not already carry the internal tag. */
+ BUG_ON(xa_is_node(node));
return (void *)((unsigned long)node | 2);
}
} else {
ms->alloc = _maple_new_node(gfp);
}
+ ms->node = MAS_START;
if (!ms->alloc)
return false;
return true;
static void *_maple_walk_4(struct maple_state *ms)
{
- struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
+ struct maple_node_4 *mn4 = &ms->node->map4;
ms->slot_idx = ms->index - ms->min;
return rcu_dereference(mn4->slot[ms->slot_idx]);
static void *_maple_walk_64(struct maple_state *ms, unsigned long val)
{
- struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+ struct maple_node_64 *mn64 = &ms->node->map64;
int i = 0;
do {
+/*
+ * Narrow ms->min / ms->max to the index range covered by slot ms->slot_idx
+ * of the current node64, using the neighbouring pivots.  A no-op while the
+ * walk is still at MAS_START (no node has been entered yet).
+ */
static void _maple_update_limits(struct maple_state *ms)
{
-
struct maple_node_64 *mn64;
int i = ms->slot_idx;
- if (ms->node == ms->tree->root)
+ if (mas_is_start(ms))
return;
- mn64 = &(_maple_to_node(ms->node)->map64);
+ mn64 = &ms->node->map64;
+ /* Lower bound comes from the previous pivot; slot 0 keeps the old min. */
if (i > 0)
ms->min = mn64->pivot[i - 1];
+ /* Upper bound: this slot's pivot is exclusive, hence the - 1. */
if (i < MAPLE_NODE64_MAX_SLOT - 1)
ms->max = mn64->pivot[i] - 1;
}
+
+/*
+ * Resolve the entry to begin a walk from.
+ *
+ * On first use (ms->node == MAS_START) the tree root is read: a non-node
+ * root is a single entry that only covers index 0, so any higher index
+ * returns NULL, otherwise the state is marked MAS_ROOT.  If the state
+ * already points at a node, that node is re-tagged and returned so the
+ * caller's descent loop can resume from it.
+ */
+static void *mas_start(struct maple_state *ms)
+{
+ void *entry;
+
+ if (ms->node == MAS_START) {
+ entry = ms->tree->root;
+ if (!xa_is_node(entry)) {
+ if (ms->index > 0)
+ return NULL;
+ ms->node = MAS_ROOT;
+ }
+ } else {
+ entry = ma_mk_node(ms->node);
+ }
+
+ return entry;
+}
+
/*
* Private
*
* */
+/*
+ * Walk the tree to the entry stored at ms->index, descending internal
+ * node entries until a leaf entry (or NULL) is found.  Returns the entry,
+ * or NULL when the index lies outside the current node's range.
+ */
static void *_maple_walk(struct maple_state *ms)
{
- void *entry = NULL;
+ void *entry = mas_start(ms);
/* Outside this nodes range, it doesn't exist. */
if (ms->min > ms->index ||
ms->max < ms->index)
- return entry; // FIXME: Retry?
+ return NULL; // FIXME: Retry?
- if (ms->node == NULL)
- return entry;
-
- if (xa_is_internal(ms->node) == false) {
- if (ms->index == ms->end && ms->index == 0)
- return ms->node;
- else
- return NULL;
- }
-
- entry = ms->node;
- do {
+ /* Descend while the entry is a tagged internal node. */
+ while (xa_is_node(entry)) {
_maple_update_limits(ms);
- ms->node = entry;
+ ms->node = ma_to_node(entry);
+ /* A node4 lookup ends the descent (single-slot indexing). */
if (_maple_is_node_4(ms)) {
entry = _maple_walk_4(ms);
break;
}
entry = _maple_walk_64(ms, ms->index);
-
- } while (xa_is_internal(entry));
+ }
return entry;
}
-static inline int _count_node_64(struct maple_state *ms) {
+static inline int _count_node_64(struct maple_state *ms)
+{
int i;
- struct maple_node_64 mn64 = _maple_to_node(ms->node)->map64;
+ struct maple_node_64 mn64 = ms->node->map64;
for (i = 0; i < MAPLE_NODE64_MAX_PIVOT; i++) {
if (mn64.pivot[i] == 0)
void *r_entry = ms->tree->root; // root entry
struct maple_node *mn;
- /* Don't expand the node if the insert will fail. */
- if (ms->index == 0 && ms->end == 0)
- return -EINVAL;
-
mn = _maple_state_node(ms);
if (mn == NULL)
return -ENOMEM;
- ms->node = _maple_mk_node(mn);
+ ms->node = mn;
/*
* Insert the existing entry into the new node
* rcu_assign is not necessary as readers are not able to access this
* node.
*/
- mn->map64.slot[0] = r_entry;
+ RCU_INIT_POINTER(mn->map64.slot[0], r_entry);
mn->map64.pivot[0] = 1;
-
ms->slot_idx = 1;
/* swap the new root into the tree */
- rcu_assign_pointer(ms->tree->root, ms->node);
+ rcu_assign_pointer(ms->tree->root, ma_mk_node(mn));
return 0;
}
/*
*/
static int _maple_node_split(struct maple_state *ms)
{
-
/* Assume node64, as node4 cannot be split. */
- //struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+ //struct maple_node_64 *mn64 = &ms->node->map64;
//struct maple_node_64 *alt_mn64;
struct maple_node *alt_mn;
- if (xa_is_internal(ms->tree->root) == false)
+ if (!xa_is_node(ms->tree->root))
return _maple_root_expand(ms);
/*
* FIXME:
static inline int _maple_insert_4(struct maple_state *ms, void *entry)
{
- struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
+ struct maple_node_4 *mn4 = &ms->node->map4;
ms->slot_idx = ms->index - ms->min;
rcu_assign_pointer(mn4->slot[ms->slot_idx], entry);
static inline int _maple_insert_64(struct maple_state *ms, void *entry)
{
-
- struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+ struct maple_node_64 *mn64 = &ms->node->map64;
int p_here = ms->slot_idx; /* Location to place data */
int p_end; /* End of data */
int shift = 0;
*/
static inline int _maple_insert(struct maple_state *ms, void *entry)
{
- if (xa_is_internal(ms->tree->root) == false) {
- if (ms->tree->root == NULL && ms->index == 0 && ms->end == 0) {
+ if (!xa_is_node(ms->tree->root)) {
+ BUG_ON(ms->end > 0);
+ if (ms->tree->root == NULL) {
ms->tree->root = entry;
return 0;
- } else if (ms->tree->root == NULL) {
- struct maple_node *mn = _maple_state_node(ms);
-
- if (mn == NULL)
- return -ENOMEM;
- if (mn == NULL)
- return -ENOMEM;
- ms->node = _maple_mk_node(mn);
- mn->map64.slot[0] = NULL;
- mn->map64.pivot[0] = ms->index;
- mn->map64.slot[1] = entry;
- mn->map64.pivot[1] = ms->index+1;
- rcu_assign_pointer(ms->tree->root, ms->node);
- return 0;
+ } else {
+ return -EEXIST;
}
}
- if(_maple_is_node_4(ms))
+ if (_maple_is_node_4(ms))
return _maple_insert_4(ms, entry);
/* node 64 is all that's left */
*/
static void *_maple_insert_walk(struct maple_state *ms)
{
- void *entry = ms->node;
+ void *entry = mas_start(ms);
- /* No root node yet */
- if (entry == NULL)
- return entry;
+ if (!xa_is_node(entry)) {
+ int err;
+
+ if (ms->end == 0)
+ return entry;
+ err = _maple_root_expand(ms);
+ if (err == -ENOMEM)
+ return maple_retry(ms);
+
+ return NULL;
+ }
do {
- ms->node = entry;
_maple_update_limits(ms);
+ ms->node = ma_to_node(entry);
if(_maple_is_node_4(ms)) {
/* FIXME: What about range inserts on a node4? */
entry = _maple_walk_4(ms);
if (s_idx != ms->slot_idx || mn != ms->node)
return mn; /* Return non-NULL for an insert fail */
}
-
- } while(xa_is_internal(entry));
+ } while (xa_is_node(entry));
return entry;
}
static void _maple_destroy_walk(struct maple_state *ms)
{
- struct maple_node *mn = _maple_to_node(ms->node);
- struct maple_node_64 *mn64 = &(mn->map64);
+ struct maple_node *mn = ms->node;
+ struct maple_node_64 *mn64 = &mn->map64;
void *entry;
int i = 0;
do {
- if(mn64->pivot[i] == 0)
+ if ((i < MAPLE_NODE64_MAX_PIVOT) && mn64->pivot[i] == 0)
break;
entry = mn64->slot[i];
- if (entry == NULL)
- continue;
- if (xa_is_internal(entry))
- {
+ if (xa_is_node(entry)) {
if(_maple_is_node_4(ms)) {
_maple_free_node(ms->node);
continue;
/* end of the line.. */
}
/* Set min/max */
- ms->node = entry;
+ ms->node = ma_to_node(entry);
_maple_destroy_walk(ms);
}
} while (i++ < MAPLE_NODE64_MAX_SLOT - 1);
spin_lock(&ms.tree->lock);
retry:
- ms.node = rcu_dereference(mt->root);
walked = _maple_insert_walk(&ms);
if (walked != NULL)
goto already_exists;
void *entry;
MAP_STATE(ms, mt, index, index);
rcu_read_lock();
- ms.node = rcu_dereference(mt->root);
entry = _maple_walk(&ms);
rcu_read_unlock();
return entry;
+/*
+ * Free every allocated node in the tree and reset the root, all under the
+ * tree spinlock.  A non-node root holds no allocated nodes, so only a
+ * tagged node root triggers the recursive teardown walk.
+ */
int mtree_destroy(struct maple_tree *mt)
{
MAP_STATE(ms, mt, 0, 0);
-
spin_lock(&mt->lock);
- _maple_destroy_walk(&ms);
+ if (xa_is_node(mt->root)) {
+ ms.node = ma_to_node(mt->root);
+ _maple_destroy_walk(&ms);
+ }
+ mt->root = NULL;
spin_unlock(&mt->lock);
-noop:
+
return 0;
}
EXPORT_SYMBOL(mtree_destroy);
pr_info("%lu-%lu: ", min, max);
if (xa_is_node(entry)) {
- struct maple_node_64 *mn64 = &_maple_to_node(entry)->map64;
+ struct maple_node_64 *mn64 = &ma_to_node(entry)->map64;
unsigned long prev = min;
unsigned int i;