#include <linux/maple_tree.h>
#include <linux/rcupdate.h>
+/*
+ * MA_ERROR() - Encode a negative errno as a fake maple_node pointer.
+ *
+ * The low two bits (0b10) mirror the xarray internal-entry encoding,
+ * which is why xa_is_err()/xa_err() can recognise and decode the value
+ * (see mas_is_err()).  The parameter is parenthesised so expressions
+ * such as MA_ERROR(cond ? -ENOMEM : -EINVAL) bind correctly.
+ */
+#define MA_ERROR(err) ((struct maple_node *)(((unsigned long)(err) << 2) | 2UL))
+
+/*
+ * mas_set_err() - Record an error on the maple state.
+ * @mas: the maple state
+ * @err: a negative errno
+ *
+ * Stores the MA_ERROR()-encoded errno in @mas->node so that a later
+ * mas_is_err() check can detect it and xa_err() can decode it.
+ */
+static inline void mas_set_err(struct maple_state *mas, long err)
+{
+ mas->node = MA_ERROR(err);
+}
+
+/*
+ * mas_is_err() - Check whether the maple state holds an encoded errno.
+ * @mas: the maple state
+ *
+ * Returns true when @mas->node was set via mas_set_err(); the encoding
+ * is shared with the xarray, so xa_is_err() can test it directly.
+ */
+static inline bool mas_is_err(struct maple_state *mas)
+{
+ return xa_is_err(mas->node);
+}
/* Things we will probably eventually need */
#if 0
return;
mn = ma_get_alloc(ms);
- if(!mn)
+ if (!mn)
mn = kcalloc(count, sizeof(*mn), gfp);
if (!mn)
goto list_failed;
kfree(mn->slot[i]);
kfree(mn);
list_failed:
- return;
+ /*
+  * Instead of failing silently, remember how many nodes were wanted
+  * and flag -ENOMEM on the state; __maple_nomem() can then drop the
+  * lock, retry the allocation with blocking GFP flags, and the
+  * caller restarts the operation.
+  */
+ ma_set_alloc_cnt(ms, count);
+ mas_set_err(ms, -ENOMEM);
}
static bool __maple_nomem(struct maple_state *ms, gfp_t gfp)
__must_hold(ms->tree->lock)
{
+ /*
+  * Only an -ENOMEM recorded by a failed node allocation is retryable
+  * here; any other state (including other encoded errors) is left
+  * untouched for the caller to report via mas_is_err()/xa_err().
+  */
+ if (ms->node != MA_ERROR(-ENOMEM))
+ return false;
+
if (gfpflags_allow_blocking(gfp)) {
spin_unlock(&ms->tree->lock);
maple_new_node(ms, ma_get_alloc_cnt(ms), gfp);
{
void *entry;
+ /* Once an error is recorded on the state there is nothing to walk;
+  * bail out before touching the tree. */
+ if (mas_is_err(ms))
+ return NULL;
+
if (ms->node == MAS_START) {
entry = ms->tree->root;
if (!xa_is_node(entry)) {
* ms->node will point to the node containing the entry.
*
*
- * */
+ */
static void *_maple_walk(struct maple_state *ms)
{
void *entry = mas_start(ms);
if (allocated < count) {
int diff = count - allocated;
//ma_set_alloc_cnt(ms, needed);
- maple_new_node(ms, diff, GFP_KERNEL | GFP_NOWAIT |
- __GFP_NOWARN | __GFP_ZERO);
+ /*
+  * Nonblocking attempt while the lock is held; on failure
+  * maple_new_node() presumably records -ENOMEM on the state (see
+  * the list_failed path) and __maple_nomem() retries with a
+  * blocking allocation — confirm against maple_new_node().
+  */
+ maple_new_node(ms, diff, GFP_NOWAIT | __GFP_NOWARN);
}
return ms->alloc;
}
* Private
* Handle if root is just a single pointer
*/
-static int _maple_root_expand(struct maple_state *ms)
+static void _maple_root_expand(struct maple_state *ms)
{
void *r_entry = ms->tree->root; // root entry
struct maple_node *mn;
maple_state_node(ms, 1);
- if (!ms->alloc)
- return -ENOMEM;
+ /* Allocation failure is now recorded on the state; the caller
+  * checks mas_is_err() instead of a return code. */
+ if (mas_is_err(ms))
+ return;
mn = ma_next_alloc(ms);
ms->node = mn;
/* swap the new root into the tree */
rcu_assign_pointer(ms->tree->root, ma_mk_node(mn));
- return 0;
}
+
/*
* Private
*
* Split a node and set ms->node to the correct one for an insert for
* ms->index to ms->end.
*/
-static int _maple_node_split(struct maple_state *ms)
+static void _maple_node_split(struct maple_state *ms)
{
struct maple_node *lmn, *rmn;
/* Assume node64, as node4 cannot be split. */
/* Allocate new mn64 and split */
maple_state_node(ms, alloc_cnt);
+ /* On allocation failure the error stays on the state for the
+  * caller to handle; the split is abandoned without touching the
+  * tree. */
- if (!ms->alloc)
- return -ENOMEM;
+ if (mas_is_err(ms))
+ return;
lmn = ma_next_alloc(ms);
rmn = ma_next_alloc(ms);
/* copy 1/2 data from the end to the new node. */
maple_split_data_64(ms, lmn, rmn,offset);
maple_link_node(ms, lmn, rmn);
- return 0;
}
static inline int _maple_insert_4(struct maple_state *ms, void *entry)
*/
static inline int _maple_insert(struct maple_state *ms, void *entry)
{
+ /* If an earlier step flagged an error, skip the insert entirely;
+  * returning 0 lets the caller fall through to the
+  * __maple_nomem()/mas_is_err() handling, where the encoded errno
+  * (not this return value) decides the outcome. */
+ if (mas_is_err(ms))
+ return 0;
if (!xa_is_node(ms->tree->root)) {
BUG_ON(ms->end > 0);
if (ms->tree->root == NULL) {
void *entry = mas_start(ms);
if (!xa_is_node(entry)) {
- int err;
-
if (ms->end == 0)
return entry;
- err = _maple_root_expand(ms);
- if (err == -ENOMEM)
- return MAS_START;
-
+ /* On expansion failure the error stays on the state; NULL is
+  * returned either way and the caller distinguishes success from
+  * failure via mas_is_err(). */
+ _maple_root_expand(ms);
return NULL;
}
continue;
}
if (_maple_node_is_full(ms)) {
- int ret = _maple_node_split(ms);
- /*
- * If the allocation fails, we need to drop the lock
- * and restart the walk from root.
- */
- if (ret == -ENOMEM)
- return MAS_START;
+ _maple_node_split(ms);
+ /* Split failure: abandon the walk here; the -ENOMEM recorded on
+  * the state drives the drop-lock-and-retry in the caller. */
+ if (mas_is_err(ms))
+ return NULL;
}
entry = _maple_walk_64(ms, ms->index);
spin_lock(&ms.tree->lock);
retry:
walked = _maple_setup_insert(&ms);
- if (walked == MAS_START)
- goto retry;
if (walked != NULL)
goto already_exists;
ret = _maple_insert(&ms, entry);
+ /* Safe to call unconditionally: __maple_nomem() is a no-op unless
+  * the state holds MA_ERROR(-ENOMEM).  It presumably returns true
+  * when a blocking allocation made the retry viable — confirm.
+  * NOTE(review): __maple_nomem() drops the lock for blocking
+  * allocations; confirm it re-acquires it before returning true,
+  * since the retry path assumes the lock is held. */
- if (ret == -ENOMEM) {
- if (__maple_nomem(&ms, gfp))
- goto retry;
- }
+ if (__maple_nomem(&ms, gfp))
+ goto retry;
already_exists:
spin_unlock(&ms.tree->lock);
+ /* Decode any errno still recorded on the state (e.g. -ENOMEM when
+  * the blocking retry also failed) back into a plain return code. */
+ if (mas_is_err(&ms))
+ return xa_err(ms.node);
return ret;
}