-#include <linux/maple_tree.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/gfp.h>
-
-
-#if 0
-/* Private */
-static inline bool _is_root(struct map_node *mn) {
- return (mn->map64.parent == NULL);
-}
-/* Private */
-static inline void _orphan_this_node(struct map_node *mn) {
- mn->map64.parent = mn;
-}
-
-/* Private */
-static inline bool _is_node_orphan(struct map_node *mn) {
- return mn->map64.parent == mn;
-}
-
-static inline bool _is_map_node_4(struct map_state *map) {
- /* FIXME: This just means that our node could be contained within a
- * node_4. This could happen when shrinking a node_64, for example.
- */
- if ((map->max - map->min) <= MAPLE_NODE4_MAX_SLOT)
- return true;
- return false;
-}
-/* Private */
-static inline bool __is_internal_entry(const void *entry) {
- return ((uint64_t)entry & 3) == 2;
-}
-
-/* Private */
-static inline bool _is_node(const void *entry) {
- if (__is_internal_entry(entry))
- return true;
- return false;
-
-}
-static inline void _set_retry(void *entry, struct map_state *map) {
- entry = (void*)rcu_dereference(map->tree->root);
-}
-
-/* Private */
-static inline bool _is_retry(const void *entry, const struct map_state *map) {
- if (entry == map->tree->root)
- return true;
- return false;
-}
-
-
-static inline int _count_node_64(struct map_state *map) {
- int i;
- for (i = 0; i < MAPLE_NODE64_MAX_PIVOT; i++) {
- if (map->node->map64.pivot[i] == 0)
- break;
- }
- /*FIXME: Is this right? */
- if (i == MAPLE_NODE64_MAX_PIVOT - 1 &&
- map->node->map64.slot[i] != NULL)
- i++;
- return i;
-}
-
-/* Private
- *
- * Check if map->start to map->end will fit in existing nodes and if those
- * nodes should be combined, then combine them.
- * */
-static inline bool _check_range(struct map_state *map) {
- unsigned long i;
- bool ret = false;
-
- if (_is_map_node_4(map))
- {
- struct map_node4 *node = &map->node->map4;
- unsigned long rstart = map->index - map->min;
- unsigned long rend = map->end - map->min;
- for (i = rstart; i < rend; i++) {
- if (node->slot[i] != NULL)
- return false; /* This range is taken. */
- if (map->max == i) /* At the end of this node4 */
- node = _next_node(map);
- }
- return true;
- } else {
-
- /* The space required for this range. */
- //unsigned long space = map_state_min_space(map) + map_state_needs_term(map);
-
- /* FIXME: Complete this range fit check later */
-
- ret = false;
-
- }
- return ret;
-}
-
-
-int map_delete_node(struct map_node *mn) {
- /* TODO: Figure out RCU delete.. */
-
- return 0;
-}
-
-
-
-/* Private */
-inline void *_map_walk_4(struct map_state *map)
-{
- /* This should never iterate. */
- struct map_node_4 *mn4 = &(map->node->map4);
- unsigned long hole;
-
- map->slot_idx = map->index - map->min;
-
- if (map_state_should_split(map) == false)
- /* The location is implied */
- return rcu_dereference(mn4->slot[map->slot_idx]);
-
- /* We want to split, which means we are using walk to setup map->node
- * for a create or rebalance, so we need to ensure this node is not
- * full or does not need to be collapsed.
- */
-
- /* FIXME: Not complete for create */
- return NULL;
-}
-
-inline void *_map_walk_64(struct map_state *map)
-{
- /* map->node now contains the node of interest. */
- struct map_node_64 *mn64 = &(map->node->map64);
- void *ptr = NULL;
- int i = 0;
-
- for (i = 0; i < MAPLE_NODE64_MAX_PIVOT; i++) {
- /* FIXME: check if this is root if we don't use
- * min/max in the root tree */
- if (i == MAPLE_NODE64_MAX_PIVOT - 1)
- i++; // Right-most child.
- else if (map->index > (mn64->pivot[i] - 1))
- continue; // Keep searching.
- else
- map->max = mn64->pivot[i]; // Update the state max.
-
- ptr = rcu_dereference(mn64->slot[i]);
- /* Set the map slot index and max */
- map->slot_idx = i;
- break;
- }
-
- /* Update the state min */
- if (i > 0)
- map->min = mn64->pivot[i-1];
-
- if (map_state_should_split(map) == false)
- return ptr;
-
-
- /* FIXME: Not complete for create */
- return ptr;
-}
-
-/* Walk the tree to find the pointer value for map->index */
-static void *__map_walk(struct map_state *map)
-{
- void *entry;
-
- entry = map->node;
- do {
- map->node = (struct map_node*)entry;
-
- /* Start of critical read section */
-
- if (_is_map_node_4(map))
- entry = _map_walk_4(map);
- else
- entry = _map_walk_64(map);
-
-
- /* Dead node detect */
- if (_is_node_orphan(map->node) == true) {
- _set_retry(entry, map);
- goto retry_set;
- }
-
- /* End of critical read section */
-
- } while (_is_node(entry));
-
-retry_set:
-
- return entry;
-}
-
-/* Private
- *
- *
- * return a pointer value for map->index, with retries.
- * Possible returns are NULL (dead tree, no entry) or the pointer.
- *
- * */
-static void *_map_walk(struct map_state *map) {
- void *entry;
-
- do {
- /* Tree has been removed, abort search */
- if (map->tree->root == NULL)
- return NULL;
-
- /* Start the search */
- entry = __map_walk(map);
- } while (map->node == rcu_dereference(map->tree->root));
-
- return entry;
-}
-
-/**
- * *map_locate() - Find a pointer associated with a value.
- * @val - The value of interest
- * @mn - the maple tree node (usually root)
- *
- * Returns: the pointer for the range in which val resides or NULL.
- */
-void *map_load(struct map_tree *tree, unsigned long index) {
- void *entry;
- MAP_STATE(map, tree, index, index);
-
- rcu_read_lock();
- map.node = rcu_dereference(tree->root);
- entry = _map_walk(&map);
- rcu_read_unlock();
-
- return entry;
-}
-
-/**
- * Private.
- *
- * Split map->node into two separate nodes and set map->node to the newly
- * created node for map->index to be inserted
- */
-static inline void _node_split(struct map_state *map)
-{
-
- /* If this is a node 4, then we have 16 entries to deal with.
- * Create 1x node 4 with the upper data from map->node
- * Alter the parent to deal with the median of the 2 node 4s.
- * If the parent is full, split again.. all the way up, or create a
- * new level (Create 1x node 64 pointing to the node 4s)
- * FIXME: Somehow mark the data in the old node as dead?
- *
- */
-
-}
-
-static inline void _update_max(struct map_state *map)
-{
- /* FIXME: to be determined */
-}
-
-
-static void _copy_over(struct map_state *map) {
- struct map_node_64 *mn = &(map->node->map64);
- unsigned long p_here, p_end; /* Pivots */
- unsigned long hole, shift;
- p_here = map->slot_idx;
-
- for (p_end = 0; p_end < MAPLE_NODE64_MAX_PIVOT; p_end++) {
- if (map->node->map64.pivot[p_end] == 0)
- break;
- }
-
-
- hole = map_state_needs_term(map);
- shift = p_end - p_here + hole;
-
- /* Add new termination point, a special case as the pivot may not
- * exist.
- */
- if (p_end < MAPLE_NODE64_MAX_PIVOT - 1)
- mn->pivot[p_end + 1] = 0;
- mn->slot[p_end + 1] = NULL;
-
- /* Copy over everything. */
- while (shift > hole) {
- mn->pivot[p_here + shift] = mn->pivot[p_here + shift - 1];
- rcu_assign_pointer(mn->slot[p_here + shift],
- mn->slot[p_here + shift - 1]);
- shift--;
- };
-
- /* If we need a terminator on this "range", then the value at the end
- * of the hole needs to be NULL
- */
- if (hole > 0) {
- rcu_assign_pointer(mn->slot[p_here + hole], NULL);
- mn->pivot[p_here + hole] = map->index + 1;
- }
-}
-/**
- * Private.
- * Insert ptr into map->node at map->index
- *
- **/
-static inline void _pair_create(struct map_state *map, void *ptr)
-{
- struct map_node_64 *mn = &(map->node->map64);
- unsigned long p_here = map->slot_idx;
- /* map->node points to a node 64 with a `hole` or a node 4 with a
- * hole.
- *
- * The node 4 is easy, as slot_idx has the location of the
- * hole.
- */
-
- /* FIXME: Update this function.
- * We need to handle map->index to map->end insert, including spanning
- * two nodes
- */
- if (_is_map_node_4(map)) {
- rcu_assign_pointer(map->node->map4.slot[map->slot_idx], ptr);
- /* FIXME: This probably cannot happen.. */
- if (map->max < map->index)
- _update_max(map);
- return;
- }
-
- /* TODO: Figure out if this can actually happen */
- if (p_here == MAPLE_NODE64M_MAX_PIVOT) {
- /* This node is not full (find ensured it was split)
- * and the slot index is the last slot, so insert the value.
- * This replaces the null, but somehow we ended up at this
- * node from the parents max, which may not be possible.
- */
- rcu_assign_pointer(mn->slot[map->slot_idx], ptr);
- return;
- }
-
-
- /* The C in RCU */
- _copy_over(map);
-
- /* Update the end.
- * Care must be taken to overwrite the previous end to avoid sending
- * readers off into the abyss (return incorrect data)
- */
- rcu_assign_pointer(mn->slot[p_here], NULL);
- mn->pivot[p_here] = map->index;
- /* Release the hounds! */
- rcu_assign_pointer(mn->slot[p_here], ptr);
-}
-/* Private */
-static int _map_create(struct map_state *map, void *ptr)
-{
-
- void *entry;
- int ret = -EINTR;
-
- /* Find the node to alter, or split and create a new one */
-
-
- spin_lock(&map->tree->lock);
-
- map->flags |= MAP_STATE_SPLIT_ON_FULL;
- entry = _map_walk(map); /* Find the node by use of map_find. */
-
- if (map->tree->root == NULL)
- goto dead_tree;
-
- ret = -EEXIST;
- if (entry != NULL)
- goto already_exists;
-
- _pair_create(map, ptr);
-
- /* do complex insert */
-
-dead_tree:
-already_exists:
- spin_unlock(&map->tree->lock);
-
- return ret; /* Success */
-}
-/**
- * map_create_range() - Insert a range into a maple tree.
- * @start: The start of the range
- * @end: The end of the range
- * @ptr: The value in which the range will point.
- * @tree: The maple tree to insert the range.
- *
- * Context: Any context.
- * Returns: -EEXISTS if the range overlaps any defined values, 0 on success.
- */
-int map_create_range(uint64_t start, uint64_t end, void *ptr,
- struct map_tree *tree) {
-
- MAP_STATE(map, tree, start, end);
- return _map_create(&map, ptr);
-}
-
-
-
-/**
- * map_create() - Insert a single key -> value into a maple tree.
- *
- * @index: The key value to insert into the tree.
- * @ptr: The value in which the key will point.
- * @tree: The maple tree to insert the range.
- *
- * Context: Any context.
- * Returns: -EEXISTS if the index is already defined, 0 on success.
- */
-int map_create(uint64_t index, void *ptr, struct map_tree *tree) {
- MAP_STATE(map, tree, index, 0);
- return _map_create(&map, ptr);
-}
-
-
-#endif
-
+#include <linux/maple_tree.h>
/* Things we will probably eventually need */
}
#endif
+/* Testing code */
+
+static inline struct maple_node *_maple_to_node(const void *entry);
+/* Dump a node 64 and, recursively, any internal entries it holds. */
+void mn64_dump(const struct maple_node_64 *mn64)
+{
+ int i = 0;
+ void *entry;
+ do {
+ /* pivot is unsigned long; %lu (was %ld). */
+ pr_info("p[%d]: %lu -> %p ", i, mn64->pivot[i], mn64->slot[i]);
+ if (mn64->pivot[i] == 0)
+ break;
+ entry = mn64->slot[i];
+ if (xa_is_internal(entry)) {
+ pr_info("\n\n");
+ mn64_dump(&(_maple_to_node(entry)->map64));
+ }
+
+ } while (i++ < MAPLE_NODE64_MAX_SLOT - 1);
+ /* The right-most slot has no pivot of its own; print the slot that
+ * was tested (printing slot[i+1] could index past the array).
+ */
+ if (mn64->slot[MAPLE_NODE64_MAX_SLOT - 1] != NULL)
+ pr_info("p[%d]: R -> %p", MAPLE_NODE64_MAX_SLOT - 1,
+ mn64->slot[MAPLE_NODE64_MAX_SLOT - 1]);
+ pr_info("\n");
+}
+/* Dump an entire tree, starting at the root. */
+void mt_dump(const struct maple_tree *mt)
+{
+ pr_info("maple_tree(%p) flags %X, root %p, max %lX, min %lX\n",
+ mt, mt->flags, mt->root, mt->max, mt->min);
+ /* An empty (or destroyed) tree has no root node to walk. */
+ if (mt->root != NULL)
+ mn64_dump(&(_maple_to_node(mt->root)->map64));
+}
+
/* Actual implementation. */
/* Private */
+/* The root is the only node the tree points at directly. */
+static inline bool _is_root(struct maple_state *ms)
+{
+ return ms->node == ms->tree->root;
+}
static inline struct maple_node *_maple_to_node(const void *entry)
{
+ /* Internal entries are tagged in their low bits; strip the tag to
+ * recover the struct maple_node pointer (inverse of xa_mk_internal()).
+ */
- return (struct maple_node *)((unsigned long)entry - 2);
+ return (struct maple_node *)(xa_to_internal(entry));
}
static inline bool _maple_is_node_4(struct maple_state *ms) {
/* FIXME: This just means that our node could be contained within a
* node_4. This could happen when shrinking a node_64, for example.
*/
+ /* NOTE(review): relies on ms->min/ms->max being kept current by the
+ * walk (_maple_update_limits) -- verify the root-node case.
+ */
- if ((ms->max - ms->min) <= MAPLE_NODE4_MAX_SLOT)
+ if ((ms->max - ms->min) <= (unsigned long)MAPLE_NODE4_MAX_SLOT)
return true;
return false;
}
struct maple_node *mn;
size_t size = sizeof(struct maple_node);
mn = kmalloc(size, gfp);
- if (mn != NULL)
- xa_mk_internal((unsigned long)mn);
+ if (mn == NULL)
+ goto kmalloc_failed;
+
+ mn->map64.parent = NULL;
+ mn = xa_mk_internal((unsigned long)mn);
+
+kmalloc_failed:
return mn;
}
+/* Private
+ *
+ * Free a node. Callers must pass the plain struct maple_node pointer
+ * (already decoded via _maple_to_node()), never the xa-tagged internal
+ * entry stored in a slot.
+ */
+static void _maple_free_node(struct maple_node *mn)
+{
+ kfree(mn);
+}
+
static void *_maple_walk_4(struct maple_state *ms)
{
+ /* Leaf lookup: a node 4 maps (index - min) directly to a slot. */
- struct maple_node_4 *mn4 = &(ms->node->map4);
+ struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
ms->slot_idx = ms->index - ms->min;
return rcu_dereference(mn4->slot[ms->slot_idx]);
}
+
static void *_maple_walk_64(struct maple_state *ms)
{
struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
do {
if (i >= MAPLE_NODE64_MAX_PIVOT)
break; // Right-most child.
- if (ms->index > (mn64->pivot[i] - 1))
- continue;
+ if (ms->index <= mn64->pivot[i] - 1)
+ break;
} while (i++ < MAPLE_NODE64_MAX_SLOT - 1);
-
- if (i > 0)
- ms->min = mn64->pivot[i - 1];
-
- if (i <= MAPLE_NODE64_MAX_SLOT - 1)
- ms->max = mn64->pivot[i];
+ pr_debug("%s: i = %d\n", __func__, i);
ms->slot_idx = i;
return rcu_dereference(mn64->slot[i]);
}
+/* Private
+ *
+ * Refresh ms->min/ms->max from the pivots around the slot the walk
+ * just came through. The root carries the tree-wide limits, so it is
+ * left untouched.
+ */
+static void _maple_update_limits(struct maple_state *ms)
+{
+ struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+ int i = ms->slot_idx;
+
+ /* Use the helper instead of open-coding the root test. */
+ if (_is_root(ms))
+ return;
+
+ if (i > 0)
+ ms->min = mn64->pivot[i - 1];
+
+ if (i < MAPLE_NODE64_MAX_SLOT - 1)
+ ms->max = mn64->pivot[i] - 1;
+}
/*
* Private
*
void *entry = NULL;
/* Outside this nodes range, it doesn't exist. */
- if (ms->min > ms->index ||
- ms->max < ms->index)
+ if (ms->min >= ms->index ||
+ ms->max <= ms->index)
return entry;
entry = ms->node;
do {
- entry = _maple_to_node(entry);
- if (_maple_is_node_4(ms))
+ _maple_update_limits(ms);
+ ms->node = entry;
+ if (_maple_is_node_4(ms)) {
entry = _maple_walk_4(ms);
- else
- entry = _maple_walk_64(ms);
+ break;
+ }
+ entry = _maple_walk_64(ms);
} while (xa_is_internal(entry));
i++;
return i;
}
+
+/* Private
+ *
+ * Check if ms->node has enough room for ms->index to ms->end
+ *
+ * FIXME: stub -- always reports free space until the capacity
+ * calculation is implemented, so no node is ever split.
+ */
+static inline bool _maple_node_is_full(struct maple_state *ms)
+{
+ return false;
+}
/* Private
*
* Split a node and set ms->node to the correct one for an insert for
{
/* Assume node64, as node4 cannot be split. */
- struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
+ //struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
struct maple_node_64 *alt_mn64;
- struct maple_node *ald_mn;
+ struct maple_node *alt_mn;
/* If there is room in the previous or next node
/* Allocate new mn64 and split */
if (ms->alloc != NULL) {
- alt_mn64 = ms->alloc->map64;
+ alt_mn64 = &(ms->alloc->map64);
ms->alloc = NULL;
}
else {
- _maple_new_node(alt_mn, GFP_NOWAIT | __GFP_NOWARN);
+ alt_mn = _maple_new_node(GFP_NOWAIT | __GFP_NOWARN);
if (alt_mn == NULL)
return ms->tree;
}
return alt_mn64;
}
-static *_maple_insert_walk(struct maple_state *ms)
+
+/* Store entry in the node 4 slot covering ms->index. */
+static inline int _maple_insert_4(struct maple_state *ms, void *entry)
{
+ struct maple_node *node = _maple_to_node(ms->node);
+
+ pr_debug("%s: %lu\n", __func__, ms->index);
+ ms->slot_idx = ms->index - ms->min;
+ rcu_assign_pointer(node->map4.slot[ms->slot_idx], entry);
+ return 0;
+}
- do {
- if(_maple_is_node_4(ms)) {
- /* end of the line.. */
+/* Private
+ *
+ * Return the index of the first unused pivot (the end of the data).
+ *
+ * Note: If the final slot is in use, this will return
+ * MAPLE_NODE64_MAX_PIVOT (the right-most slot has no pivot of its
+ * own). NOTE(review): the previous comment said MAX_PIVOT - 1, which
+ * does not match the p_end++ below -- confirm what callers expect.
+ */
+static inline int _maple_data_end_64(struct maple_node_64 *mn64)
+{
+ int p_end;
+ for (p_end = 0; p_end < MAPLE_NODE64_MAX_PIVOT; p_end++) {
+ if (mn64->pivot[p_end] == 0)
break;
- }
- if (_maple_node_is_full(ms)) {
- _maple_node_split(ms);
- continue;
- }
-
-
- /* max/min set in here */
- /* ms->node set in here */
-
-
- } while(1);
-
-
- /* Found the correct node with the correct space needed. */
- if (_maple_is_node_4(ms))
- return _maple_insert_4(ms);
- return _maple_insert_64(ms);
+ }
+ if ( (p_end == MAPLE_NODE64_MAX_PIVOT - 1) &&
+ (mn64->slot[p_end + 1] != NULL) )
+ p_end++;
+ return p_end;
}
-static void _maple_insert_64(struct map_state *ms)
+/* Private
+ *
+ * Compute how far the existing data in ms->node must move to make
+ * room for an insert at ms->slot_idx.
+ *
+ * FIXME: stub -- always returns 0 (no shift). The unused locals that
+ * sketched the implementation were dropped to keep the build free of
+ * -Wunused-variable warnings.
+ */
+static inline int _maple_data_shift_64(struct maple_state *ms, int p_end)
+{
+ return 0;
+}
+/* Private
+ *
+ * Insert entry for ms->index (through ms->end for a range) into the
+ * node 64 at ms->node. Space is opened by copying from the tail so a
+ * reader always sees consistent data; the new slot is published last.
+ */
+static inline int _maple_insert_64(struct maple_state *ms, void *entry)
{
-#if 0
struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
- unsigned long p_here, p_end; /* Pivots */
- unsigned long hole, shift;
+ unsigned long value = ms->index + 1;
+ int p_here = ms->slot_idx; /* Location to place data */
+ int p_end; /* End of data */
+ int shift = 0;
+ int idx = 0;
+ bool null_start = false;
+ bool range_end = false;
/* Check if this will be the last entry */
- if (ms->slot_idx == MAPLE_NODE64_MAX_SLOT - 1)
- {
- rcu_assign_pointer(mn64->slot[ms->slot_idx], ptr);
+ if (p_here == MAPLE_NODE64_MAX_SLOT - 1) {
+ rcu_assign_pointer(mn64->slot[p_here], entry);
return 0;
}
- /* Check if this will be the first entry */
- if (ms->slot_idx == 0)
- {
- return 0;
+ /* Find the end of pivots in this node */
+ p_end = _maple_data_end_64(mn64); // Points to nil on non-full nodes.
+
+ /* Find how far to move the existing data */
+ /* NOTE(review): the p_end declared in the block below shadows the
+ * data end computed above (-Wshadow); rename one of them.
+ */
+ if (ms->index != ms->end) {
+ // May need a spot for the end of the range.
+ int p_end = p_here + 1;
+ if ((p_end < MAPLE_NODE64_MAX_PIVOT) &&
+ (mn64->pivot[p_end] != ms->end)) {
+ range_end = true;
+ shift++;
+ }
}
- p_here = ms->slot_idx;
-
- for (p_end = 0; p_end < MAPLE_NODE64_MAX_PIVOT; p_end++) {
- if (ms->node->map64.pivot[p_end] == 0)
- break;
+ /* Things to check for the null start:
+ *
+ * 1. Not using the parent pivot to get to slot 0.
+ * 2. The previous pivot isn't sequential with our value.
+ */
+ if ((p_here == 0 && value > ms->min)) {
+ null_start = true;
+ shift++;
+ } else if (p_here > 0 && mn64->pivot[p_here-1] != value - 1) {
+ null_start = true;
+ shift++;
}
- hole =
- shift = p_end - p_here + hole;
- /* Add new termination point, a special case as the pivot may not
- * exist.
- */
- if (p_end < MAPLE_NODE64_MAX_PIVOT - 1)
- mn64->pivot[p_end + 1] = 0;
- mn64->slot[p_end + 1] = NULL;
-
- /* Copy over everything. */
- while (shift > hole) {
- mn64->pivot[p_here + shift] = mn64->pivot[p_here + shift - 1];
- rcu_assign_pointer(mn64->slot[p_here + shift],
- mn64->slot[p_here + shift - 1]);
- shift--;
- };
-
- /* If we need a terminator on this "range", then the value at the end
- * of the hole needs to be NULL
+ pr_debug("%s: shift = %d p_here %d\n", __func__, shift, p_here);
+ // Sanity check.
+ BUG_ON(p_end + shift >= MAPLE_NODE64_MAX_SLOT);
+
+ /* Shift everything over. */
+ if (shift > 0) {
+ /* Writing new values from the tail forward ensure that a valid entry
+ * was hit prior to a partial write.
+ */
+ for (idx = p_end; idx >= p_here+shift; idx--)
+ {
+ mn64->pivot[idx + shift] = mn64->pivot[idx];
+ rcu_assign_pointer(mn64->slot[idx + shift], mn64->slot[idx]);
+ pr_debug("%s: p[%i](%lu) => %p\n", __func__,
+ idx+shift, mn64->pivot[idx+shift], mn64->slot[idx+shift]);
+ }
+ }
+
+ /* We now have made space at p_here to p_here + shift for the new
+ * data, which needs to be:
+ * (maybe) p_here: index - 1 => NULL
+ * (always) p_here + 1: index + 1 => entry
+ * (maybe) p_here + 2: end+1 => NULL
*/
- if (hole > 0) {
- rcu_assign_pointer(mn64->slot[p_here + hole], NULL);
- mn64->pivot[p_here + hole] = ms->index + 1;
+
+ idx = p_here + shift;
+ if (range_end) {
+ mn64->pivot[idx] = ms->end + 1;
+ rcu_assign_pointer(mn64->slot[idx], entry);
+ pr_debug("%s %d: p[%i](%lu) => %p\n", __func__, __LINE__,
+ idx, mn64->pivot[idx], mn64->slot[idx]);
+ idx--;
}
-#endif
-
-}
-int _maple_insert_4(struct maple_state *ms, void *entry)
-{
- struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
- ms->slot_idx = ms->index - ms->min;
- rcu_assign_pointer(mn4->slot[ms->slot_idx], entry);
+
+ mn64->pivot[idx] = value;
+ rcu_assign_pointer(mn64->slot[idx], entry);
+ pr_debug("%s %d: p[%i](%lu) => %p\n", __func__, __LINE__,
+ idx, mn64->pivot[idx], mn64->slot[idx]);
+
+ if (null_start == true) {
+ idx--;
+ value--; // Decrement the index
+ mn64->pivot[idx] = value;
+ rcu_assign_pointer(mn64->slot[idx], NULL);
+ pr_debug("%s %d: p[%i](%lu) => %p\n", __func__, __LINE__,
+ idx, mn64->pivot[idx], mn64->slot[idx]);
+ }
+
return 0;
}
-
/* Private
*
- * Must hold the spin_lock.
+ * Must hold the spin lock.
*
* When this function is called, ms->node has space and is the correct
* location.
*/
int _maple_insert(struct maple_state *ms, void *entry)
{
+ pr_debug("%s: %lu => %p\n", __func__, ms->index, entry);
+ pr_debug("%s: %lu %lu\n", __func__, ms->min, ms->max);
+ /* Dispatch on node type; node 4 is the leaf case. */
if(_maple_is_node_4(ms))
return _maple_insert_4(ms, entry);
/* node 64 is all that's left */
- return _maple_insert_64(ms, mn64);
+ return _maple_insert_64(ms, entry);
+
+}
+
+/* Private
+ *
+ * Caller must hold the tree spin lock.
+ *
+ * Prepare the maple state for an insert: descend from ms->node,
+ * refreshing min/max at each level and splitting any full node 64
+ * before stepping into it. The descent ends at the node 4 level.
+ *
+ * Returns: The value already stored for ms->index, if one exists.
+ *
+ * FIXME: This will split full nodes even if the ms->index is already set,
+ * which may not be what we want.
+ */
+static void *_maple_insert_walk(struct maple_state *ms)
+{
+ void *entry = ms->node;
+
+ for (;;) {
+ ms->node = entry;
+ _maple_update_limits(ms);
+
+ if (_maple_is_node_4(ms)) {
+ /* Leaf level -- nothing deeper to walk. */
+ entry = _maple_walk_4(ms);
+ } else if (_maple_node_is_full(ms)) {
+ /* Make room, then retry this level. */
+ _maple_node_split(ms);
+ entry = ms->node;
+ } else {
+ entry = _maple_walk_64(ms);
+ }
+
+ if (!xa_is_internal(entry))
+ return entry;
+ }
+}
+
+/* Private
+ *
+ * Recursively free every node reachable from ms->node, then the node
+ * itself. Caller must hold the tree lock with no readers active.
+ *
+ * FIXME: ms->min/ms->max are not refreshed during the descent, so
+ * _maple_is_node_4() may misclassify deeper levels.
+ */
+static void _maple_destroy_walk(struct maple_state *ms)
+{
+ struct maple_node *mn = _maple_to_node(ms->node);
+ struct maple_node_64 *mn64 = &(mn->map64);
+ void *entry;
+ int i = 0;
+
+ do {
+ if(mn64->pivot[i] == 0)
+ break;
+
+ entry = mn64->slot[i];
+ if (entry == NULL)
+ continue;
+ if (xa_is_internal(entry))
+ {
+ if(_maple_is_node_4(ms)) {
+ /* Leaf child: free it directly. Freeing
+ * ms->node here would kfree() an xa-tagged
+ * pointer and double free this node below.
+ */
+ _maple_free_node(_maple_to_node(entry));
+ continue;
+ /* end of the line.. */
+ }
+ /* Set min/max */
+ ms->node = entry;
+ _maple_destroy_walk(ms);
+ }
+ } while (i++ < MAPLE_NODE64_MAX_SLOT - 1);
+ _maple_free_node(mn);
+}
/* Exposed API */
{
spin_lock_init(&mt->lock);
mt->flags = 0;
- mt->root = _maple_new_node(GFP_NOWAIT);
- /* FIXME: Check for failure. */
+ mt->root = _maple_new_node(GFP_KERNEL | GFP_NOWAIT | __GFP_ZERO);
mt->min = 0;
mt->max = ULONG_MAX;
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry)
{
int ret = -EEXIST;
+ void *walked = NULL;
MAP_STATE(ms, mt, index, index);
if (WARN_ON_ONCE(xa_is_internal(entry)))
mt->flags |= MAP_STATE_SPLIT_ON_FULL;
spin_lock(&ms.tree->lock);
- /* FIXME: Could roots max/min change? */
ms.node = rcu_dereference(mt->root);
- ret = _maple_insert_walk(&ms);
- if (ret != 0)
+ walked = _maple_insert_walk(&ms);
+ if (walked != NULL)
goto already_exists;
ret = _maple_insert(&ms, entry);
}
EXPORT_SYMBOL(mtree_erase);
+/* Destroy an entire tree and free all of its nodes.
+ *
+ * The root is read and cleared under the lock so a racing insert
+ * cannot walk freed nodes, and the tree is left safely empty.
+ */
int mtree_destroy(struct maple_tree *mt)
{
-
+ MAP_STATE(ms, mt, 0, 0);
+
+ spin_lock(&mt->lock);
+ if (mt->root == NULL)
+ goto noop;
+ ms.node = mt->root;
+ _maple_destroy_walk(&ms);
+ mt->root = NULL; /* Do not leave a dangling pointer to freed nodes. */
+noop:
+ spin_unlock(&mt->lock);
 	return 0;
}
EXPORT_SYMBOL(mtree_destroy);