-
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/gfp.h>
return rcu_dereference(mn4->slot[ms->slot_idx]);
}
-static void *_maple_walk_64(struct maple_state *ms)
+static void *_maple_walk_64(struct maple_state *ms, unsigned long val)
{
struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
int i = 0;
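+ /* Each pivot[i] is one past the last index covered by slot[i], so
+ * slot[i] is the match once val <= pivot[i] - 1. Note that an unset
+ * pivot of 0 wraps to the maximum value here, so the scan also stops
+ * at the end of data.
+ */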
do {
if (i >= MAPLE_NODE64_MAX_PIVOT)
break; // Right-most child.
- if (ms->index <= mn64->pivot[i] - 1)
+ if (val <= mn64->pivot[i] - 1)
break;
} while (i++ < MAPLE_NODE64_MAX_SLOT - 1);
- pr_debug("%s: i = %d\n", __func__, i);
-
ms->slot_idx = i;
-
return rcu_dereference(mn64->slot[i]);
}
entry = _maple_walk_4(ms);
break;
}
- entry = _maple_walk_64(ms);
+ entry = _maple_walk_64(ms, ms->index);
} while (xa_is_internal(entry));
{
struct maple_node_4 *mn4 = &(_maple_to_node(ms->node)->map4);
- pr_debug("%s: %lu\n", __func__, ms->index);
ms->slot_idx = ms->index - ms->min;
rcu_assign_pointer(mn4->slot[ms->slot_idx], entry);
return 0;
return p_end;
}
-static inline int _maple_data_shift_64(struct maple_state *ms, int p_end)
-{
-#if 0
- struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
- int p_here = ms->slot_idx; /* Location to start new data */
-#endif
- int shift = 0;
- return shift;
-}
-
static inline int _maple_insert_64(struct maple_state *ms, void *entry)
{
struct maple_node_64 *mn64 = &(_maple_to_node(ms->node)->map64);
- unsigned long value = ms->index + 1;
int p_here = ms->slot_idx; /* Location to place data */
int p_end; /* End of data */
int shift = 0;
int idx = 0;
- bool null_start = false;
- bool range_end = false;
/* Check if this will be the last entry */
- if (p_here == MAPLE_NODE64_MAX_SLOT - 1) {
+ if (p_here != MAPLE_NODE64_MAX_SLOT - 1) {
/* Find the end of pivots in this node */
p_end = _maple_data_end_64(mn64); // Points to nil on non-full nodes.
- /* Find how far to move the existing data */
- if (ms->index != ms->end) {
- // May need a spot for the end of the range.
- int p_end = p_here + 1;
- if ((p_end < MAPLE_NODE64_MAX_PIVOT) &&
- (mn64->pivot[p_end] != ms->end)) {
- range_end = true;
- shift++;
- }
- }
/* Things to check for the null start:
*
* 1. Not using the parent pivot to get to slot 0.
* 2. The previous pivot isn't sequential with our value.
*/
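+ /* Example, with hypothetical values: inserting index 5 when
+ * pivot[p_here - 1] == 3 leaves indexes 3-4 uncovered, so a NULL
+ * entry with pivot 5 must be shifted in ahead of the new entry.
+ */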
- if ((p_here == 0 && value > ms->min)) {
- null_start = true;
- shift++;
- } else if (p_here > 0 && mn64->pivot[p_here-1] != value - 1) {
- null_start = true;
+ if ((p_here == 0 && ms->index + 1 > ms->min) ||
+ (p_here > 0 && mn64->pivot[p_here-1] != ms->index)) {
shift++;
}
- pr_debug("%s: shift = %d p_here %d\n", __func__, shift, p_here);
// Sanity check.
BUG_ON(p_end + shift >= MAPLE_NODE64_MAX_SLOT);
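+ /* Make room: move the tail of the node up by 'shift' slots. */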
- for (idx = p_end; idx >= p_here+shift; idx--)
- {
+ for (idx = p_end; idx >= p_here + shift; idx--) {
mn64->pivot[idx + shift] = mn64->pivot[idx];
- rcu_assign_pointer(mn64->slot[idx + shift], mn64->slot[idx]);
- pr_debug("%s: p[%i](%lu) => %p\n", __func__,
- idx+shift, mn64->pivot[idx+shift], mn64->slot[idx+shift]);
+ rcu_assign_pointer(mn64->slot[idx + shift],
+ mn64->slot[idx]);
}
}
/* We now have made space at p_here to p_here + shift for the new
* data, which needs to be:
- * (maybe) p_here: index - 1 => NULL
- * (always) p_here + 1: index + 1 => entry
- * (maybe) p_here + 2: end+1 => NULL
+ * (maybe) p_here: index => NULL
+ * (always) p_here + shift: end + 1 => entry
*/
idx = p_here + shift;
- if (range_end) {
- mn64->pivot[idx] = ms->end + 1;
- rcu_assign_pointer(mn64->slot[idx], entry);
- pr_debug("%s %d: p[%i](%lu) => %p\n", __func__, __LINE__,
- idx, mn64->pivot[idx], mn64->slot[idx]);
- idx--;
- }
-
- mn64->pivot[idx] = value;
+ mn64->pivot[idx] = ms->end + 1;
rcu_assign_pointer(mn64->slot[idx], entry);
- pr_debug("%s %d: p[%i](%lu) => %p\n", __func__, __LINE__,
- idx, mn64->pivot[idx], mn64->slot[idx]);
- if (null_start == true) {
+ if (shift > 0) {
idx--;
- value--; // Decrement the index
- mn64->pivot[idx] = value;
+ mn64->pivot[idx] = ms->index;
rcu_assign_pointer(mn64->slot[idx], NULL);
- pr_debug("%s %d: p[%i](%lu) => %p\n", __func__, __LINE__,
- idx, mn64->pivot[idx], mn64->slot[idx]);
}
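+ /* FIXME: Does inserting into the middle of an existing NULL range
+ * preserve the trailing NULL entries?
+ */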
return 0;
*/
int _maple_insert(struct maple_state *ms, void *entry)
{
- pr_debug("%s: %lu => %p\n", __func__, ms->index, entry);
- pr_debug("%s: %lu %lu\n", __func__, ms->min, ms->max);
-
- if(_maple_is_node_4(ms))
+ if (_maple_is_node_4(ms))
return _maple_insert_4(ms, entry);
*
* FIXME: This will split full nodes even if the ms->index is already set,
* which may not be what we want.
+ * FIXME: What happens if start/end span nodes?
*
*/
static void *_maple_insert_walk(struct maple_state *ms)
continue;
}
- entry = _maple_walk_64(ms);
+ entry = _maple_walk_64(ms, ms->index);
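+ /* For a range, walk to ms->end as well; if it lands in a
+ * different slot or node, the range spans existing entries,
+ * so hand the node back to the caller.
+ */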
+ if ((ms->end != ms->index) && !xa_is_internal(entry)) {
+ int s_idx = ms->slot_idx;
+ struct maple_node *mn = ms->node;
+ entry = _maple_walk_64(ms, ms->end);
+ if (s_idx != ms->slot_idx || mn != ms->node)
+ return mn;
+ }
+
- } while(xa_is_internal(entry));
+ } while (xa_is_internal(entry));
return entry;
*/
int mtree_insert_range(struct maple_tree *mt, unsigned long start,
unsigned long end, void *entry)
-{
- return 0;
-}
-
-/* Store an entry at an index.
- * Does not overwrite, returns -EEXISTS if the entry exists already
- */
-int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry)
{
int ret = -EEXIST;
void *walked = NULL;
- MAP_STATE(ms, mt, index, index);
+ MAP_STATE(ms, mt, start, end);
if (WARN_ON_ONCE(xa_is_internal(entry)))
return -EINVAL;
+ if (start > end)
+ return -EINVAL;
+
mt->flags |= MAP_STATE_SPLIT_ON_FULL;
spin_lock(&ms.tree->lock);
return ret;
}
+
+/* Store an entry at an index.
+ * Does not overwrite; returns -EEXIST if the entry already exists.
+ */
+int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry)
+{
+ return mtree_insert_range(mt, index, index, entry);
+}
EXPORT_SYMBOL(mtree_insert);
/* Load an entry at an index.
void *ptr)
{
return mtree_insert(mt, index, ptr);
-
+}
+static int mtree_test_insert_range(struct maple_tree *mt, unsigned long start,
+ unsigned long end, void *ptr)
+{
+ return mtree_insert_range(mt, start, end, ptr);
}
static void *mtree_test_load(struct maple_tree *mt, unsigned long index)
}
+static noinline void check_insert_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr)
+{
+ int ret = -EINVAL;
+ ret = mtree_test_insert_range(mt, start, end, ptr);
+ MT_BUG_ON(mt, ret != 0);
+}
+
static noinline void check_insert(struct maple_tree *mt, unsigned long index,
void *ptr)
{
static int maple_tree_seed(void)
{
unsigned long set[] = {15, 14, 17, 25, 1000, 1001,1002,1003,1005};
+ unsigned long r[] = {10, 15, 20, 25, 22}; // For range testing
void *ptr = &set;
+
mtree_init(&tree);
+ /* The first set of tests inserts a value, inserts a duplicate, and
+ * loads back what was inserted.
+ */
check_insert(&tree, set[0], &tree); // Insert 15
- mt_dump(&tree);
check_dup_insert(&tree, set[0], &tree); // Insert 15 again
- mt_dump(&tree);
check_load(&tree, set[0], &tree); // See if 15 -> &tree
- mt_dump(&tree);
+ /* The second set of tests loads a value that doesn't exist, inserts
+ * a second value, then loads that value back.
+ */
check_load(&tree, set[1], NULL); // See if 14 -> NULL
- mt_dump(&tree);
check_insert(&tree, set[1], ptr); // insert 14 -> ptr
- mt_dump(&tree);
check_load(&tree, set[1], ptr); // See if 14 -> ptr
/* Tree currently contains:
* p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil)
*/
- mt_dump(&tree);
check_insert(&tree, set[6], ptr); // insert 1002 -> ptr
- mt_dump(&tree);
check_insert(&tree, set[7], &tree); // insert 1003 -> &tree
- mt_dump(&tree);
+ /* Clear out the tree */
mtree_destroy(&tree);
- mtree_init(&tree);
- mt_dump(&tree);
+ mtree_init(&tree);
+ /* Test inserting into a NULL hole. */
check_insert(&tree, set[5], ptr); // insert 1001 -> ptr
- mt_dump(&tree);
check_insert(&tree, set[7], &tree); // insert 1003 -> &tree
- mt_dump(&tree);
check_insert(&tree, set[6], ptr); // insert 1002 -> ptr
- mt_dump(&tree);
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+
+ mtree_init(&tree);
+
+ check_insert_range(&tree, r[0], r[1], ptr); // Insert 10-15 => ptr
+ check_insert_range(&tree, r[2], r[3], ptr); // Insert 20-25 => ptr
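+ /* The tree should now hold 10-15 => ptr and 20-25 => ptr, with
+ * NULL ranges filling the gaps around them.
+ */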
+
+ /* Clear out the tree */
mtree_destroy(&tree);
printk("maple_tree: %u of %u tests passed\n", tests_passed, tests_run);