return rcu_dereference(mn64->slot[i]);
}
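+/*
+ * Private
+ *
+ * Set the parent pointer of every child node found in the slots of @mn
+ * back to @mn.  Walking stops after the slot whose pivot is 0, which
+ * marks the end of the used entries.
+ */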
+static void maple_update_parent(struct maple_node *mn)
+{
+ struct maple_node_64 *mn64 = &mn->map64;
+ struct maple_node *child;
+ int i = 0;
+
+ do {
+ child = rcu_dereference(mn64->slot[i]);
+ if (xa_is_node(child))
+ ma_to_node(child)->parent = mn;
+
+ if (i < MAPLE_NODE64_MAX_PIVOT &&
+ mn64->pivot[i] == 0)
+ break; // end of nodes.
+
+ } while (i++ < MAPLE_NODE64_MAX_SLOT - 1);
+}
static void _maple_update_limits(struct maple_state *ms)
{
struct maple_node_64 *mn64;
struct maple_node_64 mn64 = ms->node->map64;
for (i = 0; i < MAPLE_NODE64_MAX_PIVOT; i++) {
- if (mn64.pivot[i] == 0)
+ if (mn64.pivot[i] == 0) {
+ if (mn64.slot[i] == NULL)
+ i--;
break;
+ }
}
- if (i == MAPLE_NODE64_MAX_PIVOT && mn64.slot[i] != NULL)
- i++;
-
- if (maple_is_root(ms, ms->node))
+ if (i == MAPLE_NODE64_MAX_PIVOT - 1 && mn64.slot[i] != NULL)
i++;
return i;
return true; // Single range is occupied.
if (_maple_is_node_4(ms))
return false; // Can't get here.
- if (_count_node_64(ms) == MAPLE_NODE64_MAX_SLOT)
+ if (_count_node_64(ms) >= MAPLE_NODE64_MAX_SLOT - 1)
return true; // Only full if all the slots are occupied.
return false;
}
if (r_entry != NULL) {
RCU_INIT_POINTER(mn->map64.slot[0], r_entry);
mn->map64.pivot[0] = 1;
- } else {
- mn->map64.pivot[0] = ms->index;
+ ms->slot_idx = 1;
}
- ms->slot_idx = 1;
-
/* swap the new root into the tree */
rcu_assign_pointer(ms->tree->root, ma_mk_node(mn));
}
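+/*
+ * Private
+ *
+ * Choose where to split a full node: scan the pivots starting at slot 3
+ * and return the first index whose pivot is at least 8 above the node's
+ * first pivot.
+ */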
+static int maple_calc_split(struct maple_state *ms)
+{
+ int i;
+ struct maple_node_64 *full64 = &ms->node->map64;
+ unsigned long min = full64->pivot[0];
+ unsigned long max = min;
+
+ for (i = 3; i < MAPLE_NODE64_MAX_PIVOT; i++) {
+ max = full64->pivot[i];
+ if ((max - min) >= 8)
+ break;
+ }
+ return i;
+}
/*
* Private
*
struct maple_node_64 *full64 = &ms->node->map64;
struct maple_node_64 *target = &left->map64;
- int i, j;
+ int i, j, split;
- /* Copy 0-3 to left_mn 0-3
- * Copy 4-7 to right_mn 0-3
+ split = maple_calc_split(ms);
+ /* Copy the first split + 1 entries into left_mn 0 to split,
+ * then copy the remaining entries into right_mn starting at 0.
*/
for (i = 0, j = off; j < MAPLE_NODE64_MAX_SLOT; i++, j++) {
- if (i == MAPLE_NODE64_MAX_SLOT / 2) {
+ if (i == split + 1) {
target = &right->map64;
i = 0;
}
- if (j < MAPLE_NODE64_MAX_SLOT - 1)
+ if (j < MAPLE_NODE64_MAX_PIVOT)
target->pivot[i] = full64->pivot[j];
- else if (j == MAPLE_NODE64_MAX_SLOT - 1 && ms->max != ULONG_MAX)
- target->pivot[i] = ms->max;
+ else if (j == MAPLE_NODE64_MAX_PIVOT && ms->max != ULONG_MAX)
+ target->pivot[i] = ms->max + 1;
target->slot[i] = full64->slot[j];
}
}
* Writing new values from the tail forward ensures that a valid entry
* was hit prior to a partial write.
*/
-void maple_shift_64(struct maple_node_64 *mn64, int p_here)
+void maple_shift_64(struct maple_node_64 *mn64, int p_here, int shift)
{
int data_end = maple_data_end_64(mn64);
int idx;
for (idx = data_end; idx >= p_here; idx--) {
- if (idx < 6)
- mn64->pivot[idx + 1] = mn64->pivot[idx];
- rcu_assign_pointer(mn64->slot[idx + 1], mn64->slot[idx]);
+ if (idx + shift < MAPLE_NODE64_MAX_SLOT)
+ rcu_assign_pointer(mn64->slot[idx + shift],
+ mn64->slot[idx]);
+ if (idx + shift < MAPLE_NODE64_MAX_PIVOT)
+ mn64->pivot[idx + shift] = mn64->pivot[idx];
}
}
{
struct maple_node *full = ms->node;
struct maple_node_64 *left64 = &left->map64;
+ int l_end = maple_data_end_64(left64);
+
+ /* If the end pivot is 0, then decrease the end to get a value */
+ if (left64->pivot[l_end] == 0)
+ l_end--;
if (maple_is_root(ms, ms->node)) {
struct maple_node *new_root = ma_next_alloc(ms);
- int l_end = maple_data_end_64(left64) - 1;
- int r_end = maple_data_end_64(&right->map64) - 1;
/* The full node is the root.
* We have to throw out the old root due to pesky readers.
* left and right have the data already, so link them in the
new_root->map64.pivot[0] = left64->pivot[l_end];
RCU_INIT_POINTER(new_root->map64.slot[0], ma_mk_node(left));
/* Right child */
- new_root->map64.pivot[1] = right->map64.pivot[r_end];
+ new_root->map64.pivot[1] = 0;
RCU_INIT_POINTER(new_root->map64.slot[1], ma_mk_node(right));
/* Swap root of tree */
+ maple_update_parent(left);
+ maple_update_parent(right);
rcu_assign_pointer(ms->tree->root, ma_mk_node(new_root));
ms->node = new_root;
} else {
left->parent = full->parent;
right->parent = full->parent;
+ /* Get the slot_idx to overwrite in the parent */
+ ms->node = full->parent;
+ _maple_walk_64(ms, ms->index);
/* Shift the data over */
- maple_shift_64(target, ms->slot_idx);
+ maple_shift_64(target, ms->slot_idx, 1);
/* Overwrite the duplicate slot data with the new right node */
target->slot[ms->slot_idx + 1] = ma_mk_node(right);
/* Overwrite the first pivot with the new value. This is fine
* as the current slot has valid entries for this pivot */
- target->pivot[ms->slot_idx] = left64->pivot[3];
+ target->pivot[ms->slot_idx] = left64->pivot[l_end];
/* Set the first slot to the node with fewer pivots */
target->slot[ms->slot_idx] = ma_mk_node(left);
+ /* Update the parent pointers of the new nodes' children */
+ maple_update_parent(left);
+ maple_update_parent(right);
+ ms->node = full->parent;
}
+
/* Orphan & free the full node */
full->parent = full;
_maple_free_node(full);
+ _maple_walk(ms);
}
/*
* Private
int alloc_cnt = 2;
int offset = 0;
- if (maple_is_root(ms, ms->node)) {
+ if (maple_is_root(ms, ms->node))
alloc_cnt++;
-// offset++;
- }
/*
* FIXME:
}
+
+/*
+ * Private
+ *
+ * Insert an entry into ms->tree.
+ *
+ * This is done by:
+ * 1. Calculating the range where ms->slot_idx points.
+ * 2. Figuring out how many new slots the insert requires.
+ * 3. Splitting the node if there is insufficient space.
+ * 4. Shifting the existing data over.
+ * 5. Adding the start and end entries as needed.
+ */
static inline int _maple_insert_64(struct maple_state *ms, void *entry)
{
struct maple_node_64 *mn64 = &ms->node->map64;
- int p_here = ms->slot_idx; /* Place data here */
+ int slot = ms->slot_idx; /* Place data here */
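+ /* The existing null range defaults to ms->min - ms->max */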
+ unsigned long min_null_range = ms->min;
+ unsigned long max_null_range = ms->max;
int shift = 0;
int idx = 0;
+ int data_end = maple_data_end_64(mn64);
+
+ /* Split and retry if the node is full */
+ if (data_end == MAPLE_NODE64_MAX_SLOT - 1)
+ goto split_retry;
/* Check if this will be the last entry */
- if (p_here == MAPLE_NODE64_MAX_SLOT - 1) {
- rcu_assign_pointer(mn64->slot[p_here], entry);
+ if (slot == MAPLE_NODE64_MAX_SLOT - 1) {
+ rcu_assign_pointer(mn64->slot[slot], entry);
return 0;
}
+ /* Set the minimum range value */
+ if (slot > 0)
+ min_null_range = mn64->pivot[slot - 1];
+
+ /* Set the maximum range value */
+ if (slot < MAPLE_NODE64_MAX_SLOT - 2)
+ max_null_range = mn64->pivot[slot];
- /* Things to check for the null start:
- *
- * 1. Not using the parent pivot to get to slot 0.
- * 2. The previous pivot isn't sequential with our value.
+ /* Check if the entire null range matches this range */
+ if ((max_null_range == ms->end + 1) &&
+ (min_null_range == ms->index)) {
+ rcu_assign_pointer(mn64->slot[slot], entry);
+ return 0;
+ }
+
+ /* If the end of the existing range isn't the end of the insert range,
+ * we need to shift.
*/
- if (ms->index != 0 &&
- ((p_here == 0 && ms->index + 1 > ms->min) ||
- (p_here > 0 && mn64->pivot[p_here-1] != ms->index))) {
+ if (max_null_range != ms->end + 1)
shift++;
- }
+ /* If the start of the existing range isn't the start of the insert
+ * range, we need to shift
+ */
+ if (min_null_range != ms->index)
+ shift++;
+
+ /* If there is not enough space in this node to insert the number of
+ * entries we need, then split this node.
+ */
+ if (data_end + shift >= MAPLE_NODE64_MAX_SLOT - 1)
+ goto split_retry;
+
+ /* Start at the back of the new entries */
+ idx = slot + shift - 1;
/* Shift everything over. */
- if (shift > 0)
- maple_shift_64(mn64, p_here);
+ maple_shift_64(mn64, slot, shift);
- /* We now have made space at p_here to p_here + shift for the new
+ /* We have now made space from slot to slot + shift for the new
* data, which needs to be:
- * (maybe) p_here: index - 1 => NULL
- * (always) p_here + 1: end + 1 => entry
+ * (maybe) slot: index - 1 => NULL
+ * (maybe) slot + 1: end + 1 => entry
*/
- idx = p_here + shift;
- if (idx < 7)
+ /* Insert the end entry if needed */
+ if (max_null_range != ms->end + 1) {
+ rcu_assign_pointer(mn64->slot[idx], NULL);
mn64->pivot[idx] = ms->end + 1;
- rcu_assign_pointer(mn64->slot[idx], entry);
-
- if (shift > 0) {
idx--;
- mn64->pivot[idx] = ms->index;
- rcu_assign_pointer(mn64->slot[idx], NULL);
}
+
+ /* Insert the start pivot if needed */
+ if (min_null_range != ms->index)
+ mn64->pivot[idx] = ms->index;
+
+ /* Always store the entry; the destination slot is known to hold NULL */
+ rcu_assign_pointer(mn64->slot[idx + 1], entry);
return 0;
+
+split_retry:
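+ /* Not enough room in this node: split it and retry the insert. */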
+ _maple_node_split(ms);
+ if (mas_is_err(ms))
+ return xa_err(ms->node);
+ return _maple_insert_64(ms, entry);
}
/*
* Private
int i = 0;
do {
- if ((i < MAPLE_NODE64_MAX_PIVOT) && mn64->pivot[i] == 0)
- break;
-
entry = mn64->slot[i];
if (xa_is_node(entry)) {
if(_maple_is_node_4(ms)) {
ms->node = ma_to_node(entry);
_maple_destroy_walk(ms);
}
+
+ if ((i < MAPLE_NODE64_MAX_PIVOT) && mn64->pivot[i] == 0)
+ break;
} while (i++ < MAPLE_NODE64_MAX_SLOT - 1);
_maple_free_node(mn);
}
mtree_lock(ms.tree);
retry:
walked = _maple_setup_insert(&ms);
- if(mas_nomem(&ms, gfp))
- goto retry;
-
if (walked != NULL)
goto already_exists;
ret = _maple_insert(&ms, entry);
+ if (mas_nomem(&ms, gfp))
+ goto retry;
already_exists:
mtree_unlock(ms.tree);
MT_BUG_ON(mt, !mtree_empty(mt));
- for (i = 0; i < 15; i++) {
+ for (i = 0; i < 16; i++) {
MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
for (j = 0; j <= i; j++)
check_index_load(mt, j);
mtree_destroy(mt);
}
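+/* Insert indices descending from a huge value, halving each time, and after
+ * every insert check that all previously inserted indices still load.
+ */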
+static noinline void check_double_insert_leaf(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ unsigned long huge = 50000000000;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ i = huge;
+ while (i > 0x80) {
+ check_insert(mt, i, xa_mk_value(i));
+ for (j = huge; j >= i; j /= 2) {
+ check_load(mt, j, xa_mk_value(j));
+ check_load(mt, (unsigned long)(xa_mk_value(j+1)), NULL);
+ }
+ i /= 2;
+ }
+ mtree_destroy(mt);
+}
static DEFINE_MAPLE_TREE(tree);
static int maple_tree_seed(void)
mtree_init(&tree);
check_new_node(&tree);
+ check_double_insert_leaf(&tree);
check_load(&tree, set[0], NULL); // See if 15 -> NULL
check_insert(&tree, set[9], &tree); // Insert 0
mtree_init(&tree);
check_insert(&tree, set[0], &tree); // Insert 15
check_dup_insert(&tree, set[0], &tree); // Insert 15 again
- check_load(&tree, set[0], &tree); // See if 15 -> &tree (test 10)
+ check_load(&tree, set[0], &tree); // See if 15 -> &tree
/* Second set of tests try to load a value that doesn't exist, inserts
* a second value, then loads the value again
check_load(&tree, set[0], &tree); // See if 15 -> &tree
check_load(&tree, set[1], ptr); // See if 14 -> ptr
check_load(&tree, set[6], ptr); // See if 1002 -> ptr
- check_load(&tree, set[7], &tree); // 1003 = &tree ? (test 20)
+ check_load(&tree, set[7], &tree); // 1003 = &tree ?
/* Clear out tree */
mtree_destroy(&tree);
*/
check_insert(&tree, set[0], ptr); // 15
- check_insert(&tree, set[1], &tree); // 14 (test 30)
+ check_insert(&tree, set[1], &tree); // 14
check_insert(&tree, set[2], ptr); // 17
check_insert(&tree, set[3], &tree); // 25.
check_insert(&tree, set[4], ptr); // 1000 < Should split.
check_load(&tree, set[3], &tree); //25
check_load(&tree, set[4], ptr);
check_insert(&tree, set[5], &tree); // 1001
- check_load(&tree, set[0], ptr); // test 40
+ check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
check_load(&tree, set[3], &tree);
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
- check_load(&tree, set[3], &tree); // test 50
+ check_load(&tree, set[3], &tree);
check_load(&tree, set[4], ptr);
check_load(&tree, set[5], &tree);
check_load(&tree, set[6], ptr);
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
- check_load(&tree, set[3], &tree); // test 60
+ check_load(&tree, set[3], &tree);
check_load(&tree, set[4], ptr);
check_load(&tree, set[5], &tree);
check_load(&tree, set[6], ptr);