};
#define mt_min_slot_cnt(x) mt_min_slots[mte_node_type(x)]
-#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS* 2 + 1)
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
struct maple_big_node {
struct maple_pnode *parent;
static inline void mab_shift_right(struct maple_big_node *b_node,
unsigned char shift)
{
- unsigned char b_end = b_node->b_end - 1;
+ unsigned long size = b_node->b_end * sizeof(unsigned long);
- do {
- b_node->pivot[b_end + shift] = b_node->pivot[b_end];
- b_node->slot[b_end + shift] = b_node->slot[b_end];
- b_node->gap[b_end + shift] = b_node->gap[b_end];
- } while (b_end--);
+ memmove(b_node->pivot + shift, b_node->pivot, size);
+ memmove(b_node->slot + shift, b_node->slot, size);
+ memmove(b_node->gap + shift, b_node->gap, size);
}
/*
* still be sufficient, then increment the split on NULL.
*/
if ((split < slot_cnt - 1) &&
- (b_node->b_end - split) < (mt_min_slots[b_node->type]))
+ (b_node->b_end - split) > (mt_min_slots[b_node->type]))
split++;
else
split--;
// Possible underflow of piv will wrap back to 0 before use.
unsigned long piv = mas->min - 1;
+ // Copy start data up to insert.
if (slot) {
mas_mab_cp(mas, 0, slot - 1, b_node, 0);
b_end = b_node->b_end;
piv = b_node->pivot[b_end - 1];
}
+ // Handle range overlap start.
if (piv + 1 < mas->index) {
b_node->slot[b_end] = contents;
if (!contents)
b_node->pivot[b_end++] = mas->index - 1;
}
+ // Insert the data.
b_node->slot[b_end] = entry;
b_node->pivot[b_end] = mas->last;
+ // Handle range overlap end.
piv = mas_get_safe_pivot(mas, slot);
if (piv > mas->last) {
b_node->slot[++b_end] = contents;
} else
piv = mas->last;
+ // Appended.
if (piv >= mas->max)
return b_end;
+ // Handle range overwrites.
do {
piv = mas_get_safe_pivot(mas, ++slot);
} while ((piv <= mas->last) && (slot <= end));
+ // Copy end data to the end of the node.
if (piv > mas->last) {
if (slot > end) {
b_node->slot[++b_end] = NULL;
}
}
-static inline bool mas_push_left(struct ma_state *mas,
- struct maple_subtree_state *mast)
+static inline bool mas_push_data(struct ma_state *mas,
+ struct maple_subtree_state *mast, bool left)
{
unsigned char slot_total = mast->bn->b_end;
unsigned char end, space, split;
mas_dup_state(&tmp_mas, mast->l); // for depth.
tmp_mas.node = mas->node;
- if (!mas_prev_sibling(&tmp_mas))
+ if (left && !mas_prev_sibling(&tmp_mas))
+ return false;
+ else if (!left && !mas_next_sibling(&tmp_mas))
return false;
end = mas_data_end(&tmp_mas);
slot_total += end;
-
space = 2 * mt_slot_count(mas->node) - 1;
/* -2 instead of -1 to ensure there isn't a triple split */
if (ma_is_leaf(mast->bn->type))
if (slot_total >= space)
return false;
- /* Fill mast->bn */
+ /* Get the data; Fill mast->bn */
mast->bn->b_end++;
- mab_shift_right(mast->bn, end + 1);
- mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
- mast->bn->b_end = slot_total + 1;
- /* Switch mas to prev node */
- mat_add(mast->free, mas->node);
- mas_dup_state(mas, &tmp_mas);
- /* Start using mast->l for the left side. */
- tmp_mas.node = mast->l->node;
- mas_dup_state(mast->l, &tmp_mas);
- split = mab_no_null_split(mast->bn, mt_slots[mast->bn->type] - 1,
- mt_slots[mast->bn->type]);
+ if (left) {
+ mab_shift_right(mast->bn, end + 1);
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
+ mast->bn->b_end = slot_total + 1;
+ } else {
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
+ }
+
+ /* Configure mast for splitting of mast->bn */
+ split = mt_slots[mast->bn->type] - 1;
+ if (left) {
+ /* Switch mas to prev node */
+ mat_add(mast->free, mas->node);
+ mas_dup_state(mas, &tmp_mas);
+ /* Start using mast->l for the left side. */
+ tmp_mas.node = mast->l->node;
+ mas_dup_state(mast->l, &tmp_mas);
+ } else {
+ mat_add(mast->free, tmp_mas.node);
+ tmp_mas.node = mast->r->node;
+ mas_dup_state(mast->r, &tmp_mas);
+ split = slot_total - split;
+ }
+ split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
// Update parent slot for split calculation.
- mas_set_slot(mast->orig_l, mas_get_slot(mast->orig_l) + end + 1);
+ if (left)
+ mas_set_slot(mast->orig_l, mas_get_slot(mast->orig_l) + end + 1);
+
mast_split_data(mast, mas, split);
mast_split_fill_bnode(mast, mas, 2);
mas_split_final_node(mast, mas, mas->full_cnt + 1);
return true;
}
+
+static inline bool mas_push_right(struct ma_state *mas,
+ struct maple_subtree_state *mast)
+{
+ return mas_push_data(mas, mast, false);
+}
+
+static inline bool mas_push_left(struct ma_state *mas,
+ struct maple_subtree_state *mast)
+{
+ return mas_push_data(mas, mast, true);
+}
+
static inline int mas_split(struct ma_state *mas,
struct maple_big_node *b_node)
{
if (mas_push_left(mas, &mast))
break;
+ if (mas_push_right(mas, &mast))
+ break;
+
split = mab_calc_split(mast.bn, &mid_split);
mast_split_data(&mast, mas, split);
+ // mab_mas_cp in the above call overwrites r->max; mas->max is usually the correct value to restore.
+ mast.r->max = mas->max;
mast_split_fill_bnode(&mast, mas, 1);
mas_dup_state(&prev_l_mas, mast.l);
mas_dup_state(&prev_r_mas, mast.r);