mni_finalise(dst);
}
+struct split_data {
+	/*
+	 * Used to split data between two nodes.
+	 *
+	 * The user of this struct, mt_wr_split_data(), must keep track of the
+	 * offset into the entire data set (up to two nodes worth) so that the
+	 * insert lands in the correct place while the overwritten source data
+	 * is skipped.
+	 */
+ unsigned char offset; /* Offset into destination data (entire set) */
+ unsigned char space; /* The space left in the current destination node */
+ unsigned char split; /* Proposed split of data */
+	unsigned char insert; /* Insert location in the destination data */
+ unsigned char new_end; /* Total data */
+	unsigned char src_ins_end; /* Last source slot replaced by the insert */
+	struct ma_node_state *state; /* States describing each copy operation */
+	int i; /* Current index into @state */
+};
+
/*
*
* @src: The node information of the source
*/
static int mt_wr_split_data(struct ma_node_info *src,
struct ma_node_info *left, struct ma_node_info *right,
- struct ma_node_part *ma_part, unsigned char split,
- unsigned char insert, unsigned char size, unsigned char offset,
- unsigned char node_ins_end, unsigned char total_data,
- struct ma_node_state *state, int i)
+ struct ma_node_part *ma_part,
+ struct ma_node_state *state, int i,
+ struct split_data *sd)
{
unsigned char insert_end;
unsigned char node_off, part_off;
struct ma_node_info *to;
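+
+	/* Use the state array and index tracked in @sd */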
+ state = sd->state;
+ i = sd->i;
+
/* Offset into the destination data where the insert ends */
- insert_end = insert + ma_part->size - 1;
+ insert_end = sd->insert + ma_part->size - 1;
to = left;
node_off = 0; /* src */
part_off = 0;
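+	/*
+	 * Walk the source data and the new part, recording each copy as a
+	 * node state destined for @left, then for @right once the split
+	 * point is reached.
+	 */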
do {
unsigned char copied = 0;
- if (offset >= insert && offset <= insert_end) {
- copied = min(ma_part->size - part_off, size);
+ if (sd->offset >= sd->insert && sd->offset <= insert_end) {
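+			/* The destination offset falls within the insert range */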
+ copied = min(ma_part->size - part_off, sd->space);
state[i].part = ma_part;
mns_mni_init(&state[i], to, part_off, copied);
state[i].use_part = true;
part_off += copied;
- node_off = node_ins_end + 1;
+ node_off = sd->src_ins_end + 1;
} else {
state[i].info = src;
- if (offset < insert_end) {
+ if (sd->offset < insert_end) {
/*
* First part of node, may split across node
* boundaries though
*/
- copied = min(size, insert - offset);
+ copied = min(sd->space, sd->insert - sd->offset);
} else {
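+				/* Source data located after the insert */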
- copied = min(size, (src->end - node_off + 1));
+ copied = min(sd->space, (src->end - node_off + 1));
}
BUG_ON(copied == 0);
mns_mni_init(&state[i], to, node_off, copied);
node_off += copied;
}
- offset += copied;
- size -= copied;
- if ((to == left) && (offset >= split)) {
+ sd->offset += copied;
+ sd->space -= copied;
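+		/*
+		 * The left node has reached the split point.  If a leaf would
+		 * end in NULL, move the split by one slot, then direct the
+		 * remaining copies to the right node.
+		 */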
+ if ((to == left) && (sd->offset >= sd->split)) {
if (ma_is_leaf(src->type) &&
mns_ends_in_null(&state[i])) {
- if (!state[i].use_part && offset != insert) {
+ if (!state[i].use_part && sd->offset != sd->insert) {
/* Setting up a copy from a normal node */
state[i].size++;
- split++;
- offset++;
- size--;
+ sd->split++;
+ sd->offset++;
+ sd->space--;
node_off++;
} else {
- split--;
- offset--;
- size++;
+ sd->split--;
+ sd->offset--;
+ sd->space++;
if (state[i].use_part) {
part_off--;
} else {
state[i].size--;
}
}
- size = mt_slots[right->type];
+ sd->space = mt_slots[right->type];
to = right;
- split = 255;
+ sd->split = 255;
}
i++;
} while (node_off <= src->end);
*
* Returns: True on rebalance, false otherwise.
*/
-static bool mas_wr_try_rebalance(struct ma_state *mas,
- struct ma_node_info *src, unsigned char new_end,
+static bool mas_wr_try_rebalance(struct ma_state *mas, struct ma_node_info *src,
struct ma_node_info *left, struct ma_node_info *right,
- struct ma_node_part *ma_part, unsigned char node_ins_end)
+ struct ma_node_part *ma_part, struct split_data *sd)
{
struct ma_state tmp_mas;
struct ma_node_info src2, parent, new_parent;
struct ma_node_state state[5];
unsigned char split, max, i;
- unsigned char total_data, size, offset, insert;
bool left_store = false;
/*
tmp_mas.offset--;
mas_descend(&tmp_mas);
mni_mas_init(&src2, &tmp_mas);
- split = mas_wr_rebalance_calc(src2.end + new_end, src2.type);
+ split = mas_wr_rebalance_calc(src2.end + sd->new_end, src2.type);
if (split) {
parent.insert_off--;
mas_descend(&tmp_mas);
mni_mas_init(&src2, &tmp_mas);
mni_set_end(&src2);
- split = mas_wr_rebalance_calc(src2.end + new_end, src2.type);
+ split = mas_wr_rebalance_calc(src2.end + sd->new_end, src2.type);
if (!split)
return false;
- split = src2.end + new_end - split;
+ split = src2.end + sd->new_end - split;
left_store = true;
/* Left will be src, right will be src2 */
left->min = src->min;
}
/* The rebalance operation will succeed. */
+
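+	/* Record the chosen split and account for the sibling's data */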
+ sd->split = split;
+ sd->state = state;
+ sd->new_end += src2.end + 1;
+
i = 0;
- offset = 0;
- total_data = src2.end + new_end + 1;
if (left_store) {
/* Left pushes data right. */
- insert = mas->offset;
- size = split;
+ sd->insert = mas->offset;
+ sd->space = split;
+ sd->offset = 0;
} else {
/* Right pushes data left */
- insert = mas->offset + src2.end + 1;
- offset += src2.end + 1;
- size = split - src2.end;
+ sd->insert = mas->offset + src2.end + 1;
+ sd->offset = src2.end + 1;
+ sd->space = split - src2.end;
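+		/* All of src2 is copied into the new left node first */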
state[i].info = &src2;
mns_mni_init(&state[i], left, 0, src2.end + 1);
i++;
+ sd->i++;
}
/*
* There can also be a split between nodes that may happen at these
* boundaries, or elsewhere.
*/
- i = mt_wr_split_data(src, left, right, ma_part, split, insert, size,
- offset, node_ins_end, total_data, state, i);
+ i = mt_wr_split_data(src, left, right, ma_part,
+ state, i, sd);
if (left_store) {
state[i].info = &src2;
mns_mni_init(&state[i++], right, 0, src2.end + 1);
struct ma_node_state state[4];
unsigned char i = 0;
struct ma_node_info src, parent, left, right;
- struct ma_node_part ma_part;
+ struct ma_node_part part;
+ struct split_data sd;
int height;
- unsigned char split, total;
+
trace_ma_op(__func__, mas);
if (mt_is_alloc(mas->tree))
right.alloc = left.alloc = true;
- mns_node_part_leaf_init(&ma_part, wr_mas, &src);
- total = mas->end + ma_part.size; /* - skip + 1 */
- if (height > 1 && mas_wr_try_rebalance(mas, &src, total, &left,
- &right, &ma_part, wr_mas->offset_end))
+ mns_node_part_leaf_init(&part, wr_mas, &src);
+ sd.new_end = mas->end + part.size; /* - skip + 1 */
+ sd.src_ins_end = wr_mas->offset_end;
+ sd.i = 0;
+ if (height > 1 &&
+ mas_wr_try_rebalance(mas, &src, &left, &right, &part, &sd))
goto rebalanced;
- split = (total + 1) / 2;
left.min = src.min;
right.max = src.max;
- i = mt_wr_split_data(&src, &left, &right, &ma_part, split,
- mas->offset, split, 0, wr_mas->offset_end,
- total, state, 0);
+
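+	/* No rebalance: split the new data roughly in half */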
+ sd.split = (sd.new_end + 1) / 2;
+ sd.insert = mas->offset;
+ sd.space = sd.split;
+ sd.offset = 0;
+ sd.state = state;
+
+ i = mt_wr_split_data(&src, &left, &right, &part,
+ state, 0, &sd);
mns_assemble(state, i);
mni_finalise(&left);
mni_finalise(&right);
goto new_root;
while (--height) {
- mni_node_part_init(&ma_part, &left, &right);
+ mni_node_part_init(&part, &left, &right);
mas_wr_ascend_init(mas, &src);
mas->end = src.end;
- total = mas->end + 1;
+ sd.new_end = mas->end + 1;
if (mas->end + 1 < mt_slots[src.type])
goto converged;
mni_node_init(&left, mas_pop_node(mas), src.type);
mni_node_init(&right, mas_pop_node(mas), src.type);
+ sd.src_ins_end = src.insert_off;
+ sd.i = 0;
if ((height > 1) &&
- (mas_wr_try_rebalance(mas, &src, mas->end + 1, &left,
- &right, &ma_part, src.insert_off)))
+ (mas_wr_try_rebalance(mas, &src, &left, &right, &part, &sd)))
goto rebalanced;
left.min = src.min;
right.max = src.max;
- split = (total + 1) / 2;
- i = mt_wr_split_data(&src, &left, &right, &ma_part, split,
- mas->offset, split, 0, mas->offset,
- total, state, 0);
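+		/* Split the parent's data roughly in half */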
+ sd.insert = mas->offset;
+ sd.split = (sd.new_end + 1) / 2;
+ sd.space = sd.split;
+ sd.offset = 0;
+
+ i = mt_wr_split_data(&src, &left, &right, &part,
+ state, 0, &sd);
mns_assemble(state, i);
mni_finalise(&left);
mni_finalise(&right);
}
new_root:
- mni_node_part_init(&ma_part, &left, &right);
+ mni_node_part_init(&part, &left, &right);
/* Converged on new root */
mas->depth++;
src.insert_off = mas->offset = 0;
* set the skip to high enough to avoid using any data
*/
converged:
- mas_wr_converged(&src, &parent, &ma_part, mas);
+ mas_wr_converged(&src, &parent, &part, mas);
mas->node = parent.enode;
rebalanced:
mas_wmb_replace(mas, src.enode);