/*
- * mns_node_part_leaf_init() - Initialize what is being inserted, calculate how
- * many slots will be skipped.
+ * mns_node_part_leaf_init() - Initialize the node part being inserted and
+ * calculate how many slots will be skipped.
- * @ma_part: The maple node part
+ * @part: The maple node part
* @wr_mas: The write maple state
* @src: The source maple node state (existing data)
*/
static __always_inline
-void mns_node_part_leaf_init(struct ma_node_part *ma_part,
+void mns_node_part_leaf_init(struct ma_node_part *part,
struct ma_wr_state *wr_mas, struct ma_node_info *src)
{
- ma_part->pos = 0;
- ma_part->size = 0;
+ part->pos = 0;
+ part->size = 0;
if (wr_mas->r_min < wr_mas->mas->index) {
- ma_part->pivots[0] = wr_mas->mas->index - 1;
- ma_part->slots[0] = wr_mas->content;
- ma_part->size++;
+ part->pivots[0] = wr_mas->mas->index - 1;
+ part->slots[0] = wr_mas->content;
+ part->size++;
}
- ma_part->pivots[ma_part->size] = wr_mas->mas->last;
- ma_part->slots[ma_part->size] = wr_mas->entry;
- ma_part->size++;
+ part->pivots[part->size] = wr_mas->mas->last;
+ part->slots[part->size] = wr_mas->entry;
+ part->size++;
if (wr_mas->end_piv > wr_mas->mas->last) {
- ma_part->pivots[ma_part->size] = wr_mas->end_piv;
- ma_part->slots[ma_part->size] = src->slots[wr_mas->offset_end];
- ma_part->size++;
+ part->pivots[part->size] = wr_mas->end_piv;
+ part->slots[part->size] = src->slots[wr_mas->offset_end];
+ part->size++;
}
- ma_part->unfinished = false;
- ma_part->dst_max_off = 255;
- ma_part->skip = wr_mas->offset_end - wr_mas->mas->offset + 1;
- ma_part->leaf = true;
+ part->unfinished = false;
+ part->dst_max_off = 255;
+ part->skip = wr_mas->offset_end - wr_mas->mas->offset + 1;
+ part->leaf = true;
}
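+/*
+ * A worked example for mns_node_part_leaf_init(), using hypothetical values
+ * (not taken from the source): storing @entry over [10, 20] into a leaf
+ * range [5, 30] that currently holds @content gives r_min (5) < index (10)
+ * and end_piv (30) > last (20), so the part keeps both a head and a tail:
+ *
+ *   slots[0] = content   pivots[0] = 9    (preserved head of the old range)
+ *   slots[1] = entry     pivots[1] = 20   (the new data)
+ *   slots[2] = old tail  pivots[2] = 30   (preserved tail of the old range)
+ *
+ * part->size ends up as 3, and part->skip counts the source slots consumed
+ * from mas->offset through offset_end inclusive.
+ */
+
+/*
+ * mni_node_part_init() - Initialize a node part holding the two new children
+ * that will replace a split node in the parent.
+ * @part: The maple node part
+ * @left: The new left child
+ * @right: The new right child
+ */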
static inline
-void mni_node_part_init(struct ma_node_part *ma_part,
+void mni_node_part_init(struct ma_node_part *part,
struct ma_node_info *left, struct ma_node_info *right)
{
- ma_part->slots[0] = left->enode;
- ma_part->pivots[0] = left->max;
- ma_part->gaps[0] = left->max_gap;
+ part->slots[0] = left->enode;
+ part->pivots[0] = left->max;
+ part->gaps[0] = left->max_gap;
- ma_part->slots[1] = right->enode;
- ma_part->pivots[1] = right->max;
- ma_part->gaps[1] = right->max_gap;
+ part->slots[1] = right->enode;
+ part->pivots[1] = right->max;
+ part->gaps[1] = right->max_gap;
- ma_part->pos = 0;
- ma_part->size = 2;
- ma_part->unfinished = false;
- ma_part->dst_max_off = 255;
- ma_part->skip = 1;
- ma_part->leaf = false;
+ part->pos = 0;
+ part->size = 2;
+ part->unfinished = false;
+ part->dst_max_off = 255;
+ part->skip = 1;
+ part->leaf = false;
}
static __always_inline
struct ma_node_info *right,
bool left_store,
unsigned char split,
- struct ma_node_part *ma_part,
+ struct ma_node_part *part,
unsigned char mas_off,
unsigned char new_end
)
unsigned char l_end, r_end;
/*
- * l_src, ma_part, and r_src will be split between the new left and
+ * l_src, part, and r_src will be split between the new left and
- * right nodes. Depending on where the split and the store offset
- * (mas_off) falls within the data will determine where the new data
- * will end up in the new nodes (left and right).
+ * right nodes. Where the split and the store offset (mas_off) fall
+ * within the data determines where the new data will end up in the new
+ * nodes (left and right).
* Shifting back means copying the data to the right node. Shifting
* forward is complicated by a potential insert splitting the nodes,
* which means the new data going to the left will have to come from the
- * ma_part. This is all taken care of in mas_wr_split_no_null().
+ * part. This is all taken care of in mas_wr_split_no_null().
*/
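+/*
+ * A sketch with hypothetical offsets, assuming part->skip == 1, with
+ * split == 3 and mas_off == 2:
+ *
+ *   l_src:  [0][1][2][3][4][5]      part: [a][b]
+ *   left:   [0][1][a][b]            (part lands left of the split)
+ *   right:  [3][4][5]               (source offset 2 was skipped)
+ *
+ * If part straddles the split, the unfinished remainder is inserted into
+ * the right node after mas_wr_split_no_null() returns.
+ */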
l_end = l_src->end;
if (mas_off)
mni_cp(l_src, left, mas_off);
- ma_part->dst_max_off = split;
- mni_insert_part(ma_part, left);
- l_src->offset+= ma_part->skip;
+ part->dst_max_off = split;
+ mni_insert_part(part, left);
+ l_src->offset += part->skip;
printk("%d\n", __LINE__);
printk("\tright min %lu left max %lu\n", right->min, left->max);
printk("\tright min %lu left max %lu\n", right->min, left->max);
mas_wr_split_no_null(l_src, left, right,
- r_end + new_end + 1, ma_part);
+ r_end + new_end + 1, part);
right->min = left->max + 1;
printk("\tright min %lu left max %lu\n", right->min, left->max);
- if (ma_part->unfinished)
- mni_insert_part(ma_part, right);
+ if (part->unfinished)
+ mni_insert_part(part, right);
if (l_end >= l_src->offset)
mni_cp(l_src, right, l_end - l_src->offset + 1);
printk("%s %d\n", __func__, __LINE__);
mni_cp(l_src, left, split + 1);
mas_wr_split_no_null(l_src, left, right,
- r_end + new_end + 1, ma_part);
+ r_end + new_end + 1, part);
right->min = left->max + 1;
mni_cp(l_src, right, mas_off - l_src->offset);
- mni_insert_part(ma_part, right);
+ mni_insert_part(part, right);
//printk("%d\n", __LINE__);
- l_src->offset+= ma_part->skip;
+ l_src->offset += part->skip;
if (l_end >= l_src->offset)
mni_cp(l_src, right, l_end - l_src->offset + 1);
}
printk("%s %d\n", __func__, __LINE__);
mni_cp(l_src, left, split + 1);
mas_wr_split_no_null(l_src, left, right,
- l_end + new_end + 1, ma_part);
+ l_end + new_end + 1, part);
mni_cp(l_src, right, l_end - l_src->offset + 1);
right->min = left->max + 1;
mni_cp(r_src, right, mas_off);
- mni_insert_part(ma_part, right);
+ mni_insert_part(part, right);
//printk("%d\n", __LINE__);
- r_src->offset+= ma_part->skip;
+ r_src->offset += part->skip;
if (r_src->offset <= r_end)
mni_cp(r_src, right, r_end - r_src->offset + 1);
if (mas_off <= r_split) {
if (mas_off)
mni_cp(r_src, left, mas_off);
- ma_part->dst_max_off = split;
- mni_insert_part(ma_part, left);
+ part->dst_max_off = split;
+ mni_insert_part(part, left);
//printk("%d\n", __LINE__);
- r_src->offset+= ma_part->skip;
+ r_src->offset += part->skip;
if (r_src->offset < r_split)
mni_cp(r_src, left,
r_split - r_src->offset);
mas_wr_split_no_null(r_src, left, right,
l_end + new_end + 1,
- ma_part);
+ part);
- if (ma_part->unfinished)
- mni_insert_part(ma_part, right);
+ if (part->unfinished)
+ mni_insert_part(part, right);
right->min = left->max + 1;
} else {
mni_cp(r_src, left, r_split + 1);
mas_wr_split_no_null(r_src, left, right,
l_end + new_end + 1,
- ma_part);
+ part);
right->min = left->max + 1;
if (mas_off > r_src->offset)
mni_cp(r_src, right,
mas_off - r_src->offset);
- mni_insert_part(ma_part, right);
+ mni_insert_part(part, right);
//printk("%d\n", __LINE__);
- r_src->offset+= ma_part->skip;
+ r_src->offset += part->skip;
}
if (r_src->offset <= r_end)
-};
+}
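+/*
+ * mas_wr_converged() - Set up the node states that copy @src into @dst with
+ * @part spliced in at the insert offset. (A summary inferred from the
+ * callers, which pass the old and new parent and set part->skip to the
+ * number of replaced slots.)
+ * @src: The node information of the existing parent
+ * @dst: The node information of the new parent
+ * @part: The maple node part holding the new children
+ * @mas: The maple state
+ * @sd: The split data holding the node states to assemble
+ */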
static inline void mas_wr_converged(struct ma_node_info *src,
- struct ma_node_info *dst, struct ma_node_part *ma_part,
+ struct ma_node_info *dst, struct ma_node_part *part,
struct ma_state *mas, struct split_data *sd)
{
unsigned char off = 0;
off = src->insert_off;
}
- mns_mni_init(&sd->states[sd->len], dst, 0, ma_part->size);
- sd->states[sd->len].part = ma_part;
+ mns_mni_init(&sd->states[sd->len], dst, 0, part->size);
+ sd->states[sd->len].part = part;
sd->states[sd->len].use_part = true;
sd->len++;
- off += ma_part->skip;
+ off += part->skip;
if (src->end >= off) {
unsigned char start = off;
}
/*
+ * mt_wr_split_data() - Split the combined data into ma_node_state parts.
*
* @src: The node information of the source
* @left: The node information of the left destination
* @right: The node information of the right destination
- * @ma_part: The node part being inserted
+ * @part: The node part being inserted
- * @split: The split location referenced by the destination data
- * @insert: The location of the insert start for @ma_part
- * @size: The targeted size of the left node
- * @offset: The starting offset into the destination data (may be larger than
- * the node)
- * @total_data: The total size of the data being stored
- * @state: The maple node state array
- * @i: The number of existing states
+ * @sd: The split data: the split location referenced by the destination
+ * data, the insert start for @part, the targeted size of the left node,
+ * the starting offset into the destination data (may be larger than the
+ * node), the total data size, and the maple node state array with the
+ * number of existing states
+ *
+ * The source and destination will be set up to execute the copy while
+ * avoiding the NULL entry at the end of a node. The actual copy happens
+ * when the nodes are assembled. There can only ever be 5 node states, as
+ * this is the maximum number of copy operations that need to occur.
+ *
+ * The destination can be either the left or the right node.
+ * The source is either a portion of the left or right source node when
+ * rebalancing, or just the one node being split; it can also be the new
+ * data in @part.
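+ *
+ * One plausible worst case for the five states (illustrative, not derived
+ * from the source): a store that crosses the left/right boundary mid-@part
+ * can need src head -> left, @part head -> left, @part tail -> right, and
+ * src tail -> right, plus one more state when an entry is shifted to keep
+ * a NULL out of the last slot.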
*/
static void mt_wr_split_data(struct ma_node_info *src,
struct ma_node_info *left, struct ma_node_info *right,
- struct ma_node_part *ma_part, struct split_data *sd)
+ struct ma_node_part *part, struct split_data *sd)
{
unsigned char insert_end;
unsigned char node_off, part_off;
/* Offset into the destination data where the insert ends */
- insert_end = sd->insert + ma_part->size - 1;
+ insert_end = sd->insert + part->size - 1;
to = left;
node_off = 0; /* src */
part_off = 0;
state = &sd->states[sd->len];
if (sd->offset >= sd->insert && sd->offset <= insert_end) {
- copied = min(ma_part->size - part_off, sd->space);
- state->part = ma_part;
+ copied = min(part->size - part_off, sd->space);
+ state->part = part;
mns_mni_init(state, to, part_off, copied);
state->use_part = true;
part_off += copied;
* @new_end: The size of the src after the insert
* @left: The new left child
* @right: The new right child
- * @ma_part: The node part that will be inserted
+ * @part: The node part that will be inserted
*
* Returns: True on rebalance, false otherwise.
*/
static
bool mas_wr_try_rebalance(struct ma_state *mas, struct ma_node_info *src,
struct ma_node_info *left, struct ma_node_info *right,
- struct ma_node_part *ma_part, struct split_data *sd)
+ struct ma_node_part *part, struct split_data *sd)
{
struct ma_state tmp_mas;
struct ma_node_info src2, parent, new_parent;
}
/* The rebalance operation will succeed. */
-
sd->split = split;
sd->new_end += src2.end + 1;
-
if (left_store) {
/* Left pushes data right. */
sd->insert = mas->offset;
- * There can also be a split between nodes that may happen at these
- * boundaries, or elsewhere.
+ * A split between nodes may also happen at these boundaries, or
+ * elsewhere.
*/
- mt_wr_split_data(src, left, right, ma_part, sd);
+ mt_wr_split_data(src, left, right, part, sd);
if (left_store) {
sd->states[sd->len].info = &src2;
mns_mni_init(&sd->states[sd->len], right, 0, src2.end + 1);
mns_assemble(sd->states, sd->len);
mni_finalise(left);
mni_finalise(right);
- mni_node_part_init(ma_part, left, right);
+ mni_node_part_init(part, left, right);
mas_ascend(mas);
mas->end = parent.end;
mas->offset = parent.insert_off;
- ma_part->skip = 2;
- mas_wr_converged(&parent, &new_parent, ma_part, mas, sd);
+ part->skip = 2;
+ mas_wr_converged(&parent, &new_parent, part, mas, sd);
src->enode = parent.enode;
mas->node = new_parent.enode;
return true;
{
struct ma_node_info src_info, parent, left, right;
struct ma_node_state src;
- struct ma_node_part ma_part;
+ struct ma_node_part part;
mni_node_init(&left, mas_pop_node(mas), wr_mas->type);
if (mt_is_alloc(mas->tree))
struct ma_node_state src, parent, l_src, r_src;
struct ma_node_info src_info;
struct ma_node_state left, right;
- struct ma_node_part ma_part;
+ struct ma_node_part part;
unsigned char total, split, height;
unsigned char insert;
mt_dump(mas->tree, mt_dump_dec);
height = mas_mt_height(mas);
mns_mas_init(&src, &src_info, mas);
- mns_node_part_leaf_init(&ma_part, wr_mas, &src);
+ mns_node_part_leaf_init(&part, wr_mas, &src);
/* Total will lack sibling data until the sibling is known */
printk("end %p %u\n", mas->node, mas->end);
- total = mas->end + ma_part.size - ma_part.skip - 1;
+ total = mas->end + part.size - part.skip - 1;
printk("%p will have %u in the end\n", mas->node, total);
- printk("Skip %u\n", ma_part.skip);
+ printk("Skip %u\n", part.skip);
// Skipping is causing a copy beyond the end of the src.
printk("Rebalance %p %lu-%lu", mas->node, mas->index, mas->last);
*
* Case 1:
* Left takes data from right.
- * Fill left up to split from l_src and ma_part - Func_1
+ * Fill left up to split from l_src and part - Func_1
- * Fill left up from l_src remainder - Func_2
- * Fill left up to split from right. - Func_2
- * fill right with remainder of right. - Func_2
- * Right takes data from left
+ * Fill left with the l_src remainder - Func_2
+ * Fill left up to split from right - Func_2
+ * Fill right with remainder of right - Func_2
+ * Case 2:
+ * Right takes data from left.
* Copy left to new left up to split - Func_2
* Fill right with remainder of left - Func_2
- * Fill right from old right or ma_part - Func_1
+ * Fill right from old right or part - Func_1
*/
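+ /*
+  * Two nodes are kept only when there is enough data for both to meet
+  * the minimum occupancy (total > 2 * mt_min_slots), or when the parent
+  * is the root.
+  */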
- if ((total > 2 * mt_min_slots[l_src.type] ) ||
+ if ((total > 2 * mt_min_slots[l_src.type]) ||
ma_is_root(parent.node)) {
printk("split is %u\n", split);
left.min = l_src.min;
mas_wr_rebalance_nodes(&l_src, &r_src, &left, &right,
- l_store, split, &ma_part,
+ l_store, split, &part,
insert, total);
mni_finalise(&left);
mni_finalise(&right);
- mni_node_part_init(&ma_part, &left, &right);
- ma_part.skip = 2;
- mas_wr_converged(&parent, &new_parent, &ma_part, mas);
+ mni_node_part_init(&part, &left, &right);
+ part.skip = 2;
+ mas_wr_converged(&parent, &new_parent, &part, mas);
src.enode = parent.enode;
mas->node = new_parent.enode;
mas->depth = height;
if (l_store) {
if (l_src.insert)
mni_cp(&l_src, &left, l_src.insert);
- mni_insert_part(&ma_part, &left);
- l_src.offset += ma_part.skip;
+ mni_insert_part(&part, &left);
+ l_src.offset += part.skip;
if (l_src.offset <= l_src.end)
mni_cp(&l_src, &left,
l_src.end - l_src.offset + 1);
mni_cp(&l_src, &left, l_src.end);
if (r_src.insert)
mni_cp(&r_src, &left, r_src.insert);
- mni_insert_part(&ma_part, &left);
- r_src.offset += ma_part.skip;
+ mni_insert_part(&part, &left);
+ r_src.offset += part.skip;
if (r_src.offset <= r_src.end)
mni_cp(&r_src, &left,
r_src.end - r_src.offset + 1);