return mt_pivots[type];
}
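+/*
+ * wr_mas_ascend() - Ascend to the parent node and refresh the write state.
+ * @wr_mas: The maple write state
+ *
+ * Re-reads the node, type, slots, pivots, and end for the parent and
+ * recalculates r_min, r_max, end_piv, and offset_end at this level.
+ */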
+static inline
+void wr_mas_ascend(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ mas_ascend(mas);
+ wr_mas->node = mas_mn(mas);
+ wr_mas->type = mte_node_type(mas->node);
+ wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
+ mas->end = ma_data_end(wr_mas->node, wr_mas->type, wr_mas->pivots,
+ mas->max);
+ wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
+ wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, mas->offset);
+ wr_mas->r_max = mas_safe_pivot(mas, wr_mas->pivots, mas->offset,
+ wr_mas->type);
+ /* Careful: end_piv may not be correct at this level (see the disabled WARN_ON_ONCE below) */
+ wr_mas->end_piv = wr_mas->r_max;
+ wr_mas->offset_end = mas->offset;
+ //WARN_ON_ONCE(wr_mas->end_piv < mas->last);
+}
+
/*
* mas_leaf_max_gap() - Returns the largest gap in a leaf node
* @mas: the maple state
end = ma_data_end(node, mt, pivots, mas->max);
for (offset = mas->offset; offset <= end; offset++) {
entry = mas_slot_locked(mas, slots, offset);
if (mte_parent(entry) == node) {
*child = *mas;
mas->offset = offset + 1;
/* Direct node to node copy */
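+/*
+ * Copies @size slots (with their pivots and, when present, gaps) from @src
+ * starting at @start into @dst starting at @d_start.  @mas is used to set
+ * the parent pointers of the copied slots when the destination is not a
+ * leaf and the source is the temporary maple_copy node.  Returns the new
+ * maximum (d_max) for the destination.
+ */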
static inline
-unsigned long node_copy(struct maple_node *src, unsigned char start,
- unsigned char size, unsigned long s_max, enum maple_type s_mt,
- struct maple_node *dst, unsigned char d_start, enum maple_type d_mt)
+unsigned long node_copy(struct ma_state *mas, struct maple_node *src,
+ unsigned char start, unsigned char size, unsigned long s_max,
+ enum maple_type s_mt, struct maple_node *dst, unsigned char d_start,
+ enum maple_type d_mt)
{
- void __rcu *s_slots;
- void __rcu *d_slots;
unsigned long *s_pivots, *d_pivots;
+ void __rcu **s_slots, **d_slots;
unsigned long *s_gaps, *d_gaps;
- bool set_last_piv = true;
unsigned long d_max;
- s_slots = ma_slots(src, s_mt) + start;
+
d_slots = ma_slots(dst, d_mt) + d_start;
- s_pivots = ma_pivots(src, s_mt) + start;
d_pivots = ma_pivots(dst, d_mt) + d_start;
-
- printk("slot %p piv %p\n", s_slots, s_pivots);
- printk("d slot %p piv %p\n", d_slots, d_pivots);
- printk("\t\t\t\t\t\tcp %p[%u-%u] => %p[%u-%u]\n", src, start, start + size - 1,
- dst, d_start, d_start + size - 1);
- fflush(stdout);
+ s_slots = ma_slots(src, s_mt) + start;
+ s_pivots = ma_pivots(src, s_mt) + start;
memcpy(d_slots, s_slots, size * sizeof(void*));
+ if (!ma_is_leaf(d_mt) && s_mt == maple_copy) {
+ struct maple_enode *edst = mt_mk_node(dst, d_mt);
+
+ for (int i = 0; i < size; i++)
+ mas_set_parent(mas, d_slots[i], edst, i);
+ }
d_gaps = ma_gaps(dst, d_mt);
if (d_gaps) {
s_gaps = ma_gaps(src, s_mt) + start;
d_gaps += d_start;
- printk("CP GAPS??\n");
- fflush(stdout);
memcpy(d_gaps, s_gaps, size * sizeof(unsigned long));
}
- if (d_start + size >= mt_slots[d_mt]) {
- set_last_piv = false;
- size--;
- }
+ d_max = s_max;
+ if (start + size < mt_pivots[s_mt])
+ d_max = s_pivots[size];
- if (start + size >= mt_slots[s_mt]) {
- set_last_piv = false;
- size--;
- d_pivots[size] = s_max;
- d_max = s_max;
- } else {
- d_max = *(s_pivots + size - 1);
- }
+ size--;
+ if (d_start + size < mt_pivots[d_mt])
+ d_pivots[size] = d_max;
if (size)
memcpy(d_pivots, s_pivots, size * sizeof(unsigned long));
- if (set_last_piv)
- d_pivots[size] = s_max;
-
return d_max;
}
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format);
+
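+/*
+ * spanning_leaf_init() - Build the new leaf entries in the maple_copy node:
+ * the remainder of the left range when the store splits it, the new entry
+ * itself, and the remainder of the right range when the store splits it.
+ * The gaps and the copy node's min, max, and end are recorded as well.
+ */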
+static inline void spanning_leaf_init(struct maple_copy *cp,
+ struct ma_state *mas, struct ma_wr_state *l_wr_mas,
+ struct ma_wr_state *r_wr_mas)
+{
+ unsigned char end = 0;
+
+ /* Create entries to insert including split entries to left and right */
+ if (l_wr_mas->r_min < mas->index) {
+ cp->slot[0] = l_wr_mas->content;
+ cp->pivot[0] = mas->index - 1;
+ end++;
+ if (l_wr_mas->content)
+ cp->gap[0] = 0;
+ else
+ cp->gap[0] = mas->index - l_wr_mas->r_min;
+ }
+ cp->slot[end] = l_wr_mas->entry;
+ cp->pivot[end] = mas->last;
+ if (l_wr_mas->entry)
+ cp->gap[end] = 0;
+ else
+ cp->gap[end] = mas->last - mas->index + 1;
+
+ if (r_wr_mas->r_max > mas->last) {
+ end++;
+ cp->slot[end] = r_wr_mas->content;
+ cp->pivot[end] = r_wr_mas->r_max;
+ if (r_wr_mas->content)
+ cp->gap[end] = 0;
+ else
+ cp->gap[end] = r_wr_mas->r_max - mas->last;
+ }
+
+ cp->min = l_wr_mas->r_min;
+ cp->max = cp->pivot[end];
+ cp->end = end;
+}
+
+/*
+ * Count the data that will be written at this level: the left node's slots
+ * below the insert, the new entries in the copy node, and the right node's
+ * slots above the insert.  cp->data is a 1-indexed count, not a 0-indexed
+ * offset.
+ */
static inline void spanning_data_calc(struct maple_copy *cp,
struct ma_state *mas, struct ma_wr_state *l_wr_mas,
struct ma_wr_state *r_wr_mas, struct ma_state *sib)
{
- /* data from left + new entry */
- cp->data = l_wr_mas->mas->offset + 1;
- printk("data size is off %u + 1\n", l_wr_mas->mas->offset);
- printk("write is now %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
- /* new entry will overwrite one part of left */
- if (l_wr_mas->r_min == mas->index) {
- printk("min doesn't split, subtrack one\n");
- cp->data--;
- } else {
- printk("min splits start %lx vs %lx\n", l_wr_mas->r_min, mas->index);
- }
- printk("%p data + 1 = %u\n", l_wr_mas->mas->node, cp->data);
+ /* The left node contributes slots 0 to offset - 1 */
+ cp->data = l_wr_mas->mas->offset;
+ printk("left: cp %p 0 - %u (%u)\n", l_wr_mas->mas->node, cp->data - 1, cp->data);
- /* Data from right (offset to end) + 1 for zero, +1 for splitting */
- cp->data += r_wr_mas->mas->end - r_wr_mas->mas->offset + 2;
- printk("end %u - off %u + 1\n", r_wr_mas->mas->end, r_wr_mas->mas->offset);
- /* new entry splits the insert location */
- printk("end piv %lx vs last %lx\n", r_wr_mas->r_max, mas->last);
- if (r_wr_mas->r_max == mas->last) {
- printk("cp->data--\n");
- cp->data--;
- }
+ /* The new entries in the copy node occupy slots 0 to cp->end */
+ cp->data += cp->end + 1;
+ printk("insert: %p data + end = %u\n", cp, cp->data);
- printk("%p data = %u\n", r_wr_mas->mas->node, cp->data);
+ /* The right node contributes slots offset + 1 to end */
+ cp->data += r_wr_mas->mas->end - r_wr_mas->mas->offset;
+ printk("end %u - off %u\n", r_wr_mas->mas->end, r_wr_mas->mas->offset);
+ printk("right: %p data = %u\n", r_wr_mas->mas->node, cp->data);
if (((l_wr_mas->mas->min != 0) || (r_wr_mas->mas->max != ULONG_MAX)) &&
(cp->data < mt_min_slots[l_wr_mas->type])) {
{
cp->d_count = 0;
/* Calc split here */
- if (cp->data < mt_slots[mt]) {
+ if (cp->data <= mt_slots[mt]) {
cp->split = cp->data;
- cp->dst[cp->d_count].mt = mt;
- cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
- /* New root */
+ cp->d_count = 1;
} else if (cp->data >= mt_slots[mt] * 2 - 1) {
cp->split = cp->data / 3;
- cp->dst[cp->d_count].mt = mt;
- cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
- cp->dst[cp->d_count].mt = mt;
- cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
- cp->dst[cp->d_count].mt = mt;
- cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
- /* New root */
+ cp->d_count = 3;
} else {
cp->split = (cp->data + 1) / 2;
- cp->dst[cp->d_count].mt = mt;
- cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
- cp->dst[cp->d_count].mt = mt;
- cp->dst[cp->d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+ cp->d_count = 2;
+ }
+
+ for (int i = 0; i < cp->d_count; i++) {
+ cp->dst[i].mt = mt;
+ cp->dst[i].node = ma_mnode_ptr(mas_pop_node(mas));
}
- printk("split = %u data %u d_count %u\n", cp->split, cp->data, cp->d_count);
+ printk("split = %u data %u d_count %u type %u\n", cp->split, cp->data, cp->d_count, mt);
}
+
+/*
+ * Set up the copy sources: the left node below the insert, the copy node
+ * holding the new entries, the right node above the insert, and the sibling
+ * when one is needed.  src->start and src->end are 0-indexed.
+ */
static inline
void spanning_split_src_setup(struct maple_copy *cp, struct ma_state *mas,
struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
append_node_cp(cp, sib, 0, sib->end);
/* Copy left 0 - offset */
- if (l_wr_mas->mas->offset || l_wr_mas->r_min < mas->index) {
- unsigned long l_end = l_wr_mas->mas->offset;
-
- if (l_wr_mas->r_min == mas->index)
- l_end--;
- append_node_cp(cp, l_wr_mas->mas, 0, l_end);
+ if (l_wr_mas->mas->offset) {
+ append_node_cp(cp, l_wr_mas->mas, 0, l_wr_mas->mas->offset - 1);
+ cp->src[cp->s_count - 1].max = l_wr_mas->r_min - 1;
}
- if (cp->s_count)
- cp->src[cp->s_count - 1].max = mas->index - 1;
- /* Insert, overwrite prev pivot */
cp->src[cp->s_count].node = ma_mnode_ptr(cp);
- cp->src[cp->s_count].max = mas->last;
cp->src[cp->s_count].mt = maple_copy;
- cp->src[cp->s_count].end = 0;
+ cp->src[cp->s_count].max = cp->max;
cp->src[cp->s_count].start = 0;
- cp->slot[0] = l_wr_mas->entry;
- cp->max = mas->last;
- cp->min = mas->index;
- if (l_wr_mas->entry)
- cp->gap[0] = 0;
- else
- cp->gap[0] = mas->last - mas->index;
+ cp->src[cp->s_count].end = cp->end;
cp->s_count++;
- /* Copy right either from offset or offset + 1 pending on r_max */
+ /* Copy right from offset + 1 to the end, if anything remains */
- if (r_wr_mas->mas->end != r_wr_mas->mas->offset || r_wr_mas->r_max > mas->last) {
- unsigned long r_start = r_wr_mas->mas->offset;
-
- if (r_wr_mas->r_max == mas->last)
- r_start++;
-
- append_node_cp(cp, r_wr_mas->mas, r_wr_mas->mas->offset, r_start);
- }
+ if (r_wr_mas->mas->end != r_wr_mas->mas->offset)
+ append_node_cp(cp, r_wr_mas->mas, r_wr_mas->mas->offset + 1,
+ r_wr_mas->mas->end);
if (sib->end) {
if (sib->min > r_wr_mas->mas->max) {
//l_wr_mas->mas = sib;
}
}
+
+ printk("\t\t\t\tSources are %u:", cp->s_count);
+ for (int i = 0; i < cp->s_count; i++)
+ printk(" %p", cp->src[i].node);
+ printk("\n");
}
static inline
dst_offset = 0;
data_offset = 0;
next_node = split;
+ /*
+ * size, split, and next_node are 1-indexed counts, while the
+ * src start/end and dst start/end offsets are 0-indexed.
+ */
do {
- unsigned long d_max;
-
do {
size = next_node - data_offset;
printk("try to use size %d\n", size);
/* Fill the destination */
//printk("%d: %u %u %u\n", __LINE__, size, next_node, data_offset);
- if (src_end - s_offset < size) {
- printk("size too big for src end: %u %u\n", src_end, s_offset);
- size = src_end - s_offset;
+ if (src_end - s_offset + 1 < size) {
+ printk("size too big for src %p end: %u %u\n", src, src_end, s_offset);
+ size = src_end - s_offset + 1;
}
- //printk("%d: %u\n", __LINE__, size);
+ printk("%d: %u\n", __LINE__, size);
printk("split %u dst_offset %u size %u\n", split, dst_offset, size);
- if (split - dst_offset < size) {
- size = split - dst_offset;
+ if (split - dst_offset + 1 < size) {
+ size = split - dst_offset + 1;
}
printk("%d: size %u\n", __LINE__, size);
- size++;
- d_max = node_copy(src, s_offset, size, s_max, s_mt,
- dst, dst_offset, d_mt);
+ cp->dst[d].max = node_copy(mas, src, s_offset, size,
+ s_max, s_mt, dst, dst_offset,
+ d_mt);
+ printk("%d: set dest max %lx\n", __LINE__, cp->dst[d].max);
data_offset += size;
dst_offset += size;
/* This source is exhausted */
s++;
if (s >= cp->s_count) {
- cp->dst[d].max = d_max;
node_finalise(dst, d_mt, dst_offset);
return;
}
s_mt = cp->src[s].mt;
}
printk("\toffset is %u next node is %u\n", data_offset, next_node);
- } while (data_offset <= next_node);
+ } while (data_offset < next_node);
next_node *= 2;
if (next_node > data + 1) {
data_offset--;
dst_offset--;
}
- cp->dst[d].max = d_max;
node_finalise(dst, d_mt, dst_offset);
- mt_dump_node(mas->tree, mt_mk_node(dst, d_mt), 0, ULONG_MAX, 1, mt_dump_hex);
+ //mt_dump_node(mas->tree, mt_mk_node(dst, d_mt), 0, ULONG_MAX, 1, mt_dump_hex);
if (d >= cp->d_count) {
WARN_ON(data_offset < data);
return;
} while (data_offset <= data);
}
+
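+/*
+ * spanning_ascend() - Convert the destination nodes into the slots and
+ * pivots of @cp and decide if another level needs to be written.
+ *
+ * Returns false when the data converged into a single node (the parent
+ * pointer is set and the walk stops).  Otherwise the left or right state is
+ * replaced by the sibling when one was used, both write states ascend, and
+ * true is returned to process the next level up.
+ */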
static bool spanning_ascend(struct maple_copy *cp, struct ma_state *mas,
- struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas)
+ struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas,
+ struct ma_state *sib)
{
- return false; /* for now */
+ unsigned char d;
+
+ for (d = 0; d < cp->d_count; d++) {
+ cp->slot[d] = mt_mk_node(cp->dst[d].node, cp->dst[d].mt);
+ cp->pivot[d] = cp->dst[d].max;
+ printk("cp slot %u => %p piv %lx\n", d, cp->slot[d], cp->pivot[d]);
+ }
+ cp->max = cp->dst[cp->d_count - 1].max;
+ printk("cp max is %lx\n", cp->max);
- if (cp->d_count == 1) /* Converged to one node */
+ if (cp->d_count == 1) {
+ /* Converged to one node */
+ printk("CONVERGED\n");
+ if (!cp->min && cp->max == ULONG_MAX) {
+ cp->dst[0].node->parent = ma_parent_ptr(mas_tree_parent(mas));
+ while (!mte_is_root(mas->node))
+ mas_ascend(mas);
+ } else {
+ cp->dst[0].node->parent = mas_mn(mas)->parent;
+ }
return false;
+ } else {
+ printk("\t\t\t\td_count %u\n", cp->d_count);
+ }
+
+ if (sib->end) {
+ if (sib->max < l_wr_mas->mas->min) {
+ *l_wr_mas->mas = *sib;
+ printk("Shift left\n");
+ } else {
+ *r_wr_mas->mas = *sib;
+ printk("Shift Right\n");
+ }
+ }
+ cp->end = cp->d_count - 1;
+ printk("more nodes.. %u\n", cp->end);
+ wr_mas_ascend(l_wr_mas);
+ wr_mas_ascend(r_wr_mas);
+ printk("At %p and %p\n", l_wr_mas->node, r_wr_mas->node);
+ /*
+ * cp->slot[0] should go in l_wr_mas->offset
+ * cp->slot[end] should go in r_wr_mas->offset
+ */
+
+ return true;
}
static void mas_spanning_rebalance_loop(struct ma_state *mas,
struct maple_subtree_state *mast, unsigned char height,
struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas)
{
- struct maple_big_node b_node;
+ struct maple_enode *old_enode;
struct ma_state sib;
struct maple_copy cp;
- MA_STATE(l_mas, mas->tree, mas->index, mas->index);
- MA_STATE(r_mas, mas->tree, mas->index, mas->last);
- MA_STATE(m_mas, mas->tree, mas->index, mas->index);
+ int debug = 0;
/*
* Spanning store is different in that the write is actually from
mt_dump(mas->tree, mt_dump_hex);
+ spanning_leaf_init(&cp, mas, l_wr_mas, r_wr_mas);
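+ /*
+ * Each iteration writes one level: count the data, set up the
+ * destination and source nodes, copy the data, then ascend until
+ * everything converges into a single node.
+ */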
do {
+ printk("\nlmas %p rmas %p\n", l_wr_mas->node, r_wr_mas->node);
+
spanning_data_calc(&cp, mas, l_wr_mas, r_wr_mas, &sib);
spanning_split_dest_setup(&cp, mas, l_wr_mas->type);
spanning_split_src_setup(&cp, mas, l_wr_mas, r_wr_mas, &sib);
spanning_data_write(&cp, mas);
- } while (spanning_ascend(&cp, mas, l_wr_mas, r_wr_mas));
-
- memset(&b_node, 0, sizeof(struct maple_big_node));
- /* Copy l_mas and store the value in b_node. */
- mas_store_b_node(l_wr_mas, &b_node, mast->orig_l->end);
- printk("big node end %u\n", b_node.b_end);
-
-
- /* Copy r_mas into b_node if there is anything to copy. */
- if (mast->orig_r->max > mast->orig_r->last) {
- mas_mab_cp(mast->orig_r, mast->orig_r->offset,
- mast->orig_r->end, &b_node, b_node.b_end + 1);
- printk("big node end %u\n", b_node.b_end);
- } else {
- b_node.b_end++;
- printk("big node ++ end %u\n", b_node.b_end);
- }
-
-
- /* Stop spanning searches by searching for just index. */
- mast->orig_l->index = mast->orig_l->last = mas->index;
-
- mast->bn = &b_node;
- /* Combine l_mas and r_mas and split them up evenly again. */
-
- /*
- * The tree needs to be sibd and leaves need to be kept at the same level.
- * Rebalancing is done by use of the ``struct maple_topiary``.
- */
- mast->l = &l_mas;
- mast->m = &m_mas;
- mast->r = &r_mas;
- l_mas.status = r_mas.status = m_mas.status = ma_none;
-
- /* Check if this is not root and has sufficient data. */
- if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
- unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) {
-#if 0
- if (cp.data_size >= mt_min_slots[l_wr_mas->type]) {
- printk("data size is %u vs %u\n", cp.data_size, mast->bn->b_end);
- printk("Writing %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
- mt_dump(mas->tree, mt_dump_hex);
- BUG_ON(1);
- }
-#endif
- mast_spanning_rebalance(mast);
-#if 0
- printk("big node end sib %u\n", b_node.b_end);
- if (mast->orig_l->node == sib.node)
- printk("left\n");
- else if (mast->orig_r->node == sib.node)
- printk("right (%p)\n", sib.node);
- else
- BUG_ON(1);
- printk("end %u vs %u\n", mast->bn->b_end, cp.data_size);
-#endif
- }
-#if 0
- if (mast->bn->b_end != cp.data_size) {
- mt_dump(mas->tree, mt_dump_hex);
- printk("Writing %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
- fflush(stdout);
- BUG_ON(1);
- }
-
- printk("END \n\n\n");
-#endif
-
#if 1
+ if (debug < 2)
{
unsigned long min = cp.min;
- printk("Count is %u\n", cp.d_count);
+ printk("\n\nCount is %u\n", cp.d_count);
for (int i = 0; i < cp.d_count; i++) {
- printk("dump %p %lu - %lu\n", cp.dst[i].node, min, cp.dst[i].max);
+ printk("dump %p %lx - %lx\n", cp.dst[i].node, min, cp.dst[i].max);
mt_dump_node(mas->tree, mt_mk_node(cp.dst[i].node, cp.dst[i].mt),
min, cp.dst[i].max, height, mt_dump_hex);
min = cp.dst[i].max + 1;
}
- printk ("VS\n");
}
#endif
- mas_spanning_rebalance_loop(mas, mast, height);
+ printk("NEXT LEVEL %d\n", debug++);
+ BUG_ON(debug > 4);
+ } while (spanning_ascend(&cp, mas, l_wr_mas, r_wr_mas, &sib));
+
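+ /* Install the new subtree in place of the old node and rewalk the range */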
+ old_enode = mas->node;
+ mas->node = cp.slot[0];
+ printk("Replace %p with %p\n", old_enode, mas->node);
+ mas_wmb_replace(mas, old_enode, height);
+ mtree_range_walk(mas);
}
/*
* mas_rebalance() - Rebalance a given node.