unsigned int req = mas_alloc_req(mas);
/* nothing or a request pending. */
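+	/* Debug aid: dump the tree when the node allocations run out. */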
+ if (!total) {
+ printk("Ran out of nodes\n");
+ mt_dump(mas->tree, mt_dump_hex);
+ }
if (WARN_ON(!total))
return NULL;
end = ma_data_end(node, mt, pivots, mas->max);
for (offset = mas->offset; offset <= end; offset++) {
entry = mas_slot_locked(mas, slots, offset);
+ printk("entry %p at offset %u\n", entry, offset);
if (mte_parent(entry) == node) {
*child = *mas;
mas->offset = offset + 1;
+ printk("d_start = %u on %u\n", d_start, d_mt);
d_slots = ma_slots(dst, d_mt) + d_start;
d_pivots = ma_pivots(dst, d_mt) + d_start;
s_slots = ma_slots(src, s_mt) + start;
s_pivots = ma_pivots(src, s_mt) + start;
memcpy(d_slots, s_slots, size * sizeof(void __rcu*));
if (!ma_is_leaf(d_mt) && s_mt == maple_copy) {
struct maple_enode *edst = mt_mk_node(dst, d_mt);
{
unsigned char end = 0;
+ printk("write %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+ printk("l %p[%u] r %p[%u]\n", l_wr_mas->node, l_wr_mas->mas->offset - 1,
+ r_wr_mas->node, r_wr_mas->mas->offset + 1);
/* Create entries to insert including split entries to left and right */
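+	/* The store begins inside an existing range; keep the old entry for the part below mas->index. */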
if (l_wr_mas->r_min < mas->index) {
cp->slot[0] = l_wr_mas->content;
/* Add 1 every time for the 0th element */
cp->data = l_wr_mas->mas->offset;
+ printk("%d %d\n", __LINE__, cp->data);
cp->data += cp->end + 1;
+ printk("%d %d\n", __LINE__, cp->data);
/* Data from the right node (offset + 1 through end, inclusive) */
cp->data += r_wr_mas->mas->end - r_wr_mas->mas->offset;
+ printk("%d %d\n", __LINE__, cp->data);
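+	/* Not enough data to meet the minimum occupancy and not spanning the whole tree: pull in a sibling as an extra source. */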
if (((l_wr_mas->mas->min != 0) || (r_wr_mas->mas->max != ULONG_MAX)) &&
(cp->data <= mt_min_slots[l_wr_mas->type])) {
mas_spanning_move(l_wr_mas, r_wr_mas, sib);
cp->data += sib->end + 1;
+ printk("%d %d\n", __LINE__, cp->data);
} else {
sib->end = 0;
}
void spanning_split_dest_setup(struct maple_copy *cp, struct ma_state *mas,
enum maple_type mt)
{
- cp->d_count = 0;
- /* Calc split here; cp->data is not 0 indexed */
- if (cp->data < mt_slots[mt]) {
+ /* Data is 1-indexed: every src has +1 added. */
+
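+ /* All of the data fits in a single node (e.g. 14 entries into a 16-slot node). */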
+ if (cp->data <= mt_slots[mt]) {
cp->split = cp->data;
cp->d_count = 1;
- } else if (cp->data < mt_slots[mt] * 2 - 1) {
- cp->split = (cp->data + 1) / 2;
- cp->d_count = 2;
- } else {
- cp->split = (cp->data + 2) / 3;
- cp->d_count = 3;
+ goto node_setup;
}
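+ /* Default to an even two-way split; adjusted below if the data will not fit or a leaf would end in NULL. */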
+ cp->split = cp->data / 2;
+ cp->d_count = 2;
+ if (cp->data < mt_slots[mt] * 2) {
+ unsigned char off;
+ unsigned char s;
+
+ if (!ma_is_leaf(mt))
+ goto node_setup;
+
+ /*
+ * Leaf nodes are a bit tricky: we cannot assume the data will fit
+ * in two nodes because a node should not end with a NULL entry.
+ */
+ off = cp->split;
+ printk("start %u\n", off);
+ for (s = 0; s < cp->s_count; s++) {
+ unsigned char s_off;
+
+ s_off = cp->src[s].end - cp->src[s].start;
+ printk("%u size %u\n", s, s_off);
+ if (s)
+ off--;
+ if (s_off >= off) {
+ printk("s_off fits\n");
+ break;
+ }
+ off -= s_off;
+ printk("offset %u\n", off);
+ }
+ off += cp->src[s].start;
+ printk("CHECK %p[%u]\n", cp->src[s].node, off);
+ mt_dump(mas->tree, mt_dump_hex);
+ if (ma_slots(cp->src[s].node, cp->src[s].mt)[off]) {
+ printk("src %u slot %u is not NULL\n", s, off);
+ goto node_setup;
+ } else {
+ printk("src %u slot %u IS NULL\n", s, off);
+ }
+
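+ /* A NULL at the split would leave a node ending in NULL; widen the first destination by one slot. */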
+ cp->split++;
+ printk("set split to %u\n", cp->split);
+ if (cp->split < mt_slots[mt])
+ goto node_setup;
+
+ }
+ /* No other choice but to 3-way split the data */
+ cp->split = (cp->data + 2) / 3;
+ cp->d_count = 3;
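+ /* e.g. 40 entries => (40 + 2) / 3 = 14 per split across three nodes. */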
+
+node_setup:
+ printk("data %u split %u d_count %u type %u\n", cp->data, cp->split, cp->d_count, mt);
for (int i = 0; i < cp->d_count; i++) {
cp->dst[i].mt = mt;
cp->dst[i].node = ma_mnode_ptr(mas_pop_node(mas));
+ printk("%d type %u ptr %p\n", i, mt, cp->dst[i].node);
}
}
append_node_cp(cp, sib, 0, sib->end);
}
-static inline
+//static inline
void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
{
struct maple_node *dst, *src;
unsigned char split, next_node, size;
unsigned long s_max;
enum maple_type s_mt, d_mt;
- int debug = 0;
+ printk("\n\n%s\n", __func__);
s = d = 0;
/* Readability help */
src = cp->src[s].node;
* size, split, and next_node are 1-indexed, while
* src start/end and dst start/end are 0-indexed.
*/
+ printk("MAX src %u dst %u\n", cp->s_count, cp->d_count);
do {
do {
- debug++;
- BUG_ON(debug > 8);
- size = next_node - data_offset;
+ printk("%u - %u + 1\n", next_node, data_offset);
+ size = next_node - data_offset + 1;
/* Fill the destination */
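+ /* size is capped by the entries left in this source and the room left before this destination's split. */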
- if (src_end - s_offset + 1 < size)
+ printk("src end %u s_off %u size %u\n", src_end, s_offset, size);
+ if (src_end - s_offset + 1 < size) {
size = src_end - s_offset + 1;
+ printk("%d: %u %u %u\n", __LINE__, size, split, dst_offset);
+ }
- if (split - dst_offset + 1 < size)
+ printk("splut %u d_off %u size %u\n", split , dst_offset, size);
+ if (split - dst_offset + 1 < size) {
size = split - dst_offset + 1;
+ printk("%d: %u %u %u\n", __LINE__, size, split, dst_offset);
+ }
+ printk("Size is %u\n", size);
+ printk("src %u dst %u\n", s, d);
+ printk("src node %p to %p\n", src, dst);
node_copy(mas, src, s_offset, size, s_max, s_mt, dst,
dst_offset, d_mt);
data_offset += size;
s_max = cp->src[s].max;
s_mt = cp->src[s].mt;
}
- } while (data_offset < next_node);
+ } while (data_offset <= next_node);
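+ /* Advance the cumulative boundary for the next destination, capping it at the end of the data. */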
next_node *= 2;
if (next_node > data + 1)
next_node = data + 1;
split = cp->split;
- /* Handle null entries */
if (dst_offset <= mt_pivots[d_mt]) {
cp->dst[d].max = ma_pivots(dst, d_mt)[dst_offset - 1];
} else {
- cp->dst[d].max = s_max;
+ cp->dst[d].max = ma_pivots(src, s_mt)[s_offset - 1];
}
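+ /* The last pivot written (or the source pivot when the final slot has no pivot) becomes this destination's maximum. */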
+ /* Handle null entries */
if (cp->dst[d].max != ULONG_MAX &&
!ma_slots(dst, d_mt)[dst_offset - 1]) {
+ printk("%p slot %u is nul!!\n", dst, dst_offset - 1);
+ printk("src is %p[%u]\n", src, s_offset - 1);
+ fflush(stdout);
+ BUG_ON(cp->d_count == 2);
if (s_offset == cp->src[s].start) {
s--;
src = cp->src[s].node;
return;
}
/* Reset local dst */
+ printk("Switch dst at %d\n", cp->d_count);
++d;
dst = cp->dst[d].node;
d_mt = cp->dst[d].mt;
dst_offset = 0;
} while (data_offset <= data);
+ BUG_ON(1);
}
cp->slot[0] = mt_mk_node(cp->dst[0].node, mt);
cp->height++;
}
-
WARN_ON_ONCE(cp->dst[0].node != mte_to_node(cp->slot[0]));
cp->dst[0].node->parent = ma_parent_ptr(mas_tree_parent(mas));
while (!mte_is_root(mas->node))
} else if (l_wr_mas->mas->node == r_wr_mas->mas->node) {
/* Converged, but caused a cascading split. */
if (cp->d_count != 1) {
- mt_dump(mas->tree, mt_dump_hex);
- printk("At %p\n", l_wr_mas->mas->node);
- printk("Writing %lx -%lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+ //mt_dump(mas->tree, mt_dump_hex);
+ //printk("At %p\n", l_wr_mas->mas->node);
+ //printk("Writing %lx -%lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
}
- WARN_ON_ONCE(cp->d_count != 1);
+ //WARN_ON_ONCE(cp->d_count != 1);
//cp->dst[0].node->parent = mas_mn(mas)->parent;
//return false;
}
+ cp->height++;
wr_mas_ascend(l_wr_mas);
wr_mas_ascend(r_wr_mas);
/*
* being stored to the last slot of the left node.
*/
+ cp.height = 1;
spanning_leaf_init(&cp, mas, l_wr_mas, r_wr_mas);
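+	/* Rebuild one level per iteration: gather the sources, set up the destinations, copy the data, then ascend until the spanning range is absorbed. */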
do {
- cp.height++;
spanning_data_calc(&cp, mas, l_wr_mas, r_wr_mas, &sib);
- spanning_split_dest_setup(&cp, mas, l_wr_mas->type);
spanning_split_src_setup(&cp, mas, l_wr_mas, r_wr_mas, &sib);
+ spanning_split_dest_setup(&cp, mas, l_wr_mas->type);
spanning_data_write(&cp, mas);
} while (spanning_ascend(&cp, mas, l_wr_mas, r_wr_mas, &sib));
mas->node = cp.slot[0];
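+	/* Publish the new subtree in place of old_enode and rewalk to refresh the range state. */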
mas_wmb_replace(mas, old_enode, cp.height);
mtree_range_walk(mas);
+ printk("\n\n");
}
/*
*/
mas = wr_mas->mas;
trace_ma_op(__func__, mas);
+ //mt_dump(mas->tree, mt_dump_hex);
+ //printk ("%p %s %lx - %lx => %p\n", mas->tree, __func__, mas->index, mas->last, wr_mas->entry);
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);