if (!total) {
printk("Ran out of nodes\n");
mt_dump(mas->tree, mt_dump_hex);
+ fflush(stdout);
+ sleep(1);
}
if (WARN_ON(!total))
return NULL;
+ printk("pop\n");
if (total == 1) {
/* single allocation in this ma_state */
mas_ascend(sib);
end = mas_data_end(sib);
/* Prioritize move right to pull data left */
- if (sib->offset < end)
+ if (sib->offset < end) {
sib->offset++;
- else
+ printk("sib right\n");
+ } else {
sib->offset--;
+ printk("sib left\n");
+ }
mas_descend(sib);
sib->end = mas_data_end(sib);
+ printk("%p limits %lx - %lx\n", mas_mn(sib), sib->min, sib->max);
}
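+ /*
+ * For example, with a parent whose data ends at offset 4, a sib at
+ * offset 1 takes the offset++ path and pulls data left from the right
+ * sibling; only a sib already at the parent's end steps left instead.
+ */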
static inline
d_pivots = ma_pivots(dst, d_mt) + d_start;
s_slots = ma_slots(src, s_mt) + start;
s_pivots = ma_pivots(src, s_mt) + start;
+ printk("cp %p[%u]+%u to %p[%u] + %u\n", src, start, size,
+ dst, d_start, size);
memcpy(d_slots, s_slots, size * sizeof(void __rcu*));
if (!ma_is_leaf(d_mt) && s_mt == maple_copy) {
struct maple_enode *edst = mt_mk_node(dst, d_mt);
cp->gap[0] = 0;
else
cp->gap[0] = mas->index - l_wr_mas->r_min;
+ printk("start piv\n");
}
cp->slot[end] = l_wr_mas->entry;
cp->pivot[end] = mas->last;
+ printk("new data (%p) %lx\n", l_wr_mas->entry, mas->last);
if (l_wr_mas->entry)
cp->gap[end] = 0;
else
cp->gap[end] = mas->last - mas->index + 1;
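+ /*
+ * e.g. storing NULL over index 0x5 to last 0x9 records a gap of
+ * 0x9 - 0x5 + 1 = 5 units; a non-NULL entry records no gap.
+ */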
+ printk("end_piv %lx last %lx\n", r_wr_mas->end_piv, mas->last);
if (r_wr_mas->end_piv > mas->last) {
end++;
cp->slot[end] = r_wr_mas->slots[r_wr_mas->offset_end];
if (cp->slot[end])
cp->gap[end] = 0;
else
cp->gap[end] = r_wr_mas->end_piv - mas->last + 1;
+ printk("end data\n");
}
cp->min = l_wr_mas->r_min;
space = 2 * mt_slots[type];
end = sib->end;
+ printk("%d\n", __LINE__);
new_data = end + 1 + cp->data;
if (new_data > space)
return false;
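+ /*
+ * e.g. assuming mt_slots[type] == 16, space is 32: if the existing
+ * entries (end + 1) plus the new data exceed two full nodes, pushing
+ * to a sibling cannot absorb the write and the attempt is abandoned.
+ */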
+ printk("end %u cp->data %u cp->end %u\n", end, cp->data, cp->end);
+ printk("offset %u\n", mas->offset);
+ printk("%d\n", __LINE__);
/*
* This is off by one by design. The extra space is left to reduce
* jitter in operations that add then remove two entries at the same
* location.
*/
if (sib->max < mas->min) {
+ printk("Trying left\n");
/*
* Pushing left requires room, so the last slot must not be
* occupied.
*/
new_data = mt_slots[type] - end - 2;
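+ /*
+ * e.g. mt_slots[type] == 16 with end == 10 gives new_data == 4: the
+ * boundary slot to probe once one slot is kept free (the deliberate
+ * off-by-one above) and the count is converted to a 0-based index.
+ */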
if (mas->offset == new_data ||
mas->offset + cp->end == new_data) {
+ printk("use cp ");
new_data -= mas->offset;
slots = cp->slot;
} else {
+ printk("use mas ");
slots = ma_slots(mas_mn(mas), type);
}
+ printk("Check slot %u\n", new_data);
} else {
unsigned char split;
/*
* The split must fall either in the new data or at the end of the
* existing mas node.
*/
+ printk("Trying right\n");
+ printk("data is %u end is %u\n", cp->data, end);
/* new_data is a size; -1 converts it to the last index */
split = (new_data - 1) / 2;
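+ /*
+ * e.g. new_data == 9 entries yields split == 4, so slots 0-4 stay on
+ * the left and the remaining four entries go right.
+ */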
+ printk("split will be %u\n", split);
if (split >= mas->offset &&
split <= mas->offset + cp->end + 1) {
new_data = split - mas->offset;
slots = cp->slot;
+ printk("Check new data %u\n", new_data);
} else {
if (mas->offset < split) {
+ printk("offset is smaller than expected split\n");
+ printk("new_data %u offset %u end %u\n", new_data, mas->offset, cp->end);
/*
* The last slot in the previous node is the one
* before the overflow.
*/
new_data = mas->end - (cp->data - 1 - split);
}
slots = ma_slots(mas_mn(mas), type);
+ printk("Check mas %u\n", new_data);
}
}
+ fflush(stdout);
+ printk("Check %u\n", new_data);
if (slots[new_data])
return true;
sib->end = mas_data_end(sib);
if (data_fits(sib, mas, cp)) {
/* Push left */
+ printk("push left\n");
return;
}
mas_descend(sib);
sib->end = mas_data_end(sib);
if (data_fits(sib, mas, cp)) {
+ printk("push right (%p and %p)\n", mas_mn(mas), mas_mn(sib));
/* Push right */
return;
}
no_push:
+ printk("no pushing\n");
sib->end = 0;
}
sib->end = 0;
if (cp->data >= mt_slots[wr_mas->type]) {
+ printk("Push data\n");
push_data_sib(cp, wr_mas->mas, sib);
if (sib->end) {
cp->data += sib->end + 1;
if (((wr_mas->mas->min != 0) || (wr_mas->mas->max != ULONG_MAX)) &&
(cp->data <= mt_min_slots[wr_mas->type])) {
+ printk("Rebalance sibling\n");
rebalance_sib(wr_mas->mas, sib);
cp->data += sib->end + 1;
}
{
/* Data is 1-indexed; every src has +1 added. */
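+ /*
+ * e.g. a src spanning offsets 2 through 5 contributes
+ * (5 - 2) + 1 = 4 entries to cp->data.
+ */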
+ printk("data is %u vs slots %u\n", cp->data, mt_slots[mt]);
if (cp->data <= mt_slots[mt]) {
cp->split = cp->data - 1;
cp->d_count = 1;
if (!ma_is_leaf(mt))
goto node_setup;
+ printk("Check leaf\n");
#if 1
/*
* Check if the data can fit due to the NULL limitation on node ends.
*/
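+ /*
+ * i.e. a node should not end in a NULL entry, so even when the count
+ * fits in one node the slot landing at the split must be occupied;
+ * the scan below locates that slot across the sources and checks it.
+ */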
off = cp->split;
+ printk("off start %u\n", off);
for (s = 0; s < cp->s_count; s++) {
unsigned char s_off;
s_off = cp->src[s].end - cp->src[s].start;
+ printk("src end %u start %u\n", cp->src[s].end, cp->src[s].start);
if (s_off >= off)
break;
s_off++;
off -= s_off;
+ printk("off = %u\n", off);
}
off += cp->src[s].start;
+ printk("Check src %u slot %u\n", s, off);
if (ma_slots(cp->src[s].node, cp->src[s].mt)[off])
goto node_setup;
else
size = dst_size;
+ printk("cp %p[%u] + %u ", src, s_offset, size);
+ printk("to %p[%u]\n", dst, dst_offset);
d_max = node_copy(mas, src, s_offset, size, s_max, s_mt,
dst, dst_offset, d_mt);
+ printk("d_max returned %lx\n", d_max);
dst_offset += size;
s_offset += size;
if (s_offset > src_end) {
unsigned long max, struct ma_state *mas)
{
unsigned char d;
+ unsigned long slot_min = min;
for (d = 0; d < cp->d_count; d++) {
struct maple_node *mn = cp->dst[d].node;
enum maple_type mt = cp->dst[d].mt;
- unsigned long max = cp->dst[d].max;
+ unsigned long slot_max = cp->dst[d].max;
cp->slot[d] = mt_mk_node(mn, mt);
- cp->pivot[d] = max;
+ cp->pivot[d] = slot_max;
if (mt_is_alloc(mas->tree)) {
if (ma_is_leaf(mt)) {
- cp->gap[d] = ma_leaf_max_gap(mn, mt, min, max,
- ma_pivots(mn, mt),
- ma_slots(mn,mt));
+ cp->gap[d] = ma_leaf_max_gap(mn, mt, slot_min,
+ slot_max, ma_pivots(mn, mt),
+ ma_slots(mn,mt));
} else {
unsigned long *gaps = ma_gaps(mn, mt);
}
}
}
- min = max + 1;
+ slot_min = slot_max + 1;
}
cp->end = cp->d_count - 1;
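+ /*
+ * e.g. two destinations with maximums 0x7ff and 0xfff become
+ * slot[0]/pivot[0] = 0x7ff and slot[1]/pivot[1] = 0xfff, with
+ * slot_min advancing to 0x800 before the second gap calculation.
+ */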
mas = wr_mas->mas;
if (!sib->end) {
+ printk("no sibling, must be last run\n");
min = mas->min;
r = mas;
} else if (sib->min > mas->max) { /* Move right succeeded */
min = mas->min;
+ printk("mas min %lx\n", min);
r = sib;
} else {
min = sib->min;
+ printk("sib min %lx\n", min);
r = mas;
}
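+ /*
+ * i.e. after a successful move right the combined range runs from
+ * mas->min to sib->max (r is sib); after a move left it runs from
+ * sib->min to mas->max (r stays mas).
+ */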
cp_dst_to_slots(cp, min, r->max, mas);
if (!cp->min && cp->max == ULONG_MAX) {
+ printk("new root\n");
+ printk("mas is at %p\n", mas_mn(mas));
rebalance_new_root(cp, mas);
return false;
}
wr_mas->offset_end++;
else
wr_mas->mas->offset--;
+ printk("next\n\n");
return true;
}
/* root needs more than 2 entries to be sufficient + 1 */
if (mas->end > 2)
wr_mas->sufficient_height = 1;
- } else if (mas->end > mt_min_slots[wr_mas->type] + 1)
+ } else if (mas->end > mt_min_slots[wr_mas->type] + 1) {
wr_mas->sufficient_height = mas->depth + 1;
+ }
mas_wr_walk_traverse(wr_mas);
}
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
mas = wr_mas->mas;
+ mt_dump(mas->tree, mt_dump_hex);
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);
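+ /*
+ * A store covering 0 through ULONG_MAX overwrites every entry, so the
+ * write presumably collapses to building a fresh root rather than
+ * splitting.
+ */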
/*
min = mas->min; /* push right, or normal split */
max = mas->max;
if (sib->end) {
+ printk("sib was used\n");
if (sib->max < mas->min) {
min = sib->min; /* push left */
+ printk("pushed left, min %lx\n", min);
} else {
max = sib->max; /* push right */
+ printk("pushed right, max %lx\n", max);
}
}
+ printk("min is %lx max is %lx\n", min, max);
cp_dst_to_slots(cp, min, max, mas);
+ printk("min is %lx max is %lx\n", min, max);
if (!cp->min && cp->max == ULONG_MAX) {
+ printk("rebalance new root\n");
rebalance_new_root(cp, mas);
return false;
}
wr_mas->offset_end++;
}
+ printk("ascend\n\n");
return true;
}
if (cp->data <= mt_slots[wr_mas->type]) {
MAS_BUG_ON(wr_mas->mas, cp->height == 1);
sib->end = 0;
+ printk("Split fits in %p\n", mas_mn(wr_mas->mas));
return;
}
+ printk("Try to push\n");
push_data_sib(cp, wr_mas->mas, sib);
if (sib->end) {
cp->data += sib->end + 1;
+ printk("%d\n", __LINE__);
}
}
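+ /*
+ * i.e. a sibling is only recruited once the new data overflows a
+ * single node; a successful push then grows cp->data by the
+ * sibling's sib->end + 1 entries.
+ */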
mas = wr_mas->mas;
cp_leaf_init(&cp, mas, wr_mas, wr_mas);
do {
+ printk("height %u\n", cp.height);
split_data_calc(&cp, wr_mas, &sib);
multi_src_setup(&cp, wr_mas, wr_mas, &sib);
multi_dst_setup(&cp, mas, wr_mas->type);
mtree_range_walk(mas);
//mt_dump(wr_mas->mas->tree, mt_dump_hex);
+ printk("Done\n\n");
mt_validate(wr_mas->mas->tree);
#else
mas = wr_mas->mas;
cp_leaf_init(&cp, mas, wr_mas, wr_mas);
do {
+ printk("height %u\n", cp.height);
rebalance_data_calc(&cp, wr_mas, &sib);
multi_src_setup(&cp, wr_mas, wr_mas, &sib);
multi_dst_setup(&cp, mas, wr_mas->type);
old_enode = mas->node;
mas->node = cp.slot[0];
+ printk("Replace %p with %p\n", old_enode, mas->node);
mas_wmb_replace(mas, old_enode, cp.height);
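+ /*
+ * The copied subtree is swapped in before mas_wmb_replace() retires
+ * the old nodes, presumably so concurrent readers keep a consistent
+ * view throughout the replacement.
+ */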
mtree_range_walk(mas);
}
struct ma_state *mas = wr_mas->mas;
unsigned char new_end = mas_wr_new_end(wr_mas);
+ printk("Write %lx - %lx => %p\n", mas->index, mas->last, wr_mas->entry);
switch (mas->store_type) {
case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
mas_wr_prealloc_setup(wr_mas);
wr_mas->mas->store_type = mas_wr_store_type(wr_mas);
+ printk("store type %u\n", wr_mas->mas->store_type);
request = mas_prealloc_calc(wr_mas, entry);
if (!request)
return;
mas_wr_prealloc_setup(&wr_mas);
mas->store_type = mas_wr_store_type(&wr_mas);
+ printk("Sufficient is %u\n", wr_mas.sufficient_height);
request = mas_prealloc_calc(&wr_mas, entry);
if (!request)
goto set_flag;