unsigned char max_end = mt_slots[mt];
unsigned char size;
unsigned long *gaps;
+ unsigned char gap_slot;
printk("Finalise %p end %u max end %u\n", node, end, max_end);
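+ /* Fetch the gap array up front; the metadata gap scan below needs it too */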
+ gaps = ma_gaps(node, mt);
if (end < max_end - 1) {
size = max_end - end;
memset(ma_slots(node, mt) + end, 0, size * sizeof(void*));
- gaps = ma_gaps(node, mt);
if (gaps)
memset(gaps + end, 0, size * sizeof(unsigned long));
- memset(ma_pivots(node, mt) + end, 0, size * sizeof(unsigned long));
+ /* The pivot array holds one entry fewer than the slot array */
+ memset(ma_pivots(node, mt) + end, 0, (size - 1) * sizeof(unsigned long));
}
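+ /*
+  * Non-leaf nodes with a gap array record the slot of the largest gap
+  * in the node metadata; scan the gaps to find that slot.
+  */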
+ gap_slot = 0;
+ if (gaps && !ma_is_leaf(mt)) {
+ unsigned long max_gap;
+
+ max_gap = 0;
+ for (int i = 0; i < end; i++) {
+ if (gaps[i] > max_gap) {
+ gap_slot = i;
+ max_gap = gaps[i];
+ }
+ }
+ }
+
- if (mt == maple_arange_64)
- ma_set_meta(node, mt, 0, end - 1);
- else if (end <= max_end - 1)
- ma_set_meta(node, mt, 0, end - 1);
+ if (mt == maple_arange_64 || end <= max_end - 1)
+ ma_set_meta(node, mt, gap_slot, end - 1);
}
static inline void append_node_cp(struct maple_copy *cp,
printk("reset split %u\n", split);
/* Handle null entries */
if (s_max != ULONG_MAX && !ma_slots(dst, d_mt)[dst_offset - 1]) {
- printk("NULL entry end %u and max %lx\n", dst_offset - 1, s_max);
+ printk("NULL entry end %u and max %lx\n", dst_offset - 1, cp->dst[d].max);
//BUG_ON(1);
if (s_offset == cp->src[s].start) {
src = cp->src[--s].node;
}
printk("\t\tnext dst\n");
/* Reset local dst */
- dst = cp->dst[++d].node;
+ ++d;
+ dst = cp->dst[d].node;
d_mt = cp->dst[d].mt;
dst_offset = 0;
} while (data_offset <= data);
struct ma_state *sib)
{
unsigned char d;
+ unsigned long min = l_wr_mas->mas->min;
+
for (d = 0; d < cp->d_count; d++) {
- cp->slot[d] = mt_mk_node(cp->dst[d].node, cp->dst[d].mt);
- cp->pivot[d] = cp->dst[d].max;
- printk("cp slot %u => %p piv %lx\n", d, cp->slot[d], cp->pivot[d]);
+ struct maple_node *mn = cp->dst[d].node;
+ enum maple_type mt = cp->dst[d].mt;
+ unsigned long max = cp->dst[d].max;
+
+ cp->slot[d] = mt_mk_node(mn, mt);
+ cp->pivot[d] = max;
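+ /* For leaves, precompute the node's largest gap from its pivots and slots */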
+ if (ma_is_leaf(mt)) {
+ cp->gap[d] = ma_leaf_max_gap(mn, mt, min, max,
+ ma_pivots(mn, mt), ma_slots(mn, mt));
+ min = max + 1;
+ }
}
if (sib->end) {
printk("\nlmas %p rmas %p\n", l_wr_mas->node, r_wr_mas->node);
cp.height++;
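+ /* Debug: trace cp.min between each phase of the spanning store */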
+ printk("%d: cp min %lx\n", __LINE__, cp.min);
spanning_data_calc(&cp, mas, l_wr_mas, r_wr_mas, &sib);
+ printk("%d: cp min %lx\n", __LINE__, cp.min);
spanning_split_dest_setup(&cp, mas, l_wr_mas->type);
+ printk("%d: cp min %lx\n", __LINE__, cp.min);
spanning_split_src_setup(&cp, mas, l_wr_mas, r_wr_mas, &sib);
+ printk("%d: cp min %lx\n", __LINE__, cp.min);
spanning_data_write(&cp, mas);
+ printk("%d: cp min %lx\n", __LINE__, cp.min);
#if 0
if (debug < 2)
{
#endif
printk("NEXT LEVEL %d\n", debug++);
BUG_ON(debug > 4);
+ printk("%d: cp min %lx\n", __LINE__, cp.min);
} while (spanning_ascend(&cp, mas, l_wr_mas, r_wr_mas, &sib));
old_enode = mas->node;
for (i = 0; i <= max; i++)
mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
mtree_lock(mt);
- mt_dump(mt, mt_dump_dec);
+ mt_dump(mt, mt_dump_hex);
/* Store a null across a boundary that ends in a null */
mas_set(&mas, 49835);
MT_BUG_ON(mt, mas_walk(&mas) == NULL);
MT_BUG_ON(mt, mas.end != mas.offset);
MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL);
mas_set_range(&mas, 49835, mas.last - 1);
+ printk("Storing %lx - %lx\n", 49835UL, mas.last - 1);
mas_store_gfp(&mas, NULL, GFP_KERNEL);
mt_validate(mt);