[maple_leaf_64] = ULONG_MAX,
[maple_range_64] = ULONG_MAX,
[maple_arange_64] = ULONG_MAX,
+ [maple_copy] = ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif
[maple_leaf_64] = MAPLE_RANGE64_SLOTS,
[maple_range_64] = MAPLE_RANGE64_SLOTS,
[maple_arange_64] = MAPLE_ARANGE64_SLOTS,
+ [maple_copy] = 1,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]
[maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
[maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
[maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
+ [maple_copy] = 0,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
[maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
+ [maple_copy] = 1, /* Should never be used */
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
case maple_range_64:
case maple_leaf_64:
return node->mr64.pivot;
+ case maple_copy:
case maple_dense:
return NULL;
}
switch (type) {
case maple_arange_64:
return node->ma64.gap;
+ case maple_copy:
+ return node->cp.gap;
case maple_range_64:
case maple_leaf_64:
case maple_dense:
case maple_arange_64:
node->ma64.pivot[piv] = val;
break;
+ case maple_copy:
case maple_dense:
break;
}
return mn->mr64.slot;
case maple_dense:
return mn->slot;
+ case maple_copy:
+ return mn->cp.slot;
}
return NULL;
*/
static void mas_node_count(struct ma_state *mas, int count)
{
- return mas_node_count_gfp(mas, count, GFP_NOWAIT);
+ return mas_node_count_gfp(mas, count * 2, GFP_NOWAIT);
}
/*
mast->l->offset += end;
}
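+/*
+ * mas_spanning_move() - Find the nearest neighbour of the spanning range at
+ * the same level, favouring the node to the right.
+ * @mast: The maple subtree state
+ * @nneighbour: The maple state to set to the neighbour that is found
+ *
+ * Ascend from the original left and right states until a sibling exists, then
+ * descend back down to the starting depth and record the neighbour's end.
+ *
+ * Return: true if the neighbour is to the right of the range, false if it is
+ * to the left.
+ */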
+static inline
+bool mas_spanning_move(struct maple_subtree_state *mast,
+ struct ma_state *nneighbour)
+{
+ struct ma_state r_tmp = *mast->orig_r;
+ struct ma_state l_tmp = *mast->orig_l;
+ unsigned char depth = 0;
+
+ do {
+ mas_ascend(&r_tmp);
+ mas_ascend(&l_tmp);
+ depth++;
+ if (r_tmp.offset < mas_data_end(&r_tmp)) {
+ r_tmp.offset++;
+ mas_descend(&r_tmp);
+ r_tmp.offset = 0;
+ while (--depth)
+ mas_descend(&r_tmp);
+
+ r_tmp.end = mas_data_end(&r_tmp);
+ *nneighbour = r_tmp;
+ return true;
+ } else if (l_tmp.offset) {
+ l_tmp.offset--;
+ do {
+ mas_descend(&l_tmp);
+ l_tmp.offset = mas_data_end(&l_tmp);
+ } while (--depth);
+
+ l_tmp.end = l_tmp.offset;
+ *nneighbour = l_tmp;
+ return false;
+ }
+ } while (!mte_is_root(r_tmp.node));
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
/*
* mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
* the node to the right. Checking the nodes to the right then the left at each
return NULL;
}
+
+/*
+ * node_copy() - Direct node to node copy.
+ * @src: The source node; @size slots are copied starting at offset @start
+ * @s_max: The pivot to use for the last slot copied
+ * @s_mt: The source node type
+ * @dst: The destination node, copied into starting at offset @d_start
+ * @d_mt: The destination node type
+ *
+ * Return: The number of pivots copied from @src.
+ */
+static inline
+unsigned char node_copy(struct maple_node *src, unsigned char start,
+ unsigned char size, unsigned long s_max, enum maple_type s_mt,
+ struct maple_node *dst, unsigned char d_start, enum maple_type d_mt)
+{
+ void __rcu **s_slots;
+ void __rcu **d_slots;
+ unsigned long *s_pivots, *d_pivots;
+ unsigned long *s_gaps, *d_gaps;
+ bool set_last_piv = true;
+
+ s_slots = ma_slots(src, s_mt) + start;
+ d_slots = ma_slots(dst, d_mt) + d_start;
+ s_pivots = ma_pivots(src, s_mt) + start;
+ d_pivots = ma_pivots(dst, d_mt) + d_start;
+
+ printk("slot %p piv %p\n", s_slots, s_pivots);
+ printk("d slot %p piv %p\n", d_slots, d_pivots);
+ printk("\t\t\t\t\t\tcp %p[%u-%u] => %p[%u-%u]\n", src, start, start + size - 1,
+ dst, d_start, d_start + size - 1);
+ memcpy(d_slots, s_slots, size * sizeof(void *));
+
+ d_gaps = ma_gaps(dst, d_mt);
+ if (d_gaps) {
+ s_gaps = ma_gaps(src, s_mt) + start;
+ d_gaps += d_start;
+ printk("CP GAPS??\n");
+ memcpy(d_gaps, s_gaps, size * sizeof(unsigned long));
+ }
+
+ if (d_start + size >= mt_slots[d_mt]) {
+ set_last_piv = false;
+ size--;
+ }
+
+ if (start + size >= mt_slots[s_mt]) {
+ set_last_piv = false;
+ size--;
+ d_pivots[size] = s_max;
+ }
+
+ if (size)
+ memcpy(d_pivots, s_pivots, size * sizeof(unsigned long));
+
+ if (set_last_piv)
+ d_pivots[size] = s_max;
+
+ return size;
+}
+
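+/*
+ * node_finalise() - Zero out the unused area of a node and set the metadata.
+ * @node: The maple node
+ * @mt: The maple node type
+ * @end: The first unused offset
+ */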
+static inline
+void node_finalise(struct maple_node *node, enum maple_type mt, unsigned char end)
+{
+ unsigned char max_end = mt_slots[mt];
+ unsigned char size;
+ unsigned long *gaps;
+
+ if (end >= max_end - 1)
+ return;
+
+ size = max_end - end;
+ memset(ma_slots(node, mt) + end, 0, size * sizeof(void *));
+
+ gaps = ma_gaps(node, mt);
+ if (gaps)
+ memset(gaps + end, 0, size * sizeof(unsigned long));
+
+ if (--size) {
+ ma_set_meta(node, mt, 0, end - 1);
+ if (--size)
+ memset(ma_pivots(node, mt) + end, 0, size * sizeof(unsigned long));
+ }
+}
+
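+/*
+ * append_node_cp() - Append a node to the source list of a maple_copy.
+ * @cp: The maple_copy state
+ * @mas: The maple state of the node to append
+ * @start: The first offset to copy from the node
+ * @end: The last offset to copy from the node
+ */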
+static inline void append_node_cp(struct maple_copy *cp,
+ struct ma_state *mas, unsigned char start, unsigned char end)
+{
+ unsigned char count = cp->s_count;
+
+ cp->src[count].node = mas_mn(mas);
+ cp->src[count].mt = mte_node_type(mas->node);
+ cp->src[count].max = mas->max;
+ cp->src[count].start = start;
+ cp->src[count].end = end;
+ cp->s_count++;
+}
+
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format);
+
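+/*
+ * spanning_data_write() - Copy the ordered source nodes into the destination
+ * nodes, splitting the data at the boundaries recorded in @cp.
+ * @cp: The maple_copy state with the source and destination lists populated
+ * @mas: The maple state of the store operation
+ */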
+static void spanning_data_write(struct maple_copy *cp, struct ma_state *mas)
+{
+ struct maple_node *dst, *src;
+ size_t s, d;
+ unsigned char dst_offset;
+ unsigned char data, data_offset;
+ unsigned char src_end, s_offset;
+ unsigned char split, next_node, size;
+ unsigned long s_max;
+ enum maple_type s_mt, d_mt;
+
+ s = d = 0;
+ /* Readability help */
+ src = cp->src[s].node;
+ dst = cp->dst[d].node;
+ s_offset = cp->src[s].start;
+ src_end = cp->src[s].end;
+ split = cp->split;
+ s_max = cp->src[s].max;
+ s_mt = cp->src[s].mt;
+ d_mt = cp->dst[d].mt;
+ data = cp->data;
+
+ dst_offset = 0;
+ data_offset = 0;
+ next_node = split;
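+ /* Fill each destination up to the next split from the ordered sources */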
+ do {
+ do {
+ size = next_node - data_offset;
+ printk("try to use size %d\n", size);
+ /* Fill the destination */
+ //printk("%d: %u %u %u\n", __LINE__, size, next_node, data_offset);
+
+ if (src_end - s_offset < size) {
+ printk("size too big for src end: %u %u\n", src_end, s_offset);
+ size = src_end - s_offset;
+ }
+ //printk("%d: %u\n", __LINE__, size);
+
+ printk("split %u dst_offset %u size %u\n", split, dst_offset, size);
+ if (split - dst_offset < size) {
+ size = split - dst_offset;
+ }
+ printk("%d: size %u\n", __LINE__, size);
+
+ size++;
+ node_copy(src, s_offset, size, s_max, s_mt,
+ dst, dst_offset, d_mt);
+
+ data_offset += size;
+ dst_offset += size;
+ s_offset += size;
+
+ if (s_offset > src_end) {
+ printk("\t\tnext src %d / %d\n", s +1, cp->s_count);
+ /* This source is exhausted */
+ s++;
+ if (s >= cp->s_count) {
+ node_finalise(dst, d_mt, dst_offset);
+ return;
+ }
+ /* Reset local src */
+ src = cp->src[s].node;
+ s_offset = cp->src[s].start;
+ src_end = cp->src[s].end;
+ s_max = cp->src[s].max;
+ s_mt = cp->src[s].mt;
+ }
+ printk("\toffset is %u next node is %u\n", data_offset, next_node);
+ } while (data_offset <= next_node);
+
+ next_node *= 2;
+ if (next_node > data + 1) {
+ printk("next node reduced to data %u", data);
+ next_node = data + 1;
+ }
+
+ split = cp->split;
+ printk("reset split %u\n", split);
+ /*
+ * Handle NULL entries: a node that does not end at ULONG_MAX must not
+ * end with a NULL, so step the source back one entry and widen the
+ * split so the entry is carried into the next destination.
+ */
+ if (s_max != ULONG_MAX && !ma_slots(dst, d_mt)[dst_offset - 1]) {
+ printk("NULL entry end %u and max %lx\n", dst_offset - 1, s_max);
+ //BUG_ON(1);
+ if (s_offset == cp->src[s].start) {
+ src = cp->src[--s].node;
+ src_end = cp->src[s].end;
+ s_max = cp->src[s].max;
+ s_mt = cp->src[s].mt;
+ s_offset = src_end;
+ } else {
+ s_offset--;
+ }
+ /* Set dst max and clear pivot */
+ next_node++;
+ split++;
+ printk("inc split %u\n", split);
+ if (dst_offset >= mt_pivots[d_mt])
+ ma_pivots(dst, d_mt)[dst_offset - 1] = 0;
+ data_offset--;
+ }
+ node_finalise(dst, d_mt, dst_offset);
+ mt_dump_node(mas->tree, mt_mk_node(dst, d_mt), 0, ULONG_MAX, 1, mt_dump_hex);
+ if (d >= cp->d_count) {
+ WARN_ON(data_offset < data);
+ return;
+ }
+ printk("\t\tnext dst\n");
+ /* Reset local dst */
+ dst = cp->dst[++d].node;
+ d_mt = cp->dst[d].mt;
+ dst_offset = 0;
+ } while (data_offset <= data);
+}
+
static void mas_spanning_rebalance_loop(struct ma_state *mas,
struct maple_subtree_state *mast, unsigned char count)
{
struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
struct maple_enode *old_enode;
+ bool debug = true;
/*
* Each level of the tree is examined and balanced, pushing data to the left or
* right, or rebalancing against left or right nodes is employed to avoid
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
new_height++;
+ if (debug) {
+ for (int i = 0; i < 3; i++) {
+ struct maple_enode *dst = NULL;
+ struct ma_state *state;
+
+ if (i == 0) {
+ dst = left;
+ state = mast->l;
+ } else if (i == 1) {
+ dst = middle;
+ state = mast->m;
+ } else {
+ state = mast->r;
+ dst = right;
+ }
+ if (!dst)
+ continue;
+
+ mt_dump_node(mas->tree, dst, state->min, state->max, count, mt_dump_hex);
+ }
+ debug = false;
+ }
/*
* Copy data from next level in the tree to mast->bn from next
* iteration
struct maple_subtree_state *mast, unsigned char height,
struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas)
{
- unsigned char data_size;
struct maple_big_node b_node;
+ struct ma_state sib;
+ struct maple_copy cp;
+ enum maple_type mt;
MA_STATE(l_mas, mas->tree, mas->index, mas->index);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
MA_STATE(m_mas, mas->tree, mas->index, mas->index);
- data_size = mast->orig_l->offset;
- if (l_wr_mas->r_min < mas->index)
- data_size++;
+ /*
+ * Spanning store is different in that the write actually covers
+ * l_mas->index to r_mas->last - 1. The -1 is needed because last
+ * was incremented to reach the next node when NULL entries are
+ * stored in the last slot of the left node.
+ */
+
+ mt_dump(mas->tree, mt_dump_hex);
+ /* data from left + new entry */
+ cp.data = mast->orig_l->offset + 1;
+ printk("data size is off %u + 1\n", mast->orig_l->offset);
+ printk("write is now %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+ /* new entry will overwrite one part of left */
+ if (l_wr_mas->r_min == mas->index) {
+ printk("min doesn't split, subtrack one\n");
+ cp.data--;
+ } else {
+ printk("min splits start %lx vs %lx\n", l_wr_mas->r_min, mas->index);
+ }
+
+ printk("%p data + 1 = %u\n", mast->orig_l->node, cp.data);
+
+ /* Data from right (offset to end inclusive, so +1), plus 1 for splitting */
+ cp.data += mast->orig_r->end - mast->orig_r->offset + 2;
+ printk("end %u - off %u + 1\n", mast->orig_r->end, mast->orig_r->offset);
+ /* new entry splits the insert location */
+ printk("end piv %lx vs last %lx\n", r_wr_mas->r_max, mas->last);
+ if (r_wr_mas->r_max == mas->last) {
+ printk("cp.data--\n");
+ cp.data--;
+ }
+
+ printk("%p data = %u\n", mast->orig_r->node, cp.data);
+
+ mt = l_wr_mas->type;
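+ /*
+ * Not enough data for the minimum node occupancy and this will not
+ * become the new root: pull in a neighbouring node to rebalance
+ * against.
+ */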
+ if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
+ (cp.data < mt_min_slots[mt])) {
+ mas_spanning_move(mast, &sib);
+ cp.data += sib.end + 1;
+ printk("%p data = %u\n", sib.node, cp.data);
+ } else {
+ sib.end = 0;
+ }
+
+ cp.d_count = 0;
+ /*
+ * Calculate the split and the number of destination nodes: one node
+ * if the data fits, three if there is more than two nodes worth of
+ * data, otherwise two.
+ */
+ if (cp.data < mt_slots[mt]) {
+ cp.split = cp.data;
+ cp.dst[cp.d_count].mt = mt;
+ cp.dst[cp.d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+ /* New root */
+ } else if (cp.data > mt_slots[mt] * 2) {
+ cp.split = cp.data / 3;
+ cp.dst[cp.d_count].mt = mt;
+ cp.dst[cp.d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+ cp.dst[cp.d_count].mt = mt;
+ cp.dst[cp.d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+ cp.dst[cp.d_count].mt = mt;
+ cp.dst[cp.d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+ /* New root */
+ } else {
+ cp.split = (cp.data + 1) / 2;
+ cp.dst[cp.d_count].mt = mt;
+ cp.dst[cp.d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+ cp.dst[cp.d_count].mt = mt;
+ cp.dst[cp.d_count++].node = ma_mnode_ptr(mas_pop_node(mas));
+ }
+ printk("split = %u data %u\n", cp.split, cp.data);
+
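+ /*
+ * Assemble the ordered source list: a lower sibling (if one was
+ * found), the unchanged start of the left node, the new entry
+ * itself, the unchanged end of the right node, then an upper
+ * sibling (if one was found).
+ */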
+ cp.s_count = 0;
+ if (sib.end && sib.max < l_wr_mas->mas->min)
+ append_node_cp(&cp, &sib, 0, sib.end);
+
+ /* Copy left 0 - offset */
+ if (l_wr_mas->mas->offset || l_wr_mas->r_min < mas->index)
+ append_node_cp(&cp, l_wr_mas->mas, 0, l_wr_mas->mas->offset);
+
+ if (cp.s_count)
+ cp.src[cp.s_count - 1].max = mas->index - 1;
+ /* Insert the new entry itself, overwriting the previous pivot */
+ cp.src[cp.s_count].node = ma_mnode_ptr(&cp);
+ cp.src[cp.s_count].max = mas->last;
+ cp.src[cp.s_count].mt = maple_copy;
+ cp.src[cp.s_count].end = 0;
+ cp.src[cp.s_count].start = 0;
+ cp.slot[0] = l_wr_mas->entry;
+ cp.max = mas->index;
+ if (l_wr_mas->entry)
+ cp.gap[0] = 0;
+ else
+ cp.gap[0] = mas->last - mas->index;
+ cp.s_count++;
+
+ /* Copy right either from offset or offset + 1 depending on r_max */
+ if (r_wr_mas->mas->end != r_wr_mas->mas->offset || r_wr_mas->r_max > mas->last)
+ append_node_cp(&cp, r_wr_mas->mas, r_wr_mas->mas->offset,
+ r_wr_mas->mas->end);
+
+ if (sib.end) {
+ if (sib.min > r_wr_mas->mas->max) {
+ append_node_cp(&cp, &sib, 0, sib.end);
+ /* Copy sibling, if necessary */
+ /* set sib to right */
+ //r_wr_mas->mas = sib;
+ } else {
+ /* set sib to left*/
+ //l_wr_mas->mas = sib;
+ }
+ }
+
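+ /* Copy the assembled sources into the new destination nodes */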
+ spanning_data_write(&cp, mas);
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
mas_store_b_node(l_wr_mas, &b_node, mast->orig_l->end);
- WARN_ON(data_size != b_node.b_end);
-
+ printk("big node end %u\n", b_node.b_end);
- data_size += mast->orig_r->end - mast->orig_r->offset;
- if (r_wr_mas->r_max > mas->last)
- data_size++;
/* Copy r_mas into b_node if there is anything to copy. */
if (mast->orig_r->max > mast->orig_r->last) {
mas_mab_cp(mast->orig_r, mast->orig_r->offset,
mast->orig_r->end, &b_node, b_node.b_end + 1);
- printk("%u Copy %u - %u\n", mast->orig_r->offset, mast->orig_r->end);
- WARN_ON(data_size != b_node.b_end);
+ printk("big node end %u\n", b_node.b_end);
} else {
- data_size++;
b_node.b_end++;
- WARN_ON(data_size != b_node.b_end);
+ printk("big node ++ end %u\n", b_node.b_end);
}
- if (data_size != b_node.b_end) {
- printk("data size = %u\n", mast->orig_l->offset);
- printk("r_end %u off %u\n", mast->orig_r->end, mast->orig_r->offset);
- printk("r max %lx vs %lx\n", r_wr_mas->r_max, mast->orig_r->last);
- printk("data %u != end %u\n", data_size, b_node.b_end);
- printk("data is %p\n", l_wr_mas->entry);
- }
- WARN_ON(data_size != b_node.b_end);
+
/* Stop spanning searches by searching for just index. */
mast->orig_l->index = mast->orig_l->last = mas->index;
/* Combine l_mas and r_mas and split them up evenly again. */
/*
* The tree needs to be rebalanced and leaves need to be kept at the same level.
* Rebalancing is done by use of the ``struct maple_topiary``.
*/
mast->l = &l_mas;
/* Check if this is not root and has sufficient data. */
if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
- unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
+ unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) {
+#if 0
+ if (cp.data_size >= mt_min_slots[l_wr_mas->type]) {
+ printk("data size is %u vs %u\n", cp.data_size, mast->bn->b_end);
+ printk("Writing %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+ mt_dump(mas->tree, mt_dump_hex);
+ BUG_ON(1);
+ }
+#endif
mast_spanning_rebalance(mast);
+#if 0
+ printk("big node end sib %u\n", b_node.b_end);
+ if (mast->orig_l->node == sib.node)
+ printk("left\n");
+ else if (mast->orig_r->node == sib.node)
+ printk("right (%p)\n", sib.node);
+ else
+ BUG_ON(1);
+ printk("end %u vs %u\n", mast->bn->b_end, cp.data_size);
+#endif
+ }
+#if 0
+ if (mast->bn->b_end != cp.data_size) {
+ mt_dump(mas->tree, mt_dump_hex);
+ printk("Writing %lx - %lx => %p\n", mas->index, mas->last, l_wr_mas->entry);
+ fflush(stdout);
+ BUG_ON(1);
+ }
+
+ printk("END \n\n\n");
+#endif
+#if 1
+ {
+ unsigned long min = mas->min;
+ printk("Count is %u\n", cp.d_count);
+ for (int i = 0; i < cp.d_count; i++) {
+ printk("dump %p %lu - %lu\n", cp.dst[i].node, min, cp.dst[i].max);
+ mt_dump_node(mas->tree, mt_mk_node(cp.dst[i].node, cp.dst[i].mt),
+ min, cp.dst[i].max, height, mt_dump_hex);
+ min = cp.dst[i].max + 1;
+ }
+ printk ("VS\n");
+ }
+#endif
mas_spanning_rebalance_loop(mas, mast, height);
}
/*
if (!r_wr_mas->content) {
if (r_mas->last < r_wr_mas->r_max)
- r_mas->last = r_wr_mas->r_max;
- r_mas->offset++;
- } else if ((r_mas->last == r_wr_mas->r_max) &&
- (r_mas->last < r_mas->max) &&
- !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
- r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
- r_wr_mas->type, r_mas->offset + 1);
- r_mas->offset++;
- r_wr_mas->r_max = r_mas->last;
+ r_mas->offset++;
+ r_mas->last = r_wr_mas->r_max;
+ printk("Extend end pivot\n");
}
}
*/
mas = wr_mas->mas;
trace_ma_op(__func__, mas);
+ printk("init write is %lx - %lx => %p\n", mas->index, mas->last, wr_mas->entry);
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);
r_mas.index = r_mas.last;
mas_wr_walk_index(&r_wr_mas);
r_mas.last = r_mas.index = mas->last;
- mt_dump(mas->tree, mt_dump_hex);
printk("r_mas is at %p\n", r_mas.node);
BUG_ON(!r_mas.end);