unsigned long *pivots;
unsigned long *gaps;
unsigned char offset; /* Current operating offset */
- unsigned char insert;
+ unsigned char insert; /* FIXME: Rename, e.g. to op_off, to distinguish from offset above */
enum maple_type type;
unsigned char end;
bool alloc;
};
+/*
+ * mns_set_end() - Calculate and set the end of a node state
+ * @mns: The maple node state
+ *
+ * Unsafe to call from the reader side.
+ */
+static inline void mns_set_end(struct ma_node_state *mns)
+{
+ unsigned char offset;
+
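+ /* Allocation range nodes always maintain metadata, which holds the end */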
+ if (mns->type == maple_arange_64) {
+ mns->end = ma_meta_end(mns->node, mns->type);
+ return;
+ }
+
+ offset = mt_pivots[mns->type] - 1;
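+ /* A zero in the last pivot means the node is not full; the end is in the metadata */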
+ if (likely(!mns->pivots[offset])) {
+ mns->end = ma_meta_end(mns->node, mns->type);
+ return;
+ }
+
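+ /* The last pivot is in use and holds the node maximum, so it is the end */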
+ if (likely(mns->pivots[offset] == mns->max)) {
+ mns->end = offset;
+ return;
+ }
+
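+ /* Otherwise the node is completely full and the last slot is in use */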
+ mns->end = mt_pivots[mns->type];
+}
+
+/*
+ * mns_node_part_leaf_init() - Initialize what is being inserted, calculate how
+ * many slots will be skipped.
+ * @ma_part: The maple node part
+ * @wr_mas: The write maple state
+ * @src: The source maple node state (existing data)
+ */
static __always_inline
void mns_node_part_leaf_init(struct ma_node_part *ma_part,
struct ma_wr_state *wr_mas, struct ma_node_state *src)
mns->insert = mas->offset;
}
+
+/*
+ * mns_pmns_init() - Initialise a maple node state from its parent
+ * @mns: The maple node state to initialise
+ * @pmns: The maple node state of the parent
+ * @p_off: The offset of @mns within the parent node
+ * @mt: The maple tree
+ */
+static inline
+void mns_pmns_init(struct ma_node_state *mns, struct ma_node_state *pmns,
+ unsigned char p_off, struct maple_tree *mt)
+{
+ mns->enode = mt_slot_locked(mt, pmns->slots, p_off);
+ mns->insert = p_off;
+ _mns_node_init(mns, mte_to_node(mns->enode),
+ mte_node_type(mns->enode));
+}
+
/*
* @src: The maple node state of the source
* @dst: The maple node state of the destination
/*
* There is insufficient data in the node after a store.
+ * Unlike the split variant, which only attempts to rebalance, this rebalance
+ * will succeed.
+ *
+ * Rebalance the leaves, then continue upwards until the parent is sufficient
+ * or the root is reached. If the root is left with a single child, replace
+ * the root with that child.
*/
static void mas_wr_rebalance(struct ma_wr_state *wr_mas)
{
+ struct ma_state *mas = wr_mas->mas;
+ struct ma_node_state src, parent, l_src, r_src;
+ struct ma_node_state left, right;
+ struct ma_node_part ma_part;
+ unsigned char total, split, height;
+
+ trace_ma_op(__func__, mas);
+ height = mas_mt_height(mas);
+ mns_mas_init(&src, mas);
+ mns_node_part_leaf_init(&ma_part, wr_mas, &src);
+ /* Total will lack sibling data until the sibling is known */
+ total = mas->end + ma_part.size - ma_part.skip - 1;
+
+ while (mas->depth) {
+ bool l_store;
+
+ mas_wr_ascend_init(mas, &parent);
+ mas->depth--;
+ mns_set_end(&parent);
+ parent.insert = mas->offset;
+
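+ /* Choose the sibling to rebalance with: the leftmost node uses the right sibling, all others use the left */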
+ if (!parent.insert) {
+ /* Leftmost node: pull data from the right sibling */
+ mns_pmns_init(&r_src, &parent, mas->offset + 1, mas->tree);
+ mns_set_end(&r_src);
+ l_src = src;
+ l_src.end = mas->end;
+ l_src.insert = mas->offset;
+ total += r_src.end;
+ l_store = true;
+ } else {
+ /* Pull data from the left sibling */
+ mns_pmns_init(&l_src, &parent, mas->offset - 1, mas->tree);
+ mns_set_end(&l_src);
+ r_src = src;
+ r_src.end = mas->end;
+ r_src.insert = mas->offset;
+ total += l_src.end;
+ l_store = false;
+ }
+
+ mns_node_init(&left, mas_pop_node(mas), l_src.type);
+ /*
+ * Two possibilities:
+ * 1. keep two nodes if possible and limit ripple
+ * 2. make one node if possible and limit memory use
+ */
+ if ((total > 2 * mt_min_slots[l_src.type]) ||
+ ma_is_root(parent.node)) {
+ struct ma_node_state new_parent;
+ /*
+ * Rebalance between nodes is possible, so the
+ * operation stops early.
+ */
+
+ mns_node_init(&right, mas_pop_node(mas), r_src.type);
+ split = mas_wr_rebalance_calc(total, l_src.type);
+ left.min = l_src.min;
+ mas_wr_rebalance_nodes(&l_src, &r_src, &left, &right,
+ l_store, split, &ma_part,
+ mas->offset, total);
+
+ mns_finalise(&left);
+ mns_finalise(&right);
+ mas_ascend(mas);
+ mas->end = parent.end;
+ mas->offset = parent.insert;
+ mns_node_part_init(&ma_part, &left, &right);
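+ /* The new left and right nodes replace two of the parent's child slots */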
+ ma_part.skip = 2;
+ mas_wr_converged(&parent, &new_parent, &ma_part, mas);
+ src.enode = parent.enode;
+ mas->node = new_parent.enode;
+ mas->depth = height;
+ break;
+ }
+
+ /* Reduce two nodes into one */
+ if (l_store) {
+ if (l_src.insert)
+ mns_cp(&l_src, &left, l_src.insert);
+ mns_insert_part(&ma_part, &left);
+ l_src.offset += ma_part.skip;
+ if (l_src.offset <= l_src.end)
+ mns_cp(&l_src, &left,
+ l_src.end - l_src.offset + 1);
+ mns_cp(&r_src, &left, r_src.end);
+
+ } else {
+ mns_cp(&l_src, &left, l_src.end);
+ if (r_src.insert)
+ mns_cp(&r_src, &left, r_src.insert);
+ mns_insert_part(&ma_part, &left);
+ r_src.offset += ma_part.skip;
+ if (r_src.offset <= r_src.end)
+ mns_cp(&r_src, &left,
+ r_src.end - r_src.offset + 1);
+ }
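+ /* The merged node takes over the left source's position in the parent */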
+ left.node->parent = l_src.node->parent;
+ mns_finalise(&left);
+ if (mte_is_root(parent.enode)) {
+ /* Height reduction */
+ if (mas->depth)
+ mas->depth = --height;
+ else
+ mas->depth = height;
+
+ mas_set_height(mas);
+ break;
+ }
+ }
+
+ mas_wmb_replace(mas, parent.enode);
+ mtree_range_walk(mas);
}
/*