trace_ma_write(__func__, mas, new_end, wr_mas->entry);
}
+/*
+ * split_ascend() - See if a split operation has to keep walking up the tree
+ * @cp: The maple copy node
+ * @wr_mas: The maple write state
+ * @sib: The maple state of the sibling
+ * @parent: The maple state of the parent
+ *
+ * Return: true if another split operation is needed at the next level, false
+ * otherwise
+ */
+static inline bool split_ascend(struct maple_copy *cp,
+ struct ma_wr_state *wr_mas, struct ma_state *sib,
+ struct ma_state *parent)
+{
+ struct ma_state *mas;
+ unsigned long min, max;
+
+ mas = wr_mas->mas;
+ min = mas->min; /* push right, or normal split */
+ max = mas->max;
+ wr_mas->offset_end = parent->offset;
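+ /* A populated @sib means data will be pushed left or right */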
+ if (sib->end) {
+ if (sib->max < mas->min) {
+ min = sib->min; /* push left */
+ parent->offset--;
+ } else {
+ max = sib->max; /* push right */
+ wr_mas->offset_end++;
+ }
+ }
+
+ cp_dst_to_slots(cp, min, max, mas);
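+ /* Splitting the root finishes the walk with a new root */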
+ if (cp_is_new_root(cp, mas))
+ return false;
+
+ if (cp_converged(cp, mas, sib))
+ return false;
+
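+ /* Not converged; account for the new level and ascend */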
+ cp->height++;
+ copy_tree_location(parent, mas);
+ wr_mas_setup(wr_mas, mas);
+ return true;
+}
+
+/*
+ * split_data() - Calculate the @cp data and populate @sib if the data can be
+ * pushed into a sibling.
+ * @cp: The maple copy node
+ * @wr_mas: The left write maple state
+ * @sib: The maple state of the sibling
+ * @parent: The maple state of the parent
+ *
+ * Note: @cp->data is a size, not a zero-based index. @sib->end may be set to
+ * 0 to indicate the sibling will not be used.
+ */
+static inline void split_data(struct maple_copy *cp,
+ struct ma_wr_state *wr_mas, struct ma_state *sib,
+ struct ma_state *parent)
+{
+ cp_data_calc(cp, wr_mas, wr_mas);
+ if (cp->data <= mt_slots[wr_mas->type]) {
+ sib->end = 0;
+ return;
+ }
+
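+ /* Too much data for one node; try pushing some into a sibling */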
+ push_data_sib(cp, wr_mas->mas, sib, parent);
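+ /* @sib->end is the last offset, so the sibling adds end + 1 slots */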
+ if (sib->end)
+ cp->data += sib->end + 1;
+}
+
/*
* mas_wr_split() - Expand one node into two
* @wr_mas: The write maple state
*/
-static noinline_for_kasan void mas_wr_split(struct ma_wr_state *wr_mas)
+static void mas_wr_split(struct ma_wr_state *wr_mas)
{
- struct maple_big_node b_node;
+ struct maple_enode *old_enode;
+ struct ma_state parent;
+ struct ma_state *mas;
+ struct maple_copy cp;
+ struct ma_state sib;
+
+ mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
+ parent = *mas;
+ cp_leaf_init(&cp, mas, wr_mas, wr_mas);
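+ /* Rebuild each level, walking towards the root until the split converges */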
+ do {
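+ /* Keep @parent one level above the node being rebuilt */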
+ if (!mte_is_root(parent.node)) {
+ mas_ascend(&parent);
+ parent.end = mas_data_end(&parent);
+ }
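+ /* Size the data, set up the sources and destinations, then copy */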
+ split_data(&cp, wr_mas, &sib, &parent);
+ multi_src_setup(&cp, wr_mas, wr_mas, &sib);
+ dst_setup(&cp, mas, wr_mas->type);
+ cp_data_write(&cp, mas);
+ } while (split_ascend(&cp, wr_mas, &sib, &parent));
- trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
- memset(&b_node, 0, sizeof(struct maple_big_node));
- mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- WARN_ON_ONCE(wr_mas->mas->store_type != wr_split_store);
- return mas_split(wr_mas->mas, &b_node);
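+ /* Replace the old node with the new copy and rewalk the tree */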
+ old_enode = mas->node;
+ mas->node = cp.slot[0];
+ mas_wmb_replace(mas, old_enode, cp.height);
+ mtree_range_walk(mas);
}
/*
mt_set_non_kernel(10);
check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
MT_BUG_ON(mt, !mt_height(mt));
+ mt_validate(mt);
mtree_destroy(mt);
/* Create tree of 1-200 */
/* Store 45-168 */
check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
MT_BUG_ON(mt, !mt_height(mt));
+ mt_validate(mt);
mtree_destroy(mt);
check_seq(mt, 30, false);
check_store_range(mt, 6, 18, xa_mk_value(6), 0);
MT_BUG_ON(mt, !mt_height(mt));
+ mt_validate(mt);
mtree_destroy(mt);
/* Overwrite across multiple levels. */
check_load(mt, r[13] + 1, xa_mk_value(r[13] + 1));
check_load(mt, 135, NULL);
check_load(mt, 140, NULL);
+ mt_validate(mt);
mt_set_non_kernel(0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
MT_BUG_ON(mt, mt_height(mt) >= 4);
}
/* Cause a 3 child split all the way up the tree. */
- for (i = 5; i < 215; i += 10)
+ for (i = 5; i < 215; i += 10) {
check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0);
- for (i = 5; i < 65; i += 10)
+ mt_validate(mt);
+ }
+ for (i = 5; i < 65; i += 10) {
check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0);
+ mt_validate(mt);
+ }
MT_BUG_ON(mt, mt_height(mt) >= 4);
- for (i = 5; i < 45; i += 10)
+ for (i = 5; i < 45; i += 10) {
check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0);
+ mt_validate(mt);
+ }
if (!MAPLE_32BIT)
MT_BUG_ON(mt, mt_height(mt) < 4);
mtree_destroy(mt);
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
+ mt_validate(mt);
+ }
+ /* Fill parents and leaves before split. */
+ val = 7660;
+ for (i = 5; i < 490; i += 5) {
+ val += 5;
+ check_store_range(mt, val, val + 1, NULL, 0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
}
+
+ val = 9460;
/* Fill parents and leaves before split. */
- for (i = 5; i < 455; i += 10)
- check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0);
+ for (i = 1; i < 10; i++) {
+ val++;
+ check_store_range(mt, val, val + 1, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
- for (i = 1; i < 16; i++)
- check_store_range(mt, 8185 + i, 8185 + i + 1,
- xa_mk_value(8185+i), 0);
- MT_BUG_ON(mt, mt_height(mt) >= 4);
+ val = 8000;
+ for (i = 1; i < 14; i++) {
+ val++;
+ check_store_range(mt, val, val + 1, xa_mk_value(val), 0);
+ mt_validate(mt);
+ }
+
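+ /* Fill the target node ahead of the triple split below. */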
+ check_store_range(mt, 8081, 8081, xa_mk_value(8081), 0);
+ check_store_range(mt, 8082, 8082, xa_mk_value(8082), 0);
+ check_store_range(mt, 8083, 8083, xa_mk_value(8083), 0);
+ check_store_range(mt, 8084, 8084, xa_mk_value(8084), 0);
+ check_store_range(mt, 8085, 8085, xa_mk_value(8085), 0);
/* triple split across multiple levels. */
- check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0);
+ check_store_range(mt, 8099, 8100, xa_mk_value(1), 0);
+
+ mt_validate(mt);
if (!MAPLE_32BIT)
MT_BUG_ON(mt, mt_height(mt) != 4);
}