}
static inline
-void rebalance_dest_setup(struct maple_copy *cp, struct ma_state *mas,
+void multi_dst_setup(struct maple_copy *cp, struct ma_state *mas,
enum maple_type mt)
{
/* Data is 1-indexed; every src has +1 added. */
cp->data = cp->d_count;
cp->s_count = 0;
- rebalance_dest_setup(cp, mas, mt);
+ multi_dst_setup(cp, mas, mt);
spanning_init_cp_src(cp);
node_copy(mas, cp->src[0].node, 0, cp->data, cp->max, maple_copy,
cp->dst[0].node, 0, mt);
do {
spanning_data_calc(&cp, l_wr_mas, r_wr_mas, &sib);
multi_src_setup(&cp, l_wr_mas, r_wr_mas, &sib);
- rebalance_dest_setup(&cp, mas, l_wr_mas->type);
+ multi_dst_setup(&cp, mas, l_wr_mas->type);
cp_data_write(&cp, mas);
} while (spanning_ascend(&cp, mas, l_wr_mas, r_wr_mas, &sib));
*/
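
The commented loop above is the heart of the new spanning-store path: each pass sizes the data held at the current level, lays out the source nodes, lays out the destination nodes, copies the data, and then ascends one level until there is no parent left to rewrite. As a rough, self-contained illustration of that control flow only (plain user-space C; the names, types and fixed height are stand-ins invented for this sketch, not the kernel's maple tree code):

/* Toy model of the per-level copy loop; none of this is maple tree code. */
#include <stdbool.h>
#include <stdio.h>

struct copy_state {
	int level;	/* current level, 0 == leaves */
	int height;	/* pretend tree height */
};

/*
 * Stand-ins for the spanning_data_calc(), multi_src_setup(),
 * multi_dst_setup() and cp_data_write() steps named in the patch.
 */
static void data_calc(struct copy_state *cp)  { printf("level %d: size the data\n", cp->level); }
static void src_setup(struct copy_state *cp)  { printf("level %d: lay out sources\n", cp->level); }
static void dst_setup(struct copy_state *cp)  { printf("level %d: lay out destinations\n", cp->level); }
static void data_write(struct copy_state *cp) { printf("level %d: copy the data\n", cp->level); }

/* Stand-in for spanning_ascend(): true while a parent still needs rewriting. */
static bool ascend(struct copy_state *cp)
{
	cp->level++;
	return cp->level < cp->height;
}

int main(void)
{
	struct copy_state cp = { .level = 0, .height = 3 };

	do {
		data_calc(&cp);
		src_setup(&cp);
		dst_setup(&cp);
		data_write(&cp);
	} while (ascend(&cp));

	return 0;
}
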
static noinline_for_kasan void mas_wr_split(struct ma_wr_state *wr_mas)
{
+#if 1
+ struct maple_enode *old_enode;
+ struct ma_state *mas;
+ struct maple_copy cp;
+ struct ma_state sib;
+
+ trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+
+#else
struct maple_big_node b_node;
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
WARN_ON_ONCE(wr_mas->mas->store_type != wr_split_store);
return mas_split(wr_mas->mas, &b_node);
+#endif
}
/*
do {
rebalance_data_calc(&cp, wr_mas, &sib);
multi_src_setup(&cp, wr_mas, wr_mas, &sib);
- rebalance_dest_setup(&cp, mas, wr_mas->type);
+ multi_dst_setup(&cp, mas, wr_mas->type);
cp_data_write(&cp, mas);
} while (rebalance_ascend(&cp, wr_mas, &sib));
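
The rebalance loop follows the same skeleton, but the single write state supplies both boundaries (wr_mas is passed twice to multi_src_setup()) and the extra data comes from a sibling located by rebalance_data_calc(). Conceptually this is the familiar B-tree move of pooling an underfull node with a neighbour and redistributing the entries; the toy below models only that pooling step (plain user-space C with made-up sizes, not the maple tree's actual redistribution policy):

/*
 * Toy redistribution of entries between an underfull node and a sibling.
 * This models the general idea only, not the kernel's data structures.
 */
#include <stdio.h>
#include <string.h>

#define SLOTS 8		/* pretend per-node capacity */

struct toy_node {
	int entries[SLOTS];
	int count;
};

/* Pool both nodes' entries, then split them as evenly as possible. */
static void redistribute(struct toy_node *node, struct toy_node *sib)
{
	int pool[2 * SLOTS];
	int total = 0, i;

	for (i = 0; i < node->count; i++)
		pool[total++] = node->entries[i];
	for (i = 0; i < sib->count; i++)
		pool[total++] = sib->entries[i];

	node->count = (total + 1) / 2;
	sib->count = total - node->count;
	memcpy(node->entries, pool, node->count * sizeof(int));
	memcpy(sib->entries, pool + node->count, sib->count * sizeof(int));
}

int main(void)
{
	struct toy_node node = { .entries = { 1 }, .count = 1 };	/* underfull */
	struct toy_node sib = { .entries = { 2, 3, 4, 5, 6 }, .count = 5 };

	redistribute(&node, &sib);
	printf("node has %d entries, sibling has %d\n", node.count, sib.count);
	return 0;
}
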