printk("%d\n", __LINE__);
new_data = end + 1 + cp->data;
+ printk("new_data %u vs %u\n", new_data, space);
if (new_data > space)
return false;
/*
 * The calculation should be <=, but we want to keep an extra space in nodes
 * to reduce jitter.
 */
+
+#if 1
+
+ if (new_data < space)
+ return true;
+
+ return false;
+#else
+ /* Leave some room to reduce jitter. */
if (new_data < space - 1)
return true;
- /* Leave some room to reduce jitter. */
if (!ma_is_leaf(type))
return false;
-
/*
 * new_data == space, an exact fit can be foiled by a NULL at the split
 * location.
 */
if (new_data < space)
return true;
return false;
+#endif
}
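For readers outside the maple tree code, a minimal standalone sketch of the capacity check above may help; node_data_fits, new_data and space are illustrative names, not the kernel's API. The idea is that incoming data must stay strictly below the node's capacity: one slot is deliberately kept free to reduce jitter, and an exact fit is rejected because a NULL at the split location could foil it.

#include <stdbool.h>

/* Illustrative stand-in for the check above; not the kernel's code. */
static bool node_data_fits(unsigned int new_data, unsigned int space)
{
	if (new_data > space)		/* more data than the node can hold */
		return false;

	/* Keep one slot free to reduce jitter; reject an exact fit too. */
	return new_data < space;
}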
static void push_data_sib(struct maple_copy *cp, struct ma_state *mas,
if (ma_slots(cp->src[s].node, cp->src[s].mt)[off])
goto node_setup;
cp->split++;
if (cp->split < mt_slots[mt])
goto node_setup;
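As a rough orientation for the hunk above, here is a self-contained sketch of the idea, with hypothetical names (adjust_split, slots and slot_count are not the kernel's identifiers): when the proposed split offset lands on an empty (NULL) slot, it is nudged forward, but only while it stays within the node's slot count.

/* Hypothetical sketch only; names do not come from the kernel sources. */
static unsigned char adjust_split(void **slots, unsigned char split,
				  unsigned char slot_count)
{
	if (slots[split])		/* occupied slot: split here */
		return split;

	if (split + 1 < slot_count)	/* empty slot: move the split forward */
		return split + 1;

	return split;			/* no room left; caller must handle it */
}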
/**
 * mas_wr_split() - Expand one node into two
* @wr_mas: The write maple state
*/
-static noinline_for_kasan void mas_wr_split(struct ma_wr_state *wr_mas)
+static void mas_wr_split(struct ma_wr_state *wr_mas)
{
-#if 1
struct maple_enode *old_enode;
struct ma_state *mas;
struct maple_copy cp;
-
-#else
- struct maple_big_node b_node;
-
- trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
- memset(&b_node, 0, sizeof(struct maple_big_node));
- mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- WARN_ON_ONCE(wr_mas->mas->store_type != wr_split_store);
- return mas_split(wr_mas->mas, &b_node);
-#endif
}
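Callers never invoke mas_wr_split() directly; node splits happen inside the write path when a store overflows a leaf. As a hedged illustration of how that is exercised from the public API (the module boilerplate, the split_demo names and the choice of 256 ranges are only for demonstration), a test module could store enough non-overlapping ranges to force splits:

#include <linux/maple_tree.h>
#include <linux/module.h>
#include <linux/xarray.h>

static DEFINE_MTREE(split_demo_tree);

static int __init split_demo_init(void)
{
	unsigned long i;
	int ret;

	/* Enough disjoint ranges to overflow leaf nodes and force splits. */
	for (i = 0; i < 256; i++) {
		ret = mtree_store_range(&split_demo_tree, i * 10, i * 10 + 5,
					xa_mk_value(i), GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void __exit split_demo_exit(void)
{
	mtree_destroy(&split_demo_tree);
}

module_init(split_demo_init);
module_exit(split_demo_exit);
MODULE_LICENSE("GPL");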
/*