mt_init_flags(mt, 0);
}
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The maple tree
+ *
+ * Clears MAPLE_USE_RCU under the tree lock.  The initial flag test is
+ * done unlocked as a fast path; a concurrent setter may race, but the
+ * final state is still serialized by mtree_lock().
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if ((mt->ma_flags & MAPLE_USE_RCU) == 0)
+ return;
+
+ mtree_lock(mt);
+ mt->ma_flags &= ~MAPLE_USE_RCU;
+ mtree_unlock(mt);
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The maple tree
+ *
+ * Sets MAPLE_USE_RCU under the tree lock.  The unlocked early-exit
+ * check mirrors mt_clear_in_rcu(): it only skips the lock when the
+ * flag is already set.
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt->ma_flags & MAPLE_USE_RCU)
+ return;
+
+ mtree_lock(mt);
+ mt->ma_flags |= MAPLE_USE_RCU;
+ mtree_unlock(mt);
+}
+
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *_mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max,
bool start);
return (mt->ma_flags & MAPLE_HEIGHT_MASK) >> MAPLE_HEIGHT_OFFSET;
}
+/*
+ * mas_in_rcu() - Return true if the tree this state operates on is in
+ * RCU safe mode (MAPLE_USE_RCU set in ma_flags).
+ */
+static inline bool mas_in_rcu(struct ma_state *mas)
+{
+ return mas->tree->ma_flags & MAPLE_USE_RCU;
+}
+
static void mas_set_height(struct ma_state *mas)
{
unsigned int new_flags = mas->tree->ma_flags;
return mas->full_cnt * -1;
return mas->full_cnt;
}
+/*
+ * mas_reuse_node() - Rewrite the contents of @mas->node in place from
+ * the big node instead of allocating a replacement.
+ * @mas: The maple state
+ * @bn: The big node holding the new contents
+ * @end: The previous end of the node being rewritten
+ *
+ * In-place rewriting is not RCU safe, so this is only attempted when
+ * the tree is not in RCU mode.  When the new contents are shorter than
+ * the old (@end > @bn->b_end), the now-unused slots and pivots are
+ * zeroed so stale entries cannot be observed.
+ *
+ * Return: true if the node was reused, false if a new node is required.
+ */
+static inline bool mas_reuse_node(struct ma_state *mas,
+ struct maple_big_node *bn,
+ unsigned char end)
+{
+ int i;
+
+ if (mas_in_rcu(mas))
+ return false; /* Need to be rcu safe. */
+
+ mab_mas_cp(bn, 0, bn->b_end, mas);
+
+ /* Zero out the slots (and pivots) beyond the new end. */
+ if (end > bn->b_end) {
+ for (i = bn->b_end + 1; i < mt_slot_count(mas->node); i++) {
+ mte_set_rcu_slot(mas->node, i, NULL);
+ if (i < mt_pivot_count(mas->node))
+ mte_set_pivot(mas->node, i, 0);
+ }
+ }
+ return true;
+}
static inline int mas_commit_b_node(struct ma_state *mas,
- struct maple_big_node *b_node)
+ struct maple_big_node *b_node,
+ unsigned char end)
{
struct maple_enode *new_node;
return mas_split(mas, b_node);
}
+ if (mas_reuse_node(mas, b_node, end))
+ goto reused_node;
+
mas_node_cnt(mas, 1);
if (mas_is_err(mas))
return 0;
mab_mas_cp(b_node, 0, b_node->b_end, mas);
mas_replace(mas, false);
+reused_node:
mas_update_gap(mas);
return 2;
else if (b_node.b_end < mt_min_slot_cnt(mas->node))
mas_cnt_empty(mas);
- if (!mas_commit_b_node(mas, &b_node))
+ if (!mas_commit_b_node(mas, &b_node, end))
return NULL;
if (ret > 2)
void *root_node;
+ mt_set_in_rcu(mt);
for (int i = 0; i < 4; i++)
erase_check_insert(mt, i);
for (int i = 0; i < 4; i++)
mtree_init(&tree, 0);
check_seq(&tree, 16, false);
mtree_destroy(&tree);
+
mtree_init(&tree, 0);
check_seq(&tree, 1000, true);
mtree_destroy(&tree);
+
mtree_init(&tree, MAPLE_ALLOC_RANGE);
check_rev_seq(&tree, 1000, true);
mtree_destroy(&tree);