};
#define mt_min_slot_cnt(x) mt_min_slots[mte_node_type(x)]
-#define MAPLE_BIG_NODE_SLOTS (MAPLE_NODE_SLOTS * 2 + 1)
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS* 2 + 1)
struct maple_big_node {
struct maple_pnode *parent;
int allocated = mas_get_alloc_cnt(mas);
BUG_ON(count > 127);
-
if (allocated < count) {
mas_set_alloc_req(mas, count - allocated);
mas_node_node(mas, GFP_NOWAIT | __GFP_NOWARN);
next = mas_get_rcu_slot(mas, mas_get_slot(mas));
+ if (unlikely(mt_is_empty(next)))
+ return false;
+
// Traverse.
mas->depth++;
mas->max = *range_max;
mas->min = *range_min;
- if (unlikely(mt_is_empty(next)))
- return false;
mas->node = next;
mas_set_slot(mas, 0);
return entry;
}
+/*
+ * mas_bfs_preorder() - Step @mas to the next node of a breadth-first walk.
+ * @mas: the maple state
+ *
+ * Handles two cases: starting the walk (MAS_START resolves to the root via
+ * mas_start()) and terminating on a single-node tree (a leaf root sets
+ * MAS_NONE).
+ *
+ * NOTE(review): no sibling/next-level advance is performed after those two
+ * cases, so a caller looping on this will not make progress past the root
+ * of a multi-level tree — presumably an unfinished stub; confirm intent.
+ */
+static inline void mas_bfs_preorder(struct ma_state *mas)
+{
+
+	if (mas_is_start(mas)) {
+		mas_start(mas);
+		return;
+	}
+
+	/* A tree that is only a leaf root has nothing further to visit. */
+	if (mte_is_leaf(mas->node) && mte_is_root(mas->node)) {
+		mas->node = MAS_NONE;
+		return;
+	}
+
+}
+/* mas limits not adjusted */
+/*
+ * mas_dfs_preorder() - Step @mas to the next node of a depth-first
+ * preorder walk.  The context below shows an empty stub that simply
+ * returns; the real traversal body is outside this hunk (or not yet
+ * written).
+ */
static inline void mas_dfs_preorder(struct ma_state *mas)
{
	return;
}
+/*
+ * mas_dup_node() - Copy the node at @oldmas into a node preallocated on @mas.
+ * @oldmas: maple state of the source tree, positioned at the node to copy
+ * @mas: maple state of the destination tree (supplies the allocation)
+ *
+ * The entire struct maple_node is memcpy'd, so slots, pivots and the
+ * parent pointer all still reference the old tree until the caller fixes
+ * them up (see mas_dup_children()/mas_adopt_children()).
+ *
+ * Return: the new encoded node, carrying the source node's type.
+ */
+static inline struct maple_enode *mas_dup_node(struct ma_state *oldmas,
+		struct ma_state *mas)
+{
+	struct maple_enode *enode= mt_mk_node(ma_mnode_ptr(mas_next_alloc(mas)),
+			mte_node_type(oldmas->node));
+	memcpy(mte_to_node(enode), mas_mn(oldmas), sizeof(struct maple_node));
+	return enode;
+}
+/*
+ * mas_dup_alloc() - Preallocate up to 127 nodes towards @node_cnt.
+ * @mas: destination maple state; receives the allocated nodes
+ * @node_cnt: remaining node estimate; decremented by the amount requested
+ *
+ * The request is capped at 127 per call because the allocator's request
+ * count is bounded (BUG_ON(count > 127) in the allocation path).  On
+ * failure @mas is left in an error state and the caller is expected to
+ * retry via mas_nomem().
+ */
+static inline void mas_dup_alloc(struct ma_state *mas, int *node_cnt)
+{
+
+	int alloc_cnt = min(*node_cnt, 127);
+	/* Allocate nodes for new tree. Maximum will be 16 ** height */
+	*node_cnt -= alloc_cnt;
+	mas_node_cnt(mas, alloc_cnt);
+	if (mas_is_err(mas))
+		return;
+}
+
+/*
+ * mas_dup_children() - Replace every child slot of the node at @mas with
+ * a copy taken from the preallocated nodes.
+ * @mas: destination maple state, positioned at an already-copied node
+ * @node_cnt: remaining node estimate, passed on to the allocator on refill
+ *
+ * If the preallocation runs short, mas->span_enode records the node to
+ * resume from and more nodes are requested; on allocation failure @mas is
+ * left in an error state with span_enode still set so _mas_dup_tree() can
+ * retry this node after mas_nomem() succeeds.
+ *
+ * NOTE(review): the copy loop stops at the first empty slot — assumes the
+ * occupied slots are contiguous from 0; confirm that holds for every node
+ * type this is called on.
+ */
+static inline void mas_dup_children(struct ma_state *mas, int *node_cnt)
+{
+	struct maple_node *child;
+	struct maple_enode *oldchild, *echild;
+	unsigned char slot, end;
+	int allocated = mas_get_alloc_cnt(mas);
+
+	end = mas_data_end(mas) + 1; /* number of occupied slots */
+	if (allocated < end) {
+		/* Not enough preallocated nodes; remember where to resume. */
+		mas->span_enode = mas->node;
+		*node_cnt += allocated;
+		mas_dup_alloc(mas, node_cnt);
+		if (mas_is_err(mas))
+			return;
+		mas->span_enode = NULL;
+	}
+
+
+	for(slot = 0; slot < end; slot++) {
+		oldchild = mas_get_rcu_slot(mas, slot);
+		if (!oldchild)
+			return;
+
+		child = mas_next_alloc(mas);
+		echild = mt_mk_node(child, mte_node_type(oldchild));
+		/* Publish the new child, then fill it from the old one. */
+		mte_set_rcu_slot(mas->node, slot, echild);
+		memcpy(child, mte_to_node(oldchild), sizeof(struct maple_node));
+	}
+}
+
+/*
+ * mas_dup_advance() - Advance both tree walks one DFS-preorder step.
+ * @oldmas: maple state of the source tree
+ * @mas: maple state of the destination tree
+ *
+ * Both states are stepped in lockstep so @mas stays positioned at the
+ * copy of the node @oldmas points at.
+ *
+ * Return: true while the source walk has more nodes, false once it is
+ * exhausted (@oldmas is MAS_NONE).
+ */
+static inline bool mas_dup_advance(struct ma_state *oldmas,
+		struct ma_state *mas)
+{
+	mas_dfs_preorder(oldmas);
+	mas_dfs_preorder(mas);
+
+	if (mas_is_none(oldmas))
+		return false;
+
+	return true;
+}
+
+/*
+ * mas_dup_tree_start() - Copy the root of @oldmas's tree into @mas's tree
+ * and preallocate nodes for the rest of the duplication.
+ * @oldmas: maple state of the source tree
+ * @mas: maple state of the (empty) destination tree
+ * @node_cnt: out: estimated number of nodes still to allocate
+ *
+ * The estimate takes the root's occupancy and assumes every deeper level
+ * is completely full (16 slots per node).  On allocation failure @mas is
+ * left in an error state for the caller to retry; a retry that already
+ * holds allocated nodes jumps straight to the copy.
+ *
+ * NOTE(review): the 1 << (4 * (ma_height - 2)) scaling is undefined for
+ * trees of height < 2 (negative shift count) — confirm this path is only
+ * reached when xa_is_node(ma_root), i.e. the tree has internal nodes.
+ */
+static inline void mas_dup_tree_start(struct ma_state *oldmas,
+		struct ma_state *mas,
+		int *node_cnt)
+{
+	if (mas->alloc)
+		goto allocated;
+
+	// Get first node (root)
+	if (mas_is_start(oldmas)) // get the root.
+		mas_dfs_preorder(oldmas);
+
+	*node_cnt = mas_data_end(oldmas) + 1;
+	*node_cnt *= 1 << (4 * (oldmas->tree->ma_height - 2)); // assume all other levels full.
+	(*node_cnt)++;
+
+	mas_dup_alloc(mas, node_cnt);
+	if (mas_is_err(mas))
+		return;
+
+allocated:
+
+	/* Copy the root, mark it as the root parent, then publish it. */
+	mas->node = mas_dup_node(oldmas, mas);
+	mas_dup_children(mas, node_cnt);
+	mas_adopt_children(mas, mas->node);
+	mte_to_node(mas->node)->parent = ma_parent_ptr(
+			((unsigned long)mas->tree | MA_ROOT_PARENT));
+	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+}
+/*
+ * _mas_dup_tree() - One attempt at duplicating @oldmas's tree into @mas's.
+ * @oldmas: maple state of the source tree
+ * @mas: maple state of the destination tree
+ * @node_cnt: running estimate of nodes still needed
+ *
+ * Walks both trees in DFS preorder, copying each internal node's children
+ * and re-parenting them onto the new copies.  On allocation failure it
+ * returns with @mas in an error state (and mas->span_enode recording the
+ * node to resume at) so mas_dup_tree() can retry after mas_nomem().
+ */
+void _mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas, int *node_cnt)
+{
+	/* A single-value (non-node) root is copied directly. */
+	if (!xa_is_node(oldmas->tree->ma_root)) {
+		mas->tree->ma_root = oldmas->tree->ma_root;
+		return;
+	}
+
+	/* Resuming after an allocation failure mid-copy. */
+	if (mas->span_enode) {
+		mas->node = mas->span_enode;
+		mas->span_enode= NULL;
+		goto retry_dup_children;
+	}
+
+	if (mas_is_start(mas))
+		mas_dup_tree_start(oldmas, mas, node_cnt);
+
+	if (mas_is_err(mas))
+		return;
+
+	/* A leaf root was fully handled by mas_dup_tree_start(). */
+	if (mte_is_leaf(oldmas->node))
+		return;
+
+	while(mas_dup_advance(oldmas, mas)) {
+		if (mte_is_leaf(oldmas->node))
+			continue;
+
+retry_dup_children:
+		mas_dup_children(mas, node_cnt);
+		if (mas_is_err(mas))
+			return;
+
+		mas_adopt_children(mas, mas->node);
+	}
+	/* Release any unused preallocated nodes. */
+	mas_nomem(mas, GFP_KERNEL);
+
+}
+/*
+ * mas_dup_tree() - Duplicate the tree of @oldmas into the tree of @mas.
+ * @oldmas: maple state of the source tree
+ * @mas: maple state of the (empty) destination tree
+ *
+ * Retries the copy whenever an allocation fails, allocating with
+ * GFP_KERNEL in mas_nomem().  Only the destination tree's lock is taken;
+ * NOTE(review): the source tree is read without its lock — presumably the
+ * caller guarantees it is stable for the duration; confirm.
+ */
+void mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas)
+{
+	int node_cnt = 0;
+
+	if (!oldmas->tree->ma_root) // empty tree.
+		return;
+
+	mtree_lock(mas->tree);
+retry:
+	_mas_dup_tree(oldmas, mas, &node_cnt);
+	if (mas_nomem(mas, GFP_KERNEL))
+		goto retry;
+
+	mtree_unlock(mas->tree);
+}
+
/* Interface */
void __init maple_tree_init(void)
{
MT_BUG_ON(mt, count != 72);
}
+/*
+ * check_dup_tree() - Test mas_dup_tree(): fill @oldmt with the sequence
+ * 0..max, duplicate it into a fresh tree, and verify every index loads
+ * the same value in the copy (and that max + 1 is absent).
+ * @oldmt: the source tree to populate and duplicate (freed by the caller)
+ */
+static void check_dup_tree(struct maple_tree *oldmt)
+{
+	unsigned long i, max = 10000;
+
+	MA_STATE(oldmas, oldmt, 0, 0);
+	DEFINE_MTREE(mt);
+	MA_STATE(mas, &mt, 0, 0);
+
+	check_seq(oldmt, max, false);
+	mas_dup_tree(&oldmas, &mas);
+	for (i = 0; i <= max; i++)
+		check_index_load(&mt, i);
+
+	check_load(&mt, max + 1, NULL);
+	mtree_destroy(&mt);
+}
static DEFINE_MTREE(tree);
static int maple_tree_seed(void)
check_dfs_preorder(&tree);
mtree_destroy(&tree);
+ mtree_init(&tree, 0);
+ check_dup_tree(&tree);
+ mtree_destroy(&tree);
+
/* Test ranges (store and insert) */
mtree_init(&tree, 0);
check_ranges(&tree);