return mt_node_type(entry) < maple_range_16;
}
+/* Test the "hole" bit (bit 2) of an encoded entry pointer.
+ * NOTE(review): returns the raw masked value (0 or 4) despite the
+ * enum maple_type return type — confirm the intended return type
+ * (bool?) and the meaning of bit 2 against the node encoding.
+ */
+static inline enum maple_type mt_node_hole(const void *entry)
+{
+ return (unsigned long)entry & 4;
+
+}
static inline bool mt_is_reserved(const void *entry)
{
return ((unsigned long)entry < 4096) && xa_is_internal(entry);
+/* Store @parent in @node->parent with @slot encoded into the low bits
+ * of the pointer (mask/shift depend on the node type).  @node is an
+ * encoded node pointer, hence the mt_to_node() before the store.
+ */
static inline void mt_set_parent(struct maple_node *node,
struct maple_node *parent, short slot)
{
- unsigned int bitmask = mt_slot_mask(node);
- unsigned int slot_shift = mt_slot_shift(node);
+ unsigned long bitmask = mt_slot_mask(node);
+ unsigned long slot_shift = mt_slot_shift(node);
unsigned long val = (unsigned long) parent;
BUG_ON(slot > MAPLE_NODE_SLOTS); // Only 4 bits to use.
val &= ~bitmask; // Remove any old slot number.
val |= (slot << slot_shift); // Set the slot.
- node->parent = (struct maple_node *)val;
+ mt_to_node(node)->parent = (struct maple_node *)val;
}
+/* Extract the slot number encoded in @node's parent pointer. */
static inline unsigned int mt_parent_slot(struct maple_node *node)
{
unsigned int bitmask = mt_slot_mask(node);
unsigned int slot_shift = mt_slot_shift(node);
- unsigned long val = (unsigned long) node->parent;
+ unsigned long val = (unsigned long) mt_to_node(node)->parent;
return (val & bitmask) >> slot_shift;
}
+/* Return @node's parent pointer with the slot/flag bits masked off. */
static inline void *mt_parent(struct maple_node *node)
{
- unsigned int bitmask = mt_slot_mask(node) | 7;
+ unsigned long bitmask = mt_slot_mask(node) | 7;
- return (void *)((unsigned long)(node->parent) & ~bitmask);
+ return (void *)((unsigned long)(mt_to_node(node)->parent) & ~bitmask);
}
+
static inline struct maple_node *ma_get_alloc(const struct ma_state *ms)
{
return (struct maple_node *)((unsigned long)ms->alloc & ~0x7F);
return true;
return false;
}
+
+/* Ascend one level: replace mas->node with the encoded pointer to its
+ * parent, read back out of the grandparent's slot so the slot/type
+ * encoding is preserved.  A child of the root loads ma_root directly.
+ */
+static inline void ma_encoded_parent(struct ma_state *mas)
+{
+ struct maple_node *parent, *gparent;
+ unsigned char slot;
+
+ if (ma_is_root(mt_to_node(mas->node)->parent)) {
+ mas->node = rcu_dereference(mas->tree->ma_root);
+ return;
+ }
+
+ /* The grandparent's slot for the parent holds the encoded parent. */
+ parent = mt_parent(mas->node);
+ gparent = mt_parent(parent);
+ slot = mt_parent_slot(parent);
+ mas->node = rcu_dereference(gparent->mr64.slot[slot]);
+ return;
+}
+
static inline struct maple_node *ma_next_alloc(struct ma_state *ms)
{
int cnt;
return ms->alloc;
}
+/* Resolve the starting point of a walk.  MAS_START resolves either to
+ * the root node (encoding stripped via mt_safe_root()) or, when the
+ * root is a plain entry, to MAS_ROOT with an invalid slot.  Returns
+ * NULL on an error state or when index > 0 in a single-entry tree.
+ */
-static void *mas_start(struct ma_state *ms)
+static void *mas_start(struct ma_state *mas)
{
void *entry;
- if (mas_is_err(ms))
+ if (mas_is_err(mas))
return NULL;
- if (ms->node == MAS_START) {
- entry = ms->tree->ma_root;
+ if (mas->node == MAS_START) {
+ entry = mas->tree->ma_root;
if (!xa_is_node(entry)) {
- if (ms->index > 0)
+ if (mas->index > 0)
return NULL;
- ms->node = MAS_ROOT;
- ma_set_slot(ms, MAPLE_NODE_SLOTS);
+ mas->node = MAS_ROOT;
+ ma_set_slot(mas, MAPLE_NODE_SLOTS);
} else {
entry = mt_safe_root(entry);
}
} else {
- entry = ms->node;
+ entry = mas->node;
}
return entry;
}
-unsigned char ma_data_end_r64(const struct ma_state *ms)
+unsigned char ma_data_end_r64(const struct maple_range_64 *mr64,
+ unsigned long last)
{
- struct maple_range_64 *mr64 = &mt_to_node(ms->node)->mr64;
unsigned char data_end = 0;
- unsigned long last = ms->max;
for (data_end = 0; data_end < MAPLE_RANGE64_SLOTS - 1; data_end++) {
last = mr64->pivot[data_end];
return data_end;
}
- if (mr64->slot[data_end + 1] != NULL)
+ if (mr64->slot[data_end] != NULL)
data_end++;
return data_end;
}
+/* Pick the slot at which to split a full node.  Scans pivots starting
+ * at slot 3 until the range covered since mas->min spans at least 8,
+ * otherwise falls back to the last scanned slot — a heuristic that
+ * keeps small ranges together on the left.
+ */
+static int ma_calc_split(struct ma_state *mas)
+{
+ int i;
+ struct maple_range_64 *full64 = &mt_to_node(mas->node)->mr64;
+ unsigned long min = mas->min;
+ unsigned long max = min;
+
+ for (i = 3; i < MAPLE_RANGE64_SLOTS - 1; i++) {
+ max = full64->pivot[i];
+ if ((max - min) >= 8)
+ break;
+ }
+ return i;
+}
+/* Copy slots/pivots [src_start, src_end] of cp->src into cp->dst
+ * starting at dst_start, stopping early at an unset (zero) pivot.
+ * Allocates cp->dst from the ma_state when it is NULL (may record an
+ * allocation error on @mas).  On return cp->dst_start is advanced past
+ * the copied slots.
+ */
+void ma_copy(struct ma_state *mas, struct ma_cp *cp)
+{
+ struct maple_range_64 *src64 = &cp->src->mr64;
+ struct maple_range_64 *dst64;
+ unsigned char sloc = cp->src_start; // src location
+ unsigned char dloc = cp->dst_start; // dst location
+
+ if (!cp->dst) {
+ /* Allocate a new node */
+ mas_node_cnt(mas, 1);
+ if (mas_is_err(mas))
+ return;
+ cp->dst = ma_next_alloc(mas);
+ }
+
+ dst64 = &cp->dst->mr64;
+ while (sloc <= cp->src_end &&
+ dloc <= cp->dst_end) {
+
+ /* A zero pivot past slot 0 marks the end of the data. */
+ if (sloc != 0 && sloc < MAPLE_RANGE64_SLOTS - 2 &&
+ src64->pivot[sloc] == 0)
+ break;
+
+ if (sloc < MAPLE_RANGE64_SLOTS - 1 &&
+ dloc < MAPLE_RANGE64_SLOTS - 1)
+ dst64->pivot[dloc] = src64->pivot[sloc];
+ else if (dloc > MAPLE_RANGE64_SLOTS - 1)
+ /* NOTE(review): if ever taken this writes past the
+ * pivot[] array (it has MAPLE_RANGE64_SLOTS - 1
+ * entries) — verify the intended condition.
+ */
+ dst64->pivot[dloc] = mas->max;
+
+ RCU_INIT_POINTER(dst64->slot[dloc], src64->slot[sloc]);
+
+ /* Increment counters */
+ sloc++;
+ dloc++;
+ }
+ cp->dst_start = dloc;
+}
+/* Split the data of the full node at mas->node between two new nodes:
+ * slots [off, split] are copied into @left, the remainder into @right.
+ * Returns the split slot chosen by ma_calc_split().
+ */
+static int ma_cp_data_64(struct ma_state *mas, struct maple_node *left,
+ struct maple_node *right, int off)
+{
+
+ int split = ma_calc_split(mas);
+
+ MA_CP(cp, mt_to_node(mas->node), left, off, split);
+ ma_copy(mas, &cp);
+ cp.src_start = split + 1;
+ cp.src_end = MAPLE_RANGE64_SLOTS - 1;
+ cp.dst = right;
+ cp.dst_start = 0;
+ ma_copy(mas, &cp);
+ return split;
+}
+
+/* Point every child of @parent back at @parent, re-encoding each
+ * child's slot number.  Used after children have been copied into a
+ * newly built node.
+ */
+static void ma_adopt_children(struct maple_node *parent)
+{
+
+ struct maple_range_64 *p64 = &parent->mr64;
+ struct maple_node *child;
+ unsigned char slot;
+
+ for (slot = 0; slot < MAPLE_RANGE64_SLOTS; slot++) {
+ /* A zero pivot past slot 0 marks the end of the data. */
+ if (slot != 0 && slot < MAPLE_RANGE64_SLOTS - 1 &&
+ p64->pivot[slot] == 0)
+ break;
+
+ child = p64->slot[slot];
+ if (child)
+ mt_set_parent(child, parent, slot);
+ }
+}
+
/* Private
- *
- * Splitting is done in a lazy-fashion. As such, the parent may not have room
- * for two entries and will require splitting itself. Rinse & repeat.
+ * Replace mn with mas->node in the tree
*/
-static int ma_split(struct ma_state *mas, unsigned char slot,
- unsigned char num)
+void mt_replace(struct ma_state *mas)
{
- return 0;
+ struct maple_node *mn = mt_to_node(mas->node);
+ struct maple_node *parent;
+ struct maple_range_64 *p64;
+ unsigned char slot = mt_parent_slot(mn);
+ struct maple_node *prev;
+
+ /* Find the node currently occupying this position in the tree. */
+ if (ma_is_root(mas->node)) {
+ prev = mas->tree->ma_root;
+ } else {
+ parent = mt_parent(mas->node);
+ p64 = &parent->mr64;
+ prev = p64->slot[slot];
+ }
+
+ /* Nothing to do if this node is already in the tree. */
+ if (prev == mn)
+ return;
+
+ /* Interior node: re-parent the children before it becomes visible. */
+ if (!mt_is_leaf(mas->node))
+ ma_adopt_children(mn);
+
+ /* Publish the new node via RCU, then free the one it replaced. */
+ if (ma_is_root(mas->node)) {
+ mn->parent = (struct maple_node *)
+ ((unsigned long)mas->tree | MA_ROOT_PARENT);
+ rcu_assign_pointer(mas->tree->ma_root, mt_mk_root(mas->node));
+ } else {
+ rcu_assign_pointer(parent->slot[slot], mas->node);
+ }
+
+ mt_free(mt_to_node(prev));
}
/* Private
*
+/* Copy slots 0 through @end of the current node into a freshly
+ * allocated node and point mas->node at the (encoded) copy.  Returns
+ * the copy's mr64, or NULL if allocation failed (error recorded on
+ * @mas).  The copy is not linked into the tree here.
+ */
static struct maple_range_64 *mas_partial_copy(struct ma_state *mas,
unsigned char end)
{
- struct maple_node *smn = mt_to_node(mas->node);
- struct maple_range_64 *src = &smn->mr64;
- struct maple_range_64 *dst = NULL;
- struct maple_node *dmn;
- int i = 0;
- /* Allocate a new node */
- mas_node_cnt(mas, 1);
+ MA_CP(cp, mt_to_node(mas->node), NULL, 0, end);
+ ma_copy(mas, &cp);
if (mas_is_err(mas))
return NULL;
+ mas->node = mt_mk_node(cp.dst, mt_node_type(mas->node));
+ cp.dst->parent = cp.src->parent;
+ return &cp.dst->mr64;
+}
+
+/* Link @new into @parent at @slot with upper bound @pivot, encoding it
+ * as @type.  Interior nodes also re-adopt their children so the
+ * children's parent pointers carry the new encoding.
+ */
+static void ma_link(struct maple_node *new, struct maple_node *parent,
+ unsigned char slot, unsigned long pivot, enum maple_type type)
+{
+ struct maple_range_64 *p64 = &parent->mr64;
+ struct maple_node *new_enc = mt_mk_node(new, type);
+
+ mt_set_parent(new, parent, slot);
+ p64->pivot[slot] = pivot;
+ RCU_INIT_POINTER(p64->slot[slot], new_enc);
+ if (!mt_is_leaf(new_enc))
+ ma_adopt_children(new);
+
+}
+/* Split the full node at mas->node into left/right halves and insert
+ * the new pivot into the parent, recursively splitting the parent
+ * first when it is also full.  Returns the split slot so the caller
+ * can adjust its insertion slot; on return mas->node is the child
+ * (left or right) covering mas->index.  May record an allocation
+ * error on @mas, in which case 0 is returned.
+ * NOTE(review): the @num parameter is unused here — confirm.
+ */
+static int ma_split(struct ma_state *mas, unsigned char slot,
+ unsigned char num, int depth)
+{
+ struct maple_node *enc_full = mas->node; // Encoded full node.
+ struct maple_node *full = mt_to_node(mas->node);
+ unsigned char split, p_slot = 0, p_end = 0;
+ struct maple_node *old_parent, *new_parent, *left, *right;
+ enum maple_type ctype; // Child type.
+ unsigned long pivot;
+
+ if (ma_is_root(full)) {
+ old_parent = full;
+ mas->node = mt_safe_root(mas->node);
+ } else {
+ old_parent = mt_parent(mas->node);
+ p_end = ma_data_end_r64(&old_parent->mr64, UINT_MAX);
+
+ if (p_end >= MAPLE_RANGE64_SLOTS - 1) {
+ /* Must split the parent */
+ /* NOTE(review): max/min are saved here but never
+ * restored or used — verify.
+ */
+ unsigned long max = mas->max;
+ unsigned long min = mas->min;
+
+ ma_encoded_parent(mas);
+ p_slot = mt_parent_slot(enc_full);
+ split = ma_split(mas, p_slot, p_end, 1);
+ if (mas_is_err(mas))
+ return 0;
+ if (split <= p_slot)
+ p_slot -= split;
+ // Split will return the parent.
+ old_parent = mt_to_node(mas->node);
+ mas->node = old_parent->mr64.slot[p_slot];
+ }
+ }
+
+ enc_full = mas->node;
+ mas_node_cnt(mas, 3);
+ if (mas_is_err(mas))
+ return 0;
+
+ // Allocations.
+ right = ma_next_alloc(mas);
+ left = ma_next_alloc(mas);
+ new_parent = ma_next_alloc(mas);
+
+ // Record the node type for the children types.
+ ctype = mt_node_type(enc_full);
+ // copy the data and calculate the split location.
+ split = ma_cp_data_64(mas, left, right, 0);
+ // Copy the parents information
+ if (!ma_is_root(full)) {
+ // Copy the parent data and leave a hole.
+ p_slot = mt_parent_slot(enc_full);
+ MA_CP(cp, old_parent, new_parent, 0, p_slot);
+ ma_copy(mas, &cp);
+ cp.dst_start += 1;
+ cp.src_start = p_slot + 1;
+ cp.src_end = p_end;
+ ma_copy(mas, &cp);
+ // Update encoded slots in children
+ ma_adopt_children(new_parent);
+ }
+
+ // Copy grand parent to the parent, including slot encoding.
+ new_parent->parent = old_parent->parent;
+ // Set up the link to the right in the new parent
+ if (ma_is_root(full))
+ pivot = ULONG_MAX;
+ else
+ pivot = old_parent->mr64.pivot[p_slot];
+
+ ma_link(right, new_parent, p_slot + 1, pivot, ctype);
+
+ // Set up the link to the left in the new parent
+ if (split < MAPLE_RANGE64_SLOTS - 1)
+ pivot = full->mr64.pivot[split];
+ else
+ pivot = left->mr64.pivot[split - 1];
+
+ ma_link(left, new_parent, p_slot, pivot, ctype);
+
+ // Set up maple state for replacement of node.
+ // Note: old_parent may be the old parent or the full node.
+ if (ma_is_root(old_parent)) {
+ mas->node = mt_mk_node(new_parent, maple_range_64);
+ } else {
+ struct maple_node *gparent = mt_parent(old_parent);
+
+ old_parent = gparent->mr64.slot[mt_parent_slot(old_parent)];
+ mas->node = mt_mk_node(new_parent, mt_node_type(old_parent));
+ }
+ // Replace the parent node & free the old parent.
+ mt_replace(mas);
+
+ // Set up the ma_state for the return. Point to the correct node for
+ // the insert.
+ if (mas->index <= pivot)
+ mas->node = mt_mk_node(left, ctype);
+ else
+ mas->node = mt_mk_node(right, ctype);
+
+ // Free the full node.
+ mt_free(full);
+ return split;
+}
+/* Private
+ *
+ * Splitting is done in a lazy-fashion. As such, the parent may not have room
+ * for two entries and will require splitting itself. Rinse & repeat.
+ *
+ *
+ * If this is root, increase the branch height.
+ * If this is not root, promote the middle item as a pivot to the parent.
+ * Copy 1/2 data to each left & right children.
+ *
+ *
+ */
+/* Alternate split implementation: splits the full node at mas->node
+ * into left/right and rebuilds the parent, returning the split slot.
+ * NOTE(review): largely parallels ma_split() above — confirm which of
+ * the two is the live path.
+ */
+static int _ma_split(struct ma_state *mas, unsigned char slot,
+ unsigned char num, int depth)
+{
+ struct maple_node *full = mas->node;
+ struct maple_node *mn = mt_to_node(mas->node);
+ struct maple_node *old_parent, *new_parent, *left, *right;
+ struct maple_range_64 *nparent64;
+ unsigned char p_slot;
+ unsigned char p_end;
+ unsigned char split;
+
+ if (ma_is_root(full)) {
+ old_parent = mn;
+ mas->node = mt_safe_root(mas->node);
+ p_slot = 0;
+ p_end = 0;
+ } else {
+ old_parent = mt_parent(mn);
+ p_slot = mt_parent_slot(mn);
+ p_end = ma_data_end_r64(&old_parent->mr64, UINT_MAX);
+ }
+
+
+ /* Parent is also full: split it first, then re-derive our place. */
+ if (p_end >= MAPLE_RANGE64_SLOTS - 1) {
+ mas->node = mn;
+ ma_encoded_parent(mas);
+ /* NOTE(review): this declaration shadows the outer 'split'
+ * and follows executable statements (gnu89 warning) —
+ * verify intent.
+ */
+ unsigned char split = ma_split(mas, p_slot, p_end, depth + 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ if (split < slot)
+ slot -= split;
- dmn = ma_next_alloc(mas);
- dst = &dmn->mr64;
- dmn->parent = smn->parent;
- for (i = 0; i < end;i++)
- {
- RCU_INIT_POINTER(dst->slot[i],
- src->slot[i]);
- dst->pivot[i] = src->pivot[i];
+ mn = mt_to_node(mas->node);
+ mas->node = full;
+ old_parent = mt_parent(mn);
+ p_slot = mt_parent_slot(mn);
+ p_end = ma_data_end_r64(&old_parent->mr64, UINT_MAX);
+ }
+
+ mas_node_cnt(mas, 3);
+ if (mas_is_err(mas))
+ return 0;
+
+ new_parent = ma_next_alloc(mas);
+ left = ma_next_alloc(mas);
+ right = ma_next_alloc(mas);
+
+ split = ma_cp_data_64(mas, left, right, 0);
+
+ new_parent->parent = old_parent->parent;
+ nparent64 = &new_parent->mr64;
+
+ /* Copy the old parent's data around the hole for the new pivot. */
+ if (!ma_is_root(full)) {
+ MA_CP(cp, old_parent, new_parent, 0, p_slot);
+ ma_copy(mas, &cp);
+ cp.dst_start += 1;
+ cp.src_start = p_slot+1;
+ cp.src_end = p_end;
+ ma_copy(mas, &cp);
+ }
+
+ mt_set_parent(left, new_parent, p_slot);
+ if (split >= MAPLE_RANGE64_SLOTS - 1)
+ nparent64->pivot[p_slot] = mn->mr64.pivot[split - 1];
+ else
+ nparent64->pivot[p_slot] = mn->mr64.pivot[split];
+
+ RCU_INIT_POINTER(nparent64->slot[p_slot],
+ mt_mk_node(left, mt_node_type(full)));
+
+ mt_set_parent(right, new_parent, ++p_slot);
+ nparent64->pivot[p_slot] = mas->max;
+ RCU_INIT_POINTER(nparent64->slot[p_slot],
+ mt_mk_node(right, mt_node_type(full)));
+
+ if (ma_is_root(old_parent)) {
+ mas->node = mt_mk_node(new_parent, maple_range_64);
+ mas->node = mt_mk_root(mas->node);
+ if (!mt_is_leaf(left)) {
+ ma_adopt_children(left);
+ ma_adopt_children(right);
+ }
+ } else {
+ struct maple_node *gparent = mt_parent(old_parent);
+ old_parent = gparent->mr64.slot[mt_parent_slot(old_parent)];
+ mas->node = mt_mk_node(new_parent, mt_node_type(old_parent));
}
- mas->node = mt_mk_node(dmn, maple_leaf_64);
- return dst;
+ mt_replace(mas); // Replace old with new parent & free.
+
+ /* Leave mas pointing at the child that will take the insert. */
+ mas->node = nparent64->slot[p_slot-1];
+ mas->max = nparent64->pivot[p_slot-1];
+ if (split <= slot) {
+ mas->node = nparent64->slot[p_slot];
+ mas->min = nparent64->pivot[p_slot-1];
+ mas->max = nparent64->pivot[p_slot];
+ } else {
+ /* NOTE(review): intentionally empty? */
+ }
+
+ mt_free(mn); // Free full node.
+ return split;
}
/* Private
*
* 4. Write the entry.
*
*/
+/* Write @entry at @slot of the node at mas->node, covering the range
+ * [mas->index, mas->last].  Splits the node first when the new entry
+ * (plus any leading NULL gap) will not fit.  Returns the number of
+ * slots consumed (1 or 2), or 0 on allocation error.
+ */
-static int _ma_insert(struct ma_state *ms, void *entry, unsigned char slot)
+static int _ma_insert(struct ma_state *mas, void *entry, unsigned char slot)
{
- struct maple_range_64 *mr64 = &mt_to_node(ms->node)->mr64;
+ struct maple_range_64 *mr64 = &mt_to_node(mas->node)->mr64;
+ struct maple_node *p_mn;
struct maple_range_64 *p_mr64;
- int o_end = ma_data_end_r64(ms); // Old end
+ int o_end = ma_data_end_r64(mr64, mas->max); // Old end
int n_end = o_end; // New end
- unsigned long max = ms->max;
- unsigned long min = ms->min;
+ unsigned long max = mas->max;
+ unsigned long min = mas->min;
int ret = 1;
/* Calculate the range of the slot */
min = mr64->pivot[slot - 1];
/* Figure out how many slots are needed for the entry. */
- if (max != ms->last)
+ if (max != mas->last)
n_end++;
- if (min != ms->index - 1)
+ if (mas->index && min != mas->index - 1)
n_end++;
if (n_end > MAPLE_RANGE64_SLOTS -1) {
- return ma_split(ms, slot, n_end);
- }
+ /* Will not fit: split and re-aim at the correct half. */
+ unsigned char split = ma_split(mas, slot, o_end, 0);
+ if (mas_is_err(mas))
+ return 0;
+
+ if (split <= slot)
+ slot -= split;
+
+ n_end -= split;
+ mr64 = &mt_to_node(mas->node)->mr64;
+ o_end = ma_data_end_r64(mr64, mas->max); // Old end is not so old now.
+ if (o_end == 1)
+ o_end = 0; // This node is empty, append at the start.
+ }
/* Save the node in case we are not appending. */
p_mr64 = mr64;
+ p_mn = mt_to_node(mas->node);
/* Creates a new node and copies until slot (inclusively) */
if (slot == o_end) {
- /* Appending */
- o_end = n_end;
+ o_end = n_end; /* Appending */
} else {
/* Not appending */
- mr64 = mas_partial_copy(ms, slot);
+ if (p_mr64 == mr64) {
+ mr64 = mas_partial_copy(mas, slot);
+ if (mas_is_err(mas))
+ return 0;
+ }
+
o_end = slot;
- if (mas_is_err(ms))
- return 0;
}
- if (min != ms->index - 1) {
+ if (mas->index && min != mas->index - 1) {
/* When writing a NULL entry, the order must be reversed to
* ensure readers don't get incorrect data on appends
*/
/* Write the entry */
RCU_INIT_POINTER(mr64->slot[++slot], entry);
- mr64->pivot[slot] = ms->last;
+ mr64->pivot[slot] = mas->last;
/* Write NULL entry */
RCU_INIT_POINTER(mr64->slot[--slot], NULL);
if (o_end == n_end) // Append.
wmb();
- mr64->pivot[slot] = ms->index - 1;
- slot+=2;
+ mr64->pivot[slot] = mas->index - 1;
+ slot += 2;
ret = 2;
} else {
/* Write the entry */
if (o_end == n_end) // Append.
wmb();
- mr64->pivot[slot++] = ms->last;
+ mr64->pivot[slot++] = mas->last;
}
/* Skip possible duplicate entry that contains a NULL */
- if (p_mr64->pivot[o_end] == ms->last)
+ if (o_end != n_end && p_mr64->pivot[o_end] == mas->last)
o_end++;
/* Copy remainder of node if this isn't an append */
- for ( ; o_end < n_end; slot++, o_end++) {
- if(p_mr64->pivot[o_end] == 0)
- break;
- RCU_INIT_POINTER(mr64->slot[slot],
- p_mr64->slot[o_end]);
- if (slot < MAPLE_RANGE64_SLOTS - 1)
- mr64->pivot[slot] = p_mr64->pivot[o_end];
- }
+ MA_CP(cp, p_mn, mt_to_node(mas->node), o_end, MAPLE_RANGE64_SLOTS - 1);
+ cp.dst_start = slot;
+ cp.dst_end = n_end;
+ ma_copy(mas, &cp);
+
+ /* A copy was made: publish it and free the old node. */
+ if (p_mr64 != mr64)
+ mt_replace(mas);
+
return ret;
}
static void ma_root_expand(struct ma_state *ms, void *entry)
{
- void *r_entry = ms->tree->ma_root; // root entry
+ void *r_entry = rcu_dereference(ms->tree->ma_root); // root entry
struct maple_node *mn;
mas_node_cnt(ms, 1);
if (mas_is_start(ms))
return;
- mr64 = &ms->node->mr64;
-
+ mr64 = &mt_to_node(ms->node)->mr64;
if (slot > 0)
ms->min = mr64->pivot[slot - 1] + 1;
*/
static int mas_coalesce(struct ma_state *mas)
{
- struct maple_node *smn = mt_to_node(mas->node);
- struct maple_range_64 *src = &smn->mr64;
+ struct maple_node *src_mn = mt_to_node(mas->node);
+ struct maple_range_64 *src = &src_mn->mr64;
unsigned char s_slot, d_slot = 0;
- unsigned long last = mas->max;
+ unsigned long last = UINT_MAX;
struct maple_range_64 *dst = NULL;
int ret = 0;
}
}
done:
- if (dst)
+ if (dst) {
ret = s_slot - d_slot;
- return ret;
-}
-
-static void ma_adopt_children(struct maple_node *parent)
-{
-
- struct maple_range_64 *p64 = &parent->mr64;
- struct maple_node *child;
- unsigned char slot;
-
- for (slot = 0; slot < MAPLE_RANGE64_SLOTS; slot++) {
- if (slot < MAPLE_RANGE64_SLOTS - 1 &&
- slot != 0 && p64->pivot[slot] == 0)
- break;
- child = p64->slot[slot];
- mt_set_parent(child, parent, slot);
+ mas->node = mt_mk_node(mt_to_node(mas->node),
+ mt_node_type(mas->node));
+ mt_replace(mas);
}
+ return ret;
}
+/* Find the slot in the current node covering @val and record it in the
+ * ma_state.  Records MAPLE_NODE_SLOTS (invalid) when @val lies past
+ * the last set pivot.  Returns true iff the chosen slot holds a
+ * non-NULL entry.
+ * NOTE(review): the body increments i both here and in the for-update,
+ * double-stepping the index — verify this should not be a break.
+ */
static bool mas_search_slots(struct ma_state *ms, unsigned long val)
{
struct maple_range_64 *mr64;
int i = 0;
+ bool ret = false;
mr64 = &mt_to_node(ms->node)->mr64;
- for (i = 0; i <= MAPLE_RANGE64_SLOTS - 1; i++) {
+ for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
if (i != 0 && mr64->pivot[i] == 0) {
ma_set_slot(ms, MAPLE_NODE_SLOTS);
- return false;
+ return ret;
}
if (val <= mr64->pivot[i])
i++;
}
+ if (mr64->slot[i])
+ ret = true;
+
ma_set_slot(ms, i);
- return true;
+ return ret;
}
+/* Descend one level toward mas->index: update the min/max limits for
+ * the recorded slot, then step into that child.  Returns false when
+ * already at a leaf (no descent performed).
+ */
-void mas_traverse(struct ma_state *mas)
+
+bool mas_traverse(struct ma_state *mas)
{
unsigned char slot = ma_get_slot(mas);
- mas->node = mt_to_node(mas->node)->slot[slot];
mas_update_limits(mas, slot);
+ if (mt_is_leaf(mas->node))
+ return false;
+ mas->node = mt_to_node(mas->node)->mr64.slot[slot];
+ return true;
}
+/* Walk from the root toward the leaf containing mas->index, recording
+ * the slot at each level.  Returns true when the walk ends on a leaf.
+ */
bool _mas_walk(struct ma_state *mas)
{
- void *p_entry; // Previous entry.
-
mas->node = mas_start(mas);
-
- if (mt_is_leaf(mas->node)) {
- mas_search_slots(mas, mas->index);
- return true;
- }
-
do {
- p_entry = mas->node;
- if (!mas_search_slots(mas, mas->index)) {
- mas->node = p_entry;
+ if (!mas_search_slots(mas, mas->index))
return mt_is_leaf(mas->node);
- }
- mas_traverse(mas);
- } while (!mt_is_leaf(mas->node));
+ } while (mas_traverse(mas));
return true;
}
return false;
}
-/* Private
- * Replace mn with mas->node in the tree
- */
-void mt_may_replace(struct ma_state *mas, void *p_entry, bool leaf)
-{
- struct maple_node *mn;
- struct maple_node *pmn = mt_to_node(p_entry);
-
- /* There is no new node to replace */
- if (mas->node == p_entry)
- return;
-
- mn = mt_to_node(mas->node);
-
- /* The targeted node was split and is already in the tree */
- if (mn == mt_parent(pmn))
- goto in_tree;
-
- mn->parent = pmn->parent;
- if (!leaf)
- ma_adopt_children(mn);
-
- if (ma_is_root(p_entry)) {
- mn->parent = (struct maple_node *)
- ((unsigned long)mas->tree | MA_ROOT_PARENT);
- rcu_assign_pointer(mas->tree->ma_root, mt_mk_root(mas->node));
- } else {
- struct maple_node *parent = mt_parent(mn->parent);
- unsigned char slot = mt_parent_slot(mn);
-
- RCU_INIT_POINTER(parent->slot[slot], mas->node);
- }
-
-in_tree:
- mt_free(pmn);
-}
void *ma_insert(struct ma_state *mas, void *entry)
{
- void *p_entry; // Previous entry.
unsigned char slot = MAPLE_NODE_SLOTS;
struct maple_range_64 *src;
bool leaf;
-
- if (ma_reserved(entry))
- mas_set_err(mas, -EINVAL);
-
mas->node = mas_start(mas);
-
-
- if (!xa_is_node(mas->tree->ma_root)) {
+ if (!xa_is_node(rcu_dereference(mas->tree->ma_root))) {
if (mas->last == 0) {
if (mas->node != NULL)
goto exists;
}
}
- p_entry = mas->node;
mas_coalesce(mas);
if (mas_is_err(mas))
goto error;
if (mas_is_err(mas))
goto error;
- /* Replace the node in the tree, if necessary. */
- mt_may_replace(mas, p_entry, leaf);
return NULL;
error:
unsigned char slot = MAPLE_NODE_SLOTS;
bool leaf = false;
- if (mas->tree->ma_root == NULL)
+ if (rcu_dereference(mas->tree->ma_root) == NULL)
return NULL;
if (!xa_is_node(mas->tree->ma_root)) {
if (mas->last == 0)
- return mas->tree->ma_root;
+ return rcu_dereference(mas->tree->ma_root);
return NULL;
}
if (leaf == true && slot != MAPLE_NODE_SLOTS) {
struct maple_range_64 mr = mt_to_node(mas->node)->mr64;
- entry = mr.slot[ma_get_slot(mas)];
+ entry = mr.slot[slot];
}
ma_set_slot(mas, 0);
return entry;
*/
int ma_erase(struct ma_state *mas)
{
- bool leaf = _mas_walk(mas);
- int slot = ma_get_slot(mas);
- struct maple_range_64 *mr64 = &mt_to_node(mas->node)->mr64;
+ struct maple_range_64 *mr64;
unsigned long piv_val;
int cnt = -EINVAL;
- void *p_entry; // Previous entry.
+ int slot;
+
+ _mas_walk(mas);
+ slot = ma_get_slot(mas);
+ mr64 = &mt_to_node(mas->node)->mr64;
if (slot == MAPLE_NODE_SLOTS)
return cnt;
mr64->pivot[slot] = piv_val;
cnt++;
}
- p_entry = mas->node;
+
mas_coalesce(mas);
if (mas_is_err(mas))
goto error;
-
- mt_may_replace(mas, p_entry, leaf);
-
error:
return cnt;
}
-void ma_destroy_walk(struct ma_state *mas)
+void ma_destroy_walk(struct maple_node *enc_mn)
{
- struct maple_node *mn = mas->node;
- unsigned int type = mt_node_type(mas->node);
+ struct maple_node *mn = mt_to_node(enc_mn);
+ unsigned int type = mt_node_type(enc_mn);
int i;
switch (type) {
case maple_range_64:
for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
- if (i > 0 && mn->mr64.pivot[i] == 0)
+ if (i > 0 && i < MAPLE_RANGE64_SLOTS - 1 &&
+ mn->mr64.pivot[i] == 0)
break;
- ma_destroy_walk(mn->mr64.slot[i]);
+ if (rcu_dereference(mn->mr64.slot[i]))
+ ma_destroy_walk(rcu_dereference(mn->mr64.slot[i]));
}
break;
case maple_leaf_64:
{
spin_lock_init(&mt->ma_lock);
mt->ma_flags = 0;
- mt->ma_root = NULL;
+ rcu_assign_pointer(mt->ma_root, NULL);
}
EXPORT_SYMBOL(mtree_init);
+/* Look up the entry stored at @index.  RCU read-side walk. */
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
void *entry;
- MA_STATE(ms, mt, index, index);
+ MA_STATE(mas, mt, index, index);
rcu_read_lock();
- entry = mas_walk(&mas);
+ entry = mas_walk(&mas);
rcu_read_unlock();
return entry;
}
MA_STATE(ms, mt, first, last);
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(ma_reserved(entry)))
return -EINVAL;
if (first > last)
+/* Tear down the whole tree: detach the root under the lock, then walk
+ * and free every node.
+ * NOTE(review): 'destroyed' is declared after mtree_lock() — a
+ * declaration-after-statement warning under gnu89; verify.
+ */
void mtree_destroy(struct maple_tree *mt)
{
- MA_STATE(ms, mt, 0, 0);
mtree_lock(mt);
- if (xa_is_node(mt->ma_root)) {
- ms.node = mt_to_node(mt->ma_root);
- ma_destroy_walk(&ms);
+ struct maple_node *destroyed = mt->ma_root;
+ rcu_assign_pointer(mt->ma_root, NULL);
+ if (xa_is_node(destroyed)) {
+ ma_destroy_walk(mt_safe_root(destroyed));
}
- mt->ma_root = NULL;
+ mt->ma_flags = 0;
mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);