return mt_root_locked(mas->tree);
}
-#define MA_META_END_MASK 0b1111
-#define MA_META_GAP_SHIFT 4
+/*
+ * ma_meta() - Get the metadata area of a maple node.
+ * @mn: The maple node
+ * @mt: The maple node type
+ *
+ * maple_arange_64 nodes keep their metadata in @mn->ma64; every other
+ * layout handled here keeps it in @mn->mr64.
+ *
+ * Return: Pointer to the metadata of the node for the given type.
+ */
+static inline struct maple_metadata *ma_meta(struct maple_node *mn,
+ enum maple_type mt)
+{
+ switch (mt) {
+ case maple_arange_64:
+ return &mn->ma64.meta;
+ default:
+ return &mn->mr64.meta;
+ }
+}
+
/*
* ma_set_meta() - Set the metadata information of a node.
* @mn: The maple node
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
unsigned char offset, unsigned char end)
{
+ struct maple_metadata *meta = ma_meta(mn, mt);
- mn->ma64.meta = (offset << MA_META_GAP_SHIFT) | end;
+ meta->gap = offset;
+ meta->end = end;
}
/*
static inline unsigned char ma_meta_end(struct maple_node *mn,
enum maple_type mt)
{
+ struct maple_metadata *meta = ma_meta(mn, mt);
- return mn->ma64.meta & MA_META_END_MASK;
+ return meta->end;
}
/*
enum maple_type mt)
{
- return mn->ma64.meta >> MA_META_GAP_SHIFT;
+ /* Resolve the metadata location via ma_meta() so non-arange node
+ * types read mr64.meta, consistent with ma_meta_end()/ma_set_meta().
+ */
+ return ma_meta(mn, mt)->gap;
}
/*
unsigned char offset)
{
- mn->ma64.meta = (offset << MA_META_GAP_SHIFT) |
- (mn->ma64.meta & MA_META_END_MASK);
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ meta->gap = offset;
}
/*
if (type == maple_arange_64)
return ma_meta_end(mte_to_node(mas->node), type);
- offset = mt_min_slots[type];
pivots = ma_pivots(mas_mn(mas), type);
+ offset = mt_pivots[type] - 1;
+ if (likely(!pivots[offset]) || pivots[offset] == mas->max) {
+ int ret = ma_meta_end(mas_mn(mas), type);
+ if (ret)
+ return ret;
+ }
+
+ offset = mt_min_slots[type];
if (unlikely(!pivots[offset]))
goto decrement;
/* Totally full. */
if (pivots[offset] != mas->max)
return offset + 1;
+
return offset;
}
while (--offset) {
if (likely(pivots[offset]))
break;
- };
+ }
+
if (likely(pivots[offset] < mas->max))
offset++;
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
struct maple_enode *child;
unsigned char offset;
+ unsigned char count = mas_data_end(mas);
- for (offset = 0; offset < mt_slots[type]; offset++) {
+ for (offset = 0; offset <= count; offset++) {
child = mas_slot_locked(mas, slots, offset);
- if (unlikely(!child))
- break;
mte_set_parent(child, parent, offset);
}
}
mt = mte_node_type(mas->node);
node = mas_mn(mas);
slots = ma_slots(node, mt);
- count = mt_slots[mt];
- for (offset = mas->offset; offset < count; offset++) {
+ count = mas_data_end(mas);
+ for (offset = mas->offset; offset <= count; offset++) {
entry = mas_slot_locked(mas, slots, offset);
- /* end of node data. */
- if (unlikely(!entry))
- break;
-
if (mte_parent(entry) == node) {
*child = *mas;
mas->offset = offset + 1;
*/
static inline void mab_mas_cp(struct maple_big_node *b_node,
unsigned char mab_start, unsigned char mab_end,
- struct ma_state *mas)
+ struct ma_state *mas, bool new_max)
{
int i, j = 0;
enum maple_type mt = mte_node_type(mas->node);
void __rcu **slots = ma_slots(node, mt);
unsigned long *pivots = ma_pivots(node, mt);
unsigned long *gaps = NULL;
+ unsigned char end;
if (mab_end - mab_start > mt_pivots[mt])
mab_end--;
+ if (!pivots[mt_pivots[mt] - 1]) {
+ slots[mt_pivots[mt]] = NULL;
+ }
+
i = mab_start;
pivots[j++] = b_node->pivot[i++];
do {
memcpy(slots, b_node->slot + mab_start,
sizeof(void *) * (i - mab_start));
- mas->max = b_node->pivot[i - 1];
+ if (new_max)
+ mas->max = b_node->pivot[i - 1];
+ end = j - 1;
if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
unsigned long max_gap = 0;
unsigned char offset = 15;
- unsigned char end = j - 1;
gaps = ma_gaps(node, mt);
do {
max_gap = gaps[j];
}
} while (j);
+
ma_set_meta(node, mt, offset, end);
+ } else if (end <= mt_pivots[mt] - 1) {
+ if (pivots[end] != mas->max)
+ end++;
+
+ ma_set_meta(node, mt, 0, end);
}
}
mast->l->min = mast->orig_l->min;
mast->l->max = mast->bn->pivot[split];
- mab_mas_cp(mast->bn, 0, split, mast->l);
+ mab_mas_cp(mast->bn, 0, split, mast->l, true);
mast->r->max = mast->l->max;
if (middle) {
- mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m);
+ mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
mast->m->min = mast->bn->pivot[split] + 1;
mast->m->max = mast->bn->pivot[mid_split];
if (!save->node &&
}
if (right) {
- mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r);
+ mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, true);
mast->r->min = mast->bn->pivot[split] + 1;
mast->r->max = mast->bn->pivot[mast->bn->b_end];
if (!save->node && (save->offset > split)) {
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
mte_node_type(mast->orig_l->node));
mast->orig_l->depth++;
- mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas);
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
mte_set_parent(left, l_mas.node, slot);
if (middle)
mte_set_parent(middle, l_mas.node, ++slot);
mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
mast->l->node = ancestor;
- mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l);
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
mas->offset = mast->bn->b_end - 1;
return true;
}
{
unsigned char p_slot;
- mab_mas_cp(mast->bn, 0, split, mast->l);
+ mab_mas_cp(mast->bn, 0, split, mast->l, true);
mte_set_pivot(mast->r->node, 0, mast->r->max);
- mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r);
+ mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, true);
mast->l->offset = mte_parent_slot(mas->node);
mast->l->max = mast->bn->pivot[split];
mast->r->min = mast->l->max + 1;
static inline bool mas_reuse_node(struct ma_state *mas,
struct maple_big_node *bn, unsigned char end)
{
- unsigned long max;
-
/* Need to be rcu safe. */
if (mt_in_rcu(mas->tree))
return false;
- max = mas->max;
- mab_mas_cp(bn, 0, bn->b_end, mas);
- mas->max = max;
+ mab_mas_cp(bn, 0, bn->b_end, mas, false);
if (end > bn->b_end) {
/* Zero end of node. */
struct maple_node *mn = mas_mn(mas);
unsigned long *pivots = ma_pivots(mn, mt);
void __rcu **slots = ma_slots(mn, mt);
- char zero = mt_slots[mt] - bn->b_end - 1;
+ /* Number of stale pivot entries past b_end; may be <= 0 when the
+ * node is (nearly) full, so use a signed int — plain char has
+ * implementation-defined signedness and would wrap on unsigned-char
+ * ABIs, making the "clear > 0" guard pass with a huge count.
+ */
+ int clear = mt_slots[mt] - bn->b_end - 2;
- memset(slots + bn->b_end + 1, 0, sizeof(void *) * zero--);
- memset(pivots + bn->b_end + 1, 0, sizeof(unsigned long *) * zero);
+ if (clear > 0) {
+ memset(slots + bn->b_end + 1, 0,
+ sizeof(void *) * clear);
+ /* Pivot entries are unsigned long, not pointers. */
+ memset(pivots + bn->b_end + 1, 0,
+ sizeof(unsigned long) * clear);
+ }
}
- return true;
+ return true;
}
/*
enum maple_type b_type = b_node->type;
if ((b_end < mt_min_slots[b_type]) &&
- (!mte_is_root(mas->node)) && (mas_mt_height(mas) > 1))
+ (!mte_is_root(mas->node)) && (mas_mt_height(mas) > 1)) {
return mas_rebalance(mas, b_node);
+ }
- if (b_end >= mt_slots[b_type])
+ if (b_end >= mt_slots[b_type]) {
return mas_split(mas, b_node);
+ }
- if (mas_reuse_node(mas, b_node, end))
+ if (mas_reuse_node(mas, b_node, end)) {
goto reuse_node;
+ }
mas_node_count(mas, 1);
if (mas_is_err(mas))
new_node = mt_mk_node(mas_pop_node(mas), mte_node_type(mas->node));
mte_to_node(new_node)->parent = mas_mn(mas)->parent;
mas->node = new_node;
- mab_mas_cp(b_node, 0, b_end, mas);
+ mab_mas_cp(b_node, 0, b_end, mas, true);
+ if (b_end < mt_pivots[b_type] - 1)
+ ma_set_meta(mas_mn(mas), maple_leaf_64, 0, b_end);
+
mas_replace(mas, false);
reuse_node:
mas_update_gap(mas);
rcu_assign_pointer(slots[slot], entry);
mas->offset = slot;
- pivots[slot++] = mas->last;
+ pivots[slot] = mas->last;
+ if (mas->last != ULONG_MAX)
+ slot++;
mas->depth = 1;
mas_set_height(mas);
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ ma_set_meta(node, maple_leaf_64, 0, slot);
return slot;
}
void __rcu **slots;
unsigned long *pivots;
+ if (!entry && !mas->index && mas->last == ULONG_MAX) {
+ mas->depth = 0;
+ mas_set_height(mas);
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ mas->node = MAS_START;
+ goto done;
+ }
+
mas_node_count(mas, 1);
if (mas_is_err(mas))
return 0;
mas->depth = 1;
mas_set_height(mas);
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+
+done:
if (xa_is_node(root))
mte_destroy_walk(root, mas->tree);
memcpy(dst_pivots + dst_offset, pivots + offset_end,
sizeof(unsigned long) * copy_size);
}
+
done:
if ((end == mt_slots[mt] - 1) && (new_end < mt_slots[mt] - 1))
dst_pivots[new_end] = mas->max;
+ /* Record metadata when the node is not completely full: bump
+ * new_end past a final partial pivot below max, then store it.
+ */
+ if (!dst_pivots[mt_pivots[mt] - 1] || dst_pivots[mt_pivots[mt] - 1] == mas->max) {
+ if (dst_pivots[new_end] && dst_pivots[new_end] < mas->max)
+ new_end++;
+ ma_set_meta(newnode, maple_leaf_64, 0, new_end);
+ }
+
if (!mt_in_rcu(mas->tree)) {
memcpy(mas_mn(mas), newnode, sizeof(struct maple_node));
} else {
mas->node = mt_mk_node(newnode, mt);
mas_replace(mas, false);
}
-
trace_ma_write(__func__, mas, 0, entry);
mas_update_gap(mas);
return true;
}
}
+ if (!mas->index && mas->last == ULONG_MAX) {
+ mas_new_root(mas, entry);
+ return content;
+ }
if (r_min == mas->index && r_max == mas->last) {
rcu_assign_pointer(slots[mas->offset], entry);
if (!!entry ^ !!content)
}
/* Appending can skip a lot. */
- if ((end < mt_slots[mt] - 1) && (mas->offset == end)) {
+ if (entry && (end < mt_slots[mt] - 1) && (mas->offset == end)) {
if ((mas->index != r_min) && (mas->last == r_max)) {
if (end + 1 < mt_pivots[mt])
pivots[end + 1] = pivots[end];
+ if (end + 1 < mt_pivots[mt]) {
+ ma_set_meta(mas_mn(mas), maple_leaf_64, 0, end + 1);
+ }
rcu_assign_pointer(slots[end + 1], entry);
pivots[end] = mas->index - 1;
+
if (!content || !entry)
mas_update_gap(mas);
+
return content;
} else if ((mas->index == r_min) && (mas->last < r_max)) {
if (end + 1 < mt_pivots[mt])
pivots[end + 1] = pivots[end];
rcu_assign_pointer(slots[end + 1], content);
+ if (end + 1 < mt_pivots[mt]) {
+ ma_set_meta(mas_mn(mas), maple_leaf_64, 0, end + 1);
+ }
pivots[end] = mas->last;
rcu_assign_pointer(slots[end], entry);
if (!content || !entry)
{
enum maple_type type;
struct maple_node *node;
- unsigned long pivot;
+ unsigned long pivot = 0;
unsigned long r_start;
- unsigned char count, offset;
+ unsigned char count;
+ unsigned char offset;
unsigned long *pivots;
void __rcu **slots;
void *entry = NULL;
+ if (mas->last == mas->max) {
+ *range_start = mas->max;
+ return NULL;
+ }
+
offset = mas->offset;
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
r_start = mas_safe_min(mas, pivots, offset);
- count = mt_pivots[type];
+ count = mas_data_end(mas);
slots = ma_slots(node, type);
+
if (mte_dead_node(mas->node))
return NULL;
goto no_entry;
}
+
while (offset < count) {
pivot = pivots[offset];
entry = mas_slot(mas, slots, offset);
if (entry)
goto found;
+ if (pivot == mas->max)
+ goto no_entry;
+
r_start = pivot + 1;
if (r_start > max) {
mas->index = max;
offset++;
}
+
+ if (r_start > mas->max) {
+ goto no_entry;
+ }
pivot = _mas_safe_pivot(mas, pivots, offset, type);
entry = mas_slot(mas, slots, offset);
if (mte_dead_node(mas->node))
while (!mas_is_none(mas)) {
- if (likely(ma_is_leaf(mt)))
+ if (likely(ma_is_leaf(mt))) {
entry = mas_next_nentry(mas, limit, &r_start);
- else
+ } else {
entry = mas_first_entry(mas, limit, &r_start);
+ }
if (unlikely(mte_dead_node(mas->node))) {
mas_rewalk(mas, last);
goto retry;
}
- if (unlikely((r_start > limit)))
+ if (unlikely((r_start > limit))) {
break;
+ }
if (likely(entry)) {
goto retry;
}
- mas->offset = mt_slot_count(mas->node);
+ if (!mas_is_none(mas))
+ mas->offset = mas_data_end(mas) + 1;
}
mas->index = mas->last = limit;
for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
void *entry = mas_slot_locked(mas, slots, offset);
- if (!entry)
- break;
node = mte_to_node(entry);
+ if (!node)
+ break;
mte_set_node_dead(entry);
smp_wmb();
node->type = mte_node_type(entry);
trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
MT_BUG_ON(mas->tree, mas->index > mas->last);
if (mas->index > mas->last) {
mas_set_err(mas, -EINVAL);
{
struct maple_enode *prev;
- unsigned char slot = 0;
+ unsigned char end, slot = 0;
if (mas_is_start(mas)) {
mas_start(mas);
goto done;
walk_up:
+ end = mas_data_end(mas);
if (mte_is_leaf(mas->node) ||
- (slot >= mt_slot_count(mas->node))) {
+ (slot > end)) {
if (mte_is_root(mas->node))
goto done;
slot = mte_parent_slot(mas->node) + 1;
- mas->node = mt_mk_node(mte_parent(mas->node),
- mas_parent_enum(mas, mas->node));
+ mas_ascend(mas);
goto walk_up;
}
prev = mas->node;
mas->node = mas_get_slot(mas, slot);
- if (!mas->node) {
+ if (!mas->node || slot > end) {
if (mte_is_root(prev))
goto done;
mas->node = prev;
slot = mte_parent_slot(mas->node) + 1;
- mas->node = mt_mk_node(mte_parent(mas->node),
- mas_parent_enum(mas, mas->node));
+ mas_ascend(mas);
goto walk_up;
}
pr_cont(" contents: ");
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
pr_cont("%lu ", node->gap[i]);
- pr_cont("| %02X | ", node->meta);
+ pr_cont("| %02X %02X | ", node->meta.end, node->meta.gap);
for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
pr_cont(MA_PTR" %lu ", node->slot[i], node->pivot[i]);
pr_cont(MA_PTR"\n", node->slot[i]);
{
enum maple_type type = mte_node_type(mas->node);
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
struct maple_enode *child;
unsigned char i;
for (i = 0; i < mt_slots[type]; i++) {
child = mas_slot(mas, slots, i);
+ if (!pivots[i] || pivots[i] == mas->max)
+ break;
+
if (!child)
break;
if (prev_piv > piv) {
pr_err(MA_PTR"[%u] piv %lu < prev_piv %lu\n",
mas_mn(mas), i, piv, prev_piv);
- mt_dump(mas->tree);
MT_BUG_ON(mas->tree, piv < prev_piv);
}
if (piv < mas->min) {
- if (piv < mas->min)
- mt_dump(mas->tree);
pr_err(MA_PTR"[%u] %lu < %lu\n", mas_mn(mas), i,
piv, mas->min);
- mt_dump(mas->tree);
MT_BUG_ON(mas->tree, piv < mas->min);
}
if (piv > mas->max) {
pr_err(MA_PTR"[%u] %lu > %lu\n", mas_mn(mas), i,
piv, mas->max);
- mt_dump(mas->tree);
MT_BUG_ON(mas->tree, piv > mas->max);
}
prev_piv = piv;