if (mas->index > 0)
goto done;
- entry = mas_root(mas);
+ entry = root;
mas->node = MAS_ROOT;
mas->offset = MAPLE_NODE_SLOTS;
}
/*
* mas_wr_node_walk() - Find the correct offset for the index in the @mas.
- * @mas: The maple state
- * @type: The node type in the @mas
- * @range_min: The minimum range
- * @range_max: The maximum range
+ * @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
- *
*/
-static inline void mas_wr_node_walk(struct ma_state *mas, enum maple_type type,
- unsigned long *range_min, unsigned long *range_max)
+static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
- struct maple_node *node;
- unsigned long *pivots;
- unsigned long min;
- unsigned long max;
- unsigned long index;
+ struct ma_state *mas = wr_mas->mas;
unsigned char count;
unsigned char offset;
+ unsigned long index, min, max;
- if (unlikely(ma_is_dense(type))) {
- (*range_max) = (*range_min) = mas->index;
+ if (unlikely(ma_is_dense(wr_mas->type))) {
+ wr_mas->r_max = wr_mas->r_min = mas->index;
mas->offset = mas->index = mas->min;
return;
}
- node = mas_mn(mas);
- count = mt_pivots[type];
+ wr_mas->node = mas_mn(wr_mas->mas);
+ count = wr_mas->node_end = mas_data_end(mas);
offset = mas->offset;
- pivots = ma_pivots(node, type);
- min = mas_safe_min(mas, pivots, offset);
- max = pivots[offset];
+ wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
+ min = mas_safe_min(mas, wr_mas->pivots, offset);
+ max = wr_mas->pivots[offset];
if (unlikely(offset == count))
goto max;
offset++;
min = max + 1;
while (offset < count) {
- max = pivots[offset];
+ max = wr_mas->pivots[offset];
if (index <= max)
goto done;
max:
max = mas->max;
done:
- *range_max = max;
- *range_min = min;
+ wr_mas->r_max = max;
+ wr_mas->r_min = min;
mas->offset = offset;
}
+
/*
* mast_topiary() - Add the portions of the tree to the removal list; either to
* be freed or discarded (destroy walk).
static inline void mast_topiary(struct maple_subtree_state *mast)
{
unsigned char l_off, r_off, offset;
- unsigned long l_index, range_min, range_max;
+ unsigned long l_index;
struct maple_enode *child;
void __rcu **slots;
- enum maple_type mt;
+ struct ma_wr_state wr_mas;
+
+ wr_mas.mas = mast->orig_l;
+ wr_mas.type = mte_node_type(mast->orig_l->node);
/* The left node is consumed, so add to the free list. */
l_index = mast->orig_l->index;
mast->orig_l->index = mast->orig_l->last;
- mt = mte_node_type(mast->orig_l->node);
- mas_wr_node_walk(mast->orig_l, mt, &range_min, &range_max);
+ mas_wr_node_walk(&wr_mas);
mast->orig_l->index = l_index;
l_off = mast->orig_l->offset;
r_off = mast->orig_r->offset;
if (mast->orig_l->node == mast->orig_r->node) {
- slots = ma_slots(mte_to_node(mast->orig_l->node), mt);
+ slots = ma_slots(mte_to_node(mast->orig_l->node), wr_mas.type);
for (offset = l_off + 1; offset < r_off; offset++)
mat_add(mast->destroy, mas_slot_locked(mast->orig_l,
slots, offset));
/* Now destroy l_off + 1 -> end and 0 -> r_off - 1 */
offset = l_off + 1;
- slots = ma_slots(mte_to_node(mast->orig_l->node), mt);
- while (offset < mt_slots[mt]) {
+ slots = ma_slots(mte_to_node(mast->orig_l->node), wr_mas.type);
+ while (offset < mt_slots[wr_mas.type]) {
child = mas_slot_locked(mast->orig_l, slots, offset++);
if (!child)
break;
{
struct maple_enode *left = mast->orig_l->node;
struct maple_enode *right = mast->orig_r->node;
- unsigned long range_min, range_max;
+ struct ma_wr_state wr_mas;
mas_ascend(mast->orig_l);
mas_ascend(mast->orig_r);
*/
if (mast->orig_r->max < mast->orig_r->last)
mast->orig_r->offset = mas_data_end(mast->orig_r) + 1;
- else
- mas_wr_node_walk(mast->orig_r,
- mte_node_type(mast->orig_r->node), &range_min, &range_max);
+ else {
+ wr_mas.mas = mast->orig_r;
+ wr_mas.type = mte_node_type(mast->orig_r->node);
+ mas_wr_node_walk(&wr_mas);
+ }
/* Set up the left side of things */
mast->orig_l->offset = 0;
mast->orig_l->index = mast->l->min;
- mas_wr_node_walk(mast->orig_l, mte_node_type(mast->orig_l->node),
- &range_min, &range_max);
+ wr_mas.mas = mast->orig_l;
+ wr_mas.type = mte_node_type(mast->orig_l->node);
+ mas_wr_node_walk(&wr_mas);
}
/*
return slot;
}
-/*
- * mas_root_ptr() - Store entry into root.
- * @mas: The maple state
- * @entry: The entry to store
- * @overwrite: If it is okay to overwrite data
- *
- * Return: 0 on success, 1 otherwise.
- */
-static inline int mas_root_ptr(struct ma_state *mas, void *entry,
- bool overwrite)
-{
- int ret = 1;
-
- if (xa_is_node(mas_root(mas)))
- return 0;
-
- if (mas_root(mas) && mas->last == 0) {
- if (!overwrite)
- goto exists;
- }
- if (mas->last != 0)
- ret = mas_root_expand(mas, entry);
+static inline void mas_store_root(struct ma_state *mas, void *entry)
+{
+ if (likely((mas->last != 0) || (mas->index != 0)))
+ mas_root_expand(mas, entry);
else if (((unsigned long) (entry) & 3) == 2)
- ret = mas_root_expand(mas, entry);
+ mas_root_expand(mas, entry);
else {
rcu_assign_pointer(mas->tree->ma_root, entry);
mas->node = MAS_START;
}
- return ret;
-
-exists:
- mas_set_err(mas, -EEXIST);
- return 0;
}
/*
*
* Return: True if this is a spanning write, false otherwise.
*/
-static bool mas_is_span_wr(struct ma_state *mas, unsigned long piv,
- enum maple_type type, void *entry)
+static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
unsigned long max;
- unsigned long last = mas->last;
+ unsigned long last = wr_mas->mas->last;
+ unsigned long piv = wr_mas->r_max;
+ enum maple_type type = wr_mas->type;
+ void *entry = wr_mas->entry;
+
/* Contained in this pivot */
if (piv > last)
return false;
- max = mas->max;
+ max = wr_mas->mas->max;
if (unlikely(ma_is_leaf(type))) {
/* Fits in the node, but may span slots. */
if (last < max)
/*
* mas_wr_walk(): Walk the tree for a write.
- * @range_min - pointer that will be set to the minimum of the slot range
- * @range_max - pointer that will be set to the maximum of the slot range
- * @entry - the value that will be written.
+ * @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
*
* Return: True if it's contained in a node, false on spanning write.
*/
-static bool mas_wr_walk(struct ma_state *mas, unsigned long *range_min,
- unsigned long *range_max, void *entry)
+static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
- enum maple_type type;
+ struct ma_state *mas = wr_mas->mas;
while (true) {
- type = mte_node_type(mas->node);
- mas->depth++;
-
- mas_wr_node_walk(mas, type, range_min, range_max);
- if (mas_is_span_wr(mas, *range_max, type, entry))
+ wr_mas->mas->depth++;
+ wr_mas->type = mte_node_type(wr_mas->mas->node);
+ mas_wr_node_walk(wr_mas);
+ wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
+ if (mas_is_span_wr(wr_mas))
return false;
- if (ma_is_leaf(type))
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
return true;
/* Traverse. */
- mas->max = *range_max;
- mas->min = *range_min;
- mas->node = mas_slot_locked(mas, ma_slots(mas_mn(mas), type),
- mas->offset);
+ mas->max = wr_mas->r_max;
+ mas->min = wr_mas->r_min;
+ mas->node = wr_mas->content;
mas->offset = 0;
}
return true;
}
/*
- * mas_extend_null() - Extend a store of a %NULL to include surrounding %NULLs.
+ * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
* @l_mas: The left maple state
* @r_mas: The right maple state
*/
-static inline void mas_extend_null(struct ma_state *l_mas, struct ma_state *r_mas)
+static inline void mas_extend_spanning_null(struct ma_state *l_mas,
+ struct ma_state *r_mas)
{
unsigned char l_slot = l_mas->offset;
unsigned char r_slot = r_mas->offset;
content = mas_get_slot(&l_mas, l_mas.offset);
if (!entry) {
- mas_extend_null(&l_mas, &r_mas);
+ mas_extend_spanning_null(&l_mas, &r_mas);
mas->index = l_mas.index;
mas->last = l_mas.last = r_mas.index = r_mas.last;
mas->offset = l_mas.offset;
*
* Return: True if stored, false otherwise
*/
+/*
+ * mas_wr_node_store() - Attempt to store the value in a node.
+ * @wr_mas: The maple write state
+ *
+ * Builds the post-store contents in @newnode — a freshly popped node when the
+ * tree is in RCU mode, or an on-stack scratch node otherwise — then installs
+ * it, instead of editing the live node slot by slot.
+ *
+ * Return: True if stored, false otherwise (not enough room, not enough data,
+ * or an allocation error recorded in the maple state).
+ */
+static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ void __rcu **dst_slots;
+ unsigned long *dst_pivots;
+ unsigned char dst_offset;
+ unsigned char new_end = wr_mas->node_end;
+ unsigned char offset;
+ unsigned char node_slots = mt_slots[wr_mas->type];
+ unsigned char node_pivots = mt_pivots[wr_mas->type];
+ struct maple_node reuse, *newnode;
+ unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
+
+ /* Work out how many slots the node will hold after the store. */
+ offset = mas->offset;
+ if (mas->last == wr_mas->r_max) {
+ /* runs right to the end of the node */
+ if (mas->last == mas->max)
+ new_end = offset;
+ /* don't copy this offset */
+ wr_mas->offset_end++;
+ } else if (mas->last < wr_mas->r_max) {
+ /* new range ends in this range */
+ if (unlikely(wr_mas->r_max == ULONG_MAX))
+ mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
+
+ new_end++;
+ } else {
+ /* new range consumes several existing ranges; drop them */
+ if (_mas_safe_pivot(mas, wr_mas->pivots, wr_mas->offset_end, wr_mas->type) == mas->last)
+ wr_mas->offset_end++;
+
+ new_end -= wr_mas->offset_end - offset - 1;
+ }
+
+ /* new range starts within a range */
+ if (wr_mas->r_min < mas->index)
+ new_end++;
+
+ /* Not enough room */
+ if (new_end >= node_slots)
+ return false;
+
+ /* Not enough data. */
+ if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
+ !(mas->mas_flags & MA_STATE_BULK))
+ return false;
+
+ /* set up node. */
+ if (mt_in_rcu(mas->tree)) {
+ /* Readers may hold the old node; build a replacement instead. */
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return false;
+
+ newnode = mas_pop_node(mas);
+ } else {
+ /* No concurrent readers; use an on-stack scratch node. */
+ memset(&reuse, 0, sizeof(struct maple_node));
+ newnode = &reuse;
+ }
+
+ newnode->parent = mas_mn(mas)->parent;
+ dst_pivots = ma_pivots(newnode, wr_mas->type);
+ dst_slots = ma_slots(newnode, wr_mas->type);
+ /* Copy from start to insert point */
+ memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
+ memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
+ dst_offset = offset;
+
+ /* Handle insert of new range starting after old range */
+ if (wr_mas->r_min < mas->index) {
+ mas->offset++;
+ rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
+ dst_pivots[dst_offset++] = mas->index - 1;
+ }
+
+ /* Store the new entry and range end. */
+ if (dst_offset < max_piv)
+ dst_pivots[dst_offset] = mas->last;
+ mas->offset = dst_offset;
+ rcu_assign_pointer(dst_slots[dst_offset++], wr_mas->entry);
+
+ /* this range wrote to the end of the node. */
+ if (wr_mas->offset_end > wr_mas->node_end)
+ goto done;
+
+ /* Copy to the end of node if necessary. */
+ copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
+ memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
+ sizeof(void *) * copy_size);
+ if (dst_offset < max_piv) {
+ if (copy_size > max_piv - dst_offset)
+ copy_size = max_piv - dst_offset;
+ memcpy(dst_pivots + dst_offset, wr_mas->pivots + wr_mas->offset_end,
+ sizeof(unsigned long) * copy_size);
+ }
+
+done:
+ if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
+ dst_pivots[new_end] = mas->max;
+
+ /* Record the new end in the node metadata where it fits. */
+ if (!dst_pivots[node_pivots - 1] || dst_pivots[node_pivots - 1] == mas->max) {
+ if (dst_pivots[new_end] && dst_pivots[new_end] < mas->max)
+ new_end++;
+ ma_set_meta(newnode, maple_leaf_64, 0, new_end);
+ }
+
+ if (mt_in_rcu(mas->tree)) {
+ /* Swap the newly built node in for the old one. */
+ mas->node = mt_mk_node(newnode, wr_mas->type);
+ mas_replace(mas, false);
+ } else {
+ /* Not shared with readers; copy the scratch node back in place. */
+ memcpy(mas_mn(mas), newnode, sizeof(struct maple_node));
+ }
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_update_gap(mas);
+ return true;
+}
+
static inline bool mas_node_store(struct ma_state *mas, void *entry,
unsigned long min, unsigned long max,
unsigned char end, void *content,
*
* Return: True if stored, false otherwise
*/
+/*
+ * mas_wr_slot_store() - Attempt an in-place store touching at most two slots.
+ * @wr_mas: The maple write state
+ *
+ * Handles the two narrow cases: overwriting all of slot @mas->offset plus a
+ * portion of the next slot, or a portion of @mas->offset plus all of the next
+ * slot.  Anything wider returns false so the caller can fall back to the
+ * node-store path.
+ *
+ * Return: True if stored, false otherwise
+ */
+static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned long lmax; /* Logical max. */
+ unsigned char offset = mas->offset;
+
+ if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
+ (offset != wr_mas->node_end)))
+ return false;
+
+ /* The pivot of the slot after this one bounds the write. */
+ if (offset == wr_mas->node_end - 1)
+ lmax = mas->max;
+ else
+ lmax = wr_mas->pivots[offset + 1];
+
+ /* going to overwrite too many slots. */
+ if (lmax < mas->last)
+ return false;
+
+ if (wr_mas->r_min == mas->index) {
+ /* overwriting two or more ranges with one. */
+ if (lmax == mas->last)
+ return false;
+
+ /* Overwriting all of offset and a portion of offset + 1. */
+ rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->last;
+ goto done;
+ }
+
+ /* Doesn't end on the next range end. */
+ if (lmax != mas->last)
+ return false;
+
+ /* Overwriting a portion of offset and all of offset + 1 */
+ if ((offset + 1 < mt_pivots[wr_mas->type]) &&
+ (wr_mas->entry || wr_mas->pivots[offset + 1]))
+ wr_mas->pivots[offset + 1] = mas->last;
+
+ rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->index - 1;
+ mas->offset++; /* Keep mas accurate. */
+
+done:
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_update_gap(mas);
+ return true;
+}
+
static inline bool mas_slot_store(struct ma_state *mas, void *entry,
unsigned long min, unsigned long max,
unsigned long end_piv,
return true;
}
-/*
- * _mas_store() - Internal call to store a value
- * @mas: The maple state
- * @entry: The entry to store
- * @overwrite: Allowed to overwrite entries or not
- *
- * Return: The contents that was stored at the index.
- */
-static inline void *_mas_store(struct ma_state *mas, void *entry, bool overwrite)
+static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
- unsigned long r_max, r_min; /* Ranges */
- unsigned long end_piv;
- unsigned char end, zero;
- void *content = NULL;
- struct maple_big_node b_node;
- void __rcu **slots;
- enum maple_type mt;
- struct maple_node *node;
- unsigned long *pivots;
- unsigned char offset_end;
+ while ((wr_mas->mas->last > wr_mas->end_piv) &&
+ (wr_mas->offset_end < wr_mas->node_end))
+ wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
- int ret = 0;
+ if (wr_mas->mas->last > wr_mas->end_piv)
+ wr_mas->end_piv = wr_mas->mas->max;
+}
- if (mas_start(mas) || mas_is_none(mas) || mas->node == MAS_ROOT) {
- ret = mas_root_ptr(mas, entry, overwrite);
- if (mas_is_err(mas))
- return NULL;
+static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
- if (ret)
- goto complete_at_root;
+ /* Check next slot(s) if we are overwriting the end */
+ if ((mas->last == wr_mas->end_piv) &&
+ !wr_mas->slots[wr_mas->offset_end + 1]) {
+ wr_mas->offset_end++;
+ if (wr_mas->offset_end == wr_mas->node_end)
+ mas->last = mas->max;
+ else
+ mas->last = wr_mas->pivots[wr_mas->offset_end];
+ } else if ((mas->last > wr_mas->end_piv) &&
+ !wr_mas->slots[wr_mas->offset_end]) {
+ mas->last = wr_mas->end_piv;
+ wr_mas->offset_end++;
}
- if (!mas_wr_walk(mas, &r_min, &r_max, entry)) {
- if (!overwrite) {
- /* spanning writes always overwrite something */
- mas_set_err(mas, -EEXIST);
- return NULL;
+ if (!wr_mas->content) {
+ /* If this one is null, the next and prev are not */
+ mas->index = wr_mas->r_min;
+ } else {
+ /* Check prev slot if we are overwriting the start */
+ if (mas->index == wr_mas->r_min && mas->offset &&
+ !wr_mas->slots[mas->offset - 1]) {
+ mas->offset--;
+ wr_mas->r_min = mas->index =
+ mas_safe_min(mas, wr_mas->pivots, mas->offset);
+ wr_mas->r_max = wr_mas->pivots[mas->offset];
}
-
- ret = mas_spanning_store(mas, entry);
- goto spanning_store;
}
+}
- /* At this point, we are at the leaf node that needs to be altered. */
+static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
+{
+ unsigned char end = wr_mas->node_end;
+ unsigned char new_end = end + 1;
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char node_pivots = mt_pivots[wr_mas->type];
- /* Calculate needed space */
- mt = mte_node_type(mas->node);
- node = mas_mn(mas);
- slots = ma_slots(node, mt);
- content = mas_slot_locked(mas, slots, mas->offset);
- if (unlikely(!overwrite) && (content || (mas->last > r_max))) {
- mas_set_err(mas, -EEXIST);
- return content;
- }
+ if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
+ if (new_end < node_pivots)
+ wr_mas->pivots[new_end] = wr_mas->pivots[end];
- pivots = ma_pivots(node, mt);
- offset_end = mas->offset;
- end_piv = r_max;
- end = mas_data_end(mas);
- while ((mas->last > end_piv) && (offset_end < end))
- end_piv = pivots[++offset_end];
+ if (new_end < node_pivots)
+ ma_set_meta(mas_mn(mas), maple_leaf_64, 0,
+ new_end);
- if (mas->last > end_piv)
- end_piv = mas->max;
+ rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
+ mas->offset = new_end;
+ wr_mas->pivots[end] = mas->index - 1;
- if (!entry) {
- /* Check next slot(s) if we are overwriting the end */
- if ((mas->last == end_piv) && !slots[offset_end + 1]) {
- offset_end++;
- if (offset_end == end)
- mas->last = mas->max;
- else
- mas->last = pivots[offset_end];
- } else if ((mas->last > end_piv) && !slots[offset_end]) {
- mas->last = end_piv;
- offset_end++;
- }
+ return 1;
+ } else if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
+ if (new_end < node_pivots)
+ wr_mas->pivots[new_end] = wr_mas->pivots[end];
- if (!content) {
- /* If this one is null, the next and prev are not */
- mas->index = r_min;
- } else {
- /* Check prev slot if we are overwriting the start */
- if (mas->index == r_min && mas->offset &&
- !slots[mas->offset - 1]) {
- mas->offset--;
- r_min = mas->index = mas_safe_min(mas, pivots,
- mas->offset);
- r_max = pivots[mas->offset];
- }
- }
- }
+ rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
+ if (new_end < node_pivots)
+ ma_set_meta(mas_mn(mas), maple_leaf_64, 0,
+ new_end);
- /* New root for a single pointer */
- if (!mas->index && mas->last == ULONG_MAX) {
- mas_new_root(mas, entry);
- return content;
+ wr_mas->pivots[end] = mas->last;
+ rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
+ return 1;
}
+ return 0;
+}
+static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
+{
+ unsigned char zero;
+ unsigned char node_slots;
+ struct ma_state *mas = wr_mas->mas;
+ struct maple_big_node b_node;
+
/* Direct replacement */
- if (r_min == mas->index && r_max == mas->last) {
- rcu_assign_pointer(slots[mas->offset], entry);
- if (!!entry ^ !!content)
+ if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
+ rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
+ if (!!wr_mas->entry ^ !!wr_mas->content)
mas_update_gap(mas);
- return content;
+ return;
}
/* Attempt to append */
- if (entry && (end < mt_slots[mt] - 1) && (mas->offset == end)) {
- unsigned char new_end = end + 1;
- if ((mas->index != r_min) && (mas->last == r_max)) {
- if (new_end < mt_pivots[mt])
- pivots[new_end] = pivots[end];
-
- if (new_end < mt_pivots[mt])
- ma_set_meta(mas_mn(mas), maple_leaf_64, 0,
- new_end);
-
- rcu_assign_pointer(slots[new_end], entry);
- mas->offset = new_end;
- pivots[end] = mas->index - 1;
-
- if (!content || !entry)
- mas_update_gap(mas);
-
- return content;
- } else if ((mas->index == r_min) && (mas->last < r_max)) {
- if (new_end < mt_pivots[mt])
- pivots[new_end] = pivots[end];
-
- rcu_assign_pointer(slots[new_end], content);
- if (new_end < mt_pivots[mt])
- ma_set_meta(mas_mn(mas), maple_leaf_64, 0,
- new_end);
+ node_slots = mt_slots[wr_mas->type];
+ /* slot and node store will not fit, go to the slow path */
+ if (unlikely(wr_mas->offset_end + 1 >= node_slots))
+ goto slow_path;
- pivots[end] = mas->last;
- rcu_assign_pointer(slots[end], entry);
- if (!content || !entry)
+ if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
+ (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
+ if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
-
- return content;
- }
+ return;
}
- /* slot and node store will not fit, go to the slow path */
- if (unlikely(offset_end + 1 >= mt_slots[mt]))
- goto slow_path;
-
- if ((offset_end - mas->offset <= 1) &&
- mas_slot_store(mas, entry, r_min, r_max, end_piv, end, content, mt,
- slots))
- return content;
- else if (mas_node_store(mas, entry, r_min, r_max, end, content, mt,
- slots, pivots, offset_end))
- return content;
+ if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
+ return;
+ else if (mas_wr_node_store(wr_mas))
+ return;
if (mas_is_err(mas))
- return content;
+ return;
slow_path:
b_node.type = mte_node_type(mas->node);
- b_node.b_end = mas_store_b_node(mas, &b_node, entry, end, offset_end,
- content);
+ b_node.b_end = mas_store_b_node(mas, &b_node, wr_mas->entry, wr_mas->node_end,
+ wr_mas->offset_end, wr_mas->content);
b_node.min = mas->min;
zero = MAPLE_BIG_NODE_SLOTS - b_node.b_end - 1;
memset(b_node.pivot + b_node.b_end + 1, 0,
sizeof(unsigned long) * zero);
- trace_ma_write(__func__, mas, 0, entry);
- if (!mas_commit_b_node(mas, &b_node, end))
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_commit_b_node(mas, &b_node, wr_mas->node_end);
+}
+
+
+/*
+ * mas_store_entry() - Internal call to store a value
+ * @mas: The maple state
+ * @entry: The entry to store.
+ *
+ * Return: The contents that was stored at the index.
+ */
+static inline void *mas_store_entry(struct ma_state *mas, void *entry)
+{
+ struct ma_wr_state wr_mas;
+
+ /* Handle stores at the root level without a node walk. */
+ if ((wr_mas.content = mas_start(mas)) ||
+ mas_is_none(mas) || mas->node == MAS_ROOT) {
+ mas_store_root(mas, entry);
+ return wr_mas.content;
+ }
+
+ wr_mas.mas = mas;
+ wr_mas.entry = entry;
+ /* A failed walk means the write spans nodes; take the spanning path. */
+ if (!mas_wr_walk(&wr_mas)) {
+ mas_spanning_store(mas, entry);
+ return wr_mas.content;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
+ mas_wr_end_piv(&wr_mas);
+
+ /* Storing NULL may be widened to absorb adjacent NULL ranges. */
+ if (!entry)
+ mas_wr_extend_null(&wr_mas);
+
+ /* New root for a single pointer */
+ if (!mas->index && mas->last == ULONG_MAX) {
+ mas_new_root(mas, wr_mas.entry);
+ return wr_mas.content;
+ }
+
+ mas_wr_modify(&wr_mas);
+ return wr_mas.content;
+}
+
+
+/*
+ * mas_insert() - Internal call to insert a value
+ * @mas: The maple state
+ * @entry: The entry to store
+ *
+ * Return: %NULL or the contents that already exists at the requested index
+ * otherwise. The maple state needs to be checked for error conditions.
+ */
+static inline void *mas_insert(struct ma_state *mas, void *entry)
+{
+ struct ma_wr_state wr_mas;
+
+ /* An existing root entry means the requested index is occupied. */
+ wr_mas.content = mas_start(mas);
+ if (wr_mas.content)
+ goto exists;
+
+ if (mas_is_none(mas)) {
+ mas_store_root(mas, entry);
 return NULL;
+ }
+
+ wr_mas.mas = mas;
+ wr_mas.entry = entry;
+ /* spanning writes always overwrite something */
+ if (!mas_wr_walk(&wr_mas))
+ goto exists;
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
+
+ /* Existing content, or a write past this slot's range, would collide. */
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
-complete_at_root:
- if (ret > 2)
+ if (!entry)
 return NULL;
-spanning_store:
- return content;
+
+ mas_wr_modify(&wr_mas);
+ return wr_mas.content;
+
+exists:
+ mas_set_err(mas, -EEXIST);
+ return wr_mas.content;
+
}
/*
mas->min = mas_safe_min(mas, pivots, pslot);
mas->node = mn;
mas->offset = slot;
- _mas_store(mas, entry, false);
+ mas_store_entry(mas, entry);
}
/*
void *mas_store(struct ma_state *mas, void *entry)
{
void *existing = NULL;
+ struct ma_wr_state wr_mas;
trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
}
#endif
- if ( !mas_is_start(mas) && (mas_is_none(mas) ||
- mas_is_span_wr(mas, mas->max, mte_node_type(mas->node), entry)))
- mas->node = MAS_START;
+ wr_mas.mas = mas;
+ if ( !mas_is_start(mas)) {
+ if (mas_is_none(mas))
+ mas_reset(mas);
+ else {
+ wr_mas.r_max = mas->max;
+ wr_mas.type = mte_node_type(mas->node);
+ wr_mas.entry = entry;
+ if (mas_is_span_wr(&wr_mas))
+ mas_reset(mas);
+ }
+ }
- existing = _mas_store(mas, entry, true);
+ existing = mas_store_entry(mas, entry);
if (unlikely(mas_is_err(mas)))
return existing;
*/
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
- if ( !mas_is_start(mas) && (mas_is_none(mas) ||
- mas_is_span_wr(mas, mas->max, mte_node_type(mas->node), entry)))
- mas->node = MAS_START;
+ struct ma_wr_state wr_mas;
+
+ wr_mas.mas = mas;
+ if ( !mas_is_start(mas)) {
+ if (mas_is_none(mas))
+ mas_reset(mas);
+ else {
+ wr_mas.r_max = mas->max;
+ wr_mas.type = mte_node_type(mas->node);
+ wr_mas.entry = entry;
+ if (mas_is_span_wr(&wr_mas))
+ mas_reset(mas);
+ }
+ }
trace_ma_write(__func__, mas, 0, entry);
retry:
- _mas_store(mas, entry, true);
+ mas_store_entry(mas, entry);
if (unlikely(mas_nomem(mas, gfp)))
goto retry;
mas->node = MAS_START;
mas->index = r_min;
mas->last = r_max;
- _mas_store(mas, NULL, true);
+ mas_store_entry(mas, NULL);
if (mas_nomem(mas, GFP_KERNEL))
goto retry;
mtree_lock(mt);
retry:
- _mas_store(&mas, entry, true);
+ mas_store_entry(&mas, entry);
if (mas_nomem(&mas, gfp))
goto retry;
mtree_lock(mt);
retry:
- _mas_store(&ms, entry, false);
+ mas_insert(&ms, entry);
if (mas_nomem(&ms, gfp))
goto retry;