{
unsigned long lpiv = _mas_safe_pivot(mas, pivots, offset, type);
- if (!lpiv && offset)
+ if (likely(lpiv))
+ return lpiv;
+
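+ /* A zero pivot at a nonzero offset means the range runs to mas->max. */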
+ if (likely(offset))
return mas->max;
+
return lpiv;
}
return mas_spanning_rebalance(mas, &mast, height + 1);
}
-/*
- * mas_append() - Attempt to append data to the end of a node
- * @mas: The maple state
- * @entry: The entry to store
- * @min: The minimum of the range
- * @end: The end of the node
- * @content: The contents of the slot currently
- * @mt: The maple node type
- *
- * Appending never needs to allocate.
- *
- * Return: True if stored, false otherwise
- */
-static inline bool mas_append(struct ma_state *mas, void *entry,
- unsigned long min, unsigned char end,
- void *content, enum maple_type mt)
-{
- void __rcu **slots = ma_slots(mas_mn(mas), mt);
- unsigned long *pivots = ma_pivots(mas_mn(mas), mt);
- unsigned char new_end;
- unsigned char max_slots = mt_slots[mt];
-
- /*
- * slot store would happen if the last entry wasn't being split, so add
- * one.
- */
- new_end = end + 1;
- if (min < mas->index)
- new_end++;
-
- if (new_end >= max_slots)
- return false;
-
- if (new_end < max_slots - 1)
- pivots[new_end] = pivots[end];
- rcu_assign_pointer(slots[new_end--], content);
-
- if (new_end < max_slots - 1)
- pivots[new_end] = mas->last;
- rcu_assign_pointer(slots[new_end--], entry);
-
- if (min < mas->index) {
- pivots[new_end] = mas->index - 1;
- mas->offset++;
- }
-
- mas_update_gap(mas);
- return true;
-}
-
/*
* mas_node_store() - Attempt to store the value in a node
* @mas: The maple state
/* no data beyond this range */
offset_end = end + 1;
} else {
- unsigned long piv = 0;
-
- new_end++;
- do {
- offset_end++;
+ while (mas_logical_pivot(mas, pivots, ++offset_end, mt) <=
+ mas->last)
new_end--;
- piv = mas_logical_pivot(mas, pivots, offset_end, mt);
- } while (piv <= mas->last);
}
/* new range starts within a range */
mas_replace(mas, false);
}
+ trace_ma_write(__func__, mas, 0, entry);
mas_update_gap(mas);
return true;
}
}
/* out of room. */
- if (offset + 1 >= mt_slots[mt])
+ if (unlikely(offset + 1 >= mt_slots[mt]))
return false;
- /* going to split a single entry. */
if (max > mas->last) {
- if ((offset == end) &&
- mas_append(mas, entry, min, end, content, mt))
- return true;
- goto try_node_store;
+ if ((min != mas->index) || (offset != end))
+ goto try_node_store;
+ /* Appending */
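+ /* The old entry is shifted to end + 1 and keeps (mas->last, max]. */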
+ if (end + 1 < mt_pivots[mt])
+ pivots[end + 1] = pivots[end];
+
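+ /* Fill slot end + 1 before contracting pivots[end] so readers never see a gap. */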
+ rcu_assign_pointer(slots[end + 1], content);
+ pivots[end] = mas->last;
+ rcu_assign_pointer(slots[end], entry);
+ mas_update_gap(mas);
+ return true;
}
- lmax = mas_logical_pivot(mas, pivots, offset + 1, mt);
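+ /* The last slot always extends to mas->max; no pivot lookup is needed. */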
+ if (offset == end - 1)
+ lmax = mas->max;
+ else
+ lmax = pivots[offset + 1];
+
/* going to overwrite too many slots. */
if (lmax < mas->last)
goto try_node_store;
if (min == mas->index) {
/* overwriting two or more ranges with one. */
- if (lmax <= mas->last)
+ if (lmax == mas->last)
goto try_node_store;
- /* Overwriting a portion of offset + 1. */
+ /* Overwriting all of offset and a portion of offset + 1. */
rcu_assign_pointer(slots[offset], entry);
pivots[offset] = mas->last;
goto done;
- } else if (min < mas->index) {
- /* split start */
-
+ } else {
/* Doesn't end on the next range end. */
if (lmax != mas->last)
goto try_node_store;
- if (offset + 1 < mt_pivots[mt])
- pivots[offset + 1] = mas->last;
+ /* Overwriting a portion of offset and all of offset + 1 */
+ if (offset + 1 < mt_pivots[mt]) {
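+ /* Don't write a pivot for a NULL stored over an unset pivot. */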
+ if (entry || pivots[offset + 1])
+ pivots[offset + 1] = mas->last;
+ }
rcu_assign_pointer(slots[offset + 1], entry);
pivots[offset] = mas->index - 1;
mas->offset++; /* Keep mas accurate. */
done:
+ trace_ma_write(__func__, mas, 0, entry);
mas_update_gap(mas);
return true;
mas_set_err(mas, -EEXIST);
return content;
}
+ /*
+  * FIXME: Try finding the end offset out here and passing it through.
+  * Maybe a struct for writes? lmax and offset_end?
+  */
if (!entry) {
unsigned char offset_end = mas->offset;
mas->last = r_max;
} else {
unsigned long *pivots = ma_pivots(node, mt);
- /* Check next slot if we are overwriting the end */
- if ((mas->last == r_max) && !slots[mas->offset + 1]) {
- if (mas->offset < mt_pivots[mt] - 1 &&
- pivots[mas->offset + 1])
- mas->last = pivots[mas->offset + 1];
- else
- mas->last = mas->max;
- } else if (mas->last > r_max) {
- /* expand over this slot if necessary */
- unsigned long piv;
-
- do {
- piv = _mas_safe_pivot(mas, pivots,
+ unsigned long piv = r_max;
+ /* Check next slot(s) if we are overwriting the end */
+ if (mas->last >= piv) {
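+ /* Stop at ULONG_MAX so the scan cannot run past the end of the node. */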
+ while (mas->last >= piv && mas->last != ULONG_MAX)
+ piv = mas_logical_pivot(mas, pivots,
++offset_end, mt);
- } while (mas->last >= piv);
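+ /* Absorb a trailing NULL range into this write. */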
if (!slots[offset_end])
mas->last = piv;
memset(b_node.pivot + b_node.b_end + 1, 0,
sizeof(unsigned long) * zero);
+ trace_ma_write(__func__, mas, 0, entry);
if (!mas_commit_b_node(mas, &b_node, end))
return NULL;
{
void *existing = NULL;
+ trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
MT_BUG_ON(mas->tree, mas->index > mas->last);
if (mas->index > mas->last) {
mas_is_span_wr(mas, mas->max, mte_node_type(mas->node), entry)))
mas->node = MAS_START;
+ trace_ma_write(__func__, mas, 0, entry);
retry:
_mas_store(mas, entry, true);
if (unlikely(mas_nomem(mas, gfp)))