*
* Return: 0 on error, positive on success.
*/
-static inline int mas_spanning_store(struct ma_wr_state *wr_mas)
+static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
struct maple_subtree_state mast;
struct maple_big_node b_node;
}
/*
- * mas_node_store() - Attempt to store the value in a node
- * @mas: The maple state
- * @entry: The value to store
- * @min: The minimum of the range
- * @max: The maximum of the range
- * @mt: The maple node type
- * @slots: Pointer to the slot array
- * @pivots: Pointer to the pivot array
+ * mas_wr_node_store() - Attempt to store the value in a node
+ * @wr_mas: The maple write state
*
* Attempts to reuse the node, but may allocate.
*
mas->node = mt_mk_node(newnode, wr_mas->type);
mas_replace(mas, false);
} else {
- memcpy(mas_mn(mas), newnode, sizeof(struct maple_node));
+ memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
return true;
}
-static inline bool mas_node_store(struct ma_state *mas, void *entry,
- unsigned long min, unsigned long max,
- unsigned char end, void *content,
- enum maple_type mt, void __rcu **slots,
- unsigned long *pivots, unsigned char offset_end)
-{
- void __rcu **dst_slots;
- unsigned long *dst_pivots;
- unsigned char dst_offset;
- unsigned char new_end = end;
- unsigned char offset;
- struct maple_node reuse, *newnode;
- unsigned char copy_size, max_piv = mt_pivots[mt];
-
- offset = mas->offset;
- if (mas->last == max) {
- /* runs right to the end of the node */
- if (mas->last == mas->max)
- new_end = offset;
- /* don't copy this offset */
- offset_end++;
- } else if (mas->last < max) {
- /* new range ends in this range */
- if (unlikely(max == ULONG_MAX))
- mas_bulk_rebalance(mas, end, mt);
-
- new_end++;
- } else {
- if (mas_safe_pivot(mas, pivots, offset_end, mt) == mas->last)
- offset_end++;
-
- new_end -= offset_end - offset - 1;
- }
-
- /* new range starts within a range */
- if (min < mas->index)
- new_end++;
-
- /* Not enough room */
- if (new_end >= mt_slots[mt])
- return false;
-
- /* Not enough data. */
- if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[mt]) &&
- !(mas->mas_flags & MA_STATE_BULK))
- return false;
-
- /* set up node. */
- if (mt_in_rcu(mas->tree)) {
- mas_node_count(mas, 1);
- if (mas_is_err(mas))
- return false;
-
- newnode = mas_pop_node(mas);
- } else {
- memset(&reuse, 0, sizeof(struct maple_node));
- newnode = &reuse;
- }
-
- newnode->parent = mas_mn(mas)->parent;
- dst_pivots = ma_pivots(newnode, mt);
- dst_slots = ma_slots(newnode, mt);
- /* Copy from start to insert point */
- memcpy(dst_pivots, pivots, sizeof(unsigned long) * (offset + 1));
- memcpy(dst_slots, slots, sizeof(void *) * (offset + 1));
- dst_offset = offset;
-
- /* Handle insert of new range starting after old range */
- if (min < mas->index) {
- mas->offset++;
- rcu_assign_pointer(dst_slots[dst_offset], content);
- dst_pivots[dst_offset++] = mas->index - 1;
- }
-
- /* Store the new entry and range end. */
- if (dst_offset < max_piv)
- dst_pivots[dst_offset] = mas->last;
- mas->offset = dst_offset;
- rcu_assign_pointer(dst_slots[dst_offset++], entry);
-
- /* this range wrote to the end of the node. */
- if (offset_end > end)
- goto done;
-
- /* Copy to the end of node if necessary. */
- copy_size = end - offset_end + 1;
- memcpy(dst_slots + dst_offset, slots + offset_end,
- sizeof(void *) * copy_size);
- if (dst_offset < max_piv) {
- if (copy_size > max_piv - dst_offset)
- copy_size = max_piv - dst_offset;
- memcpy(dst_pivots + dst_offset, pivots + offset_end,
- sizeof(unsigned long) * copy_size);
- }
-
-done:
- if ((end == mt_slots[mt] - 1) && (new_end < mt_slots[mt] - 1))
- dst_pivots[new_end] = mas->max;
-
- if (!dst_pivots[mt_pivots[mt] - 1] || dst_pivots[mt_pivots[mt] - 1] == mas->max) {
- if (dst_pivots[new_end] && dst_pivots[new_end] < mas->max)
- new_end++;
- ma_set_meta(newnode, maple_leaf_64, 0, new_end);
- }
-
- if (mt_in_rcu(mas->tree)) {
- mas->node = mt_mk_node(newnode, mt);
- mas_replace(mas, false);
- } else {
- memcpy(mas_mn(mas), newnode, sizeof(struct maple_node));
- }
- trace_ma_write(__func__, mas, 0, entry);
- mas_update_gap(mas);
- return true;
-}
-
/*
- * mas_slot_store: Attempt to store a value in a slot.
- * @mas: the maple state
- * @entry: The entry to store
- * @min: The range minimum
- * @max: The range maximum
- * @end: The end of the maple node
- * @content: The current content
- * @mt: The maple node type
- * @slots: The pointer to the slots array
+ * mas_wr_slot_store() - Attempt to store a value in a slot.
+ * @wr_mas: the maple write state
*
* Return: True if stored, false otherwise
*/
return true;
}
-static inline bool mas_slot_store(struct ma_state *mas, void *entry,
- unsigned long min, unsigned long max,
- unsigned long end_piv,
- unsigned char end, void *content,
- enum maple_type mt, void __rcu **slots)
-{
- struct maple_node *node = mas_mn(mas);
- unsigned long *pivots = ma_pivots(node, mt);
- unsigned long lmax; /* Logical max. */
- unsigned char offset = mas->offset;
-
- if ((max > mas->last) && ((min != mas->index) || (offset != end)))
- return false;
-
- if (offset == end - 1)
- lmax = mas->max;
- else
- lmax = pivots[offset + 1];
-
- /* going to overwrite too many slots. */
- if (lmax < mas->last)
- return false;
-
- if (min == mas->index) {
- /* overwriting two or more ranges with one. */
- if (lmax == mas->last)
- return false;
-
- /* Overwriting all of offset and a portion of offset + 1. */
- rcu_assign_pointer(slots[offset], entry);
- pivots[offset] = mas->last;
- goto done;
- }
-
- /* Doesn't end on the next range end. */
- if (lmax != mas->last)
- return false;
-
- /* Overwriting a portion of offset and all of offset + 1 */
- if ((offset + 1 < mt_pivots[mt]) && (entry || pivots[offset + 1]))
- pivots[offset + 1] = mas->last;
-
- rcu_assign_pointer(slots[offset + 1], entry);
- pivots[offset] = mas->index - 1;
- mas->offset++; /* Keep mas accurate. */
-
-done:
- trace_ma_write(__func__, mas, 0, entry);
- mas_update_gap(mas);
- return true;
-}
-
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
while ((wr_mas->mas->last > wr_mas->end_piv) &&
wr_mas->pivots[new_end] = wr_mas->pivots[end];
if (new_end < node_pivots)
- ma_set_meta(mas_mn(mas), maple_leaf_64, 0,
- new_end);
+ ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
mas->offset = new_end;
rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
if (new_end < node_pivots)
- ma_set_meta(mas_mn(mas), maple_leaf_64, 0,
- new_end);
+ ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
wr_mas->pivots[end] = mas->last;
rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
}
/*
- * mas_store_entry() - Internal call to store a value
+ * mas_wr_store_entry() - Internal call to store a value
* @mas: The maple state
* @entry: The entry to store.
*
* Return: The contents that was stored at the index.
*/
-static inline void *mas_store_entry(struct ma_wr_state *wr_mas)
+static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
}
if (unlikely(!mas_wr_walk(wr_mas))) {
- mas_spanning_store(wr_mas);
+ mas_wr_spanning_store(wr_mas);
return wr_mas->content;
}
mas->min = mas_safe_min(mas, pivots, pslot);
mas->node = mn;
mas->offset = slot;
- mas_store_entry(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
}
/*
#endif
mas_wr_store_setup(&wr_mas);
- mas_store_entry(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
return wr_mas.content;
}
mas_wr_store_setup(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
retry:
- mas_store_entry(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
if (unlikely(mas_nomem(mas, gfp)))
goto retry;
/* Must reset to ensure spanning writes of last slot are detected */
mas_reset(mas);
mas_wr_store_setup(&wr_mas);
- mas_store_entry(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
if (mas_nomem(mas, GFP_KERNEL))
goto write_retry;
mtree_lock(mt);
retry:
- mas_store_entry(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
if (mas_nomem(&mas, gfp))
goto retry;