*
* Return: The actual end of the data stored in @b_node
*/
-static inline unsigned char mas_store_b_node(struct ma_state *mas,
+static inline unsigned char mas_store_b_node(struct ma_wr_state *wr_mas,
struct maple_big_node *b_node,
- void *entry, unsigned char end,
- unsigned char offset_end,
- void *content)
+ unsigned char offset_end)
{
- unsigned char slot = mas->offset;
+ unsigned char slot;
unsigned char b_end;
/* Possible underflow of piv will wrap back to 0 before use. */
unsigned long piv;
- struct maple_node *node;
- enum maple_type mt;
- unsigned long *pivots;
+ struct ma_state *mas = wr_mas->mas;
+
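+	/* Record the node type in the big node so callers need not set it. */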
+ b_node->type = wr_mas->type;
b_end = 0;
+ slot = mas->offset;
if (slot) {
/* Copy start data up to insert. */
mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
/* Handle range starting after old range */
- b_node->slot[b_end] = content;
- if (!content)
+ b_node->slot[b_end] = wr_mas->content;
+ if (!wr_mas->content)
b_node->gap[b_end] = mas->index - 1 - piv;
b_node->pivot[b_end++] = mas->index - 1;
}
/* Store the new entry. */
mas->offset = b_end;
- b_node->slot[b_end] = entry;
+ b_node->slot[b_end] = wr_mas->entry;
b_node->pivot[b_end] = mas->last;
	/* Appended. */
	if (mas->last >= mas->max)
		return b_end;
/* Handle new range ending before old range ends */
- node = mas_mn(mas);
- mt = mte_node_type(mas->node);
- pivots = ma_pivots(node, mt);
- piv = mas_logical_pivot(mas, pivots, offset_end, mt);
+ piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
if (piv > mas->last) {
if (piv == ULONG_MAX)
- mas_bulk_rebalance(mas, b_node->b_end, mt);
-
- if (offset_end != slot) {
- void **slots = ma_slots(node, mt);
+ mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
- content = mas_slot_locked(mas, slots, offset_end);
- }
+ if (offset_end != slot)
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ offset_end);
- b_node->slot[++b_end] = content;
- if (!content)
+ b_node->slot[++b_end] = wr_mas->content;
+ if (!wr_mas->content)
b_node->gap[b_end] = piv - mas->last + 1;
b_node->pivot[b_end] = piv;
}
slot = offset_end + 1;
- if (slot > end)
+ if (slot > wr_mas->node_end)
return b_end;
/* Copy end data to the end of the node. */
- mas_mab_cp(mas, slot, end + 1, b_node, ++b_end);
+ mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
b_end = b_node->b_end - 1;
return b_end;
}
return true;
}
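+
+/*
+ * mas_wr_walk_descend() - One step of a write walk: note the increased depth,
+ * set the node type, find the offset of the write within the node, and cache
+ * the slot array.
+ * @wr_mas: The maple write state
+ */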
+static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
+{
+ wr_mas->mas->depth++;
+ wr_mas->type = mte_node_type(wr_mas->mas->node);
+ mas_wr_node_walk(wr_mas);
+ wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
+}
+
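+/*
+ * mas_wr_walk_traverse() - Advance the write state to the child node found by
+ * the last walk step: take on the child's range and reset the offset.
+ * @wr_mas: The maple write state
+ */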
+static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
+{
+ wr_mas->mas->max = wr_mas->r_max;
+ wr_mas->mas->min = wr_mas->r_min;
+ wr_mas->mas->node = wr_mas->content;
+ wr_mas->mas->offset = 0;
+}
/*
 * mas_wr_walk() - Walk the tree for a write.
* @wr_mas: The maple write state
 *
 * Return: True if the write is contained in a node, false on a spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

while (true) {
- wr_mas->mas->depth++;
- wr_mas->type = mte_node_type(wr_mas->mas->node);
- mas_wr_node_walk(wr_mas);
- wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
+ mas_wr_walk_descend(wr_mas);
if (mas_is_span_wr(wr_mas))
return false;
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
if (ma_is_leaf(wr_mas->type))
return true;
-		/* Traverse. */
-		mas->max = wr_mas->r_max;
-		mas->min = wr_mas->r_min;
-		mas->node = wr_mas->content;
-		mas->offset = 0;
+		mas_wr_walk_traverse(wr_mas);
}
return true;
}
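+
+/*
+ * mas_wr_walk_index() - Walk the tree for a write, like mas_wr_walk() but
+ * without checking for a spanning write.
+ * @wr_mas: The maple write state
+ *
+ * Return: True, once a leaf is reached.
+ */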
+static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ while (true) {
+ mas_wr_walk_descend(wr_mas);
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+ return true;
+ mas_wr_walk_traverse(wr_mas);
+ }
+ return true;
+}
/*
* mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
* @l_wr_mas: The left maple write state
 * @r_wr_mas: The right maple write state
 */
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
					struct ma_wr_state *r_wr_mas)
{
	struct ma_state *l_mas = l_wr_mas->mas;
	unsigned char l_slot;

	l_slot = l_mas->offset;
if (!l_wr_mas->content)
l_mas->index = l_wr_mas->r_min;
- else if ((l_mas->index == l_wr_mas->r_min) &&
+
+ if ((l_mas->index == l_wr_mas->r_min) &&
(l_slot &&
!mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
if (l_slot > 1)
/* Set up left side. */
l_mas = *mas;
+ l_wr_mas.entry = wr_mas->entry;
l_wr_mas.mas = &l_mas;
- /* Stop detection of spanning store on write walk */
- l_mas.last = l_mas.index;
- mas_wr_walk(&l_wr_mas);
+ mas_wr_walk_index(&l_wr_mas);
if (!wr_mas->entry) {
mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
} else
l_mas.last = mas->last;
- b_node.type = wr_mas->type;
/* Copy l_mas and store the value in b_node. */
- b_node.b_end = mas_store_b_node(&l_mas, &b_node, wr_mas->entry,
- l_wr_mas.node_end, l_wr_mas.node_end,
- l_wr_mas.content);
+ b_node.b_end = mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
/* Copy r_mas into b_node. */
mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
&b_node, b_node.b_end + 1);
return;
slow_path:
- b_node.type = wr_mas->type;
- b_node.b_end = mas_store_b_node(mas, &b_node, wr_mas->entry, wr_mas->node_end,
- wr_mas->offset_end, wr_mas->content);
+ b_node.b_end = mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
b_node.min = mas->min;
-
zero = MAPLE_BIG_NODE_SLOTS - b_node.b_end - 1;
memset(b_node.slot + b_node.b_end + 1, 0, sizeof(void *) * zero--);
memset(b_node.pivot + b_node.b_end + 1, 0,