*
* Attempts to reuse the node, but may allocate.
*/
-static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
- unsigned char new_end)
+static inline void mas_wr_node_store(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
void __rcu **dst_slots;
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
unsigned char height = mas_mt_height(mas);
+ unsigned char new_end;
- if (mas->last == wr_mas->end_piv)
+ /* Assume mas->last adds an entry (corrected below when it lands on end_piv) */
+ new_end = mas->end + 1 - wr_mas->offset_end + mas->offset;
+ if (mas->last == wr_mas->end_piv) {
offset_end++; /* don't copy this offset */
- else if (unlikely(wr_mas->r_max == ULONG_MAX))
+ new_end--;
+ } else if (unlikely(wr_mas->r_max == ULONG_MAX)) {
mas_bulk_rebalance(mas, mas->end, wr_mas->type);
+ }
/* set up node. */
if (in_rcu) {
if (wr_mas->r_min < mas->index) {
rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
dst_pivots[mas->offset++] = mas->index - 1;
+ new_end++;
}
/* Store the new entry and range end. */
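
For reference, the open-coded new_end above is intended to produce the same value the removed mas_wr_new_end() argument used to carry into this function. Below is a minimal userspace sketch of that equivalence, with the relevant wr_mas/mas fields reduced to plain parameters; it is illustrative only and not part of the patch, and the "helper" arithmetic mirrors mas_wr_new_end() as it exists in lib/maple_tree.c.

/* Check that the open-coded new_end matches the helper-style arithmetic. */
#include <assert.h>
#include <stdbool.h>

/* Same shape as mas_wr_new_end(): start from end + 2, subtract the collapsed
 * slots, then drop one for each boundary that already lines up.
 */
static unsigned char helper_new_end(unsigned char end, unsigned char offset,
				    unsigned char offset_end,
				    bool r_min_is_index, bool last_is_end_piv)
{
	unsigned char new_end = end + 2 - (offset_end - offset);

	if (r_min_is_index)
		new_end--;
	if (last_is_end_piv)
		new_end--;
	return new_end;
}

/* Mirrors the rewritten mas_wr_node_store() flow above. */
static unsigned char inlined_new_end(unsigned char end, unsigned char offset,
				     unsigned char offset_end,
				     bool r_min_is_index, bool last_is_end_piv)
{
	unsigned char new_end = end + 1 - offset_end + offset;

	if (last_is_end_piv)
		new_end--;
	if (!r_min_is_index)	/* r_min < mas->index adds a leading entry */
		new_end++;
	return new_end;
}

int main(void)
{
	for (unsigned int end = 1; end < 16; end++)
		for (unsigned int off = 0; off <= end; off++)
			for (unsigned int off_e = off; off_e <= end; off_e++)
				for (int a = 0; a < 2; a++)
					for (int b = 0; b < 2; b++)
						assert(helper_new_end(end, off, off_e, a, b) ==
						       inlined_new_end(end, off, off_e, a, b));
	return 0;
}
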
/*
* mas_wr_append: Attempt to append
* @wr_mas: the maple write state
- * @new_end: The end of the node after the modification
*
* This is currently unsafe in rcu mode since the end of the node may be cached
* by readers while the node contents may be updated which could result in
* inaccurate information.
*/
-static inline void mas_wr_append(struct ma_wr_state *wr_mas,
- unsigned char new_end)
+static inline void mas_wr_append(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
void __rcu **slots;
unsigned char end = mas->end;
+ unsigned char new_end = mas_wr_new_end(wr_mas);
if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
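
The fast path above is taken for writes that land at the end of a node's data. A hypothetical caller-side illustration through the public API follows; the tree and function names are made up, and whether the append path is actually chosen still depends on the store-type classification, so treat this as a sketch rather than a guarantee.

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/gfp.h>

static DEFINE_MTREE(example_tree);

/* Store two adjacent ranges; the second begins right after the first one's
 * last pivot, which is the shape of write mas_wr_append() is meant to absorb
 * without reallocating the node (outside of RCU mode, per the comment above).
 */
static int append_shaped_stores(void)
{
	int ret;

	ret = mtree_store_range(&example_tree, 0, 9, xa_mk_value(1), GFP_KERNEL);
	if (ret)
		return ret;

	return mtree_store_range(&example_tree, 10, 19, xa_mk_value(2), GFP_KERNEL);
}
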
static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
- unsigned char new_end = mas_wr_new_end(wr_mas);
switch (mas->store_type) {
case wr_exact_fit:
mas_update_gap(mas);
break;
case wr_append:
- mas_wr_append(wr_mas, new_end);
+ mas_wr_append(wr_mas);
break;
case wr_slot_store:
mas_wr_slot_store(wr_mas);
break;
case wr_node_store:
- mas_wr_node_store(wr_mas, new_end);
+ mas_wr_node_store(wr_mas);
break;
case wr_spanning_store:
mas_wr_spanning_store(wr_mas);