mas_set_err(mas, -ENOMEM);
}
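+/*
+ * mas_empty_nodes() - Release the nodes held by the maple state
+ * @mas: The maple state
+ *
+ * Clears the pending node request, returns the sheaf and frees any single
+ * allocated node still attached to the maple state.
+ */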
+static inline void mas_empty_nodes(struct ma_state *mas)
+{
+ mas->node_request = 0;
+ if (mas->sheaf) {
+ mt_return_sheaf(mas->sheaf);
+ mas->sheaf = NULL;
+ }
+
+ if (mas->alloc) {
+ mt_free_one(mas->alloc);
+ mas->alloc = NULL;
+ }
+}
+
/*
* mas_free() - Free an encoded maple node
* @mas: The maple state
mas->mas_flags &= ~MA_STATE_REBALANCE;
}
mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
-
- mas->node_request = 0;
- if (mas->sheaf)
- mt_return_sheaf(mas->sheaf);
- mas->sheaf = NULL;
-
- if (mas->alloc)
- mt_free_one(mas->alloc);
- mas->alloc = NULL;
+ mas_empty_nodes(mas);
}
EXPORT_SYMBOL_GPL(mas_destroy);
struct maple_node *node = mte_to_node(mas->node);
struct maple_node *new_node = mte_to_node(new_mas->node);
enum maple_type type;
- unsigned char request, count, i;
+ unsigned char count, i;
void __rcu **slots;
void __rcu **new_slots;
unsigned long val;
/* Allocate memory for child nodes. */
type = mte_node_type(mas->node);
new_slots = ma_slots(new_node, type);
- request = mas_data_end(mas) + 1;
- count = mt_alloc_bulk(gfp, request, (void **)new_slots);
- if (unlikely(count < request)) {
- memset(new_slots, 0, request * sizeof(void *));
- mas_set_err(mas, -ENOMEM);
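+ /* Request one new node for each occupied slot of the source node */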
+ count = mas->node_request = mas_data_end(mas) + 1;
+ mas_alloc_nodes(mas, gfp);
+ if (unlikely(mas_is_err(mas)))
return;
- }
- /* Restore node type information in slots. */
slots = ma_slots(node, type);
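+ /* Combine each popped node with the type bits of the old slot */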
for (i = 0; i < count; i++) {
val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
val &= MAPLE_NODE_MASK;
- ((unsigned long *)new_slots)[i] |= val;
+ new_slots[i] = ma_mnode_ptr((unsigned long)mas_pop_node(mas) |
+ val);
}
}
/* Only allocate child nodes for non-leaf nodes. */
mas_dup_alloc(mas, new_mas, gfp);
if (unlikely(mas_is_err(mas)))
- return;
+ goto empty_mas;
} else {
/*
* This is the last leaf node and duplication is
/* Make them the same height */
new_mas->tree->ma_flags = mas->tree->ma_flags;
rcu_assign_pointer(new_mas->tree->ma_root, root);
+empty_mas:
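+ /* Release any nodes remaining in the maple state */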
+ mas_empty_nodes(mas);
}
/**