*/
static inline void mast_topiary(struct maple_subtree_state *mast)
{
+ MA_WR_STATE(wr_mas, mast->orig_l, NULL);
unsigned char l_off, r_off, offset;
unsigned long l_index;
struct maple_enode *child;
void __rcu **slots;
- struct ma_wr_state wr_mas;
- wr_mas.mas = mast->orig_l;
wr_mas.type = mte_node_type(mast->orig_l->node);
/* The left node is consumed, so add to the free list. */
l_index = mast->orig_l->index;
static inline void
mast_ascend_free(struct maple_subtree_state *mast)
{
+ MA_WR_STATE(wr_mas, mast->orig_r, NULL);
struct maple_enode *left = mast->orig_l->node;
struct maple_enode *right = mast->orig_r->node;
- struct ma_wr_state wr_mas;
mas_ascend(mast->orig_l);
mas_ascend(mast->orig_r);
if (mast->orig_r->max < mast->orig_r->last)
mast->orig_r->offset = mas_data_end(mast->orig_r) + 1;
else {
- wr_mas.mas = mast->orig_r;
wr_mas.type = mte_node_type(mast->orig_r->node);
mas_wr_node_walk(&wr_mas);
}
*/
static inline int mas_spanning_store(struct ma_wr_state *wr_mas)
{
- struct ma_wr_state r_wr_mas, l_wr_mas;
struct maple_subtree_state mast;
struct maple_big_node b_node;
struct ma_state *mas;
MA_STATE(l_mas, NULL, 0, 0);
MA_STATE(r_mas, NULL, 0, 0);
+ MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
+ MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
+
mas = wr_mas->mas;
trace_ma_op(__func__, mas);
r_mas.last++;
r_mas.index = r_mas.last;
- r_wr_mas.mas = &r_mas;
mas_wr_walk_index(&r_wr_mas);
r_mas.last = r_mas.index = mas->last;
/* Set up left side. */
l_mas = *mas;
- l_wr_mas.entry = wr_mas->entry;
- l_wr_mas.mas = &l_mas;
mas_wr_walk_index(&l_wr_mas);
if (!wr_mas->entry) {
*/
static inline void *mas_store_entry(struct ma_state *mas, void *entry)
{
- struct ma_wr_state wr_mas;
+ MA_WR_STATE(wr_mas, mas, entry);
if ((wr_mas.content = mas_start(mas)) ||
mas_is_none(mas) || mas->node == MAS_ROOT) {
return wr_mas.content;
}
- wr_mas.mas = mas;
- wr_mas.entry = entry;
if (!mas_wr_walk(&wr_mas)) {
mas_spanning_store(&wr_mas);
return wr_mas.content;
*/
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
- struct ma_wr_state wr_mas;
+ MA_WR_STATE(wr_mas, mas, entry);
wr_mas.content = mas_start(mas);
if (wr_mas.content)
return NULL;
}
- wr_mas.mas = mas;
- wr_mas.entry = entry;
/* spanning writes always overwrite something */
if (!mas_wr_walk(&wr_mas))
goto exists;
mt_destroy_walk(enode, mt->ma_flags, true);
}
}
+/*
+ * mas_wr_store_setup() - Prepare a maple write state for a store operation.
+ * @wr_mas: the maple write state (wr_mas->mas must already be set)
+ *
+ * If the maple state is not at the start position, either reset it (when it
+ * points at no node) or cache the node type and upper pivot so a spanning
+ * write can be detected.  A spanning write also resets the state —
+ * presumably so the subsequent store walks again from the top; confirm
+ * against mas_is_span_wr()/mas_store_entry() callers.
+ */
+void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+{
+ if (!mas_is_start(wr_mas->mas)) {
+ if (mas_is_none(wr_mas->mas))
+ mas_reset(wr_mas->mas);
+ else {
+ /* Cache limits/type needed by the spanning-write check. */
+ wr_mas->r_max = wr_mas->mas->max;
+ wr_mas->type = mte_node_type(wr_mas->mas->node);
+ if (mas_is_span_wr(wr_mas))
+ mas_reset(wr_mas->mas);
+ }
+ }
+
+}
/* Interface */
*/
void *mas_store(struct ma_state *mas, void *entry)
{
- void *existing = NULL;
- struct ma_wr_state wr_mas;
+ MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
}
#endif
- wr_mas.mas = mas;
- if ( !mas_is_start(mas)) {
- if (mas_is_none(mas))
- mas_reset(mas);
- else {
- wr_mas.r_max = mas->max;
- wr_mas.type = mte_node_type(mas->node);
- wr_mas.entry = entry;
- if (mas_is_span_wr(&wr_mas))
- mas_reset(mas);
- }
- }
- existing = mas_store_entry(mas, entry);
- if (unlikely(mas_is_err(mas)))
- return existing;
-
- return existing;
+ mas_wr_store_setup(&wr_mas);
+ wr_mas.content = mas_store_entry(mas, entry);
+ return wr_mas.content;
}
/*
*/
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
- struct ma_wr_state wr_mas;
-
- wr_mas.mas = mas;
- if (!mas_is_start(mas)) {
- if (mas_is_none(mas))
- mas_reset(mas);
- else {
- wr_mas.r_max = mas->max;
- wr_mas.type = mte_node_type(mas->node);
- wr_mas.entry = entry;
- if (mas_is_span_wr(&wr_mas))
- mas_reset(mas);
- }
- }
+ MA_WR_STATE(wr_mas, mas, entry);
+ mas_wr_store_setup(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
retry:
mas_store_entry(mas, entry);