* Maple state flags
* * MA_STATE_BULK - Bulk insert mode
* * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
- * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
*/
#define MA_STATE_BULK 1
#define MA_STATE_REBALANCE 2
-#define MA_STATE_PREALLOC 4
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
* want to examine what happens if a single store operation were to
* overwrite multiple entries within a self-balancing B-tree.
*/
- if (mas->mas_flags & MA_STATE_PREALLOC) {
- mas_wr_prealloc_setup(&wr_mas);
- mas_wr_store_type(&wr_mas);
- mas_wr_store_entry(&wr_mas);
- MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
- return wr_mas.content;
- }
-
mas_wr_store_prealloc(&wr_mas, entry);
WARN_ON_ONCE(mas->store_type == wr_invalid);
- if (mas_is_err(mas))
- return NULL;
+
+ /*
+ * The nodes needed for this store are expected to have been
+ * preallocated just above, but with no memory the preallocation
+ * can still fail.  Bail out here if it did.
+ */
+ if (unlikely(mas_is_err(mas)))
+ return wr_mas.content;
mas_wr_store_entry(&wr_mas);
- mas_destroy(mas);
return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
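/*
 * Illustrative sketch, not part of this patch: a typical mas_store()
 * call writes one range under the tree lock and returns the previous
 * contents of that range.  With this change the node allocation happens
 * inside the call, so no mas_destroy() is needed afterwards.
 */
static void example_store(struct maple_tree *mt, void *entry)
{
	MA_STATE(mas, mt, 10, 20);	/* store entry over [10, 20] */

	mas_lock(&mas);
	mas_store(&mas, entry);
	mas_unlock(&mas);
}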
MA_WR_STATE(wr_mas, mas, entry);
int ret;
- trace_ma_write(__func__, mas, 0, entry);
mas->gfp = gfp;
+ trace_ma_write(__func__, mas, 0, entry);
retry:
mas_wr_preallocate(&wr_mas, entry, gfp);
WARN_ON_ONCE(mas->store_type == wr_invalid);
if (unlikely(mas_nomem(mas, gfp)))
goto retry;
+
if (mas_is_err(mas))
goto out;
mas_wr_store_entry(&wr_mas);
out:
ret = xa_err(mas->node);
- mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
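/*
 * Illustrative sketch, not part of this patch: mas_store_gfp() runs the
 * allocation retry loop internally, so a caller only checks the integer
 * return value.
 */
static int example_store_gfp(struct maple_tree *mt, void *entry)
{
	MA_STATE(mas, mt, 10, 20);
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
	mas_unlock(&mas);

	return ret;	/* 0 on success, an xa_err() value on failure */
}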
mas->gfp = __GFP_NOFAIL;
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
- mas_destroy(mas);
-
}
EXPORT_SYMBOL_GPL(mas_store_prealloc);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
- int ret = 0;
+ int ret;
mas_wr_preallocate(&wr_mas, entry, gfp);
- if (mas_is_err(mas)) {
- ret = xa_err(mas->node);
- mas_destroy(mas);
- mas_reset(mas);
- return ret;
- }
+ if (likely(!mas_is_err(mas)))
+ return 0;
- mas->mas_flags |= MA_STATE_PREALLOC;
+ ret = xa_err(mas->node);
+ mas_reset(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
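/*
 * Illustrative sketch, not part of this patch: the usual pairing of
 * mas_preallocate() with mas_store_prealloc(), assuming the caller
 * already holds the lock protecting the tree (as the VMA code does).
 * After this change a failed preallocation resets the maple state
 * itself, so the caller only propagates the error.
 */
static int example_prealloc_store(struct ma_state *mas, void *entry)
{
	int ret;

	ret = mas_preallocate(mas, entry, GFP_KERNEL);
	if (ret)
		return ret;		/* typically -ENOMEM */

	mas_store_prealloc(mas, entry);	/* __GFP_NOFAIL: cannot fail */
	return 0;
}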
mas->mas_flags &= ~MA_STATE_REBALANCE;
}
- mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+
+ mas->mas_flags &= ~MA_STATE_BULK;
}
EXPORT_SYMBOL_GPL(mas_destroy);
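/*
 * Illustrative sketch, not part of this patch: bulk insert still pairs
 * mas_expected_entries() with a final mas_destroy(), which performs any
 * pending rebalance and clears MA_STATE_BULK above.
 */
static int example_bulk_fill(struct ma_state *mas, void **entries,
			     unsigned long nr)
{
	unsigned long i;
	int ret;

	ret = mas_expected_entries(mas, nr);
	if (ret)
		return ret;

	for (i = 0; i < nr; i++) {
		mas_set_range(mas, i, i);	/* one index per entry */
		mas_store(mas, entries[i]);
	}

	mas_destroy(mas);	/* leave bulk mode, free unused nodes */
	return 0;
}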
/* Add working room for split (2 nodes) + new parents */
mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
- /* Detect if allocations run out */
- mas->mas_flags |= MA_STATE_PREALLOC;
-
if (!mas_is_err(mas))
return 0;
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(ms, mt, first, last);
- int ret = 0;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
goto retry;
mtree_unlock(mt);
- if (mas_is_err(&ms))
- ret = xa_err(ms.node);
+ if (!mas_is_err(&ms))
+ return 0;
- mas_destroy(&ms);
- return ret;
+ return xa_err(ms.node);
}
EXPORT_SYMBOL(mtree_insert_range);
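/*
 * Illustrative sketch, not part of this patch: mtree_insert_range()
 * takes the tree lock itself and fails if any part of the range is
 * already occupied, so no maple state management is needed here.
 */
static int example_insert(struct maple_tree *mt, void *entry)
{
	return mtree_insert_range(mt, 100, 199, entry, GFP_KERNEL);
}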
unlock:
mtree_unlock(mt);
- mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
unlock:
mtree_unlock(mt);
- mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
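/*
 * Illustrative sketch, not part of this patch: mtree_alloc_range()
 * searches [min, max] for a free area of @size, stores @entry there and
 * returns the chosen start through @startp.  The tree is assumed to be
 * initialised with MT_FLAGS_ALLOC_RANGE.
 */
static int example_alloc(struct maple_tree *mt, void *entry,
			 unsigned long *startp)
{
	return mtree_alloc_range(mt, startp, entry, 16, 0, 1000,
				 GFP_KERNEL);
}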
return -ENOMEM;
/* We must make sure the anon_vma is allocated. */
- if (unlikely(anon_vma_prepare(vma))) {
- mas_destroy(&mas);
+ if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
- }
/* Lock the VMA before expanding to prevent concurrent page faults */
vma_start_write(vma);
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma(vma, vma->vm_flags);
- mas_destroy(&mas);
validate_mm(mm);
return error;
}
return -ENOMEM;
/* We must make sure the anon_vma is allocated. */
- if (unlikely(anon_vma_prepare(vma))) {
- mas_destroy(&mas);
+ if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
- }
/* Lock the VMA before expanding to prevent concurrent page faults */
vma_start_write(vma);
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma(vma, vma->vm_flags);
- mas_destroy(&mas);
validate_mm(mm);
return error;
}
err = vma_dup_policy(vma, new);
if (err)
- goto out_free_vmi;
+ goto out_free_vma;
err = anon_vma_clone(new, vma);
if (err)
out_free_mpol:
mpol_put(vma_policy(new));
-out_free_vmi:
- vma_iter_free(vmi);
out_free_vma:
vm_area_free(new);
return err;