/* Functions */
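+/*
+ * Nodes are now zeroed at allocation time via __GFP_ZERO, replacing the
+ * memset() that mas_pop_node() performed when handing out a node.
+ */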
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
- return kmem_cache_alloc(maple_node_cache, gfp);
+ return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
}
static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return 0;
}
-/*
- * mas_pop_node() - Get a previously allocated maple node from the maple state.
- * @mas: The maple state
- *
- * Return: A pointer to a maple node.
- */
-static inline struct maple_node *mas_pop_node(struct ma_state *mas)
-{
- struct maple_alloc *ret, *node = mas->alloc;
- unsigned long total = mas_allocated(mas);
- unsigned int req = mas_alloc_req(mas);
-
- /* nothing or a request pending. */
- if (WARN_ON(!total))
- return NULL;
-
- if (total == 1) {
- /* single allocation in this ma_state */
- mas->alloc = NULL;
- ret = node;
- goto single_node;
- }
-
- if (node->node_count == 1) {
- /* Single allocation in this node. */
- mas->alloc = node->slot[0];
- mas->alloc->total = node->total - 1;
- ret = node;
- goto new_head;
- }
- node->total--;
- ret = node->slot[--node->node_count];
- node->slot[node->node_count] = NULL;
-
-single_node:
-new_head:
- if (req) {
- req++;
- mas_set_alloc_req(mas, req);
- }
-
- memset(ret, 0, sizeof(*ret));
- return (struct maple_node *)ret;
-}
-
-/*
- * mas_push_node() - Push a node back on the maple state allocation.
- * @mas: The maple state
- * @used: The used maple node
- *
- * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
- * requested node count as necessary.
- */
-static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
-{
- struct maple_alloc *reuse = (struct maple_alloc *)used;
- struct maple_alloc *head = mas->alloc;
- unsigned long count;
- unsigned int requested = mas_alloc_req(mas);
-
- count = mas_allocated(mas);
-
- reuse->request_count = 0;
- reuse->node_count = 0;
- if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
- head->slot[head->node_count++] = reuse;
- head->total++;
- goto done;
- }
-
- reuse->total = 1;
- if ((head) && !((unsigned long)head & 0x1)) {
- reuse->slot[0] = head;
- reuse->node_count = 1;
- reuse->total += head->total;
- }
-
- mas->alloc = reuse;
-done:
- if (requested > 1)
- mas_set_alloc_req(mas, requested - 1);
-}
-
-/*
- * mas_alloc_nodes() - Allocate nodes into a maple state
- * @mas: The maple state
- * @gfp: The GFP Flags
- */
-static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
-{
- struct maple_alloc *node;
- unsigned long allocated = mas_allocated(mas);
- unsigned int requested = mas_alloc_req(mas);
- unsigned int count;
- void **slots = NULL;
- unsigned int max_req = 0;
-
- if (!requested)
- return;
-
- mas_set_alloc_req(mas, 0);
- if (mas->mas_flags & MA_STATE_BULK)
- return;
-
- if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
- node = (struct maple_alloc *)mt_alloc_one(gfp);
- if (!node)
- goto nomem_one;
-
- if (allocated) {
- node->slot[0] = mas->alloc;
- node->node_count = 1;
- } else {
- node->node_count = 0;
- }
-
- mas->alloc = node;
- node->total = ++allocated;
- requested--;
- }
-
- node = mas->alloc;
- node->request_count = 0;
- while (requested) {
- max_req = MAPLE_ALLOC_SLOTS - node->node_count;
- slots = (void **)&node->slot[node->node_count];
- max_req = min(requested, max_req);
- count = mt_alloc_bulk(gfp, max_req, slots);
- if (!count)
- goto nomem_bulk;
-
- if (node->node_count == 0) {
- node->slot[0]->node_count = 0;
- node->slot[0]->request_count = 0;
- }
-
- node->node_count += count;
- allocated += count;
- node = node->slot[0];
- requested -= count;
- }
- mas->alloc->total = allocated;
- return;
-
-nomem_bulk:
- /* Clean up potential freed allocations on bulk failure */
- memset(slots, 0, max_req * sizeof(unsigned long));
-nomem_one:
- mas_set_alloc_req(mas, requested);
- if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
- mas->alloc->total = allocated;
- mas_set_err(mas, -ENOMEM);
-}
-
/*
* mas_free() - Free an encoded maple node
* @mas: The maple state
if (mt_in_rcu(mas->tree))
ma_free_rcu(tmp);
else
- mas_push_node(mas, tmp);
+ mt_free_one(tmp);
}
/*
*/
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
- unsigned long allocated = mas_allocated(mas);
+ int ret;
- if (allocated < count) {
- mas_set_alloc_req(mas, count - allocated);
- mas_alloc_nodes(mas, gfp);
- }
+ ret = kmem_cache_prefill_percpu_array(maple_node_cache, count, gfp);
+ if (!ret)
+ return;
+
+ mas_set_err(mas, ret);
+ mas_set_alloc_req(mas, count);
}
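+
+/*
+ * A minimal sketch of the new scheme, assuming the
+ * kmem_cache_prefill_percpu_array() API from this series: the per-cpu
+ * array is topped up outside the tree lock, and the write path then
+ * allocates with GFP_NOWAIT in the expectation that the array serves it:
+ *
+ *	ret = kmem_cache_prefill_percpu_array(maple_node_cache, 3, GFP_KERNEL);
+ *	if (ret)
+ *		return ret;
+ *	mtree_lock(mt);
+ *	node = mt_alloc_one(GFP_NOWAIT | __GFP_NOWARN);
+ *	...
+ *	mtree_unlock(mt);
+ */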
/*
*/
static void mas_node_count(struct ma_state *mas, int count)
{
- return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
+ return mas_node_count_gfp(mas, count, mas->gfp);
}
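+
+/*
+ * mas->gfp is assumed to be set by the entry points before any internal
+ * node allocation can happen; see mas_store() and the mtree_*() wrappers,
+ * which set it before taking the tree lock.
+ */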
/*
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
- return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
+ return mt_mk_node(ma_mnode_ptr(mt_alloc_one(mas->gfp)), b_node->type);
}
/*
if (in_rcu)
ma_free_rcu(tmp);
else
- mas_push_node(mas, tmp);
+ mt_free_one(tmp);
}
/*
count++;
}
- l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
+ l_mas.node = mt_mk_node(ma_mnode_ptr(mt_alloc_one(mas->gfp)),
mte_node_type(mast->orig_l->node));
l_mas.depth++;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
if (mas_is_err(mas))
return;
- newnode = mas_pop_node(mas);
+ newnode = mt_alloc_one(mas->gfp);
} else {
newnode = &reuse;
}
mas->node = mt_mk_node(newnode, mt);
ma_set_meta(newnode, mt, 0, tmp);
- new_left = mas_pop_node(mas);
+ new_left = mt_alloc_one(mas->gfp);
new_left->parent = left->parent;
mt = mte_node_type(l_mas.node);
slots = ma_slots(new_left, mt);
/* replace parent. */
offset = mte_parent_slot(mas->node);
mt = mas_parent_type(&l_mas, l_mas.node);
- parent = mas_pop_node(mas);
+ parent = mt_alloc_one(mas->gfp);
slots = ma_slots(parent, mt);
pivs = ma_pivots(parent, mt);
memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
if (mas_is_err(wr_mas->mas))
return 0;
- node = mas_pop_node(wr_mas->mas);
+ node = mt_alloc_one(wr_mas->mas->gfp);
node->parent = mas_mn(wr_mas->mas)->parent;
wr_mas->mas->node = mt_mk_node(node, b_type);
mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
if (unlikely(mas_is_err(mas)))
return 0;
- node = mas_pop_node(mas);
+ node = mt_alloc_one(mas->gfp);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
if (mas_is_err(mas))
return 0;
- node = mas_pop_node(mas);
+ node = mt_alloc_one(mas->gfp);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
if (mas_is_err(mas))
return false;
- newnode = mas_pop_node(mas);
+ newnode = mt_alloc_one(mas->gfp);
} else {
memset(&reuse, 0, sizeof(struct maple_node));
newnode = &reuse;
* @entry: The entry to store.
*
 * The @mas->index and @mas->last are used to set the range for @entry.
- * Note: The @mas should have pre-allocated entries to ensure there is memory to
- * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
+ * Note: The per-cpu array should have pre-allocated entries to ensure there is
+ * memory to store the entry.
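+ *
+ * A sketch of the expected usage: if the array runs dry, the store sets
+ * -ENOMEM and can be retried once mas_nomem() has refilled the array:
+ *
+ *	mas_store(&mas, entry);
+ *	if (mas_nomem(&mas, GFP_KERNEL))
+ *		mas_store(&mas, entry);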
*
* Return: the first entry between mas->index and mas->last or %NULL.
*/
MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(__func__, mas, 0, entry);
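+	/*
+	 * Writes under the tree lock must not sleep; the prefilled per-cpu
+	 * array is expected to satisfy these GFP_NOWAIT allocations.
+	 */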
+ mas->gfp = GFP_NOWAIT | __GFP_NOWARN;
+
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MAS_WARN_ON(mas, mas->index > mas->last))
pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
mas_wr_store_setup(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
+ mas->gfp = gfp;
retry:
mas_wr_store_entry(&wr_mas);
if (unlikely(mas_nomem(mas, gfp)))
mas_wr_store_setup(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
+	mas->gfp = GFP_ATOMIC | __GFP_NOFAIL;
retry:
mas_wr_store_entry(&wr_mas);
if (unlikely(mas_nomem(mas, GFP_ATOMIC | __GFP_NOFAIL)))
return false;
}
+ mas->status = ma_start;
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
- mas_alloc_nodes(mas, gfp);
+ mas_node_count_gfp(mas, mas_alloc_req(mas), gfp);
mtree_lock(mas->tree);
} else {
- mas_alloc_nodes(mas, gfp);
+ mas_node_count_gfp(mas, mas_alloc_req(mas), gfp);
}
- if (!mas_allocated(mas))
+ mas_set_alloc_req(mas, 0);
+ if (mas_is_err(mas))
return false;
- mas->status = ma_start;
return true;
}
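+
+/*
+ * Note that mas_nomem() now refills the shared per-cpu array, dropping the
+ * tree lock when the gfp flags allow blocking, rather than allocating into
+ * a per-mas list; callers simply retry the failed operation afterwards.
+ */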
if (index > last)
return -EINVAL;
+ mas.gfp = gfp;
mtree_lock(mt);
retry:
mas_wr_store_entry(&wr_mas);
if (first > last)
return -EINVAL;
+ ms.gfp = gfp;
mtree_lock(mt);
retry:
mas_insert(&ms, entry);
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
+ mas.gfp = gfp;
mtree_lock(mt);
retry:
ret = mas_empty_area(&mas, min, max, size);
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
+ mas.gfp = gfp;
mtree_lock(mt);
retry:
ret = mas_empty_area_rev(&mas, min, max, size);
MA_STATE(mas, mt, index, index);
trace_ma_op(__func__, &mas);
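+	/*
+	 * Erase may still allocate nodes (e.g. for rebalancing); such
+	 * GFP_NOWAIT requests are expected to be served by the per-cpu array.
+	 */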
+ mas.gfp = GFP_NOWAIT | __GFP_NOWARN;
mtree_lock(mt);
entry = mas_erase(&mas);
mtree_unlock(mt);
struct rcu_test_struct2 *test;
};
-static int get_alloc_node_count(struct ma_state *mas)
-{
- int count = 1;
- struct maple_alloc *node = mas->alloc;
-
- if (!node || ((unsigned long)node & 0x1))
- return 0;
- while (node->node_count) {
- count += node->node_count;
- node = node->slot[0];
- }
- return count;
-}
-
-static void check_mas_alloc_node_count(struct ma_state *mas)
-{
- mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 1, GFP_KERNEL);
- mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 3, GFP_KERNEL);
- MT_BUG_ON(mas->tree, get_alloc_node_count(mas) != mas->alloc->total);
- mas_destroy(mas);
-}
-
-/*
- * check_new_node() - Check the creation of new nodes and error path
- * verification.
- */
-static noinline void __init check_new_node(struct maple_tree *mt)
-{
-
- struct maple_node *mn, *mn2, *mn3;
- struct maple_alloc *smn;
- struct maple_node *nodes[100];
- int i, j, total;
-
- MA_STATE(mas, mt, 0, 0);
-
- check_mas_alloc_node_count(&mas);
-
- /* Try allocating 3 nodes */
- mtree_lock(mt);
- mt_set_non_kernel(0);
- /* request 3 nodes to be allocated. */
- mas_node_count(&mas, 3);
- /* Allocation request of 3. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 3);
- /* Allocate failed. */
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- mas_push_node(&mas, mn);
- mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL); /* free */
- mtree_unlock(mt);
-
-
- /* Try allocating 1 node, then 2 more */
- mtree_lock(mt);
- /* Set allocation request to 1. */
- mas_set_alloc_req(&mas, 1);
- /* Check Allocation request of 1. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- mas_set_err(&mas, -ENOMEM);
- /* Validate allocation request. */
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- /* Eat the requested node. */
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] != NULL);
- MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas.status = ma_start;
- mas_nomem(&mas, GFP_KERNEL);
- /* Allocate 3 nodes, will fail. */
- mas_node_count(&mas, 3);
- /* Drop the lock and allocate 3 nodes. */
- mas_nomem(&mas, GFP_KERNEL);
- /* Ensure 3 are allocated. */
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- /* Allocation request of 0. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
-
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
- /* Ensure we counted 3. */
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- /* Free. */
- mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL);
-
- /* Set allocation request to 1. */
- mas_set_alloc_req(&mas, 1);
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- mas_set_err(&mas, -ENOMEM);
- /* Validate allocation request. */
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != 1);
- /* Check the node is only one node. */
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] != NULL);
- MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != 1);
- MT_BUG_ON(mt, mas.alloc->node_count);
-
- mas_set_alloc_req(&mas, 2); /* request 2 more. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
- for (i = 2; i >= 0; i--) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != i);
- MT_BUG_ON(mt, !mn);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
-
- total = 64;
- mas_set_alloc_req(&mas, total); /* request 2 more. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (i = total; i > 0; i--) {
- unsigned int e = 0; /* expected node_count */
-
- if (!MAPLE_32BIT) {
- if (i >= 35)
- e = i - 34;
- else if (i >= 5)
- e = i - 4;
- else if (i >= 2)
- e = i - 1;
- } else {
- if (i >= 4)
- e = i - 3;
- else if (i >= 1)
- e = i - 1;
- else
- e = 0;
- }
-
- MT_BUG_ON(mt, mas.alloc->node_count != e);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
- MT_BUG_ON(mt, !mn);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
-
- total = 100;
- for (i = 1; i < total; i++) {
- mas_set_alloc_req(&mas, i);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (j = i; j > 0; j--) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
- MT_BUG_ON(mt, !mn);
- MT_BUG_ON(mt, not_empty(mn));
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != j);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- mas_set_alloc_req(&mas, i);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (j = 0; j <= i/2; j++) {
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- nodes[j] = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
- }
-
- while (j) {
- j--;
- mas_push_node(&mas, nodes[j]);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != i);
- for (j = 0; j <= i/2; j++) {
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
- }
- mas_reset(&mas);
- MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
-
- }
-
- /* Set allocation request. */
- total = 500;
- mas_node_count(&mas, total);
- /* Drop the lock and allocate the nodes. */
- mas_nomem(&mas, GFP_KERNEL);
- MT_BUG_ON(mt, !mas.alloc);
- i = 1;
- smn = mas.alloc;
- while (i < total) {
- for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) {
- i++;
- MT_BUG_ON(mt, !smn->slot[j]);
- if (i == total)
- break;
- }
- smn = smn->slot[0]; /* next. */
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != total);
- mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL); /* Free. */
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- for (i = 1; i < 128; i++) {
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
- for (j = i; j > 0; j--) { /*Free the requests */
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- }
-
- for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
- MA_STATE(mas2, mt, 0, 0);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
- for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mas_push_node(&mas2, mn);
- MT_BUG_ON(mt, mas_allocated(&mas2) != j);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- MT_BUG_ON(mt, mas_allocated(&mas2) != i);
-
- for (j = i; j > 0; j--) { /*Free the requests */
- MT_BUG_ON(mt, mas_allocated(&mas2) != j);
- mn = mas_pop_node(&mas2); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
- }
-
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
-
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
- /* Check the limit of pop/push/pop */
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_alloc_req(&mas));
- MT_BUG_ON(mt, mas.alloc->node_count != 1);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas.alloc->node_count != 1);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-
- for (i = 3; i < MAPLE_NODE_MASK * 3; i++) {
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put it back */
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn2 = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put them back */
- mas_push_node(&mas, mn2);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn2 = mas_pop_node(&mas); /* get the next node. */
- mn3 = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put them back */
- mas_push_node(&mas, mn2);
- mas_push_node(&mas, mn3);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas_destroy(&mas);
- }
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 5); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != 5);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 10); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != 10);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1);
- mas_destroy(&mas);
-
- mtree_unlock(mt);
-}
-
/*
* Check erasing including RCU.
*/
mtree_destroy(mt);
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
- mas_reset(&mas);
- mt_zero_nr_tallocated();
- mt_set_non_kernel(200);
- mas_expected_entries(&mas, max);
- for (count = 0; count <= max; count++) {
- mas.index = mas.last = count;
- mas_store(&mas, xa_mk_value(count));
- MT_BUG_ON(mt, mas_is_err(&mas));
- }
mas_destroy(&mas);
rcu_barrier();
/*
}
/* End of depth first search tests */
-/* Preallocation testing */
-static noinline void __init check_prealloc(struct maple_tree *mt)
-{
- unsigned long i, max = 100;
- unsigned long allocated;
- unsigned char height;
- struct maple_node *mn;
- void *ptr = check_prealloc;
- MA_STATE(mas, mt, 10, 20);
-
- mt_set_non_kernel(1000);
- for (i = 0; i <= max; i++)
- mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
-
- /* Spanning store */
- mas_set_range(&mas, 470, 500);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated == 0);
- MT_BUG_ON(mt, allocated != 1 + height * 3);
- mas_destroy(&mas);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
-
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated == 0);
- MT_BUG_ON(mt, allocated != 1 + height * 3);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- mas_destroy(&mas);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
-
-
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 1 + height * 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- mas_destroy(&mas);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
-
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 1 + height * 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- mas_destroy(&mas);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
-
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 1 + height * 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- mas_destroy(&mas);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
-
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 1 + height * 3);
- mas_store_prealloc(&mas, ptr);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- /* Slot store does not need allocations */
- mas_set_range(&mas, 6, 9);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
- mas_store_prealloc(&mas, ptr);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- mas_set_range(&mas, 6, 10);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 0);
- mas_store_prealloc(&mas, ptr);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- /* Split */
- mas_set_range(&mas, 54, 54);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 1 + height * 2);
- mas_store_prealloc(&mas, ptr);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mt_set_non_kernel(1);
- /* Spanning store */
- mas_set_range(&mas, 1, 100);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 0);
- mas_destroy(&mas);
-
-
- /* Spanning store */
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated == 0);
- MT_BUG_ON(mt, allocated != 1 + height * 3);
- mas_store_prealloc(&mas, ptr);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mas_set_range(&mas, 0, 200);
- mt_set_non_kernel(1);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- MT_BUG_ON(mt, allocated != 0);
-}
-/* End of preallocation testing */
-
/* Spanning writes, writes that span nodes and layers of the tree */
static noinline void __init check_spanning_write(struct maple_tree *mt)
{
check_dfs_preorder(&tree);
mtree_destroy(&tree);
- mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_prealloc(&tree);
- mtree_destroy(&tree);
-
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_spanning_write(&tree);
mtree_destroy(&tree);
check_erase_testset(&tree);
mtree_destroy(&tree);
- mt_init_flags(&tree, 0);
- check_new_node(&tree);
- mtree_destroy(&tree);
-
if (!MAPLE_32BIT) {
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_rcu_simulated(&tree);