#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>
-//#include <linux/mm.h> // for task_size
#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>
}
// Free the node allocations cached in the maple state.
-static inline void ma_free_rcu_alloc(struct maple_node *node)
+static inline void mas_empty_alloc(struct ma_state *mas)
{
- int alloc = 0;
+ struct maple_node *node;
- while (alloc < MAPLE_NODE_SLOTS && node->slot[alloc]) {
- if (ma_mnode_ptr(node->slot[alloc])->slot[0])
- ma_free_rcu_alloc(node->slot[alloc]);
- else
- kfree(node->slot[alloc]);
- alloc++;
+ while (mas_get_alloc(mas)) {
+ node = mas_next_alloc(mas);
+ kmem_cache_free(maple_node_cache, node);
}
- kfree(node);
}
-void mas_empty_alloc(struct ma_state *mas)
-{
- struct maple_node *node = mas_get_alloc(mas);
-
- if (node)
- ma_free_rcu_alloc(node);
- mas->alloc = NULL;
-}
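/*
 * For reference, minimal sketches of the two helpers the new
 * mas_empty_alloc() relies on. These are illustrations under assumptions,
 * not this patch's definitions (hence the _sketch suffix): the
 * preallocated nodes are presumed to be chained through slot[0], with
 * mas->alloc pointing at the head of the chain.
 */
static inline struct maple_node *mas_get_alloc_sketch(struct ma_state *mas)
{
	return (struct maple_node *)mas->alloc;	// assumed: chain head
}

static inline struct maple_node *mas_next_alloc_sketch(struct ma_state *mas)
{
	struct maple_node *node = mas_get_alloc_sketch(mas);

	if (node)
		mas->alloc = node->slot[0];	// assumed: pop the head
	return node;
}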
/*
 * Check if there was an error allocating and do the allocation if necessary.
 * If there are allocations, then free them.
 */
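/*
 * The function body this comment documents is elided by the hunk above.
 * What follows is a hedged sketch of its likely shape, modeled on the
 * mainline mas_nomem(); mas_alloc_nodes() and the exact control flow are
 * assumptions, not code from this patch.
 */
bool mas_nomem_sketch(struct ma_state *mas, gfp_t gfp)
{
	if (mas->node != MA_ERROR(-ENOMEM)) {
		// No allocation error: return unused preallocations.
		mas_empty_alloc(mas);
		return false;
	}

	if (gfpflags_allow_blocking(gfp)) {
		// Sleeping allocation allowed: retry without the tree lock.
		mtree_unlock(mas->tree);
		mas_alloc_nodes(mas, gfp);	// assumed helper
		mtree_lock(mas->tree);
	} else {
		mas_alloc_nodes(mas, gfp);	// assumed helper
	}

	if (!mas_get_alloc(mas))
		return false;

	mas->node = MAS_START;
	return true;
}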
unsigned char end = mas_data_end(mas);
void *contents = mas_get_rcu_slot(mas, slot);
unsigned char b_end = 0;
- unsigned long piv = mas->min;
+ // piv may underflow here: when mas->min is 0 it wraps to ULONG_MAX,
+ // and the "piv + 1" below wraps back to 0 before piv is used.
+ unsigned long piv = mas->min - 1;
	if (slot) {
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
+		b_end = b_node->b_end;	// assumed per mainline: mas_mab_cp() tracks b_node->b_end
		piv = b_node->pivot[b_end - 1];
	}
- if (mas->index && piv < mas->index - 1) {
+ if (piv + 1 < mas->index) {
b_node->slot[b_end] = contents;
if (!contents)
b_node->gap[b_end] = mas->index - 1 - piv;
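/*
 * Standalone illustration (not part of the patch, hypothetical name) of
 * why the underflow above is harmless: unsigned wraparound round-trips
 * exactly, so when mas->min is 0, piv holds ULONG_MAX and "piv + 1"
 * evaluates to 0 again.
 */
static inline void piv_wrap_illustration(void)
{
	unsigned long start = 0;	// stands in for mas->min
	unsigned long piv = start - 1;	// underflows to ULONG_MAX

	WARN_ON(piv != ULONG_MAX);
	WARN_ON(piv + 1 != start);	// wraps back to 0 before use
}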
MT_BUG_ON(mt, mas_get_alloc_cnt(&mas) != 0);
ma_free_rcu(mn);
+ mas.node = MAS_START;
+ mas_nomem(&mas, GFP_KERNEL);
// Allocate 3 nodes; this will fail.
mas_node_cnt(&mas, 3);
// Drop the lock and allocate 3 nodes.
mtree_unlock(mt);
mtree_destroy(mt);
}
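/*
 * For reference, the caller-side idiom the test above exercises: redo the
 * whole operation whenever mas_nomem() reports that it refilled the node
 * allocations. A hedged sketch; MA_STATE() and mas_store_gfp() are the
 * later mainline spellings and may differ in this tree.
 */
static void store_retry_sketch(struct maple_tree *mt, unsigned long index,
			       void *entry)
{
	MA_STATE(mas, mt, index, index);

	mtree_lock(mt);
retry:
	mas_store_gfp(&mas, entry, GFP_KERNEL);
	if (mas_nomem(&mas, GFP_KERNEL))
		goto retry;	// allocations refilled; retry the store
	mtree_unlock(mt);
}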
+
+static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
+ bool verbose)
+{
+ unsigned long i = max, j;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ nr_tallocated = 0;
+ while (i) {
+ MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+ for (j = i; j <= max; j++)
+ check_index_load(mt, j);
+
+ check_load(mt, i - 1, NULL);
+ i--;
+ }
+ check_load(mt, max + 1, NULL);
+
+ if (verbose) {
+ rcu_barrier();
+ mt_dump(mt);
+ pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
+ __func__, max, mt_get_alloc_size()/1024, nr_allocated,
+ nr_tallocated);
+ }
+}
+
static noinline void check_seq(struct maple_tree *mt, unsigned long max,
bool verbose)
{
check_new_node(&tree);
mtree_destroy(&tree);
+ mtree_init(&tree, MAPLE_ALLOC_RANGE);
+ check_rev_seq(&tree, 10, true);
+ mtree_destroy(&tree);
+ mtree_init(&tree, 0);
+ check_seq(&tree, 10, true);
+ mtree_destroy(&tree);
+
+ mtree_init(&tree, 0);
+ check_dup_tree(&tree);
+ mtree_destroy(&tree);
+
mtree_init(&tree, 0);
check_dfs_preorder(&tree);
mtree_destroy(&tree);
check_nomem(&tree);
+ mtree_init(&tree, 0);
check_seq(&tree, 16, false);
mtree_destroy(&tree);
+ mtree_init(&tree, 0);
check_seq(&tree, 1000, true);
mtree_destroy(&tree);