struct maple_node *parent;
void __rcu *slot[MAPLE_NODE_SLOTS];
};
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ };
struct maple_range_64 mr64;
struct maple_range_32 mr32;
struct maple_range_16 mr16;
*/
#include <linux/maple_tree.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *maple_node_cache;
+
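+/* Allocate a single zeroed maple node from the dedicated slab cache. */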
+static struct maple_node *mt_alloc_one(gfp_t gfp)
+{
+ return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
+}
+
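+/* RCU callback that finally returns a node to the slab cache once readers are done. */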
+static void mt_free_rcu(struct rcu_head *head)
+{
+ struct maple_node *node = container_of(head, struct maple_node, rcu);
+ kmem_cache_free(maple_node_cache, node);
+}
+
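+/* Detach the node by pointing ->parent back at itself, then defer the actual free until after an RCU grace period. */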
+static void mt_free(struct maple_node *node)
+{
+ node->parent = node;
+ call_rcu(&node->rcu, mt_free_rcu);
+}
static inline enum maple_type mt_node_type(const void *entry)
{
return ((unsigned long)entry >> 3) & 0x0F;
}
static inline bool mt_is_leaf(const void *entry)
{
return mt_node_type(entry) < maple_range_16;
}
-static inline bool ma_is_reserved(const void *entry)
+static inline bool mt_is_reserved(const void *entry)
{
return ((unsigned long)entry < 4096) && xa_is_internal(entry);
}
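+ /* Tag the node pointer with its type (bits 3 and up) and set bit 1 so it is recognised as an internal node entry rather than a value. */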
return (void *)((unsigned long)node | (type << 3) | 2);
}
-void mtree_init(struct maple_tree *mt) {
+void mtree_init(struct maple_tree *mt)
+{
spin_lock_init(&mt->ma_lock);
mt->ma_flags = 0;
mt->ma_root = NULL;
}
-void *mtree_load(struct maple_tree *mt, unsigned long index) {
+
+void *mtree_load(struct maple_tree *mt, unsigned long index)
+{
return NULL;
}
-int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, gfp_t gfp) {
+
+int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, gfp_t gfp)
+{
return -EINVAL;
}
+
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
- unsigned long last, void *entry, gfp_t gfp) {
+ unsigned long last, void *entry, gfp_t gfp)
+{
return -EINVAL;
}
-int mtree_erase(struct maple_tree *mt, unsigned long index) {
+
+int mtree_erase(struct maple_tree *mt, unsigned long index)
+{
return -EINVAL;
}
-void mtree_destroy(struct maple_tree *mt) {
+void mtree_destroy(struct maple_tree *mt)
+{
+}
+
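+/* Create the slab cache backing all maple node allocations, aligned to the node size; called once at boot. */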
+void __init maple_tree_init(void)
+{
+ maple_node_cache = kmem_cache_create("maple node",
+ sizeof(struct maple_node), sizeof(struct maple_node),
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, NULL);
}
#ifdef MT_DEBUG
xa_to_value(entry), entry);
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
- else if (ma_is_reserved(entry))
+ else if (mt_is_reserved(entry))
pr_cont("UNKNOWN ENTRY (%p)\n", entry);
else
pr_cont("%p\n", entry);
struct kmem_cache {
pthread_mutex_t lock;
- int size;
+ unsigned int size;
+ unsigned int align;
int nr_objs;
void *objs;
void (*ctor)(void *);
};
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
{
- struct radix_tree_node *node;
+ void *p;
- if (!(flags & __GFP_DIRECT_RECLAIM))
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
return NULL;
pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs) {
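+ /* Pop a cached object off the free list; free objects are chained through the ->parent pointer. */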
+ struct radix_tree_node *node = cachep->objs;
cachep->nr_objs--;
- node = cachep->objs;
cachep->objs = node->parent;
pthread_mutex_unlock(&cachep->lock);
node->parent = NULL;
+ p = node;
} else {
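+ /* Nothing cached: allocate fresh memory, honouring the cache alignment and zeroing for __GFP_ZERO when no constructor runs. */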
pthread_mutex_unlock(&cachep->lock);
- node = malloc(cachep->size);
+ if (cachep->align)
+ posix_memalign(&p, cachep->align, cachep->size);
+ else
+ p = malloc(cachep->size);
if (cachep->ctor)
- cachep->ctor(node);
+ cachep->ctor(p);
+ else if (gfp & __GFP_ZERO)
+ memset(p, 0, cachep->size);
}
uatomic_inc(&nr_allocated);
if (kmalloc_verbose)
- printf("Allocating %p from slab\n", node);
- return node;
+ printf("Allocating %p from slab\n", p);
+ return p;
}
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
if (kmalloc_verbose)
printf("Freeing %p to slab\n", objp);
pthread_mutex_lock(&cachep->lock);
- if (cachep->nr_objs > 10) {
+ if (cachep->nr_objs > 10 || cachep->align) {
memset(objp, POISON_FREE, cachep->size);
free(objp);
} else {
}
struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t offset,
+kmem_cache_create(const char *name, unsigned int size, unsigned int align,
unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *ret = malloc(sizeof(*ret));
pthread_mutex_init(&ret->lock, NULL);
ret->size = size;
+ ret->align = align;
ret->nr_objs = 0;
ret->objs = NULL;
ret->ctor = ctor;
void kmem_cache_free(struct kmem_cache *cachep, void *objp);
struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t offset,
+kmem_cache_create(const char *name, unsigned int size, unsigned int align,
unsigned long flags, void (*ctor)(void *));
#endif /* SLAB_H */
tree.ma_root = xa_mk_value(0);
mt_dump(&tree);
- posix_memalign(&node, 128, 128);
+ node = mt_alloc_one(GFP_KERNEL);
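+ /* Hand-build a one-node tree: the root node's parent is the tree itself with bit 0 set. */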
node->parent = (void *)((unsigned long)(&tree) | 1);
node->slot[0] = xa_mk_value(0);
node->slot[1] = xa_mk_value(1);
node->mr64.pivot[2] = 0;
tree.ma_root = mt_mk_node(node, maple_leaf_64);
mt_dump(&tree);
+
+ mt_free(node);
}
void maple_tree_tests(void)
int __weak main(void)
{
- radix_tree_init();
+ maple_tree_init();
maple_tree_tests();
- radix_tree_cpu_dead(1);
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);