radix tree test suite: add support for slab bulk APIs
author		Liam R. Howlett <Liam.Howlett@Oracle.com>
		Thu, 14 Apr 2022 06:07:12 +0000 (23:07 -0700)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
		Tue, 26 Apr 2022 14:22:00 +0000 (10:22 -0400)
Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the
radix tree test suite.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
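
As a rough usage sketch (not part of this commit; the "demo" cache and the
sizes are illustrative), a caller hands the new entry points an array of
object pointers and treats a zero return from kmem_cache_alloc_bulk() as an
all-or-nothing failure:

	/* Illustrative only: exercise the new bulk entry points. */
	struct kmem_cache *cache;
	void *objs[8];

	cache = kmem_cache_create("demo", 256, 0, SLAB_PANIC, NULL);
	if (kmem_cache_alloc_bulk(cache, __GFP_DIRECT_RECLAIM, 8, objs)) {
		/* ... use objs[0..7] ... */
		kmem_cache_free_bulk(cache, 8, objs);
	}
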
tools/include/linux/slab.h
tools/testing/radix-tree/linux.c

diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index f41d8a0eb1a4204bf312d89de69a066b7d2a0ae1..2322184125736cc71ecebd03d5fca42999e6afde 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -35,4 +35,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
                        unsigned int align, unsigned int flags,
                        void (*ctor)(void *));
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+                         void **list);
+
 #endif         /* _TOOLS_SLAB_H */
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index e64741ef89efb360d169cb7611d13259f8e208f3..75c416ef86188eb087eeea9e7c34a4576e5f9814 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -92,14 +92,13 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
        return p;
 }
 
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 {
        assert(objp);
        uatomic_dec(&nr_allocated);
        uatomic_dec(&cachep->nr_allocated);
        if (kmalloc_verbose)
                printf("Freeing %p to slab\n", objp);
-       pthread_mutex_lock(&cachep->lock);
        if (cachep->nr_objs > 10 || cachep->align) {
                memset(objp, POISON_FREE, cachep->size);
                free(objp);
@@ -109,9 +108,88 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
                node->parent = cachep->objs;
                cachep->objs = node;
        }
+}
+
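+/* Single-object free: takes cachep->lock around kmem_cache_free_locked(). */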
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+{
+       pthread_mutex_lock(&cachep->lock);
+       kmem_cache_free_locked(cachep, objp);
        pthread_mutex_unlock(&cachep->lock);
 }
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+{
+       if (kmalloc_verbose)
+               pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);
+
+       pthread_mutex_lock(&cachep->lock);
+       for (size_t i = 0; i < size; i++)
+               kmem_cache_free_locked(cachep, list[i]);
+       pthread_mutex_unlock(&cachep->lock);
+}
+
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+                         void **p)
+{
+       size_t i;
+
+       if (kmalloc_verbose)
+               pr_debug("Bulk alloc %zu\n", size);
+
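+       /*
+        * A non-sleeping allocation may only draw from the cache's
+        * non_kernel budget; the whole request fails if it cannot.
+        */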
+       if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+               if (cachep->non_kernel < size)
+                       return 0;
+
+               cachep->non_kernel -= size;
+       }
+
+       pthread_mutex_lock(&cachep->lock);
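+       /* Fast path: satisfy the whole request from the cached freelist. */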
+       if (cachep->nr_objs >= size) {
+               struct radix_tree_node *node;
+
+               for (i = 0; i < size; i++) {
+                       node = cachep->objs;
+                       cachep->nr_objs--;
+                       cachep->objs = node->parent;
+                       p[i] = node;
+                       node->parent = NULL;
+               }
+               pthread_mutex_unlock(&cachep->lock);
+       } else {
+               pthread_mutex_unlock(&cachep->lock);
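+               /* Slow path: allocate each object individually. */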
+               for (i = 0; i < size; i++) {
+                       if (cachep->align) {
+                               posix_memalign(&p[i], cachep->align,
+                                              cachep->size);
+                       } else {
+                               p[i] = malloc(cachep->size);
+                       }
+                       if (cachep->ctor)
+                               cachep->ctor(p[i]);
+                       else if (gfp & __GFP_ZERO)
+                               memset(p[i], 0, cachep->size);
+               }
+       }
+
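+       /* Account each object handed out, whichever path filled p[]. */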
+       for (i = 0; i < size; i++) {
+               uatomic_inc(&nr_allocated);
+               uatomic_inc(&cachep->nr_allocated);
+               uatomic_inc(&cachep->nr_tallocated);
+               if (kmalloc_verbose)
+                       printf("Allocating %p from slab\n", p[i]);
+       }
+
+       return size;
+}
+
 struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
                unsigned int flags, void (*ctor)(void *))
@@ -129,3 +207,48 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
        ret->non_kernel = 0;
        return ret;
 }
+
+/*
+ * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
+ */
+void test_kmem_cache_bulk(void)
+{
+       int i;
+       void *list[12];
+       static struct kmem_cache *test_cache, *test_cache2;
+
+       /*
+        * Test the bulk allocators on an unaligned kmem_cache so freed
+        * objects stay cached and bulk alloc/free must reuse them.
+        */
+       test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
+
+       for (i = 0; i < 5; i++)
+               list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+       for (i = 0; i < 5; i++)
+               kmem_cache_free(test_cache, list[i]);
+       assert(test_cache->nr_objs == 5);
+
+       kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
+       kmem_cache_free_bulk(test_cache, 5, list);
+
+       for (i = 0; i < 12; i++)
+               list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+       for (i = 0; i < 12; i++)
+               kmem_cache_free(test_cache, list[i]);
+
+       /*
+        * Freed objects are only cached while nr_objs <= 10, so the
+        * twelfth free is released back to the system immediately.
+        */
+       assert(test_cache->nr_objs == 11);
+
+       /* Aligned caches will immediately free */
+       test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
+
+       kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
+       kmem_cache_free_bulk(test_cache2, 10, list);
+       assert(!test_cache2->nr_objs);
+}
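
As a minimal, hypothetical harness (not part of this commit; the real test
suite wires the call up itself), the self-test above could be driven
stand-alone like so, assuming its declaration is visible to the caller:

	#include <stdio.h>

	void test_kmem_cache_bulk(void);

	int main(void)
	{
		test_kmem_cache_bulk();	/* asserts internally on any failure */
		printf("test_kmem_cache_bulk: OK\n");
		return 0;
	}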