From: Liam R. Howlett
Date: Wed, 4 Dec 2024 20:23:35 +0000 (-0500)
Subject: mm/slub: Add misc sheaf functions
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=6a1e8abf3a44f013caa62d1e796c3c630cc42cf7;p=users%2Fjedix%2Flinux-maple.git

mm/slub: Add misc sheaf functions

Add kmem_cache_sheaf_count() to report the number of objects held in a
sheaf, and kmem_cache_refill_sheaf() to top a sheaf back up to the
cache's sheaf capacity.

Signed-off-by: Liam R. Howlett
---

diff --git a/include/linux/slab.h b/include/linux/slab.h
index a87dc3c6392f..dd8874f77449 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -823,6 +823,9 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
 struct slab_sheaf *
 kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count);
 
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+		struct slab_sheaf *sheaf);
+
 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
 		struct slab_sheaf *sheaf);
 
@@ -831,6 +834,8 @@ void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
 #define kmem_cache_alloc_from_sheaf(...)	
\
		alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
 
+unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf);
+
 /*
  * These macros allow declaring a kmem_buckets * parameter alongside size, which
  * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
diff --git a/mm/slub.c b/mm/slub.c
index a0e2cb7dfb51..4e72c7794799 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4962,6 +4962,18 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count)
 	return sheaf;
 }
 
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+		struct slab_sheaf *sheaf)
+{
+	if (!sheaf)
+		return -EINVAL;
+
+	//TODO: handle via oversize sheaf
+	//TODO: handle failures
+	refill_sheaf(s, sheaf, gfp);
+	return 0;
+}
+
 /*
  * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
  * It tries to refill the sheaf back to the cache's sheaf_capacity
@@ -5042,6 +5054,11 @@ out:
 	return ret;
 }
 
+unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf)
+{
+	return sheaf->size;
+}
+
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. 
We use __GFP_COMP, because we will need to
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index 46ae97e87517..f466a4135c5c 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -25,6 +25,7 @@ enum slab_state {
 struct slab_sheaf {
 	struct kmem_cache *cache;
 	unsigned int size;
+	bool oversized;
 	void *objects[];
 };
 
@@ -94,5 +95,12 @@ kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
 		struct slab_sheaf *sheaf);
 
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+		struct slab_sheaf *sheaf);
+
+static inline unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf)
+{
+	return sheaf->size;
+}
 
 #endif /* _TOOLS_SLAB_H */
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index c2bd723f878e..27dcef4e2fbc 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -277,14 +277,15 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count)
 {
 	struct slab_sheaf *sheaf;
 	size_t size;
+	bool oversized = false;
 
 	if (count > s->sheaf_capacity) {
-		printf("No support for over-capacity sheaf %u > %u\n", count,
-				s->sheaf_capacity);
-		return NULL;
+		size = sizeof(*sheaf) + sizeof(void *) * count;
+		oversized = true;
+	} else {
+		size = sizeof(*sheaf) + sizeof(void *) * s->sheaf_capacity;
 	}
 
-	size = sizeof(*sheaf) + sizeof(void *) * s->sheaf_capacity;
 	sheaf = malloc(size);
 	if (!sheaf) {
 		return NULL;
@@ -292,6 +293,7 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count)
 	memset(sheaf, 0, size);
 
 	sheaf->cache = s;
+	sheaf->oversized = oversized;
 	sheaf->size = kmem_cache_alloc_bulk(s, gfp, count, sheaf->objects);
 	if (!sheaf->size) {
 		free(sheaf);
@@ -301,11 +303,31 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count)
 	return sheaf;
 }
 
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+		struct slab_sheaf *sheaf)
+{
+	unsigned int refill;
+
+	if (sheaf->size >= s->sheaf_capacity)
+		return 0;
+	refill = s->sheaf_capacity - sheaf->size;
+
+	refill = kmem_cache_alloc_bulk(s, gfp, refill,
+			&sheaf->objects[sheaf->size]);
+	if (!refill)
+		return -ENOMEM;
+
+	sheaf->size += refill;
+	return 0;
+}
+
 void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
 		struct slab_sheaf *sheaf)
 {
-	if (sheaf->size)
+	if (sheaf->size) {
+		//s->non_kernel += sheaf->size;
 		kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
+	}
 
 	free(sheaf);
 }