Two helpers were needed: one to query the number of objects held in a sheaf, and one to refill a sheaf.
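
For illustration, a caller holding a prefilled sheaf might use the two
helpers roughly like this (a sketch only; the cache pointer, the count of
5, and the error handling are illustrative rather than part of this patch):

	struct slab_sheaf *sheaf;

	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 5);
	if (!sheaf)
		return -ENOMEM;

	/* ... consume objects via kmem_cache_alloc_from_sheaf() ... */

	/* Top the sheaf back up once it runs low. */
	if (kmem_cache_sheaf_count(sheaf) < 5 &&
	    kmem_cache_refill_sheaf(cache, GFP_KERNEL, sheaf))
		return -ENOMEM;

	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);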
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
struct slab_sheaf *
kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count);
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf *sheaf);
#define kmem_cache_alloc_from_sheaf(...) \
alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
+unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf);
+
/*
* These macros allow declaring a kmem_buckets * parameter alongside size, which
* can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
return sheaf;
}
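+
+/*
+ * Refill a sheaf obtained by kmem_cache_prefill_sheaf() back up to the
+ * cache's sheaf_capacity. Returns 0 on success or -EINVAL for a NULL sheaf.
+ */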
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf)
+{
+ if (!sheaf)
+ return -EINVAL;
+
+ /* TODO: handle refilling an oversized sheaf */
+ /* TODO: propagate allocation failures from refill_sheaf() */
+ refill_sheaf(s, sheaf, gfp);
+ return 0;
+}
+
/*
* Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
* It tries to refill the sheaf back to the cache's sheaf_capacity
return ret;
}
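+
+/*
+ * Return the number of objects currently held in the sheaf.
+ */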
+unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf)
+{
+ return sheaf->size;
+}
/*
* To avoid unnecessary overhead, we pass through large allocation requests
* directly to the page allocator. We use __GFP_COMP, because we will need to
struct slab_sheaf {
struct kmem_cache *cache;
unsigned int size;
+ bool oversized; /* allocated with room for count > sheaf_capacity */
void *objects[];
};
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf *sheaf);
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+static inline unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf)
+{
+ return sheaf->size;
+}
#endif /* _TOOLS_SLAB_H */
{
struct slab_sheaf *sheaf;
size_t size;
+ bool oversized = false;
if (count > s->sheaf_capacity) {
- printf("No support for over-capacity sheaf %u > %u\n", count,
- s->sheaf_capacity);
- return NULL;
+ size = sizeof(*sheaf) + sizeof(void *) * count;
+ oversized = true;
+ } else {
+ size = sizeof(*sheaf) + sizeof(void *) * s->sheaf_capacity;
}
- size = sizeof(*sheaf) + sizeof(void *) * s->sheaf_capacity;
sheaf = malloc(size);
if (!sheaf) {
return NULL;
}
memset(sheaf, 0, size);
sheaf->cache = s;
+ sheaf->oversized = oversized;
sheaf->size = kmem_cache_alloc_bulk(s, gfp, count, sheaf->objects);
if (!sheaf->size) {
free(sheaf);
return NULL;
}

return sheaf;
}
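+
+/*
+ * Userspace counterpart of the kernel's kmem_cache_refill_sheaf(): top the
+ * sheaf back up to sheaf_capacity with a bulk allocation.
+ */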
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf)
+{
+ unsigned int refill;
+
+ /* Nothing to refill for a full or oversized sheaf; avoids underflow below. */
+ if (sheaf->size >= s->sheaf_capacity)
+ return 0;
+
+ refill = s->sheaf_capacity - sheaf->size;
+
+ refill = kmem_cache_alloc_bulk(s, gfp, refill,
+ &sheaf->objects[sheaf->size]);
+ if (!refill)
+ return -ENOMEM;
+
+ sheaf->size += refill;
+ return 0;
+}
+
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf *sheaf)
{
- if (sheaf->size)
+ if (sheaf->size) {
+ /* TODO: decide whether to account these objects via s->non_kernel */
kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
+ }
free(sheaf);
}