struct slab_sheaf *
kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count);
+void kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+		struct slab_sheaf *sheaf, unsigned int count);
+
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
struct slab_sheaf *sheaf);
#define kmem_cache_alloc_from_sheaf(...) \
alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
+unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf);
+
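For context, a minimal usage sketch of the prefilled sheaf API above, assuming
the cache s was created with a nonzero sheaf_capacity, that prefill returns
NULL on failure, and that the hypothetical use_object() consumes one object:

	static int example_use_sheaf(struct kmem_cache *s)
	{
		struct slab_sheaf *sheaf;
		void *obj;

		/* get a sheaf holding at least 10 objects */
		sheaf = kmem_cache_prefill_sheaf(s, GFP_KERNEL, 10);
		if (!sheaf)
			return -ENOMEM;

		/* allocation served from the prefilled sheaf */
		obj = kmem_cache_alloc_from_sheaf(s, GFP_KERNEL, sheaf);
		use_object(obj);

		/* hand any remaining objects back to the cache */
		kmem_cache_return_sheaf(s, GFP_KERNEL, sheaf);
		return 0;
	}
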
/*
* These macros allow declaring a kmem_buckets * parameter alongside size, which
* can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
return sheaf;
}
+void kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+		struct slab_sheaf *sheaf, unsigned int count)
+{
+	if (!sheaf)
+		return;
+
+	/* TODO: handle requests above capacity via an oversize sheaf */
+	if (count + sheaf->size > s->sheaf_capacity)
+		return;
+
+	/*
+	 * refill_sheaf() fills the sheaf up to sheaf_capacity and updates
+	 * sheaf->size itself, so the size must not be adjusted here.
+	 *
+	 * TODO: handle failures
+	 */
+	refill_sheaf(s, sheaf, gfp);
+}
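Until the TODOs above are resolved, a refill that fails or would exceed
capacity returns silently, so a cautious caller can re-check the object count
itself. A sketch, where "need" is a hypothetical target count:

	unsigned int have = kmem_cache_sheaf_count(sheaf);

	if (have < need) {
		/* count is the number of objects to add on top of the current size */
		kmem_cache_refill_sheaf(s, GFP_KERNEL, sheaf, need - have);
		if (kmem_cache_sheaf_count(sheaf) < need)
			return -ENOMEM;
	}
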
+
/*
* Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
* It tries to refill the sheaf back to the cache's sheaf_capacity
return ret;
}
+
+/*
+ * Returns the number of objects currently held in the sheaf.
+ */
+unsigned int kmem_cache_sheaf_count(struct slab_sheaf *sheaf)
+{
+	return sheaf->size;
+}
+
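Together with kmem_cache_alloc_from_sheaf(), the count allows draining a sheaf
completely; a minimal sketch, with init_object() standing in for hypothetical
per-object setup:

	while (kmem_cache_sheaf_count(sheaf) > 0) {
		void *obj = kmem_cache_alloc_from_sheaf(s, GFP_KERNEL, sheaf);

		init_object(obj);
	}
	kmem_cache_return_sheaf(s, GFP_KERNEL, sheaf);
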
/*
* To avoid unnecessary overhead, we pass through large allocation requests
* directly to the page allocator. We use __GFP_COMP, because we will need to