	};
	struct kmem_cache *cache;
	unsigned int size;
+	int node; /* only used for rcu_sheaf */
	void *objects[];
};
	 */
	__rcu_free_sheaf_prepare(s, sheaf);
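+	/* return the sheaf to the barn of the node it was filled on */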
-	barn = get_node(s, numa_mem_id())->barn;
+	barn = get_node(s, sheaf->node)->barn;
	/* due to slab_free_hook() */
	if (unlikely(sheaf->size == 0))
	rcu_sheaf->objects[rcu_sheaf->size++] = obj;
-	if (likely(rcu_sheaf->size < s->sheaf_capacity))
+	if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
		rcu_sheaf = NULL;
-	else
+	} else {
		pcs->rcu_free = NULL;
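+		/* remember the node the sheaf was filled on, for the RCU callback */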
+		rcu_sheaf->node = numa_mem_id();
+	}
	local_unlock(&s->cpu_sheaves->lock);
	struct slab_sheaf *main, *empty;
	bool init = slab_want_init_on_free(s);
	unsigned int batch, i = 0;
+	void *remote_objects[PCS_BATCH_MAX];
+	unsigned int remote_nr = 0;
+	int node = numa_mem_id();
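+	/* objects from remote nodes bypass the sheaves and are freed in batches */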
+next_remote_batch:
	while (i < size) {
		struct slab *slab = virt_to_slab(p[i]);
		if (unlikely(!slab_free_hook(s, p[i], init, false))) {
			p[i] = p[--size];
			if (!size)
-				return;
+				goto flush_remote;
+			continue;
+		}
+
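+		/* stash remote-node objects; flush when the batch buffer fills */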
+		if (unlikely(IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)) {
+			remote_objects[remote_nr] = p[i];
+			p[i] = p[--size];
+			if (++remote_nr >= PCS_BATCH_MAX)
+				goto flush_remote;
			continue;
		}
	 */
fallback:
	__kmem_cache_free_bulk(s, size, p);
+
+flush_remote:
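+	/* free the collected remote objects directly; resume the loop if objects remain */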
+	if (remote_nr) {
+		__kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]);
+		if (i < size) {
+			remote_nr = 0;
+			goto next_remote_batch;
+		}
+	}
}
#ifndef CONFIG_SLUB_TINY
	if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
		return;
-	if (!s->cpu_sheaves || !free_to_pcs(s, object))
-		do_slab_free(s, slab, object, object, 1, addr);
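+	/* use the percpu sheaves only when the object is local to this node */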
+	if (s->cpu_sheaves && likely(!IS_ENABLED(CONFIG_NUMA) ||
+				     slab_nid(slab) == numa_mem_id())) {
+		if (likely(free_to_pcs(s, object))) {
+			return;
+		}
+	}
+
+	do_slab_free(s, slab, object, object, 1, addr);
}
#ifdef CONFIG_MEMCG