slab: add sheaf support for batching kfree_rcu() operations
author Vlastimil Babka <vbabka@suse.cz>
Thu, 4 Jul 2024 08:06:07 +0000 (10:06 +0200)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Tue, 4 Mar 2025 16:35:05 +0000 (11:35 -0500)
Extend the sheaf infrastructure for more efficient kfree_rcu() handling.
For caches with sheaves, on each cpu maintain a rcu_free sheaf in
addition to the main and spare sheaves.
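
For orientation, a minimal sketch of the per-cpu state this builds on.
The field names match their uses in the diff below, but the actual
definitions come from earlier patches in the sheaves series:

  struct slab_sheaf {
          struct rcu_head rcu_head;     /* queued via call_rcu() once full */
          struct kmem_cache *cache;     /* owning cache, read by the RCU handler */
          unsigned int size;            /* objects currently held */
          void *objects[];              /* up to s->sheaf_capacity pointers */
  };

  struct slub_percpu_sheaves {
          local_lock_t lock;
          struct slab_sheaf *main;      /* alloc/free fast path */
          struct slab_sheaf *spare;     /* one extra full or empty sheaf */
          struct slab_sheaf *rcu_free;  /* partially filled kfree_rcu() batch */
          struct node_barn *barn;       /* per-node store of spare sheaves */
  };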

kfree_rcu() operations will try to put objects on this sheaf. Once full,
the sheaf is detached and submitted to call_rcu() with a handler that
will try to put it in the barn, or, when the barn is full, flush it to
slab pages using bulk free. Then a new empty sheaf must be obtained to
put more objects there.
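
Condensed into one illustrative helper (sheaf_full_handoff() is a name
made up for this sketch; locking and the capacity check are omitted,
and the real logic is in __kfree_rcu_sheaf() and rcu_free_sheaf() in
the mm/slub.c hunks below):

  static void sheaf_full_handoff(struct slub_percpu_sheaves *pcs)
  {
          struct slab_sheaf *rcu_sheaf = pcs->rcu_free;

          /* Detach the full sheaf; the next free obtains a new one. */
          pcs->rcu_free = NULL;

          /*
           * After the grace period, rcu_free_sheaf() runs the free
           * hooks, tries to park the sheaf in the barn, and bulk-frees
           * to slab pages only when the barn has no room for it.
           */
          call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf);
  }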

It's possible that no empty sheaf is available to become the new
rcu_free sheaf, and the allocation in kfree_rcu() context can only use
GFP_NOWAIT and thus may fail. In that case, fall back to the existing
kfree_rcu() machinery.
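
From the caller's side nothing changes. A hypothetical cache created
with a sheaf capacity (kmem_cache_args::sheaf_capacity was added
earlier in the series; the value 32 below is arbitrary) gets the
batched path, while other objects keep using the old machinery:

  struct my_obj {
          struct rcu_head rcu;
          /* ... payload ... */
  };

  struct kmem_cache_args args = {
          .sheaf_capacity = 32,   /* nonzero capacity enables cpu_sheaves */
  };

  struct kmem_cache *c = kmem_cache_create("my_objs",
                                           sizeof(struct my_obj), &args, 0);

  struct my_obj *obj = kmem_cache_alloc(c, GFP_KERNEL);

  /* may batch onto this cpu's rcu_free sheaf; falls back if it cannot */
  kfree_rcu(obj, rcu);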

Expected advantages:
- batching the kfree_rcu() operations, which could eventually replace
  the existing batching
- sheaves can be reused for allocations via the barn instead of being
  flushed to slabs, which is more efficient
  - this includes cases where only some cpus are allowed to process rcu
    callbacks (Android)

Possible disadvantage:
- objects might wait for longer than their own grace period (the
  sheaf's grace period is determined by the last object freed into it),
  increasing memory usage - but the existing batching does that too

Implement this only for CONFIG_KVFREE_RCU_BATCHED, as the tiny
implementation favors a smaller memory footprint over performance.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slab.h
mm/slab_common.c
mm/slub.c

index 8daaec53b6ecfc44171191d421adb12e5cba2c58..94e9959e1aefa350d3d74e3f5309fde7a5cf2ec8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -459,6 +459,8 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
        return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
 }
 
+bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
+
 /* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
index ceeefb287899a82f30ad79b403556001c1860311..c6853450ed74160cfcb497c09f92c1f9f7b12629 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1613,6 +1613,24 @@ static void kfree_rcu_work(struct work_struct *work)
                kvfree_rcu_list(head);
 }
 
+static bool kfree_rcu_sheaf(void *obj)
+{
+       struct kmem_cache *s;
+       struct folio *folio;
+       struct slab *slab;
+
+       folio = virt_to_folio(obj);
+       if (unlikely(!folio_test_slab(folio)))
+               return false;
+
+       slab = folio_slab(folio);
+       s = slab->slab_cache;
+       if (s->cpu_sheaves)
+               return __kfree_rcu_sheaf(s, obj);
+
+       return false;
+}
+
 static bool
 need_offload_krc(struct kfree_rcu_cpu *krcp)
 {
@@ -1957,6 +1975,9 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
        if (!head)
                might_sleep();
 
+       if (kfree_rcu_sheaf(ptr))
+               return;
+
        // Queue the object but don't yet schedule the batch.
        if (debug_rcu_head_queue(ptr)) {
                // Probable double kfree_rcu(), just leak.
index 4007b551c4f6468ff0e29c33c7dfdb22aaaf289e..79bcc778ecc9ac4173bf3951eb0410a561ec5290 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -350,6 +350,8 @@ enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
        FREE_PCS,               /* Free to percpu sheaf */
+       FREE_RCU_SHEAF,         /* Free to rcu_free sheaf */
+       FREE_RCU_SHEAF_FAIL,    /* Failed to free to a rcu_free sheaf */
        FREE_FASTPATH,          /* Free to cpu slab */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
        FREE_FROZEN,            /* Freeing to frozen slab */
@@ -2582,6 +2584,24 @@ static void sheaf_flush(struct kmem_cache *s, struct slab_sheaf *sheaf)
        sheaf->size = 0;
 }
 
+static void __rcu_free_sheaf_prepare(struct kmem_cache *s,
+                                    struct slab_sheaf *sheaf);
+
+static void rcu_free_sheaf_nobarn(struct rcu_head *head)
+{
+       struct slab_sheaf *sheaf;
+       struct kmem_cache *s;
+
+       sheaf = container_of(head, struct slab_sheaf, rcu_head);
+       s = sheaf->cache;
+
+       __rcu_free_sheaf_prepare(s, sheaf);
+
+       sheaf_flush(s, sheaf);
+
+       free_empty_sheaf(s, sheaf);
+}
+
 /*
  * Caller needs to make sure migration is disabled in order to fully flush
  * single cpu's sheaves
@@ -2611,8 +2631,8 @@ static void pcs_flush_all(struct kmem_cache *s)
                free_empty_sheaf(s, spare);
        }
 
-       // TODO: handle rcu_free
-       BUG_ON(rcu_free);
+       if (rcu_free)
+               call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
 
        sheaf_flush_main(s);
 }
@@ -2629,8 +2649,10 @@ static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
                pcs->spare = NULL;
        }
 
-       // TODO: handle rcu_free
-       BUG_ON(pcs->rcu_free);
+       if (pcs->rcu_free) {
+               call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn);
+               pcs->rcu_free = NULL;
+       }
 
        sheaf_flush_main(s);
 }
@@ -5205,6 +5227,118 @@ do_free:
        stat(s, FREE_PCS);
 }
 
+static void __rcu_free_sheaf_prepare(struct kmem_cache *s,
+                                    struct slab_sheaf *sheaf)
+{
+       bool init = slab_want_init_on_free(s);
+       void **p = &sheaf->objects[0];
+       unsigned int i = 0;
+
+       while (i < sheaf->size) {
+               struct slab *slab = virt_to_slab(p[i]);
+
+               memcg_slab_free_hook(s, slab, p + i, 1);
+               alloc_tagging_slab_free_hook(s, slab, p + i, 1);
+
+               if (unlikely(!slab_free_hook(s, p[i], init, false))) {
+                       p[i] = p[--sheaf->size];
+                       continue;
+               }
+
+               i++;
+       }
+}
+
+static void rcu_free_sheaf(struct rcu_head *head)
+{
+       struct slab_sheaf *sheaf;
+       struct node_barn *barn;
+       struct kmem_cache *s;
+
+       sheaf = container_of(head, struct slab_sheaf, rcu_head);
+
+       s = sheaf->cache;
+
+       __rcu_free_sheaf_prepare(s, sheaf);
+
+       barn = get_node(s, numa_mem_id())->barn;
+
+       /* due to slab_free_hook() */
+       if (unlikely(sheaf->size == 0))
+               goto empty;
+
+       if (!barn_put_full_sheaf(barn, sheaf, false))
+               return;
+
+       sheaf_flush(s, sheaf);
+
+empty:
+       if (!barn_put_empty_sheaf(barn, sheaf, false))
+               return;
+
+       free_empty_sheaf(s, sheaf);
+}
+
+bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
+{
+       struct slub_percpu_sheaves *pcs;
+       struct slab_sheaf *rcu_sheaf;
+       unsigned long flags;
+
+       local_lock_irqsave(&s->cpu_sheaves->lock, flags);
+       pcs = this_cpu_ptr(s->cpu_sheaves);
+
+       if (unlikely(!pcs->rcu_free)) {
+
+               struct slab_sheaf *empty;
+
+               empty = barn_get_empty_sheaf(pcs->barn);
+
+               if (empty) {
+                       pcs->rcu_free = empty;
+                       goto do_free;
+               }
+
+               local_unlock_irqrestore(&s->cpu_sheaves->lock, flags);
+
+               empty = alloc_empty_sheaf(s, GFP_NOWAIT);
+
+               if (!empty) {
+                       stat(s, FREE_RCU_SHEAF_FAIL);
+                       return false;
+               }
+
+               local_lock_irqsave(&s->cpu_sheaves->lock, flags);
+               pcs = this_cpu_ptr(s->cpu_sheaves);
+
+               if (unlikely(pcs->rcu_free))
+                       barn_put_empty_sheaf(pcs->barn, empty, true);
+               else
+                       pcs->rcu_free = empty;
+       }
+
+do_free:
+
+       rcu_sheaf = pcs->rcu_free;
+
+       rcu_sheaf->objects[rcu_sheaf->size++] = obj;
+
+       if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
+               local_unlock_irqrestore(&s->cpu_sheaves->lock, flags);
+               stat(s, FREE_RCU_SHEAF);
+               return true;
+       }
+
+       pcs->rcu_free = NULL;
+       local_unlock_irqrestore(&s->cpu_sheaves->lock, flags);
+
+       call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf);
+
+       stat(s, FREE_RCU_SHEAF);
+
+       return true;
+}
+
 /*
  * Bulk free objects to the percpu sheaves.
  * Unlike free_to_pcs() this includes the calls to all necessary hooks
@@ -6535,6 +6669,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
        struct kmem_cache_node *n;
 
        flush_all_cpus_locked(s);
+
+       /* we might have rcu sheaves in flight */
+       if (s->cpu_sheaves)
+               rcu_barrier();
+
        /* Attempt to free all objects */
        for_each_kmem_cache_node(s, node, n) {
                if (n->barn)
@@ -7940,6 +8079,8 @@ STAT_ATTR(ALLOC_PCS, alloc_cpu_sheaf);
 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
 STAT_ATTR(FREE_PCS, free_cpu_sheaf);
+STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
+STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
 STAT_ATTR(FREE_FASTPATH, free_fastpath);
 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
 STAT_ATTR(FREE_FROZEN, free_frozen);
@@ -8035,6 +8176,8 @@ static struct attribute *slab_attrs[] = {
        &alloc_fastpath_attr.attr,
        &alloc_slowpath_attr.attr,
        &free_cpu_sheaf_attr.attr,
+       &free_rcu_sheaf_attr.attr,
+       &free_rcu_sheaf_fail_attr.attr,
        &free_fastpath_attr.attr,
        &free_slowpath_attr.attr,
        &free_frozen_attr.attr,