mm: slub: move flush_cpu_slab() invocations and __free_slab() invocations out of IRQ context
Author:     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate: Mon, 23 Aug 2021 23:59:01 +0000 (09:59 +1000)
Commit:     Stephen Rothwell <sfr@canb.auug.org.au>
CommitDate: Wed, 25 Aug 2021 23:33:31 +0000 (09:33 +1000)
flush_all() flushes a specific SLAB cache on each CPU (where the cache is
present).  The deactivate_slab()/__free_slab() invocations happen within an
IPI handler, which is problematic for PREEMPT_RT.

The flush operation is neither frequent nor a hot path, so the per-CPU
flush can instead be deferred to a workqueue; a minimal sketch of the
resulting queue-then-wait pattern follows the sign-off tags below.

[vbabka@suse.cz: adapt to new SLUB changes]
Link: https://lkml.kernel.org/r/20210805152000.12817-30-vbabka@suse.cz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
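
For illustration only (not part of the patch): a minimal, self-contained
userspace sketch of the queue-then-wait pattern the new flush_all() uses,
with plain C threads standing in for the kernel's per-CPU work items.  All
names below (flush_job, NR_CPUS, the even-CPU heuristic) are hypothetical,
and pthread_create() does not pin a thread to a CPU the way
schedule_work_on() does; only the queue/skip/wait structure is mirrored.

/* Build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct flush_job {
	pthread_t thread;	/* stands in for the per-CPU work item */
	bool skip;		/* true when this slot had nothing to flush */
	int cpu;
};

static struct flush_job jobs[NR_CPUS];

/* Pretend only even-numbered CPUs currently hold a cached slab. */
static bool has_cpu_slab(int cpu)
{
	return (cpu % 2) == 0;
}

/* Stands in for flush_cpu_slab(): runs outside the caller's context. */
static void *flush_cpu_slab(void *arg)
{
	struct flush_job *job = arg;

	printf("flushing cpu %d\n", job->cpu);
	return NULL;
}

static void flush_all(void)
{
	int cpu;

	/* First pass: queue a job for every CPU that has work, skip the rest. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		jobs[cpu].cpu = cpu;
		if (!has_cpu_slab(cpu)) {
			jobs[cpu].skip = true;
			continue;
		}
		jobs[cpu].skip = false;
		pthread_create(&jobs[cpu].thread, NULL, flush_cpu_slab,
			       &jobs[cpu]);
	}

	/* Second pass: wait only for the jobs that were actually queued. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (jobs[cpu].skip)
			continue;
		pthread_join(jobs[cpu].thread, NULL);
	}
}

int main(void)
{
	flush_all();
	return 0;
}

The property mirrored here is that flush_all() no longer performs the flush
in the caller's (IRQ/IPI) context: work is queued per CPU and then waited
on, which is safe to do from preemptible context on PREEMPT_RT.
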
diff --git a/mm/slub.c b/mm/slub.c
index bb7e055437389184b0bcf778aba5388007f0538d..0da7074f4c9a43a31b3082f49a86a36c3b216fcf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2516,33 +2516,73 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
        unfreeze_partials_cpu(s, c);
 }
 
+struct slub_flush_work {
+       struct work_struct work;
+       struct kmem_cache *s;
+       bool skip;
+};
+
 /*
  * Flush cpu slab.
  *
- * Called from IPI handler with interrupts disabled.
+ * Called from CPU work handler with migration disabled.
  */
-static void flush_cpu_slab(void *d)
+static void flush_cpu_slab(struct work_struct *w)
 {
-       struct kmem_cache *s = d;
-       struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+       struct kmem_cache *s;
+       struct kmem_cache_cpu *c;
+       struct slub_flush_work *sfw;
+
+       sfw = container_of(w, struct slub_flush_work, work);
+
+       s = sfw->s;
+       c = this_cpu_ptr(s->cpu_slab);
 
        if (c->page)
-               flush_slab(s, c, false);
+               flush_slab(s, c, true);
 
        unfreeze_partials(s);
 }
 
-static bool has_cpu_slab(int cpu, void *info)
+static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 {
-       struct kmem_cache *s = info;
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
        return c->page || slub_percpu_partial(c);
 }
 
+static DEFINE_MUTEX(flush_lock);
+static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
+
 static void flush_all(struct kmem_cache *s)
 {
-       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+       struct slub_flush_work *sfw;
+       unsigned int cpu;
+
+       mutex_lock(&flush_lock);
+       cpus_read_lock();
+
+       for_each_online_cpu(cpu) {
+               sfw = &per_cpu(slub_flush, cpu);
+               if (!has_cpu_slab(cpu, s)) {
+                       sfw->skip = true;
+                       continue;
+               }
+               INIT_WORK(&sfw->work, flush_cpu_slab);
+               sfw->skip = false;
+               sfw->s = s;
+               schedule_work_on(cpu, &sfw->work);
+       }
+
+       for_each_online_cpu(cpu) {
+               sfw = &per_cpu(slub_flush, cpu);
+               if (sfw->skip)
+                       continue;
+               flush_work(&sfw->work);
+       }
+
+       cpus_read_unlock();
+       mutex_unlock(&flush_lock);
 }
 
 /*