www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm, slub: move irq control into unfreeze_partials()
author: Vlastimil Babka <vbabka@suse.cz>
Mon, 23 Aug 2021 23:58:59 +0000 (09:58 +1000)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 25 Aug 2021 23:33:30 +0000 (09:33 +1000)
unfreeze_partials() can be optimized so that it doesn't need irqs disabled
for the whole time.  As the first step, move irq control into the function
and remove it from the put_cpu_partial() caller.

Link: https://lkml.kernel.org/r/20210805152000.12817-23-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/slub.c

index 44c4de4b37551a3f9ac1976fdd076da63ed50e67..b1c120d3d461a1d38b840ac5dfecc63a5c6fcfcd 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2336,9 +2336,8 @@ redo:
 /*
  * Unfreeze all the cpu partial slabs.
  *
- * This function must be called with interrupts disabled
- * for the cpu using c (or some other guarantee must be there
- * to guarantee no concurrent accesses).
+ * This function must be called with preemption or migration
+ * disabled with c local to the cpu.
  */
 static void unfreeze_partials(struct kmem_cache *s,
                struct kmem_cache_cpu *c)
@@ -2346,6 +2345,9 @@ static void unfreeze_partials(struct kmem_cache *s,
 #ifdef CONFIG_SLUB_CPU_PARTIAL
        struct kmem_cache_node *n = NULL, *n2 = NULL;
        struct page *page, *discard_page = NULL;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        while ((page = slub_percpu_partial(c))) {
                struct page new;
@@ -2398,6 +2400,8 @@ static void unfreeze_partials(struct kmem_cache *s,
                discard_slab(s, page);
                stat(s, FREE_SLAB);
        }
+
+       local_irq_restore(flags);
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
 }
 
@@ -2425,14 +2429,11 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                        pobjects = oldpage->pobjects;
                        pages = oldpage->pages;
                        if (drain && pobjects > slub_cpu_partial(s)) {
-                               unsigned long flags;
                                /*
                                 * partial array is full. Move the existing
                                 * set to the per node partial list.
                                 */
-                               local_irq_save(flags);
                                unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
-                               local_irq_restore(flags);
                                oldpage = NULL;
                                pobjects = 0;
                                pages = 0;