mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc()
author	Vlastimil Babka <vbabka@suse.cz>
	Mon, 23 Aug 2021 23:58:57 +0000 (09:58 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 25 Aug 2021 23:33:28 +0000 (09:33 +1000)
Continue reducing the irq disabled scope.  Check for per-cpu partial slabs
first with irqs enabled and then recheck with irqs disabled before
grabbing the slab page.  Mostly preparatory for the following patches.
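
For illustration, a minimal userspace sketch of the pattern this patch
applies: peek at the per-cpu state cheaply with irqs enabled, then disable
irqs and re-validate before committing to it.  Everything below (the
local_irq_save()/local_irq_restore() macros, struct cpu_slab,
take_percpu_partial() and the irq test hook) is a stand-in invented for
this sketch, not kernel code or kernel API.

#include <stdio.h>
#include <stddef.h>

/*
 * Userspace stand-ins; in the kernel these disable and re-enable
 * interrupts on the local CPU.  Nothing here is kernel API.
 */
#define local_irq_save(flags)           ((flags) = 1UL)
#define local_irq_restore(flags)        ((void)(flags))

/* Slimmed-down stand-in for struct kmem_cache_cpu. */
struct cpu_slab {
        void *page;                     /* currently active slab, if any */
        void *partial;                  /* per-cpu partial list, if any */
        void (*irq)(struct cpu_slab *); /* test hook: "IRQ" fires here */
};

/*
 * The pattern the patch introduces: peek at the per-cpu partial list
 * with irqs enabled (cheap), then disable irqs and recheck both
 * c->page and c->partial, because an interrupt handler may have
 * installed a page or consumed the partial list in the meantime.
 */
static void *take_percpu_partial(struct cpu_slab *c)
{
        unsigned long flags;

reread:
        if (!c->partial)
                return NULL;            /* optimistic peek, irqs still on */

        if (c->irq) {                   /* test hook: the race window */
                c->irq(c);
                c->irq = NULL;
        }

        local_irq_save(flags);
        if (c->page) {                  /* an "IRQ" installed a page */
                local_irq_restore(flags);
                c->page = NULL;         /* models reread_page/deactivate */
                goto reread;
        }
        if (!c->partial) {              /* stolen by an "IRQ" handler */
                local_irq_restore(flags);
                return NULL;            /* models goto new_objects */
        }
        c->page = c->partial;           /* safe: rechecked with irqs off */
        c->partial = NULL;
        local_irq_restore(flags);
        return c->page;
}

static void steal_partial(struct cpu_slab *c)
{
        c->partial = NULL;              /* simulated interrupt handler */
}

int main(void)
{
        static int slab;
        struct cpu_slab quiet = { .partial = &slab };
        struct cpu_slab raced = { .partial = &slab, .irq = steal_partial };

        printf("quiet:  %p\n", take_percpu_partial(&quiet)); /* &slab */
        printf("stolen: %p\n", take_percpu_partial(&raced)); /* (nil) */
        return 0;
}

The irqs-off window is entered only when the unlocked peek suggests there
is work to do, and every peeked value is re-validated once irqs are off;
that re-validation is why the diff below moves
lockdep_assert_irqs_disabled() down to the new new_objects: label.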

Link: https://lkml.kernel.org/r/20210805152000.12817-15-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/slub.c

index fcc38638c645bac410e5fce19c1c93ef3359522a..a437730d7ae2491031c2461e35c954d4a39379f7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2692,11 +2692,6 @@ reread_page:
                if (unlikely(node != NUMA_NO_NODE &&
                             !node_isset(node, slab_nodes)))
                        node = NUMA_NO_NODE;
-               local_irq_save(flags);
-               if (unlikely(c->page)) {
-                       local_irq_restore(flags);
-                       goto reread_page;
-               }
                goto new_slab;
        }
 redo:
@@ -2737,6 +2732,7 @@ redo:
 
        if (!freelist) {
                c->page = NULL;
+               local_irq_restore(flags);
                stat(s, DEACTIVATE_BYPASS);
                goto new_slab;
        }
@@ -2766,12 +2762,19 @@ deactivate_slab:
                goto reread_page;
        }
        deactivate_slab(s, page, c->freelist, c);
+       local_irq_restore(flags);
 
 new_slab:
 
-       lockdep_assert_irqs_disabled();
-
        if (slub_percpu_partial(c)) {
+               local_irq_save(flags);
+               if (unlikely(c->page)) {
+                       local_irq_restore(flags);
+                       goto reread_page;
+               }
+               if (unlikely(!slub_percpu_partial(c)))
+                       goto new_objects; /* stolen by an IRQ handler */
+
                page = c->page = slub_percpu_partial(c);
                slub_set_percpu_partial(c, page);
                local_irq_restore(flags);
@@ -2779,6 +2782,16 @@ new_slab:
                goto redo;
        }
 
+       local_irq_save(flags);
+       if (unlikely(c->page)) {
+               local_irq_restore(flags);
+               goto reread_page;
+       }
+
+new_objects:
+
+       lockdep_assert_irqs_disabled();
+
        freelist = get_partial(s, gfpflags, node, &page);
        if (freelist) {
                c->page = page;
@@ -2811,15 +2824,18 @@ new_slab:
 check_new_page:
 
        if (kmem_cache_debug(s)) {
-               if (!alloc_debug_processing(s, page, freelist, addr))
+               if (!alloc_debug_processing(s, page, freelist, addr)) {
                        /* Slab failed checks. Next slab needed */
+                       c->page = NULL;
+                       local_irq_restore(flags);
                        goto new_slab;
-               else
+               } else {
                        /*
                         * For debug case, we don't load freelist so that all
                         * allocations go through alloc_debug_processing()
                         */
                        goto return_single;
+               }
        }
 
        if (unlikely(!pfmemalloc_match(page, gfpflags)))