mm, slub: stop disabling irqs around get_partial()
author    Vlastimil Babka <vbabka@suse.cz>
Mon, 23 Aug 2021 23:58:58 +0000 (09:58 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 25 Aug 2021 23:33:29 +0000 (09:33 +1000)
The function get_partial() does not need to have irqs disabled as a whole.
It's sufficient to convert spin_lock operations to their irq
saving/restoring versions.

As a result, it's now possible to reach the page allocator from the slab
allocator without disabling and re-enabling interrupts on the way.
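
A minimal sketch of the locking pattern this converts to (illustration only; the real hunks in mm/slub.c are below):

	unsigned long flags;

	/*
	 * Let the lock save and restore the irq state itself instead of
	 * relying on the caller to run with interrupts already disabled
	 * around a plain spin_lock().
	 */
	spin_lock_irqsave(&n->list_lock, flags);
	/* ... walk n->partial and try to acquire slabs ... */
	spin_unlock_irqrestore(&n->list_lock, flags);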

Link: https://lkml.kernel.org/r/20210805152000.12817-19-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/slub.c

index d5ac278084cfb5a2ea8a05ffea5b53aa28ff9178..ad2c58a31af5a44baaede3e0652b0dfa8f010b55 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1996,11 +1996,12 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-                             struct page **ret_page, gfp_t flags)
+                             struct page **ret_page, gfp_t gfpflags)
 {
        struct page *page, *page2;
        void *object = NULL;
        unsigned int available = 0;
+       unsigned long flags;
        int objects;
 
        /*
@@ -2012,11 +2013,11 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
        if (!n || !n->nr_partial)
                return NULL;
 
-       spin_lock(&n->list_lock);
+       spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
                void *t;
 
-               if (!pfmemalloc_match(page, flags))
+               if (!pfmemalloc_match(page, gfpflags))
                        continue;
 
                t = acquire_slab(s, n, page, object == NULL, &objects);
@@ -2037,7 +2038,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                        break;
 
        }
-       spin_unlock(&n->list_lock);
+       spin_unlock_irqrestore(&n->list_lock, flags);
        return object;
 }
 
@@ -2765,8 +2766,10 @@ new_slab:
                        local_irq_restore(flags);
                        goto reread_page;
                }
-               if (unlikely(!slub_percpu_partial(c)))
+               if (unlikely(!slub_percpu_partial(c))) {
+                       local_irq_restore(flags);
                        goto new_objects; /* stolen by an IRQ handler */
+               }
 
                page = c->page = slub_percpu_partial(c);
                slub_set_percpu_partial(c, page);
@@ -2775,18 +2778,9 @@ new_slab:
                goto redo;
        }
 
-       local_irq_save(flags);
-       if (unlikely(c->page)) {
-               local_irq_restore(flags);
-               goto reread_page;
-       }
-
 new_objects:
 
-       lockdep_assert_irqs_disabled();
-
        freelist = get_partial(s, gfpflags, node, &page);
-       local_irq_restore(flags);
        if (freelist)
                goto check_new_page;