www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm, slub: unify cmpxchg_double_slab() and __cmpxchg_double_slab()
author: Vlastimil Babka <vbabka@suse.cz>
Mon, 23 Aug 2021 23:58:55 +0000 (09:58 +1000)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 25 Aug 2021 23:33:25 +0000 (09:33 +1000)
These functions differ only in irq disabling in the slow path.  We can
create a common function with an extra bool parameter to control the irq
disabling.  As the functions are inline and the parameter compile-time
constant, there will be no runtime overhead due to this change.

Also change the DEBUG_VM based irqs disable assert to the more standard
lockdep_assert based one.

Link: https://lkml.kernel.org/r/20210805152000.12817-7-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/slub.c

index 79e53303844c34d72e5540900f7b1109b4310874..e1c4e934c6203945eedaba0af358398096585a23 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -371,13 +371,13 @@ static __always_inline void slab_unlock(struct page *page)
        __bit_spin_unlock(PG_locked, &page->flags);
 }
 
-/* Interrupts must be disabled (for the fallback code to work right) */
-static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
-               const char *n)
+               const char *n, bool disable_irqs)
 {
-       VM_BUG_ON(!irqs_disabled());
+       if (!disable_irqs)
+               lockdep_assert_irqs_disabled();
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (s->flags & __CMPXCHG_DOUBLE) {
@@ -388,15 +388,23 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
        } else
 #endif
        {
+               unsigned long flags;
+
+               if (disable_irqs)
+                       local_irq_save(flags);
                slab_lock(page);
                if (page->freelist == freelist_old &&
                                        page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
+                       if (disable_irqs)
+                               local_irq_restore(flags);
                        return true;
                }
                slab_unlock(page);
+               if (disable_irqs)
+                       local_irq_restore(flags);
        }
 
        cpu_relax();
@@ -409,45 +417,23 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
        return false;
 }
 
-static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+/* Interrupts must be disabled (for the fallback code to work right) */
+static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
 {
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
-    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
-       if (s->flags & __CMPXCHG_DOUBLE) {
-               if (cmpxchg_double(&page->freelist, &page->counters,
-                                  freelist_old, counters_old,
-                                  freelist_new, counters_new))
-                       return true;
-       } else
-#endif
-       {
-               unsigned long flags;
-
-               local_irq_save(flags);
-               slab_lock(page);
-               if (page->freelist == freelist_old &&
-                                       page->counters == counters_old) {
-                       page->freelist = freelist_new;
-                       page->counters = counters_new;
-                       slab_unlock(page);
-                       local_irq_restore(flags);
-                       return true;
-               }
-               slab_unlock(page);
-               local_irq_restore(flags);
-       }
-
-       cpu_relax();
-       stat(s, CMPXCHG_DOUBLE_FAIL);
-
-#ifdef SLUB_DEBUG_CMPXCHG
-       pr_info("%s %s: cmpxchg double redo ", n, s->name);
-#endif
+       return ___cmpxchg_double_slab(s, page, freelist_old, counters_old,
+                                     freelist_new, counters_new, n, false);
+}
 
-       return false;
+static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+               void *freelist_old, unsigned long counters_old,
+               void *freelist_new, unsigned long counters_new,
+               const char *n)
+{
+       return ___cmpxchg_double_slab(s, page, freelist_old, counters_old,
+                                     freelist_new, counters_new, n, true);
 }
 
 #ifdef CONFIG_SLUB_DEBUG