mm, slub: make slab_lock() disable irqs with PREEMPT_RT
author	Vlastimil Babka <vbabka@suse.cz>
	Mon, 23 Aug 2021 23:59:02 +0000 (09:59 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 25 Aug 2021 23:33:33 +0000 (09:33 +1000)
We need to disable irqs around slab_lock() (a bit spinlock) to make it
irq-safe.  The calls to slab_lock() are nested under spin_lock_irqsave(),
which doesn't disable irqs on PREEMPT_RT, so add explicit irq disabling
on PREEMPT_RT.
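
For reference, this is roughly what the __slab_lock()/__slab_unlock()
helpers look like at this point in the series (a sketch reconstructed
from the hunk context below; the exact bodies in the tree may differ):

	static __always_inline void
	__slab_lock(struct page *page, unsigned long *flags, bool disable_irqs)
	{
		VM_BUG_ON_PAGE(PageTail(page), page);
		/* on PREEMPT_RT the caller cannot rely on spin_lock_irqsave() */
		if (disable_irqs)
			local_irq_save(*flags);
		bit_spin_lock(PG_locked, &page->flags);
	}

	static __always_inline void
	__slab_unlock(struct page *page, unsigned long *flags, bool disable_irqs)
	{
		__bit_spin_unlock(PG_locked, &page->flags);
		if (disable_irqs)
			local_irq_restore(*flags);
	}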

We also distinguish cmpxchg_double_slab(), where we do the disabling
explicitly, from __cmpxchg_double_slab(), which is for contexts with
already disabled irqs.  However, these contexts are also typically
protected by spin_lock_irqsave(), which is insufficient on PREEMPT_RT.
Thus, make __cmpxchg_double_slab() behave the same as
cmpxchg_double_slab() on PREEMPT_RT.
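
The resulting pair of wrappers, assembled from the diff below (the
cmpxchg_double_slab() variant is not visible in the hunks here and is
assumed unchanged, passing true; it is sketched only for contrast):

	/* irqs must be disabled by the caller, except on PREEMPT_RT */
	static inline bool __cmpxchg_double_slab(struct kmem_cache *s,
			struct page *page,
			void *freelist_old, unsigned long counters_old,
			void *freelist_new, unsigned long counters_new,
			const char *n)
	{
		return ___cmpxchg_double_slab(s, page, freelist_old, counters_old,
					      freelist_new, counters_new, n,
					      IS_ENABLED(CONFIG_PREEMPT_RT));
	}

	/* disables irqs itself around the fallback path */
	static inline bool cmpxchg_double_slab(struct kmem_cache *s,
			struct page *page,
			void *freelist_old, unsigned long counters_old,
			void *freelist_new, unsigned long counters_new,
			const char *n)
	{
		return ___cmpxchg_double_slab(s, page, freelist_old, counters_old,
					      freelist_new, counters_new, n, true);
	}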

Link: https://lkml.kernel.org/r/20210805152000.12817-33-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/slub.c

index 59722a4bb5468ec26a6a36683930bcccd79edc26..3286defe4857dc13e3175a0970bd2be7ea256819 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -380,12 +380,12 @@ __slab_unlock(struct page *page, unsigned long *flags, bool disable_irqs)
 static __always_inline void
 slab_lock(struct page *page, unsigned long *flags)
 {
-       __slab_lock(page, flags, false);
+       __slab_lock(page, flags, IS_ENABLED(CONFIG_PREEMPT_RT));
 }
 
 static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
 {
-       __slab_unlock(page, flags, false);
+       __slab_unlock(page, flags, IS_ENABLED(CONFIG_PREEMPT_RT));
 }
 
 static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
@@ -429,14 +429,19 @@ static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *pag
        return false;
 }
 
-/* Interrupts must be disabled (for the fallback code to work right) */
+/*
+ * Interrupts must be disabled (for the fallback code to work right), typically
+ * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
+ * so we disable interrupts explicitly here.
+ */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
 {
        return ___cmpxchg_double_slab(s, page, freelist_old, counters_old,
-                                     freelist_new, counters_new, n, false);
+                                     freelist_new, counters_new, n,
+                                     IS_ENABLED(CONFIG_PREEMPT_RT));
 }
 
 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,