*                     the fast path and disables lockless freelists.
  */
 
+/*
+ * We could simply use migrate_disable()/enable() everywhere, but
+ * migrate_disable() is a function call even on !PREEMPT_RT, so there
+ * keep the cheaper inline preempt_disable() via get_cpu_ptr().
+ */
+#ifndef CONFIG_PREEMPT_RT
+#define slub_get_cpu_ptr(var)  get_cpu_ptr(var)
+#define slub_put_cpu_ptr(var)  put_cpu_ptr(var)
+#else
+#define slub_get_cpu_ptr(var)          \
+({                                     \
+       migrate_disable();              \
+       this_cpu_ptr(var);              \
+})
+#define slub_put_cpu_ptr(var)          \
+do {                                   \
+       (void)(var);                    \
+       migrate_enable();               \
+} while (0)
+#endif
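+
+/*
+ * Illustrative pairing (a sketch of how the helpers are used below):
+ *
+ *     c = slub_get_cpu_ptr(s->cpu_slab);
+ *     ... c remains valid; on PREEMPT_RT we may be preempted here,
+ *         but cannot migrate to another CPU ...
+ *     slub_put_cpu_ptr(s->cpu_slab);
+ *
+ * The (void)(var) in the PREEMPT_RT variant only evaluates the
+ * argument, mirroring what put_cpu_ptr() does.
+ */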
+
 #ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
        if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
                goto deactivate_slab;
 
-       /* must check again c->page in case IRQ handler changed it */
+       /* must check c->page again in case we got preempted and it changed */
        local_irq_save(flags);
        if (unlikely(page != c->page)) {
                local_irq_restore(flags);
                }
                if (unlikely(!slub_percpu_partial(c))) {
                        local_irq_restore(flags);
-                       goto new_objects; /* stolen by an IRQ handler */
+                       /* we were preempted and the partial list became empty */
+                       goto new_objects;
                }
 
                page = c->page = slub_percpu_partial(c);
        if (freelist)
                goto check_new_page;
 
-       put_cpu_ptr(s->cpu_slab);
+       slub_put_cpu_ptr(s->cpu_slab);
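+       /*
+        * Page allocation can sleep, so drop the cpu pointer rather than
+        * keep preemption/migration disabled across new_slab().
+        */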
        page = new_slab(s, gfpflags, node);
-       c = get_cpu_ptr(s->cpu_slab);
+       c = slub_get_cpu_ptr(s->cpu_slab);
 
        if (unlikely(!page)) {
                slab_out_of_memory(s, gfpflags, node);
         * cpu before disabling preemption. Need to reload cpu area
         * pointer.
         */
-       c = get_cpu_ptr(s->cpu_slab);
+       c = slub_get_cpu_ptr(s->cpu_slab);
 #endif
 
        p = ___slab_alloc(s, gfpflags, node, addr, c);
 #ifdef CONFIG_PREEMPT_COUNT
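+       /* pairs with the conditional slub_get_cpu_ptr() above */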
-       put_cpu_ptr(s->cpu_slab);
+       slub_put_cpu_ptr(s->cpu_slab);
 #endif
        return p;
 }
         * IRQs, which protects against PREEMPT and interrupt
         * handlers invoking the normal fastpath.
         */
-       c = get_cpu_ptr(s->cpu_slab);
+       c = slub_get_cpu_ptr(s->cpu_slab);
        local_irq_disable();
 
        for (i = 0; i < size; i++) {
        }
        c->tid = next_tid(c->tid);
        local_irq_enable();
-       put_cpu_ptr(s->cpu_slab);
+       slub_put_cpu_ptr(s->cpu_slab);
 
        /*
         * memcg and kmem_cache debug support and memory initialization.
                                slab_want_init_on_alloc(flags, s));
        return i;
 error:
-       put_cpu_ptr(s->cpu_slab);
+       slub_put_cpu_ptr(s->cpu_slab);
        slab_post_alloc_hook(s, objcg, flags, i, p, false);
        __kmem_cache_free_bulk(s, i, p);
        return 0;