* we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  *
- * Version of __slab_alloc to use when we know that interrupts are
+ * Version of __slab_alloc to use when we know that preemption is
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                           unsigned long addr, struct kmem_cache_cpu *c)
 {
        void *freelist;
        struct page *page;
+       unsigned long flags;
 
        stat(s, ALLOC_SLOWPATH);
 
+       local_irq_save(flags);
        page = c->page;
        if (!page) {
                /*
        VM_BUG_ON(!c->page->frozen);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
+       local_irq_restore(flags);
        return freelist;
 
 new_slab:
                goto check_new_page;
        }
 
+       put_cpu_ptr(s->cpu_slab);
        page = new_slab(s, gfpflags, node);
+       c = get_cpu_ptr(s->cpu_slab);
 
        if (unlikely(!page)) {
+               local_irq_restore(flags);
                slab_out_of_memory(s, gfpflags, node);
                return NULL;
        }
 
-       c = raw_cpu_ptr(s->cpu_slab);
        if (c->page)
                flush_slab(s, c);
 
 return_single:
 
        deactivate_slab(s, page, get_freepointer(s, freelist), c);
+       local_irq_restore(flags);
        return freelist;
 }
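
The hunk above confines the irq-disabled section to ___slab_alloc() itself and
drops the pinned per-cpu reference around the page allocation, which may sleep.
A minimal sketch of that pattern, outside of SLUB and with invented demo_*
names (demo_refill() only stands in for the refill step, it is not allocator
code), could look like this:

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqflags.h>
#include <linux/errno.h>

/* hypothetical per-cpu cache, a stand-in for struct kmem_cache_cpu */
struct demo_pcpu {
        void *cached;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_cache);

/* entered with preemption already disabled, as ___slab_alloc() now is */
static int demo_refill(gfp_t gfpflags)
{
        struct demo_pcpu *c;
        unsigned long flags;
        void *obj;

        /*
         * The allocation may sleep, so drop the pinned per-cpu reference
         * (taken by the caller with get_cpu_ptr()) around it and pin again
         * afterwards: the task may have migrated to another CPU meanwhile.
         */
        put_cpu_ptr(&demo_cache);
        obj = kmalloc(64, gfpflags);
        c = get_cpu_ptr(&demo_cache);

        if (!obj)
                return -ENOMEM;

        /* per-cpu state is only touched with IRQs disabled */
        local_irq_save(flags);
        if (!c->cached) {
                c->cached = obj;
                obj = NULL;
        }
        local_irq_restore(flags);

        /* this CPU was refilled while we slept: drop the spare object */
        kfree(obj);
        return 0;
}

The same reasoning explains the re-fetch of 'c' after new_slab() above: once
preemption was enabled, the per-cpu pointer obtained earlier may no longer
belong to the CPU the task ends up running on.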
 
 /*
- * Another one that disabled interrupt and compensates for possible
- * cpu changes by refetching the per cpu area pointer.
+ * A wrapper for ___slab_alloc() for contexts where preemption is not yet
+ * disabled. Compensates for possible cpu changes by refetching the per cpu area
+ * pointer.
  */
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                          unsigned long addr, struct kmem_cache_cpu *c)
 {
        void *p;
-       unsigned long flags;
 
-       local_irq_save(flags);
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT_COUNT
        /*
         * We may have been preempted and rescheduled on a different
-        * cpu before disabling interrupts. Need to reload cpu area
+        * cpu before disabling preemption. Need to reload cpu area
         * pointer.
         */
-       c = this_cpu_ptr(s->cpu_slab);
+       c = get_cpu_ptr(s->cpu_slab);
 #endif
 
        p = ___slab_alloc(s, gfpflags, node, addr, c);
-       local_irq_restore(flags);
+#ifdef CONFIG_PREEMPT_COUNT
+       put_cpu_ptr(s->cpu_slab);
+#endif
        return p;
 }
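
The reload under CONFIG_PREEMPT_COUNT matters because the caller fetched the
kmem_cache_cpu pointer without pinning the CPU. A tiny, hypothetical
illustration of that pattern (demo_hits and demo_count_hit() are not SLUB
code):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);

static void demo_count_hit(void)
{
        unsigned long *p;

        /* speculative, unpinned lookup, as the lockless fastpath does */
        p = raw_cpu_ptr(&demo_hits);

        /*
         * The task can be preempted and migrated here, so 'p' may belong to
         * a CPU we are no longer running on. Pinning with get_cpu_ptr() and
         * reloading the pointer makes the access safe again.
         */
        p = get_cpu_ptr(&demo_hits);
        (*p)++;
        put_cpu_ptr(&demo_hits);
}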
 
         * IRQs, which protects against PREEMPT and interrupts
         * handlers invoking normal fastpath.
         */
+       c = get_cpu_ptr(s->cpu_slab);
        local_irq_disable();
-       c = this_cpu_ptr(s->cpu_slab);
 
        for (i = 0; i < size; i++) {
                void *object = kfence_alloc(s, s->object_size, flags);
                         */
                        c->tid = next_tid(c->tid);
 
+                       local_irq_enable();
+
                        /*
                         * Invoking slow path likely have side-effect
                         * of re-populating per CPU c->freelist
                        c = this_cpu_ptr(s->cpu_slab);
                        maybe_wipe_obj_freeptr(s, p[i]);
 
+                       local_irq_disable();
+
                        continue; /* goto for-loop */
                }
                c->freelist = get_freepointer(s, object);
        }
        c->tid = next_tid(c->tid);
        local_irq_enable();
+       put_cpu_ptr(s->cpu_slab);
 
        /*
         * memcg and kmem_cache debug support and memory initialization.
                                slab_want_init_on_alloc(flags, s));
        return i;
 error:
-       local_irq_enable();
+       put_cpu_ptr(s->cpu_slab);
        slab_post_alloc_hook(s, objcg, flags, i, p, false);
        __kmem_cache_free_bulk(s, i, p);
        return 0;
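
Taken together, the bulk path now pins the CPU with get_cpu_ptr() for the
whole loop, keeps IRQs disabled only while the per-cpu freelist is
manipulated, and enables them around the slow path, which handles IRQs (and,
around a sleeping page allocation, the CPU pin) by itself. A condensed,
hypothetical sketch of that shape, with invented demo_* names and
demo_bulk_slowpath() merely standing in for ___slab_alloc():

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqflags.h>

struct demo_bulk_pcpu {
        void *freelist;         /* single-slot stand-in for c->freelist */
};

static DEFINE_PER_CPU(struct demo_bulk_pcpu, demo_bulk);

/* stand-in for ___slab_alloc(): drops the CPU pin around a sleeping alloc */
static void *demo_bulk_slowpath(gfp_t flags)
{
        void *obj;

        put_cpu_ptr(&demo_bulk);
        obj = kmalloc(64, flags);
        (void)get_cpu_ptr(&demo_bulk);  /* pin again before returning */

        return obj;
}

static int demo_bulk_alloc(gfp_t flags, int size, void **p)
{
        struct demo_bulk_pcpu *c;
        int i;

        c = get_cpu_ptr(&demo_bulk);    /* no migration for the whole loop */
        local_irq_disable();            /* IRQs off only for the fastpath */

        for (i = 0; i < size; i++) {
                /* the cache is assumed to be refilled elsewhere */
                void *object = c->freelist;

                if (unlikely(!object)) {
                        /* the slow path runs with IRQs on and may sleep */
                        local_irq_enable();
                        object = demo_bulk_slowpath(flags);
                        if (!object)
                                goto error;     /* IRQs stay enabled */
                        c = this_cpu_ptr(&demo_bulk);   /* possibly a new CPU */
                        local_irq_disable();
                } else {
                        c->freelist = NULL;     /* "pop" the cached object */
                }
                p[i] = object;
        }

        local_irq_enable();
        put_cpu_ptr(&demo_bulk);
        return i;

error:
        put_cpu_ptr(&demo_bulk);
        while (i--)
                kfree(p[i]);
        return 0;
}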