WARN_ON_ONCE(local_inc_return(&c->active) != 1);
 }
 
-static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
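+/* Pairs with inc_active(): decrements c->active and, on PREEMPT_RT,
+ * restores the IRQ flags that inc_active() saved via local_irq_save().
+ */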
+static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
 {
        local_dec(&c->active);
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               local_irq_restore(flags);
+               local_irq_restore(*flags);
 }
 
static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
 {
        unsigned long flags;

        inc_active(c, &flags);
        __llist_add(obj, &c->free_llist);
        c->free_cnt++;
-       dec_active(c, flags);
+       dec_active(c, &flags);
 }
 
 /* Mostly runs from irq_work except __init phase. */
                        cnt = --c->free_cnt;
                else
                        cnt = 0;
-               dec_active(c, flags);
+               dec_active(c, &flags);
                if (llnode)
                        enque_to_free(tgt, llnode);
        } while (cnt > (c->high_watermark + c->low_watermark) / 2);
                llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
                        if (__llist_add(llnode, &c->free_by_rcu))
                                c->free_by_rcu_tail = llnode;
-               dec_active(c, flags);
+               dec_active(c, &flags);
        }
 
        if (llist_empty(&c->free_by_rcu))
        inc_active(c, &flags);
        WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
        c->waiting_for_gp_tail = c->free_by_rcu_tail;
-       dec_active(c, flags);
+       dec_active(c, &flags);
 
        if (unlikely(READ_ONCE(c->draining))) {
                free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);