static DEFINE_PER_CPU(unsigned long, irqsave_flags);
 
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
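+/* Disable IRQs before taking the lock and stash the saved flags in the
+ * per-CPU irqsave_flags slot; __bpf_spin_unlock_irqrestore() reads them
+ * back.  The verifier allows a program to hold only one bpf_spin_lock
+ * at a time, so a single per-CPU slot is sufficient.
+ */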
+static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
 {
        unsigned long flags;
 
        local_irq_save(flags);
        __bpf_spin_lock(lock);
        __this_cpu_write(irqsave_flags, flags);
+}
+
+notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+{
+       __bpf_spin_lock_irqsave(lock);
        return 0;
 }
 
        .arg1_type      = ARG_PTR_TO_SPIN_LOCK,
 };
 
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
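+/* Counterpart of __bpf_spin_lock_irqsave(): release the lock, then
+ * restore the IRQ flags that the lock side saved in irqsave_flags.
+ */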
+static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
 {
        unsigned long flags;
 
        flags = __this_cpu_read(irqsave_flags);
        __bpf_spin_unlock(lock);
        local_irq_restore(flags);
+}
+
+notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+{
+       __bpf_spin_unlock_irqrestore(lock);
        return 0;
 }
 
        else
                lock = dst + map->spin_lock_off;
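+       /* The irqsave helper keeps IRQs off while the lock is held, so
+        * the per-CPU irqsave_flags slot cannot be clobbered during the
+        * copy.
+        */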
        preempt_disable();
-       ____bpf_spin_lock(lock);
+       __bpf_spin_lock_irqsave(lock);
        copy_map_value(map, dst, src);
-       ____bpf_spin_unlock(lock);
+       __bpf_spin_unlock_irqrestore(lock);
        preempt_enable();
 }
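
For context, a minimal sketch of how a BPF program reaches these helpers
through the bpf_spin_lock()/bpf_spin_unlock() helper calls. Everything
around the two calls is an illustrative assumption (cnt_map, struct val
and count_pkts are invented names), not part of this patch:

/* Hypothetical example program: a BTF-defined hash map whose value
 * embeds a struct bpf_spin_lock, locked around a counter update.
 * bpf_spin_lock() lands in __bpf_spin_lock_irqsave() above and
 * bpf_spin_unlock() in __bpf_spin_unlock_irqrestore().
 */
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>

struct val {
        struct bpf_spin_lock lock;
        __u64 cnt;
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct val);
} cnt_map SEC(".maps");

SEC("tc")
int count_pkts(struct __sk_buff *skb)
{
        __u32 key = 0;
        struct val *v = bpf_map_lookup_elem(&cnt_map, &key);

        if (!v)
                return 0;
        bpf_spin_lock(&v->lock);        /* IRQs off, flags saved per CPU */
        v->cnt++;                       /* serialized map-value update */
        bpf_spin_unlock(&v->lock);      /* lock dropped, flags restored */
        return 0;
}

char LICENSE[] SEC("license") = "GPL";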