 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 {
+       unsigned long flags;
+
        /* cBPF to eBPF migrations are currently not in the idr store.
         * Offloaded programs are removed from the store when their device
         * disappears - even if someone grabs an fd to them they are unusable,
         * simply waiting for refcnt to drop to be freed.
         */
        if (!prog->aux->id)
                return;
 
        if (do_idr_lock)
-               spin_lock_bh(&prog_idr_lock);
+               spin_lock_irqsave(&prog_idr_lock, flags);
        else
                __acquire(&prog_idr_lock);
 
        idr_remove(&prog_idr, prog->aux->id);
        prog->aux->id = 0;
 
        if (do_idr_lock)
-               spin_unlock_bh(&prog_idr_lock);
+               spin_unlock_irqrestore(&prog_idr_lock, flags);
        else
                __release(&prog_idr_lock);
 }
@@ ... @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
        }
 }
 
+static void bpf_prog_put_deferred(struct work_struct *work)
+{
+       struct bpf_prog_aux *aux;
+       struct bpf_prog *prog;
+
+       aux = container_of(work, struct bpf_prog_aux, work);
+       prog = aux->prog;
+       perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
+       bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
+       __bpf_prog_put_noref(prog, true);
+}
+
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
-       if (atomic64_dec_and_test(&prog->aux->refcnt)) {
-               perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
-               bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
+       struct bpf_prog_aux *aux = prog->aux;
+
+       if (atomic64_dec_and_test(&aux->refcnt)) {
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-               __bpf_prog_put_noref(prog, true);
+
+               if (in_irq() || irqs_disabled()) {
+                       INIT_WORK(&aux->work, bpf_prog_put_deferred);
+                       schedule_work(&aux->work);
+               } else {
+                       bpf_prog_put_deferred(&aux->work);
+               }
        }
 }
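
Two independent changes are at work above. The first swaps spin_lock_bh() for spin_lock_irqsave(): the _bh variants only disable softirqs and must not be used while hard IRQs are already disabled, whereas the irqsave/irqrestore pair records the caller's IRQ state and restores it on unlock, so the critical section becomes safe to enter from any context. A minimal sketch of that locking pattern, using a hypothetical demo_lock and demo_id rather than the patch's prog_idr_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical stand-in for prog_idr_lock */
static int demo_id;

/* Callable from process, softirq, or hardirq context, and with IRQs
 * already off: the saved flags restore whatever state the caller had.
 */
static void demo_clear_id(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_id = 0;
        spin_unlock_irqrestore(&demo_lock, flags);
}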
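
The second change is the defer-to-workqueue pattern in __bpf_prog_put(): the teardown path (perf unload event, audit record, __bpf_prog_put_noref()) may block, so when the final reference is dropped in hard IRQ context or with IRQs disabled, the work is queued and runs later in process context; otherwise it runs inline. A minimal sketch of the same pattern, assuming a hypothetical struct demo_obj in place of bpf_prog/bpf_prog_aux:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {                       /* hypothetical, mirrors bpf_prog_aux */
        refcount_t refcnt;
        struct work_struct work;        /* reused for deferred teardown */
};

static void demo_obj_free_deferred(struct work_struct *work)
{
        struct demo_obj *obj = container_of(work, struct demo_obj, work);

        /* Runs in process context, so blocking teardown is allowed here. */
        kfree(obj);
}

static void demo_obj_put(struct demo_obj *obj)
{
        if (!refcount_dec_and_test(&obj->refcnt))
                return;

        if (in_irq() || irqs_disabled()) {
                /* Cannot block here; punt the free to a worker.
                 * schedule_work() itself is safe to call from IRQ context.
                 */
                INIT_WORK(&obj->work, demo_obj_free_deferred);
                schedule_work(&obj->work);
        } else {
                demo_obj_free_deferred(&obj->work);
        }
}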