*             0 on success.
  *
  *             **-ENOENT** if the bpf-local-storage cannot be found.
+ *
+ * int bpf_send_signal(u32 sig)
+ *     Description
+ *             Send signal *sig* to the current task.
+ *     Return
+ *             0 on success, or if the signal was successfully queued.
+ *
+ *             **-EBUSY** if the work queue used to defer delivery from
+ *             NMI context is full.
+ *
+ *             **-EINVAL** if *sig* is invalid.
+ *
+ *             **-EPERM** if the current context has no permission to
+ *             send *sig* to the current task.
+ *
+ *             **-EAGAIN** on a transient failure; the bpf program may
+ *             retry the call.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(strtol),                     \
        FN(strtoul),                    \
        FN(sk_storage_get),             \
-       FN(sk_storage_delete),
+       FN(sk_storage_delete),          \
+       FN(send_signal),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
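
For context, a minimal consumer of the new helper could look like the
tracepoint program below. This is an illustrative sketch, not part of
this patch: the section name, the program name, the SEC() macro and the
open-coded helper declaration follow the common selftests convention,
and the signal number 10 assumes SIGUSR1 on x86_64.

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

static int (*bpf_send_signal)(unsigned int sig) =
	(void *) BPF_FUNC_send_signal;

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int send_sig_on_nanosleep(void *ctx)
{
	/* Deliver SIGUSR1 to the task entering nanosleep(); returns 0
	 * on success/queued, or one of the error codes documented in
	 * the helper comment above.
	 */
	bpf_send_signal(10 /* SIGUSR1 */);
	return 0;
}

char _license[] SEC("license") = "GPL";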
 
        .arg3_type      = ARG_ANYTHING,
 };
 
+struct send_signal_irq_work {
+       struct irq_work irq_work;
+       struct task_struct *task;
+       u32 sig;
+};
+
+static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
+
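+/* irq_work callback: runs after the NMI that queued it has returned,
+ * in a context where taking the signal delivery locks is safe.
+ */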
+static void do_bpf_send_signal(struct irq_work *entry)
+{
+       struct send_signal_irq_work *work;
+
+       work = container_of(entry, struct send_signal_irq_work, irq_work);
+       group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
+}
+
+BPF_CALL_1(bpf_send_signal, u32, sig)
+{
+       struct send_signal_irq_work *work = NULL;
+
+       /* Similar to bpf_probe_write_user(), the current task
+        * must be in a sound condition and kernel memory access
+        * must be permitted in order to send it a signal.
+        */
+       if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
+               return -EPERM;
+       if (unlikely(uaccess_kernel()))
+               return -EPERM;
+       if (unlikely(!nmi_uaccess_okay()))
+               return -EPERM;
+
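+       /* Signal delivery takes locks that are not NMI safe, so in
+        * NMI context hand the request off to the per-CPU irq_work.
+        */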
+       if (in_nmi()) {
+               work = this_cpu_ptr(&send_signal_work);
+               if (work->irq_work.flags & IRQ_WORK_BUSY)
+                       return -EBUSY;
+
+               /* Record the current task, the target of the signal, in
+                * the irq_work: by the time queued irq works execute,
+                * "current" may point at a different task.
+                */
+               work->task = current;
+               work->sig = sig;
+               irq_work_queue(&work->irq_work);
+               return 0;
+       }
+
+       return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
+}
+
+static const struct bpf_func_proto bpf_send_signal_proto = {
+       .func           = bpf_send_signal,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
        case BPF_FUNC_get_current_cgroup_id:
                return &bpf_get_current_cgroup_id_proto;
 #endif
+       case BPF_FUNC_send_signal:
+               return &bpf_send_signal_proto;
        default:
                return NULL;
        }
        return 0;
 }
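
Because tracing_func_proto() is also the fallback for the perf_event
program type, the helper is reachable from NMI context. The sketch
below (again illustrative, not part of this patch, with the same
assumed SEC() macro and helper declaration as above) shows the path
that exercises the irq_work deferral:

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>

#define SEC(name) __attribute__((section(name), used))

static int (*bpf_send_signal)(unsigned int sig) =
	(void *) BPF_FUNC_send_signal;

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* In NMI context the helper queues the signal via the per-CPU
	 * irq_work; -EBUSY means the work item is still pending from
	 * an earlier sample on this CPU and this signal is dropped.
	 */
	bpf_send_signal(10 /* SIGUSR1 */);
	return 0;
}

char _license[] SEC("license") = "GPL";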
 
+static int __init send_signal_irq_work_init(void)
+{
+       int cpu;
+       struct send_signal_irq_work *work;
+
+       for_each_possible_cpu(cpu) {
+               work = per_cpu_ptr(&send_signal_work, cpu);
+               init_irq_work(&work->irq_work, do_bpf_send_signal);
+       }
+       return 0;
+}
+
 fs_initcall(bpf_event_init);
+subsys_initcall(send_signal_irq_work_init);
 #endif /* CONFIG_MODULES */
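
A user-space counterpart for testing (illustrative, not part of this
patch) just installs a handler and triggers the traced syscall:

#include <signal.h>
#include <stdio.h>
#include <time.h>

static volatile sig_atomic_t got_signal;

static void handler(int sig)
{
	got_signal = 1;
}

int main(void)
{
	struct sigaction sa;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };

	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sa.sa_handler = handler;
	sigaction(SIGUSR1, &sa, NULL);
	nanosleep(&ts, NULL);	/* the attached BPF program fires here */
	printf("SIGUSR1 %s\n", got_signal ? "received" : "missed");
	return 0;
}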