struct trace_event_call *event_call = __data;                   \
        struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
        struct trace_event_raw_##call *entry;                           \
+       struct bpf_prog *prog = event_call->prog;                       \
        struct pt_regs *__regs;                                         \
        u64 __count = 1;                                                \
        struct task_struct *__task = NULL;                              \
        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
                                                                        \
        head = this_cpu_ptr(event_call->perf_events);                   \
-       if (__builtin_constant_p(!__task) && !__task &&                 \
+       if (!prog && __builtin_constant_p(!__task) && !__task &&        \
                                hlist_empty(head))                      \
                return;                                                 \
                                                                        \
                                                                        \
        { assign; }                                                     \
                                                                        \
+       if (prog) {                                                     \
+               *(struct pt_regs **)entry = __regs;                     \
+               if (!trace_call_bpf(prog, entry) || hlist_empty(head)) { \
+                       perf_swevent_put_recursion_context(rctx);       \
+                       return;                                         \
+               }                                                       \
+       }                                                               \
        perf_trace_buf_submit(entry, __entry_size, rctx,                \
                              event_call->event.type, __count, __regs,  \
                              head, __task);                            \
 
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-inline void perf_swevent_put_recursion_context(int rctx)
+/*
+ * Un-inlined and exported (see EXPORT_SYMBOL_GPL below) so it can be
+ * called from outside this translation unit — presumably by the
+ * perf_trace_##call fast path above, which now releases the recursion
+ * context early when the attached bpf prog filters the event out.
+ * NOTE(review): confirm no remaining callers relied on the inline form.
+ */
+void perf_swevent_put_recursion_context(int rctx)
 {
        struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
        put_recursion_context(swhash->recursion, rctx);
 }
+EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
 
 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 {
+       bool is_kprobe, is_tracepoint;
        struct bpf_prog *prog;
 
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
        if (event->tp_event->prog)
                return -EEXIST;
 
-       if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE))
-               /* bpf programs can only be attached to u/kprobes */
+       is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
+       is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
+       if (!is_kprobe && !is_tracepoint)
+               /* bpf programs can only be attached to u/kprobe or tracepoint */
                return -EINVAL;
 
        prog = bpf_prog_get(prog_fd);
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       if (prog->type != BPF_PROG_TYPE_KPROBE) {
+       if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
+           (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
                /* valid fd, but invalid bpf program type */
                bpf_prog_put(prog);
                return -EINVAL;