#include <linux/stddef.h>
 #include <linux/bpfptr.h>
 #include <linux/btf.h>
+#include <linux/rcupdate_trace.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
 
 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
 void bpf_prog_array_free(struct bpf_prog_array *progs);
+/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
+void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
 int bpf_prog_array_length(struct bpf_prog_array *progs);
 bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
        return ret;
 }
 
+/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
+ *
+ * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
+ * overall. As a result, we must use the bpf_prog_array_free_sleepable
+ * in order to use the tasks_trace rcu grace period.
+ *
+ * When a non-sleepable program is inside the array, we take the rcu read
+ * section and disable preemption for that program alone, so it can access
+ * rcu-protected dynamically sized maps.
+ */
+/*
+ * Run every program in @array_rcu against @ctx via @run_prog, AND-ing the
+ * return values together.  Returns 1 (i.e. "pass") when the array is NULL
+ * or empty.  May sleep (sleepable programs can fault on user memory), so
+ * callers must be in a context where might_fault() is legal.
+ */
+static __always_inline u32
+bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
+                            const void *ctx, bpf_prog_run_fn run_prog)
+{
+       const struct bpf_prog_array_item *item;
+       const struct bpf_prog *prog;
+       const struct bpf_prog_array *array;
+       struct bpf_run_ctx *old_run_ctx;
+       struct bpf_trace_run_ctx run_ctx;
+       u32 ret = 1;
+
+       /* Document that this helper may sleep/fault; catches bad callers. */
+       might_fault();
+
+       /* tasks_trace RCU protects the array itself (see notes above). */
+       rcu_read_lock_trace();
+       migrate_disable();
+
+       array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
+       if (unlikely(!array))
+               goto out;
+       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+       item = &array->items[0];
+       while ((prog = READ_ONCE(item->prog))) {
+               /*
+                * Non-sleepable programs rely on a normal RCU read section
+                * for their map accesses, so wrap just that program in one.
+                */
+               if (!prog->aux->sleepable)
+                       rcu_read_lock();
+
+               run_ctx.bpf_cookie = item->bpf_cookie;
+               ret &= run_prog(prog, ctx);
+               item++;
+
+               if (!prog->aux->sleepable)
+                       rcu_read_unlock();
+       }
+       bpf_reset_run_ctx(old_run_ctx);
+out:
+       migrate_enable();
+       rcu_read_unlock_trace();
+       return ret;
+}
+
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
 extern struct mutex bpf_stats_enabled_mutex;
 
        kfree_rcu(progs, rcu);
 }
 
+/*
+ * Invoked after a tasks_trace RCU grace period has elapsed.  Re-queues the
+ * same rcu_head via kfree_rcu() so the array is freed only after a regular
+ * RCU grace period as well — chaining the two grace periods covers readers
+ * of either flavor.  Reusing the rcu_head here is safe because the
+ * tasks_trace callback has already fired for it.
+ */
+static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
+{
+       struct bpf_prog_array *progs;
+
+       progs = container_of(rcu, struct bpf_prog_array, rcu);
+       kfree_rcu(progs, rcu);
+}
+
+/*
+ * Free @progs after both a tasks_trace and a regular RCU grace period
+ * (see __bpf_prog_array_free_sleepable_cb).  Must be used instead of
+ * bpf_prog_array_free() whenever the array may be traversed under
+ * rcu_read_lock_trace().  The shared static empty array is never freed.
+ */
+void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
+{
+       if (!progs || progs == &bpf_empty_prog_array.hdr)
+               return;
+       call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
+}
+
 int bpf_prog_array_length(struct bpf_prog_array *array)
 {
        struct bpf_prog_array_item *item;
 
        event->prog = prog;
        event->bpf_cookie = bpf_cookie;
        rcu_assign_pointer(event->tp_event->prog_array, new_array);
-       bpf_prog_array_free(old_array);
+       bpf_prog_array_free_sleepable(old_array);
 
 unlock:
        mutex_unlock(&bpf_event_mutex);
                bpf_prog_array_delete_safe(old_array, event->prog);
        } else {
                rcu_assign_pointer(event->tp_event->prog_array, new_array);
-               bpf_prog_array_free(old_array);
+               bpf_prog_array_free_sleepable(old_array);
        }
 
        bpf_prog_put(event->prog);
 
 #include <linux/namei.h>
 #include <linux/string.h>
 #include <linux/rculist.h>
+#include <linux/filter.h>
 
 #include "trace_dynevent.h"
 #include "trace_probe.h"
        if (bpf_prog_array_valid(call)) {
                u32 ret;
 
-               preempt_disable();
-               ret = trace_call_bpf(call, regs);
-               preempt_enable();
+               ret = bpf_prog_run_array_sleepable(call->prog_array, regs, bpf_prog_run);
                if (!ret)
                        return;
        }