 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * builtin-trace.c
  *
                int             max;
                struct syscall  *table;
                struct bpf_map  *map;
+               struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
+                       struct bpf_map  *sys_enter,
+                                       *sys_exit;
+               }               prog_array;
                struct {
                        struct perf_evsel *sys_enter,
                                          *sys_exit,
        goto out;
 }
 
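+/*
+ * No ev_qualifier list: all syscalls are of interest. Otherwise a syscall
+ * is enabled iff it is in the list, with not_ev_qualifier inverting the
+ * test, i.e. the list then names the syscalls to skip.
+ */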
+static bool trace__syscall_enabled(struct trace *trace, int id)
+{
+       bool in_ev_qualifier;
+
+       if (trace->ev_qualifier_ids.nr == 0)
+               return true;
+
+       in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
+                                 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
+
+       if (in_ev_qualifier)
+               return !trace->not_ev_qualifier;
+
+       return trace->not_ev_qualifier;
+}
+
 /*
  * args is to be interpreted as a series of longs but we need to handle
  * 8-byte unaligned accesses. args points to raw_data within the event
        }
 }
 
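+/*
+ * Return the fd of the augmenter prog to use at syscall entry/exit, falling
+ * back to the "unaugmented" one when we can't get the syscall info.
+ */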
+static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
+{
+       struct syscall *sc = trace__syscall_info(trace, NULL, id);
+
+       return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
+}
+
+static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
+{
+       struct syscall *sc = trace__syscall_info(trace, NULL, id);
+
+       return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
+}
+
 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
 {
        struct syscall *sc = trace__syscall_info(trace, NULL, id);
        int err = 0, key;
 
        for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
-               if (enabled) {
+               if (enabled)
                        trace__init_bpf_map_syscall_args(trace, key, &value);
-                       trace__init_syscall_bpf_progs(trace, key);
-               }
 
                err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
                if (err)
 
        return __trace__init_syscalls_bpf_map(trace, enabled);
 }
+
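+/*
+ * Populate the per syscall BPF_MAP_TYPE_PROG_ARRAY maps with the fds of the
+ * sys_enter/sys_exit augmenter progs for each enabled syscall, so that the
+ * main handlers can tail call them keyed by syscall number.
+ */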
+static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
+{
+       int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
+           map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
+       int err = 0, key;
+
+       for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
+               int prog_fd;
+
+               if (!trace__syscall_enabled(trace, key))
+                       continue;
+
+               trace__init_syscall_bpf_progs(trace, key);
+
+               // It'll get at least the "!raw_syscalls:unaugmented" prog, which forwards the event unaugmented
+               prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
+               err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
+               if (err)
+                       break;
+               prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
+               err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
 #else
 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
 {
 {
        return NULL;
 }
+
+static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
+{
+       return 0;
+}
 #endif // HAVE_LIBBPF_SUPPORT
 
 static int trace__set_ev_qualifier_filter(struct trace *trace)
        if (trace->syscalls.map)
                trace__init_syscalls_bpf_map(trace);
 
+       if (trace->syscalls.prog_array.sys_enter)
+               trace__init_syscalls_bpf_prog_array_maps(trace);
+
        if (trace->ev_qualifier_ids.nr > 0) {
                err = trace__set_ev_qualifier_filter(trace);
                if (err < 0)
 static void trace__set_bpf_map_syscalls(struct trace *trace)
 {
        trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
+       trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
+       trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
 }
 
 static int trace__config(const char *var, const char *value, void *arg)
 
 
 bpf_map(syscalls, ARRAY, int, struct syscall, 512);
 
+/*
+ * What to augment at entry?
+ *
+ * Pointer arg payloads (filenames, etc) passed from userspace to the kernel.
+ */
+bpf_map(syscalls_sys_enter, PROG_ARRAY, u32, u32, 512);
+
+/*
+ * What to augment at exit?
+ *
+ * Pointer arg payloads returned from the kernel (struct stat, etc) to userspace.
+ */
+bpf_map(syscalls_sys_exit, PROG_ARRAY, u32, u32, 512);
+
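+/*
+ * Sketch, not part of this patch: the expectation is that the main
+ * raw_syscalls:sys_enter/sys_exit handlers dispatch to the per syscall
+ * augmenters via these arrays with bpf_tail_call(), which only returns
+ * when a syscall's slot is empty, e.g.:
+ *
+ *      bpf_tail_call(args, &syscalls_sys_enter, args->syscall_nr);
+ *      // here the tail call failed: no augmenter for this syscall
+ */
+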
 struct syscall_enter_args {
        unsigned long long common_tp_fields;
        long               syscall_nr;