struct trace {
struct perf_tool tool;
- struct syscalltbl *sctbl;
struct {
/** Sorted syscall numbers used by the trace. */
struct syscall *table;
pid_t *entries;
struct bpf_map *map;
} filter_pids;
+ /*
+ * TODO: The map is from an ID (aka system call number) to struct
+ * syscall_stats. If there is >1 e_machine, such as i386 and x86-64
+ * processes, then the stats gathered here will be wrong for the
+ * non-EM_HOST system calls. A fix would be to add the e_machine
+ * into the key, but this would make the code inconsistent with the
+ * per-thread version.
+ */
struct hashmap *syscall_stats;
double duration_filter;
double runtime_ms;
return 0;
}
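/*
 * Sketch of the keying fix the syscall_stats TODO above describes
 * (hypothetical, not part of this patch): folding the e_machine into
 * the hashmap key keeps i386 and x86-64 syscall numbers from
 * colliding, at the cost of diverging from the per-thread stats.
 */
struct syscall_stats_key {
	int e_machine;	/* e.g. EM_386 vs EM_X86_64 */
	int id;		/* syscall number within that machine type */
};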
- name = syscalltbl__name(trace->sctbl, sc->id);
+ name = syscalltbl__name(sc->e_machine, sc->id);
if (name == NULL) {
sc->nonexistent = true;
return -EEXIST;
strlist__for_each_entry(pos, trace->ev_qualifier) {
const char *sc = pos->s;
- int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
+ /*
+ * TODO: More than just this validation/warning code assumes
+ * that all named syscalls are for the same binary type as perf.
+ */
+ int id = syscalltbl__id(EM_HOST, sc), match_next = -1;
if (id < 0) {
- id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
+ id = syscalltbl__strglobmatch_first(EM_HOST, sc, &match_next);
if (id >= 0)
goto matches;
continue;
while (1) {
- id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
+ id = syscalltbl__strglobmatch_next(EM_HOST, sc, &match_next);
if (id < 0)
break;
if (nr_allocated == nr_used) {
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
int augmented_args_size = 0;
void *augmented_args = NULL;
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
struct thread_trace *ttrace;
struct thread_trace *ttrace;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
char msg[1024];
void *args, *augmented_args = NULL;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
int alignment = trace->args_alignment;
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
struct thread_trace *ttrace;
if (evsel == trace->syscalls.events.bpf_output) {
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
if (sc) {
return trace->skel->progs.syscall_unaugmented;
}
-static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
+static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
{
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, id);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
if (sc == NULL)
return;
sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
}
-static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
+static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id)
{
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, id);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
-static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
+static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id)
{
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, id);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
-static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array)
+static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array)
{
struct tep_format_field *field;
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, key);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
const struct btf_type *bt;
char *struct_offset, *tmp, name[32];
bool can_augment = false;
return NULL;
try_to_find_pair:
- for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
- int id = syscalltbl__id_at_idx(trace->sctbl, i);
+ for (int i = 0, num_idx = syscalltbl__num_idx(sc.e_machine); i < num_idx; ++i) {
+ int id = syscalltbl__id_at_idx(sc.e_machine, i);
/* calling trace__syscall_info() may invalidate '_sc' */
struct syscall *pair = trace__syscall_info(trace, NULL, sc.e_machine, id);
struct bpf_program *pair_prog;
return NULL;
}
-static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
+static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
{
int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
int err = 0;
unsigned int beauty_array[6];
- for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
- int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i);
+ for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
+ int prog_fd, key = syscalltbl__id_at_idx(e_machine, i);
if (!trace__syscall_enabled(trace, key))
continue;
- trace__init_syscall_bpf_progs(trace, key);
+ trace__init_syscall_bpf_progs(trace, e_machine, key);
// It'll get at least the "!raw_syscalls:unaugmented"
- prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
+ prog_fd = trace__bpf_prog_sys_enter_fd(trace, e_machine, key);
err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
- prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
+ prog_fd = trace__bpf_prog_sys_exit_fd(trace, e_machine, key);
err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
/* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */
memset(beauty_array, 0, sizeof(beauty_array));
- err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array);
+ err = trace__bpf_sys_enter_beauty_map(trace, e_machine, key, (unsigned int *)beauty_array);
if (err)
continue;
err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY);
* first and second arg (this one on the raw_syscalls:sys_exit prog
* array tail call, then that one will be used.
*/
- for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
- int key = syscalltbl__id_at_idx(trace->sctbl, i);
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, key);
+ for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
+ int key = syscalltbl__id_at_idx(e_machine, i);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
struct bpf_program *pair_prog;
int prog_fd;
* Get syscall info again as find usable entry above might
* modify the syscall table and shuffle it.
*/
- sc = trace__syscall_info(trace, NULL, EM_HOST, key);
+ sc = trace__syscall_info(trace, NULL, e_machine, key);
sc->bpf_prog.sys_enter = pair_prog;
/*
goto out_error_mem;
#ifdef HAVE_BPF_SKEL
- if (trace->skel && trace->skel->progs.sys_enter)
- trace__init_syscalls_bpf_prog_array_maps(trace);
+ if (trace->skel && trace->skel->progs.sys_enter) {
+ /*
+ * TODO: Initialize for all host binary machine types, not just
+ * those matching the perf binary.
+ */
+ trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
+ }
#endif
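/*
 * Hypothetical shape of the TODO above (not in this patch): once
 * per-thread machine types are known, the prog array maps would be
 * initialized for every e_machine the host can execute, e.g. both
 * EM_X86_64 and EM_386 on an x86-64 kernel with compat support.
 * Assumes HAVE_BPF_SKEL; the e_machines list is illustrative.
 */
static int trace__init_all_prog_array_maps(struct trace *trace)
{
	static const int e_machines[] = { EM_HOST /*, EM_386, ... */ };
	int err = 0;

	for (size_t i = 0; !err && i < ARRAY_SIZE(e_machines); i++)
		err = trace__init_syscalls_bpf_prog_array_maps(trace, e_machines[i]);

	return err;
}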
if (trace->ev_qualifier_ids.nr > 0) {
* So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
* not in use.
*/
- trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
+ /* TODO: support for more than just perf binary machine type close. */
+ trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(EM_HOST, "close"));
err = trace__expand_filters(trace, &evsel);
if (err)
return entry;
}
-static size_t syscall__dump_stats(struct trace *trace, FILE *fp,
+static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp,
struct hashmap *syscall_stats)
{
size_t printed = 0;
pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
avg /= NSEC_PER_MSEC;
- sc = trace__syscall_info(trace, /*evsel=*/NULL, EM_HOST, entry->syscall);
+ sc = trace__syscall_info(trace, /*evsel=*/NULL, e_machine, entry->syscall);
if (!sc)
continue;
}
static size_t thread__dump_stats(struct thread_trace *ttrace,
- struct trace *trace, FILE *fp)
+ struct trace *trace, int e_machine, FILE *fp)
{
- return syscall__dump_stats(trace, fp, ttrace->syscall_stats);
+ return syscall__dump_stats(trace, e_machine, fp, ttrace->syscall_stats);
}
-static size_t system__dump_stats(struct trace *trace, FILE *fp)
+static size_t system__dump_stats(struct trace *trace, int e_machine, FILE *fp)
{
- return syscall__dump_stats(trace, fp, trace->syscall_stats);
+ return syscall__dump_stats(trace, e_machine, fp, trace->syscall_stats);
}
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
else if (fputc('\n', fp) != EOF)
++printed;
- printed += thread__dump_stats(ttrace, trace, fp);
+ /* TODO: get e_machine from thread. */
+ printed += thread__dump_stats(ttrace, trace, EM_HOST, fp);
return printed;
}
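/*
 * Hypothetical helper that the repeated "get e_machine from thread"
 * TODOs point at; neither the name nor the lookup is part of this
 * patch. A plausible implementation would read the ELF machine of the
 * thread's executable mapping and fall back to EM_HOST when unknown.
 */
static int thread__e_machine(struct thread *thread __maybe_unused)
{
	return EM_HOST;	/* TODO: derive from the thread's dso/maps. */
}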
else if (fputc('\n', fp) != EOF)
++printed;
- printed += system__dump_stats(trace, fp);
+ /* TODO: get all system e_machines. */
+ printed += system__dump_stats(trace, EM_HOST, fp);
return printed;
}
*sep = '\0';
list = 0;
- if (syscalltbl__id(trace->sctbl, s) >= 0 ||
- syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
+ /* TODO: support for more than just perf binary machine type syscalls. */
+ if (syscalltbl__id(EM_HOST, s) >= 0 ||
+ syscalltbl__strglobmatch_first(EM_HOST, s, &idx) >= 0) {
list = 1;
goto do_concat;
}
syscall__exit(&trace->syscalls.table[i]);
zfree(&trace->syscalls.table);
}
- syscalltbl__delete(trace->sctbl);
zfree(&trace->perfconfig_events);
}
sigaction(SIGCHLD, &sigchld_act, NULL);
trace.evlist = evlist__new();
- trace.sctbl = syscalltbl__new();
- if (trace.evlist == NULL || trace.sctbl == NULL) {
+ if (trace.evlist == NULL) {
pr_err("Not enough memory to run!\n");
err = -ENOMEM;
goto out;
#include <stdlib.h>
#include <asm/bitsperlong.h>
#include <linux/compiler.h>
+#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <string.h>
#include <asm/syscalls_32.h>
#endif
-const int syscalltbl_native_max_id = SYSCALLTBL_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl;
+const char *syscalltbl__name(int e_machine __maybe_unused, int id)
+{
+ if (id >= 0 && id < (int)ARRAY_SIZE(syscall_num_to_name))
+ return syscall_num_to_name[id];
+ return NULL;
+}
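/*
 * Illustrative sketch (not part of the patch) of the two generated
 * tables the new API is assumed to rely on: syscall_num_to_name maps a
 * syscall number to its name (gaps stay NULL), and syscall_sorted_names
 * holds uint16_t syscall numbers ordered by strcmp() of their names so
 * the bsearch() below works. Values shown are x86-64 examples.
 */
static const char *const syscall_num_to_name[] = {
	[0] = "read",
	[1] = "write",
	[2] = "open",
	[3] = "close",
	/* ... one entry per defined syscall number ... */
};
static const uint16_t syscall_sorted_names[] = {
	3,	/* "close" */
	2,	/* "open"  */
	0,	/* "read"  */
	1,	/* "write" */
};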
-struct syscall {
- int id;
+struct syscall_cmp_key {
const char *name;
+ const char *const *tbl;
};
static int syscallcmpname(const void *vkey, const void *ventry)
{
- const char *key = vkey;
- const struct syscall *entry = ventry;
+ const struct syscall_cmp_key *key = vkey;
+ const uint16_t *entry = ventry;
- return strcmp(key, entry->name);
+ return strcmp(key->name, key->tbl[*entry]);
}
-static int syscallcmp(const void *va, const void *vb)
+int syscalltbl__id(int e_machine __maybe_unused, const char *name)
{
- const struct syscall *a = va, *b = vb;
-
- return strcmp(a->name, b->name);
+ struct syscall_cmp_key key = {
+ .name = name,
+ .tbl = syscall_num_to_name,
+ };
+ const uint16_t *id = bsearch(&key, syscall_sorted_names,
+ ARRAY_SIZE(syscall_sorted_names),
+ sizeof(syscall_sorted_names[0]),
+ syscallcmpname);
+
+ return id ? *id : -1;
}
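/*
 * Usage sketch: name and id round-trip through the two lookups.
 * EM_HOST is perf's alias for the host e_machine; the concrete values
 * in the comments assume the x86-64 table.
 */
static void syscalltbl__id_example(void)
{
	int id = syscalltbl__id(EM_HOST, "close");		/* 3 */
	const char *name = syscalltbl__name(EM_HOST, id);	/* "close" */
	int bad = syscalltbl__id(EM_HOST, "no-such-syscall");	/* -1 */

	(void)name;
	(void)bad;
}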
-static int syscalltbl__init_native(struct syscalltbl *tbl)
+int syscalltbl__num_idx(int e_machine __maybe_unused)
{
- int nr_entries = 0, i, j;
- struct syscall *entries;
-
- for (i = 0; i <= syscalltbl_native_max_id; ++i)
- if (syscalltbl_native[i])
- ++nr_entries;
-
- entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
- if (tbl->syscalls.entries == NULL)
- return -1;
-
- for (i = 0, j = 0; i <= syscalltbl_native_max_id; ++i) {
- if (syscalltbl_native[i]) {
- entries[j].name = syscalltbl_native[i];
- entries[j].id = i;
- ++j;
- }
- }
-
- qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);
- tbl->syscalls.nr_entries = nr_entries;
- tbl->syscalls.max_id = syscalltbl_native_max_id;
- return 0;
+ return ARRAY_SIZE(syscall_sorted_names);
}
-struct syscalltbl *syscalltbl__new(void)
+int syscalltbl__id_at_idx(int e_machine __maybe_unused, int idx)
{
- struct syscalltbl *tbl = malloc(sizeof(*tbl));
- if (tbl) {
- if (syscalltbl__init_native(tbl)) {
- free(tbl);
- return NULL;
- }
- }
- return tbl;
-}
-
-void syscalltbl__delete(struct syscalltbl *tbl)
-{
- zfree(&tbl->syscalls.entries);
- free(tbl);
-}
-
-const char *syscalltbl__name(const struct syscalltbl *tbl __maybe_unused, int id)
-{
- return id <= syscalltbl_native_max_id ? syscalltbl_native[id]: NULL;
-}
-
-int syscalltbl__id(struct syscalltbl *tbl, const char *name)
-{
- struct syscall *sc = bsearch(name, tbl->syscalls.entries,
- tbl->syscalls.nr_entries, sizeof(*sc),
- syscallcmpname);
-
- return sc ? sc->id : -1;
-}
-
-int syscalltbl__id_at_idx(struct syscalltbl *tbl, int idx)
-{
- struct syscall *syscalls = tbl->syscalls.entries;
-
- return idx < tbl->syscalls.nr_entries ? syscalls[idx].id : -1;
+ return syscall_sorted_names[idx];
}
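/*
 * Iteration sketch: walking [0, syscalltbl__num_idx()) visits every
 * syscall in name-sorted order; this is the pattern the BPF prog
 * array setup in builtin-trace.c uses.
 */
static void syscalltbl__for_each_example(int e_machine)
{
	for (int i = 0, n = syscalltbl__num_idx(e_machine); i < n; ++i) {
		int id = syscalltbl__id_at_idx(e_machine, i);
		const char *name = syscalltbl__name(e_machine, id);

		(void)name;	/* e.g. filter or print each syscall here */
	}
}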
-int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
+int syscalltbl__strglobmatch_next(int e_machine __maybe_unused, const char *syscall_glob, int *idx)
{
- int i;
- struct syscall *syscalls = tbl->syscalls.entries;
+ for (int i = *idx + 1; i < (int)ARRAY_SIZE(syscall_sorted_names); ++i) {
+ const char *name = syscall_num_to_name[syscall_sorted_names[i]];
- for (i = *idx + 1; i < tbl->syscalls.nr_entries; ++i) {
- if (strglobmatch(syscalls[i].name, syscall_glob)) {
+ if (strglobmatch(name, syscall_glob)) {
*idx = i;
- return syscalls[i].id;
+ return syscall_sorted_names[i];
}
}
return -1;
}
-int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
+int syscalltbl__strglobmatch_first(int e_machine, const char *syscall_glob, int *idx)
{
*idx = -1;
- return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
+ return syscalltbl__strglobmatch_next(e_machine, syscall_glob, idx);
}