From 13d750c2c03e9861e15268574ed2c239cca9c9d5 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Oct 2024 21:07:12 -0400 Subject: [PATCH 01/16] tracing/ftrace: disable preemption in syscall probe In preparation for allowing system call enter/exit instrumentation to handle page faults, make sure that ftrace can handle this change by explicitly disabling preemption within the ftrace system call tracepoint probes to respect the current expectations within ftrace ring buffer code. This change does not yet allow ftrace to take page faults per se within its probe, but allows its existing probes to adapt to the upcoming change. Cc: Michael Jeanson Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Alexei Starovoitov Cc: Yonghong Song Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Namhyung Kim Cc: Andrii Nakryiko Cc: bpf@vger.kernel.org Cc: Joel Fernandes Link: https://lore.kernel.org/20241009010718.2050182-3-mathieu.desnoyers@efficios.com Acked-by: Masami Hiramatsu (Google) Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/trace/trace_events.h | 39 ++++++++++++++++++++++++++++------- kernel/trace/trace_syscalls.c | 12 +++++++++++ 2 files changed, 44 insertions(+), 7 deletions(-) diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 8bcbb9ee44de..63071aa5923d 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -263,6 +263,9 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ tstruct \ {} }; +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) @@ -396,11 +399,11 @@ static inline notrace int trace_event_get_offsets_##call( \ #include "stages/stage6_event_callback.h" -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ - \ + +#undef __DECLARE_EVENT_CLASS +#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -trace_event_raw_event_##call(void *__data, proto) \ +do_trace_event_raw_event_##call(void *__data, proto) \ { \ struct trace_event_file *trace_file = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ @@ -425,15 +428,35 @@ trace_event_raw_event_##call(void *__data, proto) \ \ trace_event_buffer_commit(&fbuffer); \ } + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +trace_event_raw_event_##call(void *__data, proto) \ +{ \ + do_trace_event_raw_event_##call(__data, args); \ +} + +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +trace_event_raw_event_##call(void *__data, proto) \ +{ \ + preempt_disable_notrace(); \ + do_trace_event_raw_event_##call(__data, args); \ + preempt_enable_notrace(); \ +} + /* * The ftrace_test_probe is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the ftrace probe will * fail to compile unless it too is updated. 
*/ -#undef DECLARE_EVENT_SYSCALL_CLASS -#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS - #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ static inline void ftrace_test_probe_##call(void) \ @@ -443,6 +466,8 @@ static inline void ftrace_test_probe_##call(void) \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef __DECLARE_EVENT_CLASS + #include "stages/stage7_class_define.h" #undef DECLARE_EVENT_CLASS diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 785733245ead..f9b21bac9d45 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -299,6 +299,12 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) int syscall_nr; int size; + /* + * Syscall probe called with preemption enabled, but the ring + * buffer and per-cpu data require preemption to be disabled. + */ + guard(preempt_notrace)(); + syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; @@ -338,6 +344,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) struct trace_event_buffer fbuffer; int syscall_nr; + /* + * Syscall probe called with preemption enabled, but the ring + * buffer and per-cpu data require preemption to be disabled. + */ + guard(preempt_notrace)(); + syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; -- 2.51.0 From 65e7462a16cea593025ca3b34c5d74e69b027ee0 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Oct 2024 21:07:13 -0400 Subject: [PATCH 02/16] tracing/perf: disable preemption in syscall probe In preparation for allowing system call enter/exit instrumentation to handle page faults, make sure that perf can handle this change by explicitly disabling preemption within the perf system call tracepoint probes to respect the current expectations within perf ring buffer code. This change does not yet allow perf to take page faults per se within its probe, but allows its existing probes to adapt to the upcoming change. Cc: Michael Jeanson Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Alexei Starovoitov Cc: Yonghong Song Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Namhyung Kim Cc: Andrii Nakryiko Cc: bpf@vger.kernel.org Cc: Joel Fernandes Link: https://lore.kernel.org/20241009010718.2050182-4-mathieu.desnoyers@efficios.com Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/trace/perf.h | 42 +++++++++++++++++++++++++++++++---- kernel/trace/trace_syscalls.c | 12 ++++++++++ 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/include/trace/perf.h b/include/trace/perf.h index ded997af481e..15cde7eac8b4 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -12,10 +12,10 @@ #undef __perf_task #define __perf_task(t) (__task = (t)) -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +#undef __DECLARE_EVENT_CLASS +#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -perf_trace_##call(void *__data, proto) \ +do_perf_trace_##call(void *__data, proto) \ { \ struct trace_event_call *event_call = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ @@ -55,8 +55,39 @@ perf_trace_##call(void *__data, proto) \ head, __task); \ } +/* + * Define unused __count and __task variables to use @args to pass + * arguments to do_perf_trace_##call. 
This is needed because the + * macros __perf_count and __perf_task introduce the side-effect to + * store copies into those local variables. + */ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +perf_trace_##call(void *__data, proto) \ +{ \ + u64 __count __attribute__((unused)); \ + struct task_struct *__task __attribute__((unused)); \ + \ + do_perf_trace_##call(__data, args); \ +} + #undef DECLARE_EVENT_SYSCALL_CLASS -#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +perf_trace_##call(void *__data, proto) \ +{ \ + u64 __count __attribute__((unused)); \ + struct task_struct *__task __attribute__((unused)); \ + \ + preempt_disable_notrace(); \ + do_perf_trace_##call(__data, args); \ + preempt_enable_notrace(); \ +} /* * This part is compiled out, it is only here as a build time check @@ -76,4 +107,7 @@ static inline void perf_test_probe_##call(void) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#undef __DECLARE_EVENT_CLASS + #endif /* CONFIG_PERF_EVENTS */ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index f9b21bac9d45..b1cc19806f3d 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -596,6 +596,12 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) int rctx; int size; + /* + * Syscall probe called with preemption enabled, but the ring + * buffer and per-cpu data require preemption to be disabled. + */ + guard(preempt_notrace)(); + syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; @@ -698,6 +704,12 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) int rctx; int size; + /* + * Syscall probe called with preemption enabled, but the ring + * buffer and per-cpu data require preemption to be disabled. + */ + guard(preempt_notrace)(); + syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; -- 2.51.0 From 4aadde89d81fbf4cf3105a61dbc48888b819ecfb Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Oct 2024 21:07:14 -0400 Subject: [PATCH 03/16] tracing/bpf: disable preemption in syscall probe In preparation for allowing system call enter/exit instrumentation to handle page faults, make sure that bpf can handle this change by explicitly disabling preemption within the bpf system call tracepoint probes to respect the current expectations within bpf tracing code. This change does not yet allow bpf to take page faults per se within its probe, but allows its existing probes to adapt to the upcoming change. Cc: Michael Jeanson Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Alexei Starovoitov Cc: Yonghong Song Cc: Paul E. 
McKenney Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Namhyung Kim Cc: Andrii Nakryiko Cc: bpf@vger.kernel.org Cc: Joel Fernandes Link: https://lore.kernel.org/20241009010718.2050182-5-mathieu.desnoyers@efficios.com Acked-by: Andrii Nakryiko Tested-by: Andrii Nakryiko # BPF parts Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/trace/bpf_probe.h | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index c85bbce5aaa5..fec97c93e1c9 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -53,8 +53,18 @@ __bpf_trace_##call(void *__data, proto) \ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) +#define __BPF_DECLARE_TRACE_SYSCALL(call, proto, args) \ +static notrace void \ +__bpf_trace_##call(void *__data, proto) \ +{ \ + preempt_disable_notrace(); \ + CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \ + preempt_enable_notrace(); \ +} + #undef DECLARE_EVENT_SYSCALL_CLASS -#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ + __BPF_DECLARE_TRACE_SYSCALL(call, PARAMS(proto), PARAMS(args)) /* * This part is compiled out, it is only here as a build time check -- 2.51.0 From a363d27cdbc2bc2d1899b5a1520b64e3590fcd9a Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Oct 2024 21:07:15 -0400 Subject: [PATCH 04/16] tracing: Allow system call tracepoints to handle page faults Use Tasks Trace RCU to protect iteration of system call enter/exit tracepoint probes to allow those probes to handle page faults. In preparation for this change, all tracers registering to system call enter/exit tracepoints should expect those to be called with preemption enabled. This allows tracers to fault-in userspace system call arguments such as path strings within their probe callbacks. Cc: Michael Jeanson Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Alexei Starovoitov Cc: Yonghong Song Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Namhyung Kim Cc: Andrii Nakryiko Cc: bpf@vger.kernel.org Cc: Joel Fernandes Link: https://lore.kernel.org/20241009010718.2050182-6-mathieu.desnoyers@efficios.com Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/linux/tracepoint.h | 18 ++++++++++++++++-- init/Kconfig | 1 + 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 76e441b39a96..0dc67fad706c 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -107,6 +108,7 @@ void for_each_tracepoint_in_module(struct module *mod, #ifdef CONFIG_TRACEPOINTS static inline void tracepoint_synchronize_unregister(void) { + synchronize_rcu_tasks_trace(); synchronize_rcu(); } #else @@ -196,6 +198,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) /* * it_func[0] is never NULL because there is at least one element in the array * when the array itself is non NULL. + * + * With @syscall=0, the tracepoint callback array dereference is + * protected by disabling preemption. 
+ * With @syscall=1, the tracepoint callback array dereference is + * protected by Tasks Trace RCU, which allows probes to handle page + * faults. */ #define __DO_TRACE(name, args, cond, syscall) \ do { \ @@ -204,11 +212,17 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) if (!(cond)) \ return; \ \ - preempt_disable_notrace(); \ + if (syscall) \ + rcu_read_lock_trace(); \ + else \ + preempt_disable_notrace(); \ \ __DO_TRACE_CALL(name, TP_ARGS(args)); \ \ - preempt_enable_notrace(); \ + if (syscall) \ + rcu_read_unlock_trace(); \ + else \ + preempt_enable_notrace(); \ } while (0) /* diff --git a/init/Kconfig b/init/Kconfig index 530a382ee0fe..4ac3d1b48278 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1985,6 +1985,7 @@ config BINDGEN_VERSION_TEXT # config TRACEPOINTS bool + select TASKS_TRACE_RCU source "kernel/Kconfig.kexec" -- 2.51.0 From a3204c740a59bebb3b37a294d83f4e353303a52c Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Oct 2024 21:07:16 -0400 Subject: [PATCH 05/16] tracing/ftrace: Add might_fault check to syscall probes Add a might_fault() check to validate that the ftrace sys_enter/sys_exit probe callbacks are indeed called from a context where page faults can be handled. Cc: Michael Jeanson Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Alexei Starovoitov Cc: Yonghong Song Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Namhyung Kim Cc: Andrii Nakryiko Cc: bpf@vger.kernel.org Cc: Joel Fernandes Link: https://lore.kernel.org/20241009010718.2050182-7-mathieu.desnoyers@efficios.com Acked-by: Masami Hiramatsu (Google) Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/trace/trace_events.h | 1 + kernel/trace/trace_syscalls.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 63071aa5923d..4f22136fd465 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -446,6 +446,7 @@ __DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ static notrace void \ trace_event_raw_event_##call(void *__data, proto) \ { \ + might_fault(); \ preempt_disable_notrace(); \ do_trace_event_raw_event_##call(__data, args); \ preempt_enable_notrace(); \ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index b1cc19806f3d..6d6bbd56ed92 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -303,6 +303,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled. */ + might_fault(); guard(preempt_notrace)(); syscall_nr = trace_get_syscall_nr(current, regs); @@ -348,6 +349,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled. */ + might_fault(); guard(preempt_notrace)(); syscall_nr = trace_get_syscall_nr(current, regs); -- 2.51.0 From cdb537ac417938408ee819992f432c410f2d01a2 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Oct 2024 21:07:17 -0400 Subject: [PATCH 06/16] tracing/perf: Add might_fault check to syscall probes Add a might_fault() check to validate that the perf sys_enter/sys_exit probe callbacks are indeed called from a context where page faults can be handled. 
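For reference, the resulting probe shape after this series is sketched below. This is a simplified composite of the trace_syscalls.c hunks in this series, not verbatim kernel code: with the syscall tracepoint now firing with preemption enabled (under Tasks Trace RCU, see the previous patch), each probe first asserts that faulting is legal here, then disables preemption itself for the ring buffer:

    /* Simplified sketch of the pattern this series applies. */
    static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
    {
            /*
             * Syscall probe called with preemption enabled, but the ring
             * buffer and per-cpu data require preemption to be disabled.
             */
            might_fault();
            guard(preempt_notrace)();

            /* ... look up syscall metadata and record the event ... */
    }
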
Cc: Michael Jeanson Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Alexei Starovoitov Cc: Yonghong Song Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Namhyung Kim Cc: Andrii Nakryiko Cc: bpf@vger.kernel.org Cc: Joel Fernandes Link: https://lore.kernel.org/20241009010718.2050182-8-mathieu.desnoyers@efficios.com Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/trace/perf.h | 1 + kernel/trace/trace_syscalls.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/include/trace/perf.h b/include/trace/perf.h index 15cde7eac8b4..a1754b73a8f5 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -84,6 +84,7 @@ perf_trace_##call(void *__data, proto) \ u64 __count __attribute__((unused)); \ struct task_struct *__task __attribute__((unused)); \ \ + might_fault(); \ preempt_disable_notrace(); \ do_perf_trace_##call(__data, args); \ preempt_enable_notrace(); \ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 6d6bbd56ed92..46aab0ab9350 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -602,6 +602,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled. */ + might_fault(); guard(preempt_notrace)(); syscall_nr = trace_get_syscall_nr(current, regs); @@ -710,6 +711,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled. */ + might_fault(); guard(preempt_notrace)(); syscall_nr = trace_get_syscall_nr(current, regs); -- 2.51.0 From 0850e1bc88b1bdc30f7f0b223a92eb22e5f06be0 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Oct 2024 21:07:18 -0400 Subject: [PATCH 07/16] tracing/bpf: Add might_fault check to syscall probes Add a might_fault() check to validate that the bpf sys_enter/sys_exit probe callbacks are indeed called from a context where page faults can be handled. Cc: Michael Jeanson Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Alexei Starovoitov Cc: Yonghong Song Cc: Paul E. McKenney Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Namhyung Kim Cc: Andrii Nakryiko Cc: bpf@vger.kernel.org Cc: Joel Fernandes Link: https://lore.kernel.org/20241009010718.2050182-9-mathieu.desnoyers@efficios.com Acked-by: Andrii Nakryiko Tested-by: Andrii Nakryiko # BPF parts Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/trace/bpf_probe.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index fec97c93e1c9..183fa2aa2935 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -57,6 +57,7 @@ __bpf_trace_##call(void *__data, proto) \ static notrace void \ __bpf_trace_##call(void *__data, proto) \ { \ + might_fault(); \ preempt_disable_notrace(); \ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \ preempt_enable_notrace(); \ -- 2.51.0 From afe5960dc208fe069ddaaeb0994d857b24ac19d1 Mon Sep 17 00:00:00 2001 From: Levi Yun Date: Fri, 13 Sep 2024 03:13:47 +0100 Subject: [PATCH 08/16] trace/trace_event_perf: remove duplicate samples on the first tracepoint event When a tracepoint event is created with attr.freq = 1, 'hwc->period_left' is not initialized correctly. 
As a result, in the perf_swevent_overflow() function, when the event occurs for the first time, it calculates the event overflow and perf_swevent_set_period() returns 3; this leads to the event being recorded three duplicate times. Steps to reproduce: 1. Enable the tracepoint event & start tracing $ echo 1 > /sys/kernel/tracing/events/module/module_free $ echo 1 > /sys/kernel/tracing/tracing_on 2. Record with perf $ perf record -a --strict-freq -F 1 -e "module:module_free" 3. Trigger a module_free event. $ modprobe -i sunrpc $ modprobe -r sunrpc Result: - Trace pipe result: $ cat trace_pipe modprobe-174509 [003] ..... 6504.868896: module_free: sunrpc - perf sample: modprobe 174509 [003] 6504.868980: module:module_free: sunrpc modprobe 174509 [003] 6504.868980: module:module_free: sunrpc modprobe 174509 [003] 6504.868980: module:module_free: sunrpc Setting period_left via perf_swevent_set_period(), as other software events do, solves this problem. After the patch: - Trace pipe result: $ cat trace_pipe modprobe 1153096 [068] 613468.867774: module:module_free: xfs - perf sample modprobe 1153096 [068] 613468.867794: module:module_free: xfs Link: https://lore.kernel.org/20240913021347.595330-1-yeoreum.yun@arm.com Fixes: bd2b5b12849a ("perf_counter: More aggressive frequency adjustment") Signed-off-by: Levi Yun Acked-by: Namhyung Kim Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_event_perf.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 05e791241812..3ff9caa4a71b 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -352,10 +352,16 @@ void perf_uprobe_destroy(struct perf_event *p_event) int perf_trace_add(struct perf_event *p_event, int flags) { struct trace_event_call *tp_event = p_event->tp_event; + struct hw_perf_event *hwc = &p_event->hw; if (!(flags & PERF_EF_START)) p_event->hw.state = PERF_HES_STOPPED; + if (is_sampling_event(p_event)) { + hwc->last_period = hwc->sample_period; + perf_swevent_set_period(p_event); + } + /* * If TRACE_REG_PERF_ADD returns false; no custom action was performed * and we need to take the default action of enqueueing our event on -- 2.51.0 From eb887c4567d1b0e7684c026fe7df44afa96589e6 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 7 Oct 2024 10:56:28 +0200 Subject: [PATCH 09/16] tracing: Use atomic64_inc_return() in trace_clock_counter() Use atomic64_inc_return(&ref) instead of atomic64_add_return(1, &ref) to use the optimized implementation and ease register pressure around the primitive on targets that implement an optimized variant.
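The two atomic calls return the same post-increment value; only code generation differs. A minimal sketch against the existing counter (next_count() is a hypothetical name standing in for trace_clock_counter()):

    static atomic64_t trace_counter;

    static u64 next_count(void)
    {
            /*
             * Equivalent to atomic64_add_return(1, &trace_counter);
             * targets with a dedicated increment variant can emit
             * better code and reduce register pressure.
             */
            return atomic64_inc_return(&trace_counter);
    }
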
Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Link: https://lore.kernel.org/20241007085651.48544-1-ubizjak@gmail.com Signed-off-by: Uros Bizjak Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 4702efb00ff2..4cb2ebc439be 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -154,5 +154,5 @@ static atomic64_t trace_counter; */ u64 notrace trace_clock_counter(void) { - return atomic64_add_return(1, &trace_counter); + return atomic64_inc_return(&trace_counter); } -- 2.51.0 From 6e59bcc9c8adec9a5bbedfa95a89946c56c510d9 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 30 Oct 2024 16:04:24 +0000 Subject: [PATCH 10/16] rust: add static_branch_unlikely for static_key_false Add just enough support for static key so that we can use it from tracepoints. Tracepoints rely on `static_branch_unlikely` with a `struct static_key_false`, so we add the same functionality to Rust. This patch only provides a generic implementation without code patching (matching the one used when CONFIG_JUMP_LABEL is disabled). Later patches add support for inline asm implementations that use runtime patching. When CONFIG_JUMP_LABEL is unset, `static_key_count` is a static inline function, so a Rust helper is defined for `static_key_count` in this case. If Rust is compiled with LTO, this call should get inlined. The helper can be eliminated once we have the necessary inline asm to make atomic operations from Rust. Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Peter Zijlstra Cc: Josh Poimboeuf Cc: Jason Baron Cc: Ard Biesheuvel Cc: Miguel Ojeda Cc: Alex Gaynor Cc: Wedson Almeida Filho Cc: " =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= " Cc: Benno Lossin Cc: Andreas Hindborg Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. 
Peter Anvin" Cc: Sean Christopherson Cc: Uros Bizjak Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Oliver Upton Cc: Mark Rutland Cc: Ryan Roberts Cc: Fuad Tabba Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Anup Patel Cc: Andrew Jones Cc: Alexandre Ghiti Cc: Conor Dooley Cc: Samuel Holland Cc: Huacai Chen Cc: WANG Xuerui Cc: Bibo Mao Cc: Tiezhu Yang Cc: Andrew Morton Cc: Tianrui Zhao Link: https://lore.kernel.org/20241030-tracepoint-v12-1-eec7f0f8ad22@google.com Reviewed-by: Boqun Feng Reviewed-by: Gary Guo Signed-off-by: Alice Ryhl Signed-off-by: Steven Rostedt (Google) --- rust/bindings/bindings_helper.h | 1 + rust/helpers/helpers.c | 1 + rust/helpers/jump_label.c | 14 ++++++++++++++ rust/kernel/jump_label.rs | 30 ++++++++++++++++++++++++++++++ rust/kernel/lib.rs | 1 + 5 files changed, 47 insertions(+) create mode 100644 rust/helpers/jump_label.c create mode 100644 rust/kernel/jump_label.rs diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index ae82e9c941af..e0846e7e93e6 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index 30f40149f3a9..17e1b60d178f 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -12,6 +12,7 @@ #include "build_assert.c" #include "build_bug.c" #include "err.c" +#include "jump_label.c" #include "kunit.c" #include "mutex.c" #include "page.c" diff --git a/rust/helpers/jump_label.c b/rust/helpers/jump_label.c new file mode 100644 index 000000000000..fc1f1e0df08e --- /dev/null +++ b/rust/helpers/jump_label.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2024 Google LLC. + */ + +#include + +#ifndef CONFIG_JUMP_LABEL +int rust_helper_static_key_count(struct static_key *key) +{ + return static_key_count(key); +} +#endif diff --git a/rust/kernel/jump_label.rs b/rust/kernel/jump_label.rs new file mode 100644 index 000000000000..4b7655b2a022 --- /dev/null +++ b/rust/kernel/jump_label.rs @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Logic for static keys. +//! +//! C header: [`include/linux/jump_label.h`](srctree/include/linux/jump_label.h). + +/// Branch based on a static key. +/// +/// Takes three arguments: +/// +/// * `key` - the path to the static variable containing the `static_key`. +/// * `keytyp` - the type of `key`. +/// * `field` - the name of the field of `key` that contains the `static_key`. +/// +/// # Safety +/// +/// The macro must be used with a real static key defined by C. +#[macro_export] +macro_rules! 
static_branch_unlikely { + ($key:path, $keytyp:ty, $field:ident) => {{ + let _key: *const $keytyp = ::core::ptr::addr_of!($key); + let _key: *const $crate::bindings::static_key_false = ::core::ptr::addr_of!((*_key).$field); + let _key: *const $crate::bindings::static_key = _key.cast(); + + $crate::bindings::static_key_count(_key.cast_mut()) > 0 + }}; +} +pub use static_branch_unlikely; diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index b5f4b3ce6b48..708ff817ccc3 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -36,6 +36,7 @@ pub mod error; pub mod firmware; pub mod init; pub mod ioctl; +pub mod jump_label; #[cfg(CONFIG_KUNIT)] pub mod kunit; pub mod list; -- 2.51.0 From ad37bcd965fda43f34cf5cc051f5d310880bd1e7 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 30 Oct 2024 16:04:25 +0000 Subject: [PATCH 11/16] rust: add tracepoint support Make it possible to have Rust code call into tracepoints defined by C code. It is still required that the tracepoint is declared in a C header, and that this header is included in the input to bindgen. Instead of calling __DO_TRACE directly, the exported rust_do_trace_ function calls an inline helper function. This is because the `cond` argument does not exist at the callsite of DEFINE_RUST_DO_TRACE. __DECLARE_TRACE always emits an inline static and an extern declaration that is only used when CREATE_RUST_TRACE_POINTS is set. These should not end up in the final binary so it is not a problem that they sometimes are emitted without a user. Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Peter Zijlstra Cc: Josh Poimboeuf Cc: Jason Baron Cc: Ard Biesheuvel Cc: Miguel Ojeda Cc: Alex Gaynor Cc: Wedson Almeida Filho Cc: " =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= " Cc: Benno Lossin Cc: Andreas Hindborg Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Sean Christopherson Cc: Uros Bizjak Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Oliver Upton Cc: Mark Rutland Cc: Ryan Roberts Cc: Fuad Tabba Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Anup Patel Cc: Andrew Jones Cc: Alexandre Ghiti Cc: Conor Dooley Cc: Samuel Holland Cc: Huacai Chen Cc: WANG Xuerui Cc: Bibo Mao Cc: Tiezhu Yang Cc: Andrew Morton Cc: Tianrui Zhao Link: https://lore.kernel.org/20241030-tracepoint-v12-2-eec7f0f8ad22@google.com Reviewed-by: Carlos Llamas Reviewed-by: Gary Guo Reviewed-by: Boqun Feng Signed-off-by: Alice Ryhl Signed-off-by: Steven Rostedt (Google) --- include/linux/tracepoint.h | 28 ++++++++++++++++++- include/trace/define_trace.h | 12 ++++++++ rust/bindings/bindings_helper.h | 1 + rust/kernel/lib.rs | 1 + rust/kernel/tracepoint.rs | 49 +++++++++++++++++++++++++++++++++ 5 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 rust/kernel/tracepoint.rs diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 0dc67fad706c..84c4924e499f 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -225,6 +225,18 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) preempt_enable_notrace(); \ } while (0) +/* + * Declare an exported function that Rust code can call to trigger this + * tracepoint. This function does not include the static branch; that is done + * in Rust to avoid a function call when the tracepoint is disabled. 
+ */ +#define DEFINE_RUST_DO_TRACE(name, proto, args) +#define __DEFINE_RUST_DO_TRACE(name, proto, args) \ + notrace void rust_do_trace_##name(proto) \ + { \ + __rust_do_trace_##name(args); \ + } + /* * Make sure the alignment of the structure in the __tracepoints section will * not add unwanted padding between the beginning of the section and the @@ -240,6 +252,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) extern int __traceiter_##name(data_proto); \ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \ extern struct tracepoint __tracepoint_##name; \ + extern void rust_do_trace_##name(proto); \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ @@ -271,6 +284,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \ + static inline void __rust_do_trace_##name(proto) \ + { \ + __DO_TRACE(name, \ + TP_ARGS(args), \ + TP_CONDITION(cond), 0); \ + } \ static inline void trace_##name(proto) \ { \ if (static_branch_unlikely(&__tracepoint_##name.key)) \ @@ -285,6 +304,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __DECLARE_TRACE_SYSCALL(name, proto, args, cond, data_proto) \ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \ + static inline void __rust_do_trace_##name(proto) \ + { \ + __DO_TRACE(name, \ + TP_ARGS(args), \ + TP_CONDITION(cond), 1); \ + } \ static inline void trace_##name(proto) \ { \ if (static_branch_unlikely(&__tracepoint_##name.key)) \ @@ -339,7 +364,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) void __probestub_##_name(void *__data, proto) \ { \ } \ - DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); + DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); \ + DEFINE_RUST_DO_TRACE(_name, TP_PROTO(proto), TP_ARGS(args)) #define DEFINE_TRACE(name, proto, args) \ DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args)); diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index ff5fa17a6259..0557626b6f6a 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -76,6 +76,13 @@ #define DECLARE_TRACE(name, proto, args) \ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args)) +/* If requested, create helpers for calling these tracepoints from Rust. 
*/ +#ifdef CREATE_RUST_TRACE_POINTS +#undef DEFINE_RUST_DO_TRACE +#define DEFINE_RUST_DO_TRACE(name, proto, args) \ + __DEFINE_RUST_DO_TRACE(name, PARAMS(proto), PARAMS(args)) +#endif + #undef TRACE_INCLUDE #undef __TRACE_INCLUDE @@ -134,6 +141,11 @@ # undef UNDEF_TRACE_INCLUDE_PATH #endif +#ifdef CREATE_RUST_TRACE_POINTS +# undef DEFINE_RUST_DO_TRACE +# define DEFINE_RUST_DO_TRACE(name, proto, args) +#endif + /* We may be processing more files */ #define CREATE_TRACE_POINTS diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index e0846e7e93e6..752572e638a6 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index 708ff817ccc3..55f81f49024e 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -54,6 +54,7 @@ pub mod str; pub mod sync; pub mod task; pub mod time; +pub mod tracepoint; pub mod types; pub mod uaccess; pub mod workqueue; diff --git a/rust/kernel/tracepoint.rs b/rust/kernel/tracepoint.rs new file mode 100644 index 000000000000..c6e80aa99e8e --- /dev/null +++ b/rust/kernel/tracepoint.rs @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Logic for tracepoints. + +/// Declare the Rust entry point for a tracepoint. +/// +/// This macro generates an unsafe function that calls into C, and its safety requirements will be +/// whatever the relevant C code requires. To document these safety requirements, you may add +/// doc-comments when invoking the macro. +#[macro_export] +macro_rules! declare_trace { + ($($(#[$attr:meta])* $pub:vis unsafe fn $name:ident($($argname:ident : $argtyp:ty),* $(,)?);)*) => {$( + $( #[$attr] )* + #[inline(always)] + $pub unsafe fn $name($($argname : $argtyp),*) { + #[cfg(CONFIG_TRACEPOINTS)] + { + // SAFETY: It's always okay to query the static key for a tracepoint. + let should_trace = unsafe { + $crate::macros::paste! { + $crate::jump_label::static_branch_unlikely!( + $crate::bindings::[< __tracepoint_ $name >], + $crate::bindings::tracepoint, + key + ) + } + }; + + if should_trace { + $crate::macros::paste! { + // SAFETY: The caller guarantees that it is okay to call this tracepoint. + unsafe { $crate::bindings::[< rust_do_trace_ $name >]($($argname),*) }; + } + } + } + + #[cfg(not(CONFIG_TRACEPOINTS))] + { + // If tracepoints are disabled, insert a trivial use of each argument + // to avoid unused argument warnings. + $( let _unused = $argname; )* + } + } + )*} +} + +pub use declare_trace; -- 2.51.0 From 91d39024e1b02914cc5e2dbc137908e29b269ce4 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 30 Oct 2024 16:04:26 +0000 Subject: [PATCH 12/16] rust: samples: add tracepoint to Rust sample This updates the Rust printing sample to invoke a tracepoint. This ensures that we have a user in-tree from the get-go even though the patch is being merged before its real user. Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Peter Zijlstra Cc: Josh Poimboeuf Cc: Jason Baron Cc: Ard Biesheuvel Cc: Miguel Ojeda Cc: Alex Gaynor Cc: Wedson Almeida Filho Cc: Gary Guo Cc: " =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= " Cc: Benno Lossin Cc: Andreas Hindborg Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. 
Peter Anvin" Cc: Sean Christopherson Cc: Uros Bizjak Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Oliver Upton Cc: Mark Rutland Cc: Ryan Roberts Cc: Fuad Tabba Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Anup Patel Cc: Andrew Jones Cc: Alexandre Ghiti Cc: Conor Dooley Cc: Samuel Holland Cc: Huacai Chen Cc: WANG Xuerui Cc: Bibo Mao Cc: Tiezhu Yang Cc: Andrew Morton Cc: Tianrui Zhao Link: https://lore.kernel.org/20241030-tracepoint-v12-3-eec7f0f8ad22@google.com Reviewed-by: Boqun Feng Signed-off-by: Alice Ryhl Signed-off-by: Steven Rostedt (Google) --- MAINTAINERS | 1 + include/trace/events/rust_sample.h | 31 ++++++++++++++++++++++++++++++ rust/bindings/bindings_helper.h | 1 + samples/rust/Makefile | 3 ++- samples/rust/rust_print.rs | 18 +++++++++++++++++ samples/rust/rust_print_events.c | 8 ++++++++ 6 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 include/trace/events/rust_sample.h create mode 100644 samples/rust/rust_print_events.c diff --git a/MAINTAINERS b/MAINTAINERS index a097afd76ded..a9b71411d77a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20223,6 +20223,7 @@ C: zulip://rust-for-linux.zulipchat.com P: https://rust-for-linux.com/contributing T: git https://github.com/Rust-for-Linux/linux.git rust-next F: Documentation/rust/ +F: include/trace/events/rust_sample.h F: rust/ F: samples/rust/ F: scripts/*rust* diff --git a/include/trace/events/rust_sample.h b/include/trace/events/rust_sample.h new file mode 100644 index 000000000000..dbc80ca2e465 --- /dev/null +++ b/include/trace/events/rust_sample.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tracepoints for `samples/rust/rust_print.rs`. + * + * Copyright (C) 2024 Google, Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rust_sample + +#if !defined(_RUST_SAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _RUST_SAMPLE_TRACE_H + +#include + +TRACE_EVENT(rust_sample_loaded, + TP_PROTO(int magic_number), + TP_ARGS(magic_number), + TP_STRUCT__entry( + __field(int, magic_number) + ), + TP_fast_assign( + __entry->magic_number = magic_number; + ), + TP_printk("magic=%d", __entry->magic_number) +); + +#endif /* _RUST_SAMPLE_TRACE_H */ + +/* This part must be outside protection */ +#include diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 752572e638a6..b072c197ce9e 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -23,6 +23,7 @@ #include #include #include +#include /* `bindgen` gets confused at certain things. 
*/ const size_t RUST_CONST_HELPER_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN; diff --git a/samples/rust/Makefile b/samples/rust/Makefile index 03086dabbea4..f29280ec4820 100644 --- a/samples/rust/Makefile +++ b/samples/rust/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 +ccflags-y += -I$(src) # needed for trace events obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o -obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o +obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o rust_print_events.o subdir-$(CONFIG_SAMPLE_RUST_HOSTPROGS) += hostprogs diff --git a/samples/rust/rust_print.rs b/samples/rust/rust_print.rs index 6eabb0d79ea3..6d14b08cac1c 100644 --- a/samples/rust/rust_print.rs +++ b/samples/rust/rust_print.rs @@ -69,6 +69,8 @@ impl kernel::Module for RustPrint { arc_print()?; + trace::trace_rust_sample_loaded(42); + Ok(RustPrint) } } @@ -78,3 +80,19 @@ impl Drop for RustPrint { pr_info!("Rust printing macros sample (exit)\n"); } } + +mod trace { + use core::ffi::c_int; + + kernel::declare_trace! { + /// # Safety + /// + /// Always safe to call. + unsafe fn rust_sample_loaded(magic: c_int); + } + + pub(crate) fn trace_rust_sample_loaded(magic: i32) { + // SAFETY: Always safe to call. + unsafe { rust_sample_loaded(magic as c_int) } + } +} diff --git a/samples/rust/rust_print_events.c b/samples/rust/rust_print_events.c new file mode 100644 index 000000000000..a9169ff0edf1 --- /dev/null +++ b/samples/rust/rust_print_events.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2024 Google LLC + */ + +#define CREATE_TRACE_POINTS +#define CREATE_RUST_TRACE_POINTS +#include -- 2.51.0 From aecaf181651c91b8c89058708b065da9312a0ccd Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 30 Oct 2024 16:04:27 +0000 Subject: [PATCH 13/16] jump_label: adjust inline asm to be consistent To avoid duplication of inline asm between C and Rust, we need to import the inline asm from the relevant `jump_label.h` header into Rust. To make that easier, this patch updates the header files to expose the inline asm via a new ARCH_STATIC_BRANCH_ASM macro. The header files are all updated to define a ARCH_STATIC_BRANCH_ASM that takes the same arguments in a consistent order so that Rust can use the same logic for every architecture. Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Josh Poimboeuf Cc: Jason Baron Cc: Ard Biesheuvel Cc: Alex Gaynor Cc: Wedson Almeida Filho Cc: Boqun Feng Cc: Gary Guo Cc: " =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= " Cc: Benno Lossin Cc: Andreas Hindborg Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. 
Peter Anvin" Cc: Sean Christopherson Cc: Uros Bizjak Cc: Will Deacon Cc: Marc Zyngier Cc: Oliver Upton Cc: Mark Rutland Cc: Ryan Roberts Cc: Fuad Tabba Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Anup Patel Cc: Andrew Jones Cc: Alexandre Ghiti Cc: Conor Dooley Cc: Samuel Holland Cc: Huacai Chen Cc: WANG Xuerui Cc: Bibo Mao Cc: Tiezhu Yang Cc: Andrew Morton Cc: Tianrui Zhao Cc: Palmer Dabbelt Link: https://lore.kernel.org/20241030-tracepoint-v12-4-eec7f0f8ad22@google.com Suggested-by: Peter Zijlstra (Intel) Co-developed-by: Miguel Ojeda Signed-off-by: Miguel Ojeda Acked-by: Peter Zijlstra (Intel) Acked-by: Catalin Marinas Acked-by: Palmer Dabbelt # RISC-V Signed-off-by: Alice Ryhl Signed-off-by: Steven Rostedt (Google) --- arch/arm/include/asm/jump_label.h | 14 ++++--- arch/arm64/include/asm/jump_label.h | 20 ++++++---- arch/loongarch/include/asm/jump_label.h | 16 +++++--- arch/riscv/include/asm/jump_label.h | 50 ++++++++++++++----------- arch/x86/include/asm/jump_label.h | 35 +++++++---------- 5 files changed, 73 insertions(+), 62 deletions(-) diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h index e4eb54f6cd9f..a35aba7f548c 100644 --- a/arch/arm/include/asm/jump_label.h +++ b/arch/arm/include/asm/jump_label.h @@ -9,13 +9,17 @@ #define JUMP_LABEL_NOP_SIZE 4 +/* This macro is also expanded on the Rust side. */ +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + "1:\n\t" \ + WASM(nop) "\n\t" \ + ".pushsection __jump_table, \"aw\"\n\t" \ + ".word 1b, " label ", " key "\n\t" \ + ".popsection\n\t" \ + static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { - asm goto("1:\n\t" - WASM(nop) "\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - ".word 1b, %l[l_yes], %c0\n\t" - ".popsection\n\t" + asm goto(ARCH_STATIC_BRANCH_ASM("%c0", "%l[l_yes]") : : "i" (&((char *)key)[branch]) : : l_yes); return false; diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h index a0a5bbae7229..424ed421cd97 100644 --- a/arch/arm64/include/asm/jump_label.h +++ b/arch/arm64/include/asm/jump_label.h @@ -19,10 +19,14 @@ #define JUMP_TABLE_ENTRY(key, label) \ ".pushsection __jump_table, \"aw\"\n\t" \ ".align 3\n\t" \ - ".long 1b - ., %l["#label"] - .\n\t" \ - ".quad %c0 - .\n\t" \ - ".popsection\n\t" \ - : : "i"(key) : : label + ".long 1b - ., " label " - .\n\t" \ + ".quad " key " - .\n\t" \ + ".popsection\n\t" + +/* This macro is also expanded on the Rust side. 
*/ +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + "1: nop\n\t" \ + JUMP_TABLE_ENTRY(key, label) static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) @@ -30,8 +34,8 @@ static __always_inline bool arch_static_branch(struct static_key * const key, char *k = &((char *)key)[branch]; asm goto( - "1: nop \n\t" - JUMP_TABLE_ENTRY(k, l_yes) + ARCH_STATIC_BRANCH_ASM("%c0", "%l[l_yes]") + : : "i"(k) : : l_yes ); return false; @@ -43,9 +47,11 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke const bool branch) { char *k = &((char *)key)[branch]; + asm goto( "1: b %l[l_yes] \n\t" - JUMP_TABLE_ENTRY(k, l_yes) + JUMP_TABLE_ENTRY("%c0", "%l[l_yes]") + : : "i"(k) : : l_yes ); return false; l_yes: diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h index 29acfe3de3fa..8a924bd69d19 100644 --- a/arch/loongarch/include/asm/jump_label.h +++ b/arch/loongarch/include/asm/jump_label.h @@ -13,18 +13,22 @@ #define JUMP_LABEL_NOP_SIZE 4 -#define JUMP_TABLE_ENTRY \ +/* This macro is also expanded on the Rust side. */ +#define JUMP_TABLE_ENTRY(key, label) \ ".pushsection __jump_table, \"aw\" \n\t" \ ".align 3 \n\t" \ - ".long 1b - ., %l[l_yes] - . \n\t" \ - ".quad %0 - . \n\t" \ + ".long 1b - ., " label " - . \n\t" \ + ".quad " key " - . \n\t" \ ".popsection \n\t" +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + "1: nop \n\t" \ + JUMP_TABLE_ENTRY(key, label) + static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) { asm goto( - "1: nop \n\t" - JUMP_TABLE_ENTRY + ARCH_STATIC_BRANCH_ASM("%0", "%l[l_yes]") : : "i"(&((char *)key)[branch]) : : l_yes); return false; @@ -37,7 +41,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke { asm goto( "1: b %l[l_yes] \n\t" - JUMP_TABLE_ENTRY + JUMP_TABLE_ENTRY("%0", "%l[l_yes]") : : "i"(&((char *)key)[branch]) : : l_yes); return false; diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h index 1c768d02bd0c..87a71cc6d146 100644 --- a/arch/riscv/include/asm/jump_label.h +++ b/arch/riscv/include/asm/jump_label.h @@ -16,21 +16,28 @@ #define JUMP_LABEL_NOP_SIZE 4 +#define JUMP_TABLE_ENTRY(key, label) \ + ".pushsection __jump_table, \"aw\" \n\t" \ + ".align " RISCV_LGPTR " \n\t" \ + ".long 1b - ., " label " - . \n\t" \ + "" RISCV_PTR " " key " - . \n\t" \ + ".popsection \n\t" + +/* This macro is also expanded on the Rust side. */ +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + " .align 2 \n\t" \ + " .option push \n\t" \ + " .option norelax \n\t" \ + " .option norvc \n\t" \ + "1: nop \n\t" \ + " .option pop \n\t" \ + JUMP_TABLE_ENTRY(key, label) + static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) { asm goto( - " .align 2 \n\t" - " .option push \n\t" - " .option norelax \n\t" - " .option norvc \n\t" - "1: nop \n\t" - " .option pop \n\t" - " .pushsection __jump_table, \"aw\" \n\t" - " .align " RISCV_LGPTR " \n\t" - " .long 1b - ., %l[label] - . \n\t" - " " RISCV_PTR " %0 - . 
\n\t" - " .popsection \n\t" + ARCH_STATIC_BRANCH_ASM("%0", "%l[label]") : : "i"(&((char *)key)[branch]) : : label); return false; @@ -38,21 +45,20 @@ label: return true; } +#define ARCH_STATIC_BRANCH_JUMP_ASM(key, label) \ + " .align 2 \n\t" \ + " .option push \n\t" \ + " .option norelax \n\t" \ + " .option norvc \n\t" \ + "1: j " label " \n\t" \ + " .option pop \n\t" \ + JUMP_TABLE_ENTRY(key, label) + static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch) { asm goto( - " .align 2 \n\t" - " .option push \n\t" - " .option norelax \n\t" - " .option norvc \n\t" - "1: j %l[label] \n\t" - " .option pop \n\t" - " .pushsection __jump_table, \"aw\" \n\t" - " .align " RISCV_LGPTR " \n\t" - " .long 1b - ., %l[label] - . \n\t" - " " RISCV_PTR " %0 - . \n\t" - " .popsection \n\t" + ARCH_STATIC_BRANCH_JUMP_ASM("%0", "%l[label]") : : "i"(&((char *)key)[branch]) : : label); return false; diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index cbbef32517f0..3f1c1d6c0da1 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -12,35 +12,28 @@ #include #include -#define JUMP_TABLE_ENTRY \ +#define JUMP_TABLE_ENTRY(key, label) \ ".pushsection __jump_table, \"aw\" \n\t" \ _ASM_ALIGN "\n\t" \ ".long 1b - . \n\t" \ - ".long %l[l_yes] - . \n\t" \ - _ASM_PTR "%c0 + %c1 - .\n\t" \ + ".long " label " - . \n\t" \ + _ASM_PTR " " key " - . \n\t" \ ".popsection \n\t" +/* This macro is also expanded on the Rust side. */ #ifdef CONFIG_HAVE_JUMP_LABEL_HACK - -static __always_inline bool arch_static_branch(struct static_key *key, bool branch) -{ - asm goto("1:" - "jmp %l[l_yes] # objtool NOPs this \n\t" - JUMP_TABLE_ENTRY - : : "i" (key), "i" (2 | branch) : : l_yes); - - return false; -l_yes: - return true; -} - +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + "1: jmp " label " # objtool NOPs this \n\t" \ + JUMP_TABLE_ENTRY(key " + 2", label) #else /* !CONFIG_HAVE_JUMP_LABEL_HACK */ +#define ARCH_STATIC_BRANCH_ASM(key, label) \ + "1: .byte " __stringify(BYTES_NOP5) "\n\t" \ + JUMP_TABLE_ENTRY(key, label) +#endif /* CONFIG_HAVE_JUMP_LABEL_HACK */ static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) { - asm goto("1:" - ".byte " __stringify(BYTES_NOP5) "\n\t" - JUMP_TABLE_ENTRY + asm goto(ARCH_STATIC_BRANCH_ASM("%c0 + %c1", "%l[l_yes]") : : "i" (key), "i" (branch) : : l_yes); return false; @@ -48,13 +41,11 @@ l_yes: return true; } -#endif /* CONFIG_HAVE_JUMP_LABEL_HACK */ - static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch) { asm goto("1:" "jmp %l[l_yes]\n\t" - JUMP_TABLE_ENTRY + JUMP_TABLE_ENTRY("%c0 + %c1", "%l[l_yes]") : : "i" (key), "i" (branch) : : l_yes); return false; -- 2.51.0 From 169484ab667788e73d1817d75c2a2c4af37dbc7f Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 30 Oct 2024 16:04:28 +0000 Subject: [PATCH 14/16] rust: add arch_static_branch To allow the Rust implementation of static_key_false to use runtime code patching instead of the generic implementation, pull in the relevant inline assembly from the jump_label.h header by running the C preprocessor on a .rs.S file. Build rules are added for .rs.S files. Since the relevant inline asm has been adjusted to export the inline asm via the ARCH_STATIC_BRANCH_ASM macro in a consistent way, the Rust side does not need architecture specific code to pull in the asm. 
It is not possible to use the existing C implementation of arch_static_branch via a Rust helper because it passes the argument `key` to inline assembly as an 'i' parameter. Any attempt to add a C helper for this function will fail to compile because the value of `key` must be known at compile-time. Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Josh Poimboeuf Cc: Jason Baron Cc: Ard Biesheuvel Cc: Alex Gaynor Cc: Wedson Almeida Filho Cc: Gary Guo Cc: " =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= " Cc: Benno Lossin Cc: Andreas Hindborg Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Sean Christopherson Cc: Uros Bizjak Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Oliver Upton Cc: Mark Rutland Cc: Ryan Roberts Cc: Fuad Tabba Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Anup Patel Cc: Andrew Jones Cc: Alexandre Ghiti Cc: Conor Dooley Cc: Samuel Holland Cc: Huacai Chen Cc: WANG Xuerui Cc: Bibo Mao Cc: Tiezhu Yang Cc: Andrew Morton Cc: Tianrui Zhao Link: https://lore.kernel.org/20241030-tracepoint-v12-5-eec7f0f8ad22@google.com Suggested-by: Peter Zijlstra (Intel) Co-developed-by: Miguel Ojeda Signed-off-by: Miguel Ojeda Reviewed-by: Boqun Feng Signed-off-by: Alice Ryhl Signed-off-by: Steven Rostedt (Google) --- rust/Makefile | 6 ++++ rust/kernel/.gitignore | 3 ++ rust/kernel/arch_static_branch_asm.rs.S | 7 ++++ rust/kernel/jump_label.rs | 46 ++++++++++++++++++++++++- rust/kernel/lib.rs | 35 +++++++++++++++++++ scripts/Makefile.build | 9 ++++- 6 files changed, 104 insertions(+), 2 deletions(-) create mode 100644 rust/kernel/.gitignore create mode 100644 rust/kernel/arch_static_branch_asm.rs.S diff --git a/rust/Makefile b/rust/Makefile index b5e0a73b78f3..bc2a9071dd29 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -36,6 +36,8 @@ always-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.c obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated.o obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.o +always-$(subst y,$(CONFIG_RUST),$(CONFIG_JUMP_LABEL)) += kernel/arch_static_branch_asm.rs + # Avoids running `$(RUSTC)` for the sysroot when it may not be available. ifdef CONFIG_RUST @@ -424,4 +426,8 @@ $(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \ $(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE +$(call if_changed_rule,rustc_library) +ifdef CONFIG_JUMP_LABEL +$(obj)/kernel.o: $(obj)/kernel/arch_static_branch_asm.rs +endif + endif # CONFIG_RUST diff --git a/rust/kernel/.gitignore b/rust/kernel/.gitignore new file mode 100644 index 000000000000..d082731007c6 --- /dev/null +++ b/rust/kernel/.gitignore @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +/arch_static_branch_asm.rs diff --git a/rust/kernel/arch_static_branch_asm.rs.S b/rust/kernel/arch_static_branch_asm.rs.S new file mode 100644 index 000000000000..2afb638708db --- /dev/null +++ b/rust/kernel/arch_static_branch_asm.rs.S @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +// Cut here. + +::kernel::concat_literals!(ARCH_STATIC_BRANCH_ASM("{symb} + {off} + {branch}", "{l_yes}")) diff --git a/rust/kernel/jump_label.rs b/rust/kernel/jump_label.rs index 4b7655b2a022..2f2df03a3275 100644 --- a/rust/kernel/jump_label.rs +++ b/rust/kernel/jump_label.rs @@ -24,7 +24,51 @@ macro_rules! 
static_branch_unlikely { let _key: *const $crate::bindings::static_key_false = ::core::ptr::addr_of!((*_key).$field); let _key: *const $crate::bindings::static_key = _key.cast(); - $crate::bindings::static_key_count(_key.cast_mut()) > 0 + #[cfg(not(CONFIG_JUMP_LABEL))] + { + $crate::bindings::static_key_count(_key) > 0 + } + + #[cfg(CONFIG_JUMP_LABEL)] + $crate::jump_label::arch_static_branch! { $key, $keytyp, $field, false } }}; } pub use static_branch_unlikely; + +/// Assert that the assembly block evaluates to a string literal. +#[cfg(CONFIG_JUMP_LABEL)] +const _: &str = include!(concat!( + env!("OBJTREE"), + "/rust/kernel/arch_static_branch_asm.rs" +)); + +#[macro_export] +#[doc(hidden)] +#[cfg(CONFIG_JUMP_LABEL)] +macro_rules! arch_static_branch { + ($key:path, $keytyp:ty, $field:ident, $branch:expr) => {'my_label: { + $crate::asm!( + include!(concat!(env!("OBJTREE"), "/rust/kernel/arch_static_branch_asm.rs")); + l_yes = label { + break 'my_label true; + }, + symb = sym $key, + off = const ::core::mem::offset_of!($keytyp, $field), + branch = const $crate::jump_label::bool_to_int($branch), + ); + + break 'my_label false; + }}; +} + +#[cfg(CONFIG_JUMP_LABEL)] +pub use arch_static_branch; + +/// A helper used by inline assembly to pass a boolean to as a `const` parameter. +/// +/// Using this function instead of a cast lets you assert that the input is a boolean, and not some +/// other type that can also be cast to an integer. +#[doc(hidden)] +pub const fn bool_to_int(b: bool) -> i32 { + b as i32 +} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index 55f81f49024e..97286b99270e 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -148,3 +148,38 @@ macro_rules! container_of { ptr.sub(offset) as *const $type }} } + +/// Helper for `.rs.S` files. +#[doc(hidden)] +#[macro_export] +macro_rules! concat_literals { + ($( $asm:literal )* ) => { + ::core::concat!($($asm),*) + }; +} + +/// Wrapper around `asm!` configured for use in the kernel. +/// +/// Uses a semicolon to avoid parsing ambiguities, even though this does not match native `asm!` +/// syntax. +// For x86, `asm!` uses intel syntax by default, but we want to use at&t syntax in the kernel. +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[macro_export] +macro_rules! asm { + ($($asm:expr),* ; $($rest:tt)*) => { + ::core::arch::asm!( $($asm)*, options(att_syntax), $($rest)* ) + }; +} + +/// Wrapper around `asm!` configured for use in the kernel. +/// +/// Uses a semicolon to avoid parsing ambiguities, even though this does not match native `asm!` +/// syntax. +// For non-x86 arches we just pass through to `asm!`. +#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +#[macro_export] +macro_rules! asm { + ($($asm:expr),* ; $($rest:tt)*) => { + ::core::arch::asm!( $($asm)*, $($rest)* ) + }; +} diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 8f423a1faf50..03ee558fcd4d 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -248,12 +248,13 @@ $(obj)/%.lst: $(obj)/%.c FORCE # Compile Rust sources (.rs) # --------------------------------------------------------------------------- -rust_allowed_features := new_uninit +rust_allowed_features := asm_const,asm_goto,new_uninit # `--out-dir` is required to avoid temporaries being created by `rustc` in the # current working directory, which may be not accessible in the out-of-tree # modules case. 
rust_common_cmd = \ + OBJTREE=$(abspath $(objtree)) \ RUST_MODFILE=$(modfile) $(RUSTC_OR_CLIPPY) $(rust_flags) \ -Zallow-features=$(rust_allowed_features) \ -Zcrate-attr=no_std \ @@ -303,6 +304,12 @@ quiet_cmd_rustc_ll_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@ $(obj)/%.ll: $(obj)/%.rs FORCE +$(call if_changed_dep,rustc_ll_rs) +quiet_cmd_rustc_rs_rs_S = RSCPP $(quiet_modtag) $@ + cmd_rustc_rs_rs_S = $(CPP) $(c_flags) -xc -C -P $< | sed '1,/^\/\/ Cut here.$$/d' >$@ + +$(obj)/%.rs: $(obj)/%.rs.S FORCE + +$(call if_changed_dep,rustc_rs_rs_S) + # Compile assembler sources (.S) # --------------------------------------------------------------------------- -- 2.51.0 From 22193c586b43ee88d66954395885742a6e4a49a9 Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Mon, 11 Nov 2024 23:08:05 +0100 Subject: [PATCH 15/16] samples: rust: fix `rust_print` build making it a combined module MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit The `rust_print` module, when built as a module, fails to build with: ERROR: modpost: missing MODULE_LICENSE() in samples/rust/rust_print_events.o ERROR: modpost: "__tracepoint_rust_sample_loaded" [samples/rust/rust_print.ko] undefined! ERROR: modpost: "rust_do_trace_rust_sample_loaded" [samples/rust/rust_print.ko] undefined! Fix it by building it as a combined one. Cc: Masami Hiramatsu Cc: Alex Gaynor Cc: Mark Rutland Cc: Boqun Feng Cc: Gary Guo Cc: Björn Roy Baron Cc: Benno Lossin Cc: Andreas Hindborg Cc: Alice Ryhl Cc: Trevor Gross Cc: "Linux Next Mailing List" Link: https://lore.kernel.org/20241111220805.708889-1-ojeda@kernel.org Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/all/20241108152149.28459a72@canb.auug.org.au/ Fixes: 91d39024e1b0 ("rust: samples: add tracepoint to Rust sample") Signed-off-by: Miguel Ojeda Signed-off-by: Steven Rostedt (Google) --- samples/rust/Makefile | 4 +++- samples/rust/{rust_print.rs => rust_print_main.rs} | 0 2 files changed, 3 insertions(+), 1 deletion(-) rename samples/rust/{rust_print.rs => rust_print_main.rs} (100%) diff --git a/samples/rust/Makefile b/samples/rust/Makefile index f29280ec4820..c1a5c1655395 100644 --- a/samples/rust/Makefile +++ b/samples/rust/Makefile @@ -2,6 +2,8 @@ ccflags-y += -I$(src) # needed for trace events obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o -obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o rust_print_events.o +obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o + +rust_print-y := rust_print_main.o rust_print_events.o subdir-$(CONFIG_SAMPLE_RUST_HOSTPROGS) += hostprogs diff --git a/samples/rust/rust_print.rs b/samples/rust/rust_print_main.rs similarity index 100% rename from samples/rust/rust_print.rs rename to samples/rust/rust_print_main.rs -- 2.51.0 From 7643155dce1428fd63e47d7afe8bf3dbca20cc25 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Mon, 18 Nov 2024 20:27:26 +0000 Subject: [PATCH 16/16] jump_label: rust: pass a mut ptr to `static_key_count` When building the rust_print sample with CONFIG_JUMP_LABEL=n, the Rust static key support falls back to using static_key_count. This function accepts a mutable pointer to the `struct static_key`, but the Rust abstractions are incorrectly passing a const pointer. This means that builds using CONFIG_JUMP_LABEL=n and SAMPLE_RUST_PRINT=y fail with the following error message: error[E0308]: mismatched types --> /samples/rust/rust_print_main.rs:87:5 | 87 | / kernel::declare_trace! { 88 | | /// # Safety 89 | | /// 90 | | /// Always safe to call. 
91 | | unsafe fn rust_sample_loaded(magic: c_int); 92 | | } | | ^ | | | | |_____types differ in mutability | arguments to this function are incorrect | = note: expected raw pointer `*mut kernel::bindings::static_key` found raw pointer `*const kernel::bindings::static_key` note: function defined here --> /rust/bindings/bindings_helpers_generated.rs:33:12 | 33 | pub fn static_key_count(key: *mut static_key) -> c_int; | ^^^^^^^^^^^^^^^^ To fix this, insert a pointer cast so that the pointer is mutable. Link: https://lore.kernel.org/20241118202727.73646-1-aliceryhl@google.com Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202411181440.qEdcuyh6-lkp@intel.com/ Fixes: 169484ab6677 ("rust: add arch_static_branch") Signed-off-by: Alice Ryhl Acked-by: Miguel Ojeda Signed-off-by: Steven Rostedt (Google) --- rust/kernel/jump_label.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/kernel/jump_label.rs b/rust/kernel/jump_label.rs index 2f2df03a3275..b5aff632ecc7 100644 --- a/rust/kernel/jump_label.rs +++ b/rust/kernel/jump_label.rs @@ -26,7 +26,7 @@ macro_rules! static_branch_unlikely { #[cfg(not(CONFIG_JUMP_LABEL))] { - $crate::bindings::static_key_count(_key) > 0 + $crate::bindings::static_key_count(_key.cast_mut()) > 0 } #[cfg(CONFIG_JUMP_LABEL)] -- 2.51.0