fgraph: Get ftrace recursion lock in function_graph_enter
author     Masami Hiramatsu (Google) <mhiramat@kernel.org>
           Tue, 10 Dec 2024 02:08:57 +0000 (11:08 +0900)
committer  Steven Rostedt (Google) <rostedt@goodmis.org>
           Tue, 24 Dec 2024 02:02:48 +0000 (21:02 -0500)
Get the ftrace recursion lock in the generic function_graph_enter()
instead of in each architecture's code.

This makes all function_graph tracer callbacks run in a non-preemptive
state. On x86 and powerpc this is already the case by default, but on
the other architectures this will be new behavior.
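
For illustration only, this is roughly the shape the generic entry path
takes after the change (a simplified sketch, not the full function; the
recursion helpers are the existing ftrace_test_recursion_trylock()/
ftrace_test_recursion_unlock(), and the middle of the function is elided):

	int function_graph_enter(unsigned long ret, unsigned long func,
				 unsigned long frame_pointer, unsigned long *retp)
	{
		int bit;

		/* Take the recursion lock; bail out if we are re-entered. */
		bit = ftrace_test_recursion_trylock(func, ret);
		if (bit < 0)
			return -EBUSY;

		/* ... reserve a shadow stack frame and run the entry handlers ... */

		/* Drop the lock on both the success and the error paths. */
		ftrace_test_recursion_unlock(bit);
		return 0;
	}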

Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Naveen N Rao <naveen@kernel.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://lore.kernel.org/173379653720.973433.18438622234884980494.stgit@devnote2
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
arch/powerpc/kernel/trace/ftrace.c
arch/powerpc/kernel/trace/ftrace_64_pg.c
arch/x86/kernel/ftrace.c
kernel/trace/fgraph.c

diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 5ccd791761e8fd5f3c2c083d010e1ffce27a5287..e41daf2c4a3157e853a4c84535138ff147c9ccca 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
        unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
-       int bit;
 
        if (unlikely(ftrace_graph_is_dead()))
                goto out;
@@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
 
-       bit = ftrace_test_recursion_trylock(ip, parent_ip);
-       if (bit < 0)
-               goto out;
-
        if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
                parent_ip = ppc_function_entry(return_to_handler);
 
-       ftrace_test_recursion_unlock(bit);
 out:
        arch_ftrace_regs(fregs)->regs.link = parent_ip;
 }
diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c
index 98787376eb87c3e6a397b5375e76e48c72e657de..8fb860b90ae1c4028c564aa071a6cb96771b0895 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_pg.c
+++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c
@@ -790,7 +790,6 @@ static unsigned long
 __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
 {
        unsigned long return_hooker;
-       int bit;
 
        if (unlikely(ftrace_graph_is_dead()))
                goto out;
@@ -798,16 +797,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
 
-       bit = ftrace_test_recursion_trylock(ip, parent);
-       if (bit < 0)
-               goto out;
-
        return_hooker = ppc_function_entry(return_to_handler);
 
        if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
                parent = return_hooker;
 
-       ftrace_test_recursion_unlock(bit);
 out:
        return parent;
 }
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4dd0ad6c94d6610eb6329aa2e4c8e62571bc572f..33f50c80f4812f4b817d954963b58a1c191acef3 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -615,7 +615,6 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                           unsigned long frame_pointer)
 {
        unsigned long return_hooker = (unsigned long)&return_to_handler;
-       int bit;
 
        /*
         * When resuming from suspend-to-ram, this function can be indirectly
@@ -635,14 +634,8 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
-       bit = ftrace_test_recursion_trylock(ip, *parent);
-       if (bit < 0)
-               return;
-
        if (!function_graph_enter(*parent, ip, frame_pointer, parent))
                *parent = return_hooker;
-
-       ftrace_test_recursion_unlock(bit);
 }
 
 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index ddedcb50917f4b8bd66f1b32769b5b3a727c1e71..5c68d61091192338a9f6e943ed67a4ff889244d0 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -650,8 +650,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
        struct ftrace_graph_ent trace;
        unsigned long bitmap = 0;
        int offset;
+       int bit;
        int i;
 
+       bit = ftrace_test_recursion_trylock(func, ret);
+       if (bit < 0)
+               return -EBUSY;
+
        trace.func = func;
        trace.depth = ++current->curr_ret_depth;
 
@@ -697,12 +702,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
         * flag, set that bit always.
         */
        set_bitmap(current, offset, bitmap | BIT(0));
-
+       ftrace_test_recursion_unlock(bit);
        return 0;
  out_ret:
        current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
  out:
        current->curr_ret_depth--;
+       ftrace_test_recursion_unlock(bit);
        return -EBUSY;
 }