fgraph: Initialize tracing_graph_pause at task creation
author     Steven Rostedt (VMware) <rostedt@goodmis.org>
           Fri, 29 Jan 2021 15:13:53 +0000 (10:13 -0500)
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>
           Fri, 29 Jan 2021 20:07:32 +0000 (15:07 -0500)
On some archs, the idle task can call into cpu_suspend(). The cpu_suspend()
code will disable or pause function graph tracing, as there are some paths
in bringing down the CPU that can have issues with their return addresses
being modified. The task_struct structure has a "tracing_graph_pause" atomic
counter; while that counter is non-zero, the function graph tracer will not
modify the task's return addresses.
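
For reference, the pause/unpause helpers are nothing more than atomic
increments and decrements on the current task, and the function graph
entry path bails out while the counter is non-zero. A sketch of the
helpers as they exist in include/linux/ftrace.h, with the guard from
kernel/trace/fgraph.c shown as a fragment:

  /* include/linux/ftrace.h */
  static inline void pause_graph_tracing(void)
  {
          atomic_inc(&current->tracing_graph_pause);
  }

  static inline void unpause_graph_tracing(void)
  {
          atomic_dec(&current->tracing_graph_pause);
  }

  /* kernel/trace/fgraph.c, on function entry: a paused task is skipped */
  if (unlikely(atomic_read(&current->tracing_graph_pause)))
          return -EBUSY;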

The problem is that the tracing_graph_pause counter is only initialized when
the function graph tracer is enabled. This can corrupt the counter for the
idle task if it is suspended on one of these architectures at the time:

   CPU 1                                 CPU 2
   -----                                 -----
  do_idle()
    cpu_suspend()
      pause_graph_tracing()
          task_struct->tracing_graph_pause++ (0 -> 1)

                                        start_graph_tracing()
                                          for_each_online_cpu(cpu) {
                                            ftrace_graph_init_idle_task(cpu)
                                              task_struct->tracing_graph_pause = 0 (1 -> 0)

      unpause_graph_tracing()
          task_struct->tracing_graph_pause-- (0 -> -1)

The counter above should have gone from 1 back to 0, enabling function graph
tracing again. Instead, it is left at -1, which keeps tracing disabled.
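
The broken arithmetic is easy to model outside the kernel. A hypothetical
user-space C11 sketch (not kernel code; the interleaving above is
serialized on purpose so the outcome is deterministic):

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int tracing_graph_pause;  /* models the task_struct field */

  int main(void)
  {
          atomic_fetch_add(&tracing_graph_pause, 1); /* CPU 1: pause   (0 -> 1)  */
          atomic_store(&tracing_graph_pause, 0);     /* CPU 2: re-init (1 -> 0)  */
          atomic_fetch_sub(&tracing_graph_pause, 1); /* CPU 1: unpause (0 -> -1) */

          /* Prints -1: the counter never returns to 0, so tracing stays off. */
          printf("%d\n", atomic_load(&tracing_graph_pause));
          return 0;
  }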

There's no reason that the tracing_graph_pause field of the task_struct
cannot be initialized at boot up.
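
A zero planted in init_task reaches every other task because the fork path
copies the parent's task_struct wholesale before fixing up the child. A
sketch of the default (weak) copy helper in kernel/fork.c:

  int __weak arch_dup_task_struct(struct task_struct *dst,
                                  struct task_struct *src)
  {
          *dst = *src;    /* tracing_graph_pause inherited with the rest */
          return 0;
  }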

Cc: stable@vger.kernel.org
Fixes: 380c4b1411ccd ("tracing/function-graph-tracer: append the tracing_graph_flag")
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=211339
Reported-by: pierre.gondois@arm.com
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
init/init_task.c
kernel/trace/fgraph.c

diff --git a/init/init_task.c b/init/init_task.c
index 8a992d73e6fb7027e0b7b642b3935d81cb518fec..3711cdaafed2fb47ea655f98dd860c588d10fe1e 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -198,7 +198,8 @@ struct task_struct init_task
        .lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       .ret_stack      = NULL,
+       .ret_stack              = NULL,
+       .tracing_graph_pause    = ATOMIC_INIT(0),
 #endif
 #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
        .trace_recursion = 0,
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 73edb9e4f3548e0331ea0d38c351750a05a418d9..29a6ebeebc9e1eb75f803046c92cb7fef97c418d 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -394,7 +394,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                }
 
                if (t->ret_stack == NULL) {
-                       atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
                        t->curr_ret_stack = -1;
                        t->curr_ret_depth = -1;
@@ -489,7 +488,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
 static void
 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 {
-       atomic_set(&t->tracing_graph_pause, 0);
        atomic_set(&t->trace_overrun, 0);
        t->ftrace_timestamp = 0;
        /* make curr_ret_stack visible before we add the ret_stack */