rcu: Add ->dynticks field to rcu_dyntick trace event
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Wed, 4 Oct 2017 23:24:29 +0000 (16:24 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Tue, 28 Nov 2017 23:51:19 +0000 (15:51 -0800)
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
include/trace/events/rcu.h
kernel/rcu/tree.c

diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index b0a48231ea0ef94f0d527eb724e607038a4ad0ac..d103de9f8c10d386d5e9bd990c1df8af1b9c5b6c 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -436,24 +436,27 @@ TRACE_EVENT(rcu_fqs,
  */
 TRACE_EVENT(rcu_dyntick,
 
-       TP_PROTO(const char *polarity, long oldnesting, long newnesting),
+       TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
 
-       TP_ARGS(polarity, oldnesting, newnesting),
+       TP_ARGS(polarity, oldnesting, newnesting, dynticks),
 
        TP_STRUCT__entry(
                __field(const char *, polarity)
                __field(long, oldnesting)
                __field(long, newnesting)
+               __field(int, dynticks)
        ),
 
        TP_fast_assign(
                __entry->polarity = polarity;
                __entry->oldnesting = oldnesting;
                __entry->newnesting = newnesting;
+               __entry->dynticks = atomic_read(&dynticks);
        ),
 
-       TP_printk("%s %lx %lx", __entry->polarity,
-                 __entry->oldnesting, __entry->newnesting)
+       TP_printk("%s %lx %lx %#3x", __entry->polarity,
+                 __entry->oldnesting, __entry->newnesting,
+                 __entry->dynticks & 0xfff)
 );
 
 /*
@@ -801,7 +804,7 @@ TRACE_EVENT(rcu_barrier,
                                         grplo, grphi, gp_tasks) do { } \
        while (0)
 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
 #define trace_rcu_prep_idle(reason) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
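With this change the TP_printk() format string gains a fourth field: the ->dynticks snapshot, masked to its low 12 bits and printed with %#3x. Below is a minimal user-space sketch of how one rendered trace line would look; it is plain C, not kernel code, and the event values and the "rcu_dyntick:" prefix are made up for illustration -- only the format string mirrors the TP_printk() above.

/*
 * User-space illustration of the new trace line format.  The values are
 * invented; only the format string mirrors the TP_printk() change above.
 */
#include <stdio.h>

int main(void)
{
	const char *polarity = "Start";		/* e.g. TPS("Start") */
	long oldnesting = 0x1;
	long newnesting = 0x0;
	int dynticks = 0x482;			/* pretend ->dynticks snapshot */

	/* Mirrors: TP_printk("%s %lx %lx %#3x", ..., dynticks & 0xfff) */
	printf("rcu_dyntick: %s %lx %lx %#3x\n", polarity,
	       (unsigned long)oldnesting, (unsigned long)newnesting,
	       dynticks & 0xfff);
	return 0;
}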
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 92de3bacda07e5b385780e97e770d2023750fd63..5febb76809f665a00a63da9bf27ae6fa7af91e6a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -761,13 +761,13 @@ static void rcu_eqs_enter_common(bool user)
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
        lockdep_assert_irqs_disabled();
-       trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
+       trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
        if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
            !user && !is_idle_task(current)) {
                struct task_struct *idle __maybe_unused =
                        idle_task(smp_processor_id());
 
-               trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
+               trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
                rcu_ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
@@ -880,15 +880,14 @@ void rcu_nmi_exit(void)
         * leave it in non-RCU-idle state.
         */
        if (rdtp->dynticks_nmi_nesting != 1) {
-               trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting,
-                                 rdtp->dynticks_nmi_nesting - 2);
+               trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
                WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
                           rdtp->dynticks_nmi_nesting - 2);
                return;
        }
 
        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
-       trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0);
+       trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
        WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
        rcu_dynticks_eqs_enter();
 }
@@ -953,14 +952,13 @@ static void rcu_eqs_exit_common(long newval, int user)
        rcu_dynticks_task_exit();
        rcu_dynticks_eqs_exit();
        rcu_cleanup_after_idle();
-       trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval);
+       trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval, rdtp->dynticks);
        if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
            !user && !is_idle_task(current)) {
                struct task_struct *idle __maybe_unused =
                        idle_task(smp_processor_id());
 
-               trace_rcu_dyntick(TPS("Error on exit: not idle task"),
-                                 rdtp->dynticks_nesting, newval);
+               trace_rcu_dyntick(TPS("Error on exit: not idle task"), rdtp->dynticks_nesting, newval, rdtp->dynticks);
                rcu_ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
@@ -1062,7 +1060,7 @@ void rcu_nmi_enter(void)
        }
        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                          rdtp->dynticks_nmi_nesting,
-                         rdtp->dynticks_nmi_nesting + incby);
+                         rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
        WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
                   rdtp->dynticks_nmi_nesting + incby);
        barrier();
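
Each call site above now passes rdtp->dynticks (an atomic_t) by value, and TP_fast_assign() records atomic_read(&dynticks), so the event carries a point-in-time snapshot of the counter. The following stand-alone sketch shows that pass-by-value snapshot pattern, using simplified stand-ins for the kernel's atomic_t and atomic_read() rather than the real definitions.

/*
 * Simplified stand-ins: the kernel's atomic_t likewise wraps an int,
 * and atomic_read() is a READ_ONCE() of that field.
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

static int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* Analogue of the TP_fast_assign() step: snapshot the copied counter. */
static int snapshot_dynticks(atomic_t dynticks)
{
	return atomic_read(&dynticks);
}

int main(void)
{
	atomic_t dynticks = { .counter = 0x482 };	/* invented value */

	printf("dynticks snapshot: %#3x\n",
	       snapshot_dynticks(dynticks) & 0xfff);
	return 0;
}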