/*
  * RCU callback function for rcu_barrier().  If we are last, wake
  * up the task executing rcu_barrier().
+ *
+ * Note that the value of rcu_state.barrier_sequence must be captured
+ * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
+ * other CPUs might count rcu_state.barrier_cpu_count down to zero,
+ * allowing rcu_barrier() to return and a later instance to update
+ * ->barrier_sequence before this CPU invokes rcu_barrier_trace(),
+ * which would cause this CPU to trace a bogus sequence number from
+ * that next instance of rcu_barrier().
  */
 static void rcu_barrier_callback(struct rcu_head *rhp)
 {
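+	/* __maybe_unused in case rcu_barrier_trace() compiles to nothing. */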
+       unsigned long __maybe_unused s = rcu_state.barrier_sequence;
+
        if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
-               rcu_barrier_trace(TPS("LastCB"), -1,
-                                 rcu_state.barrier_sequence);
+               rcu_barrier_trace(TPS("LastCB"), -1, s);
                complete(&rcu_state.barrier_completion);
        } else {
-               rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
+               rcu_barrier_trace(TPS("CB"), -1, s);
        }
 }
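
For readers following the ordering argument in the header comment, here is
a minimal userspace analog (a sketch only, not kernel code): each worker
snapshots a shared sequence number before atomically decrementing the
outstanding-callback count, because once that count reaches zero the
waiter may return and the next barrier instance may bump the sequence.
All names below (demo_state, demo_callback, NCB) are hypothetical, and
C11 atomics plus pthreads stand in for the kernel primitives.

/*
 * Userspace sketch of the "snapshot before decrement" rule.  The names
 * and structure are hypothetical; this is not kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static struct {
	atomic_ulong barrier_sequence;	/* bumped once per barrier instance */
	atomic_int barrier_cpu_count;	/* callbacks still outstanding */
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
} demo_state = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.done = PTHREAD_COND_INITIALIZER,
};

/* Analog of rcu_barrier_callback(): capture the sequence, then decrement. */
static void *demo_callback(void *arg)
{
	/* Snapshot before the decrement: once the count hits zero, the
	 * waiter can return and the next instance can overwrite
	 * barrier_sequence. */
	unsigned long s = atomic_load(&demo_state.barrier_sequence);

	(void)arg;
	if (atomic_fetch_sub(&demo_state.barrier_cpu_count, 1) == 1) {
		printf("LastCB seq=%lu\n", s);
		pthread_mutex_lock(&demo_state.lock);
		demo_state.completed = 1;
		pthread_cond_signal(&demo_state.done);
		pthread_mutex_unlock(&demo_state.lock);
	} else {
		printf("CB seq=%lu\n", s);
	}
	return NULL;
}

int main(void)
{
	enum { NCB = 4 };	/* number of simulated per-CPU callbacks */
	pthread_t tid[NCB];

	/* Analog of rcu_barrier(): start an instance, wait for callbacks. */
	atomic_fetch_add(&demo_state.barrier_sequence, 1);
	atomic_store(&demo_state.barrier_cpu_count, NCB);
	demo_state.completed = 0;

	for (int i = 0; i < NCB; i++)
		pthread_create(&tid[i], NULL, demo_callback, NULL);

	pthread_mutex_lock(&demo_state.lock);
	while (!demo_state.completed)
		pthread_cond_wait(&demo_state.done, &demo_state.lock);
	pthread_mutex_unlock(&demo_state.lock);

	for (int i = 0; i < NCB; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Here atomic_fetch_sub() returns the prior value, so a result of 1 plays
the role of atomic_dec_and_test() returning true; moving the
atomic_load() below the decrement would reintroduce the race described
in the header comment.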