*/
 static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 {
-       if (rdtp->dynticks_nesting) {
-               trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
-               return;
-       }
        trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());
@@ ... @@ void rcu_irq_exit(void)
        oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting--;
        WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
-       rcu_idle_enter_common(rdtp, oldval);
+       if (rdtp->dynticks_nesting)
+               trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+       else
+               rcu_idle_enter_common(rdtp, oldval);
        local_irq_restore(flags);
 }
 
@@ ... @@
  */
 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 {
-       if (oldval) {
-               trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
-               return;
-       }
        smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
        atomic_inc(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
@@ ... @@ void rcu_irq_enter(void)
        oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting++;
        WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
-       rcu_idle_exit_common(rdtp, oldval);
+       if (oldval)
+               trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+       else
+               rcu_idle_exit_common(rdtp, oldval);
        local_irq_restore(flags);
 }
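
For reference, this is how rcu_irq_exit() reads once the first two hunks are applied. The prologue (the local declarations, local_irq_save(), and the per-CPU lookup) is not shown in the diff above; it is reproduced from the surrounding kernel/rcutree.c of the same era, so treat it as context rather than part of the change:

void rcu_irq_exit(void)
{
        unsigned long flags;
        long long oldval;
        struct rcu_dynticks *rdtp;

        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
        oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting--;
        WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
        if (rdtp->dynticks_nesting)
                /* Still inside a nested irq context: only trace the decrement. */
                trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
        else
                /* Nesting reached zero: the CPU really enters RCU idle. */
                rcu_idle_enter_common(rdtp, oldval);
        local_irq_restore(flags);
}

rcu_irq_enter() mirrors this with the increment, the "++=" trace event, and rcu_idle_exit_common(). The net effect of the patch is that both *_common() helpers are now invoked only at the idle boundary, so they no longer need to re-check the nesting level themselves.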