--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
        if (broadcast && tick_broadcast_enter())
                return -EBUSY;
 
+       /* Take note of the planned idle state. */
+       sched_idle_set_state(target_state);
+
        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ktime_get();
 
        entered_state = target_state->enter(dev, drv, index);
 
        time_end = ktime_get();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+       /* The cpu is no longer idle or about to enter idle. */
+       sched_idle_set_state(NULL);
+
        if (broadcast) {
                if (WARN_ON_ONCE(!irqs_disabled()))
                        local_irq_disable();
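
For context: sched_idle_set_state() ends up storing the pointer in the CPU's
run queue via the idle_set_state() helper. A rough sketch of those rq-side
accessors as they appear in kernel/sched/sched.h (the exact shape varies by
kernel version):

        static inline void idle_set_state(struct rq *rq,
                                          struct cpuidle_state *idle_state)
        {
                /* Publish the state this CPU is in (or about to enter). */
                rq->idle_state = idle_state;
        }

        static inline struct cpuidle_state *idle_get_state(struct rq *rq)
        {
                /* Callers must be in an RCU read-side critical section. */
                WARN_ON(!rcu_read_lock_held());
                return rq->idle_state;
        }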
 
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
        struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
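
The extern declaration above is what lets the cpuidle core reach into the
scheduler. The payoff is in the other direction: once the state is published,
scheduler code can inspect it under rcu_read_lock(). A purely illustrative
consumer sketch (this function is hypothetical and not part of the patch; it
leans on the real idle_get_state(), cpu_rq() and struct cpuidle_state's
exit_latency field):

        /* Hypothetical: pick the CPU sitting in the shallowest idle state. */
        static int pick_shallowest_idle_cpu(const struct cpumask *mask)
        {
                unsigned int min_exit_latency = UINT_MAX;
                int cpu, best = -1;

                rcu_read_lock();
                for_each_cpu(cpu, mask) {
                        struct cpuidle_state *state =
                                idle_get_state(cpu_rq(cpu));

                        /* NULL: the CPU is not idle, or cpuidle is unused. */
                        if (state && state->exit_latency < min_exit_latency) {
                                min_exit_latency = state->exit_latency;
                                best = cpu;
                        }
                }
                rcu_read_unlock();

                return best;
        }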
 
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+       idle_set_state(this_rq(), idle_state);
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
                return -EBUSY;
        }
 
-       /* Take note of the planned idle state. */
-       idle_set_state(this_rq(), &drv->states[next_state]);
-
        /*
         * Enter the idle state previously returned by the governor decision.
         * This function will block until an interrupt occurs and will take
         * care of re-enabling the local interrupts
         */
        entered_state = cpuidle_enter(drv, dev, next_state);
 
-       /* The cpu is no longer idle or about to enter idle. */
-       idle_set_state(this_rq(), NULL);
-
        if (entered_state == -EBUSY)
                default_idle_call();
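
Taken together: the bookkeeping moves out of the scheduler's idle entry path
in kernel/sched/idle.c and into the cpuidle core, so the recorded state is the
one actually entered, for every caller of cpuidle_enter_state(). Condensed,
the resulting flow looks roughly like this (timekeeping, tracing and error
handling omitted):

        int cpuidle_enter_state(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
        {
                struct cpuidle_state *target_state = &drv->states[index];
                int entered_state;

                /* Publish the planned state before idling the hardware. */
                sched_idle_set_state(target_state);

                entered_state = target_state->enter(dev, drv, index);

                /* Clear on exit so the scheduler never sees a stale state. */
                sched_idle_set_state(NULL);

                return entered_state;
        }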