        unsigned int expected_interval;
        unsigned long nr_iowaiters, cpu_load;
        int resume_latency = dev_pm_qos_raw_read_value(device);
+       ktime_t delta_next;
 
        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }
 
        /* determine the expected residency time, round up */
-       data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());
+       data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
 
        get_iowait_load(&nr_iowaiters, &cpu_load);
        data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
        /*
         * Don't stop the tick if the selected state is a polling one or if the
         * expected idle duration is shorter than the tick period length.
         */
        if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-           expected_interval < TICK_USEC)
+           expected_interval < TICK_USEC) {
+               unsigned int delta_next_us = ktime_to_us(delta_next);
+
                *stop_tick = false;
 
+               if (!tick_nohz_tick_stopped() && idx > 0 &&
+                   drv->states[idx].target_residency > delta_next_us) {
+                       /*
+                        * The tick is not going to be stopped and the target
+                        * residency of the state to be returned is not within
+                        * the time until the next timer event including the
+                        * tick, so try to correct that.
+                        */
+                       for (i = idx - 1; i >= 0; i--) {
+                               if (drv->states[i].disabled ||
+                                   dev->states_usage[i].disable)
+                                       continue;
+
+                               idx = i;
+                               if (drv->states[i].target_residency <= delta_next_us)
+                                       break;
+                       }
+               }
+       }
+
        data->last_state_idx = idx;
 
        return data->last_state_idx;
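
The loop added above implements a simple fallback policy: if the tick is going
to keep running and the chosen state's target residency does not fit in the
time until the next timer event, walk down to the deepest enabled state that
does fit, or to the shallowest enabled state if none fits. A minimal
stand-alone sketch of that policy (illustrative names, a single disable flag
standing in for the two kernel flags; not kernel code):

/* Simplified stand-in for struct cpuidle_state; names are illustrative. */
struct sketch_state {
	unsigned int target_residency_us;
	int disabled;
};

/*
 * Given candidate index 'idx' and the microseconds until the next timer
 * event, return the deepest enabled state whose target residency fits,
 * falling back to the shallowest enabled state otherwise -- the same
 * policy as the loop in the hunk above.
 */
static int fit_below_next_event(const struct sketch_state *states, int idx,
				unsigned int delta_next_us)
{
	int i;

	for (i = idx - 1; i >= 0; i--) {
		if (states[i].disabled)
			continue;

		idx = i;		/* shallowest enabled state so far */
		if (states[i].target_residency_us <= delta_next_us)
			break;		/* deepest state that still fits */
	}

	return idx;
}

If every shallower state is disabled or still too deep, idx ends up at the
shallowest enabled state, which is the least-bad choice when nothing fits.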
 
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern bool tick_nohz_idle_got_tick(void);
-extern ktime_t tick_nohz_get_sleep_length(void);
+extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
 extern unsigned long tick_nohz_get_idle_calls(void);
 extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 static inline void tick_nohz_idle_exit(void) { }
 static inline bool tick_nohz_idle_got_tick(void) { return false; }
 
-static inline ktime_t tick_nohz_get_sleep_length(void)
+static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
 {
-       return NSEC_PER_SEC / HZ;
+       *delta_next = TICK_NSEC;
+       return *delta_next;
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
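
Note the contract of the new out-parameter: it is written on every path,
including the !CONFIG_NO_HZ_COMMON stub above, so callers may read it
unconditionally. An illustrative caller, assuming only the declarations
above (sketch, not kernel code):

static void caller_sketch(void)
{
	ktime_t delta_next;
	ktime_t sleep_len = tick_nohz_get_sleep_length(&delta_next);

	/*
	 * sleep_len:  expected idle duration if the tick is stopped
	 * delta_next: time to the next event if the tick keeps running;
	 *             equal to sleep_len when the tick cannot be stopped
	 */
}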
 
 
 /**
  * tick_nohz_get_sleep_length - return the expected length of the current sleep
+ * @delta_next: duration until the next event if the tick cannot be stopped
  *
  * Called from power state control code with interrupts disabled
  */
-ktime_t tick_nohz_get_sleep_length(void)
+ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
 {
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
        int cpu = smp_processor_id();
        /*
         * The idle entry time is expected to be a sufficient approximation of
         * the current time at this point.
         */
        ktime_t now = ts->idle_entrytime;
        ktime_t next_event;

        WARN_ON_ONCE(!ts->inidle);
 
+       *delta_next = ktime_sub(dev->next_event, now);
+
        if (!can_stop_idle_tick(cpu, ts))
-               goto out_dev;
+               return *delta_next;
 
        next_event = tick_nohz_next_event(ts, cpu);
        if (!next_event)
-               goto out_dev;
+               return *delta_next;
 
        /*
         * If the next highres timer to expire is earlier than next_event, the
         * idle governor needs to know that.
         */
        next_event = min_t(u64, next_event,
                           hrtimer_next_event_without(&ts->sched_timer));
 
        return ktime_sub(next_event, now);
-
-out_dev:
-       return ktime_sub(dev->next_event, now);
 }
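
The two early returns replace the old out_dev label with the precomputed
*delta_next, which is the same value the label used to compute. On the main
path, next_event is clamped by the next highres timer that would still expire
with the tick stopped. A reduced sketch of that clamping, using plain u64
arithmetic in place of the ktime helpers (illustrative, not kernel code):

typedef unsigned long long u64;	/* assumed to match the kernel's u64 width */

static u64 sleep_length_sketch(u64 now, u64 next_event, u64 next_hrtimer)
{
	if (next_hrtimer < next_event)	/* the min_t(u64, ...) above */
		next_event = next_hrtimer;

	return next_event - now;	/* ktime_sub(next_event, now) */
}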
 
 /**