trace_hrtimer_cancel(timer);
 }
 
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
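+/*
+ * Scan the active clock bases and return the earliest expiry time,
+ * with each base's offset subtracted so the expiries are compared
+ * on a common time base.
+ */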
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+       struct hrtimer_clock_base *base = cpu_base->clock_base;
+       ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+       int i;
+
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+               struct timerqueue_node *next;
+               struct hrtimer *timer;
+
+               next = timerqueue_getnext(&base->active);
+               if (!next)
+                       continue;
+
+               timer = container_of(next, struct hrtimer, node);
+               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+               if (expires.tv64 < expires_next.tv64)
+                       expires_next = expires;
+       }
+       /*
+        * clock_was_set() might have changed base->offset of any of
+        * the clock bases so the result might be negative. Fix it up
+        * to prevent a false positive in clockevents_program_event().
+        */
+       if (expires_next.tv64 < 0)
+               expires_next.tv64 = 0;
+       return expires_next;
+}
+#endif
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
 static void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
-       int i;
-       struct hrtimer_clock_base *base = cpu_base->clock_base;
-       ktime_t expires, expires_next;
-
-       expires_next.tv64 = KTIME_MAX;
-
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-               struct hrtimer *timer;
-               struct timerqueue_node *next;
-
-               next = timerqueue_getnext(&base->active);
-               if (!next)
-                       continue;
-               timer = container_of(next, struct hrtimer, node);
-
-               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-               /*
-                * clock_was_set() has changed base->offset so the
-                * result might be negative. Fix it up to prevent a
-                * false positive in clockevents_program_event()
-                */
-               if (expires.tv64 < 0)
-                       expires.tv64 = 0;
-               if (expires.tv64 < expires_next.tv64)
-                       expires_next = expires;
-       }
+       ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
 
        if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
                return;
        if (expires.tv64 >= cpu_base->expires_next.tv64)
                return 0;
 
+       /*
+        * When the target cpu of the timer is currently executing
+        * hrtimer_interrupt(), then we do not touch the clock event
+        * device. hrtimer_interrupt() will reevaluate all clock bases
+        * before reprogramming the device.
+        */
+       if (cpu_base->in_hrtirq)
+               return 0;
+
        /*
         * If a hang was detected in the last timer interrupt then we
         * do not schedule a timer which is earlier than the expiry
 ktime_t hrtimer_get_next_event(void)
 {
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       struct hrtimer_clock_base *base = cpu_base->clock_base;
-       ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+       ktime_t mindelta = { .tv64 = KTIME_MAX };
        unsigned long flags;
-       int i;
 
        raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-       if (!hrtimer_hres_active()) {
-               for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-                       struct hrtimer *timer;
-                       struct timerqueue_node *next;
-
-                       next = timerqueue_getnext(&base->active);
-                       if (!next)
-                               continue;
-
-                       timer = container_of(next, struct hrtimer, node);
-                       delta.tv64 = hrtimer_get_expires_tv64(timer);
-                       delta = ktime_sub(delta, base->get_time());
-                       if (delta.tv64 < mindelta.tv64)
-                               mindelta.tv64 = delta.tv64;
-               }
-       }
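+       /*
+        * __hrtimer_get_next_event() returns an absolute expiry on the
+        * monotonic time base, so subtracting the current time yields
+        * the relative delta reported here.
+        */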
+       if (!hrtimer_hres_active())
+               mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
+                                    ktime_get());
 
        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
        raw_spin_lock(&cpu_base->lock);
        entry_time = now = hrtimer_update_base(cpu_base);
 retry:
-       expires_next.tv64 = KTIME_MAX;
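+       /*
+        * Mark this cpu_base as executing hrtimer_interrupt() so that
+        * hrtimer_reprogram() leaves the clock event device alone; the
+        * clock bases are reevaluated before reprogramming below.
+        */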
+       cpu_base->in_hrtirq = 1;
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
                         * are right-of a not yet expired timer, because that
                         * timer will have to trigger a wakeup anyway.
                         */
-
-                       if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
-                               ktime_t expires;
-
-                               expires = ktime_sub(hrtimer_get_expires(timer),
-                                                   base->offset);
-                               if (expires.tv64 < 0)
-                                       expires.tv64 = KTIME_MAX;
-                               if (expires.tv64 < expires_next.tv64)
-                                       expires_next = expires;
+                       if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
                                break;
-                       }
 
                        __run_hrtimer(timer, &basenow);
                }
        }
-
+       /* Reevaluate the clock bases for the next expiry */
+       expires_next = __hrtimer_get_next_event(cpu_base);
        /*
         * Store the new expiry value so the migration code can verify
         * against it.
         */
        cpu_base->expires_next = expires_next;
+       cpu_base->in_hrtirq = 0;
        raw_spin_unlock(&cpu_base->lock);
 
        /* Reprogramming necessary ? */