void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
-       if (hrtimer_active(period_timer))
-               return;
+       /*
+        * Do not forward the expiration time of active timers;
+        * we do not want to lose an overrun.
+        */
+       if (!hrtimer_active(period_timer))
+               hrtimer_forward_now(period_timer, period);
 
-       hrtimer_forward_now(period_timer, period);
        hrtimer_start_expires(period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 
        __refill_cfs_bandwidth_runtime(cfs_b);
        /* restart the period timer (if active) to handle new period expiry */
-       if (runtime_enabled && cfs_b->timer_active) {
-               /* force a reprogram */
-               __start_cfs_bandwidth(cfs_b, true);
-       }
+       if (runtime_enabled)
+               start_cfs_bandwidth(cfs_b);
        raw_spin_unlock_irq(&cfs_b->lock);
 
        for_each_online_cpu(i) {
 
        if (cfs_b->quota == RUNTIME_INF)
                amount = min_amount;
        else {
-               /*
-                * If the bandwidth pool has become inactive, then at least one
-                * period must have elapsed since the last consumption.
-                * Refresh the global state and ensure bandwidth timer becomes
-                * active.
-                */
-               if (!cfs_b->timer_active) {
-                       __refill_cfs_bandwidth_runtime(cfs_b);
-                       __start_cfs_bandwidth(cfs_b, false);
-               }
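+               /* make sure the period timer is armed; it refills runtime each period */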
+               start_cfs_bandwidth(cfs_b);
 
                if (cfs_b->runtime > 0) {
                        amount = min(cfs_b->runtime, min_amount);
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
        long task_delta, dequeue = 1;
+       bool empty;
 
        se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
 
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
        raw_spin_lock(&cfs_b->lock);
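+       /* sample before we add ourselves to the throttled list below */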
+       empty = list_empty(&cfs_b->throttled_cfs_rq);
+
        /*
         * Add to the _head_ of the list, so that an already-started
         * distribute_cfs_runtime will not see us
         */
        list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
-       if (!cfs_b->timer_active)
-               __start_cfs_bandwidth(cfs_b, false);
+
+       /*
+        * If we're the first throttled cfs_rq, make sure the bandwidth
+        * timer is running.
+        */
+       if (empty)
+               start_cfs_bandwidth(cfs_b);
+
        raw_spin_unlock(&cfs_b->lock);
 }
 
        if (cfs_b->idle && !throttled)
                goto out_deactivate;
 
-       /*
-        * if we have relooped after returning idle once, we need to update our
-        * status as actually running, so that other cpus doing
-        * __start_cfs_bandwidth will stop trying to cancel us.
-        */
-       cfs_b->timer_active = 1;
-
        __refill_cfs_bandwidth_runtime(cfs_b);
 
        if (!throttled) {
        return 0;
 
 out_deactivate:
-       cfs_b->timer_active = 0;
        return 1;
 }
 
 {
        struct cfs_bandwidth *cfs_b =
                container_of(timer, struct cfs_bandwidth, slack_timer);
+
        do_sched_cfs_slack_timer(cfs_b);
 
        return HRTIMER_NORESTART;
 {
        struct cfs_bandwidth *cfs_b =
                container_of(timer, struct cfs_bandwidth, period_timer);
-       ktime_t now;
        int overrun;
        int idle = 0;
 
        raw_spin_lock(&cfs_b->lock);
        for (;;) {
-               now = hrtimer_cb_get_time(timer);
-               overrun = hrtimer_forward(timer, now, cfs_b->period);
-
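+               /* forward past any missed periods; overrun counts how many elapsed */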
+               overrun = hrtimer_forward_now(timer, cfs_b->period);
                if (!overrun)
                        break;
 
        INIT_LIST_HEAD(&cfs_rq->throttled_list);
 }
 
-/* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
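+/* requires cfs_b->lock */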
+void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
-       /*
-        * The timer may be active because we're trying to set a new bandwidth
-        * period or because we're racing with the tear-down path
-        * (timer_active==0 becomes visible before the hrtimer call-back
-        * terminates).  In either case we ensure that it's re-programmed
-        */
-       while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
-              hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
-               /* bounce the lock to allow do_sched_cfs_period_timer to run */
-               raw_spin_unlock(&cfs_b->lock);
-               cpu_relax();
-               raw_spin_lock(&cfs_b->lock);
-               /* if someone else restarted the timer then we're done */
-               if (!force && cfs_b->timer_active)
-                       return;
-       }
-
-       cfs_b->timer_active = 1;
        start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
 }
 
 
 {
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
-       ktime_t now;
-       int overrun;
        int idle = 0;
+       int overrun;
 
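+       /*
+        * The runtime lock serializes us against start_rt_bandwidth(),
+        * which (re)arms this timer under the same lock.
+        */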
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
-               now = hrtimer_cb_get_time(timer);
-               overrun = hrtimer_forward(timer, now, rt_b->rt_period);
-
+               overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                if (!overrun)
                        break;
 
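+               /*
+                * do_sched_rt_period_timer() may take per-CPU rq locks, so
+                * drop rt_runtime_lock across the call.
+                */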
+               raw_spin_unlock(&rt_b->rt_runtime_lock);
                idle = do_sched_rt_period_timer(rt_b, overrun);
+               raw_spin_lock(&rt_b->rt_runtime_lock);
        }
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;
 
-       if (hrtimer_active(&rt_b->rt_period_timer))
-               return;
-
        raw_spin_lock(&rt_b->rt_runtime_lock);
        start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
        raw_spin_unlock(&rt_b->rt_runtime_lock);