        if (runtime_refresh_within(cfs_b, min_left))
                return;
 
+       /* don't push forwards an existing deferred unthrottle */
+       if (cfs_b->slack_started)
+               return;
+       cfs_b->slack_started = true;
+
        hrtimer_start(&cfs_b->slack_timer,
                        ns_to_ktime(cfs_bandwidth_slack_period),
                        HRTIMER_MODE_REL);
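
The guard added above matters because hrtimer_start() on a timer that is
already queued simply re-programs it, and with HRTIMER_MODE_REL the new
expiry is measured from the current time; every cfs_rq that returns runtime
before the slack timer fires would therefore keep pushing the deferred
unthrottle further into the future. The stand-alone sketch below (toy names
and a plain integer clock instead of hrtimers; an illustration, not kernel
code) models that effect and how a one-shot slack_started flag caps the
deferral at a single slack period:

/*
 * Illustrative sketch only: toy_bandwidth, toy_start_slack and
 * SLACK_PERIOD are invented stand-ins for the kernel objects.
 */
#include <stdbool.h>
#include <stdio.h>

#define SLACK_PERIOD 5  /* stands in for cfs_bandwidth_slack_period */

struct toy_bandwidth {
        bool slack_started;
        long long slack_expires;        /* absolute "time" the timer would fire */
};

/* re-arming a relative timer always moves its expiry to now + period */
static void toy_timer_start_rel(struct toy_bandwidth *b, long long now)
{
        b->slack_expires = now + SLACK_PERIOD;
}

static void toy_start_slack(struct toy_bandwidth *b, long long now, bool guarded)
{
        /* don't push forwards an existing deferred unthrottle */
        if (guarded) {
                if (b->slack_started)
                        return;
                b->slack_started = true;
        }
        toy_timer_start_rel(b, now);
}

int main(void)
{
        struct toy_bandwidth unguarded = { 0 }, guarded = { 0 };

        /* runtime keeps being returned at t = 0..9, before the timer fires */
        for (long long now = 0; now < 10; now++) {
                toy_start_slack(&unguarded, now, false);
                toy_start_slack(&guarded, now, true);
        }

        /* unguarded expiry was pushed to 9 + 5 = 14; guarded stays at 0 + 5 = 5 */
        printf("unguarded expires at %lld, guarded expires at %lld\n",
               unguarded.slack_expires, guarded.slack_expires);
        return 0;
}

With the guard, the first return of runtime fixes the expiry one slack
period out; later returns leave it alone until the timer actually fires.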
 
@@ ... @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
        /* confirm we're still not at a refresh boundary */
        raw_spin_lock_irqsave(&cfs_b->lock, flags);
+       cfs_b->slack_started = false;
        if (cfs_b->distribute_running) {
                raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
                return;
@@ ... @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        cfs_b->slack_timer.function = sched_cfs_slack_timer;
        cfs_b->distribute_running = 0;
+       cfs_b->slack_started = false;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
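
do_sched_cfs_slack_timer() clears slack_started under cfs_b->lock as soon as
it runs, before the distribute_running bail-out, and init_cfs_bandwidth()
starts the flag out as false, so runtime returned once the timer has fired
can arm a fresh timer instead of being skipped by the guard forever. A
minimal sequential sketch of that lifecycle (same toy model as above,
repeated so it builds on its own; the toy_* names are invented for
illustration):

#include <stdbool.h>
#include <stdio.h>

#define SLACK_PERIOD 5

struct toy_bandwidth {
        bool slack_started;
        long long slack_expires;
};

static void toy_start_slack(struct toy_bandwidth *b, long long now)
{
        /* same guard as start_cfs_slack_bandwidth() in the patch */
        if (b->slack_started)
                return;
        b->slack_started = true;
        b->slack_expires = now + SLACK_PERIOD;
}

/* mirrors do_sched_cfs_slack_timer() clearing the flag on entry */
static void toy_slack_timer_fired(struct toy_bandwidth *b)
{
        b->slack_started = false;
        /* ... the real handler distributes the collected runtime here ... */
}

int main(void)
{
        struct toy_bandwidth b = { 0 };

        toy_start_slack(&b, 0);         /* armed for t = 5 */
        toy_start_slack(&b, 3);         /* guard: expiry stays at 5 */
        toy_slack_timer_fired(&b);      /* timer fires, flag cleared */
        toy_start_slack(&b, 6);         /* new runtime: re-arms for t = 11 */
        printf("re-armed for t = %lld\n", b.slack_expires);
        return 0;
}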
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
@@ ... @@ struct cfs_bandwidth {
        u64                     runtime_expires;
        int                     expires_seq;
 
-       short                   idle;
-       short                   period_active;
+       u8                      idle;
+       u8                      period_active;
+       u8                      distribute_running;
+       u8                      slack_started;
        struct hrtimer          period_timer;
        struct hrtimer          slack_timer;
        struct list_head        throttled_cfs_rq;
        int                     nr_periods;
        int                     nr_throttled;
        u64                     throttled_time;
-
-       bool                    distribute_running;
 #endif
 };
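
On the sched.h side, the two existing short flags shrink to u8 and
distribute_running moves up from the tail of struct cfs_bandwidth to sit
next to them, so the new slack_started flag fits in the same four bytes the
two shorts occupied. The stand-alone comparison below (uint8_t standing in
for the kernel's u8; the struct names are invented for the example) shows
the two layouts coming out the same size on a typical ABI:

#include <stdint.h>
#include <stdio.h>

/* rough stand-in for the old flag fields: two 2-byte shorts */
struct old_flags {
        short   idle;
        short   period_active;
};

/* rough stand-in for the new flag fields: four 1-byte flags */
struct new_flags {
        uint8_t idle;
        uint8_t period_active;
        uint8_t distribute_running;
        uint8_t slack_started;
};

int main(void)
{
        /* both typically print 4 */
        printf("old: %zu bytes, new: %zu bytes\n",
               sizeof(struct old_flags), sizeof(struct new_flags));
        return 0;
}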