resched_curr(rq);
 }
 
-static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
+static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 {
        struct cfs_rq *cfs_rq;
-       u64 runtime;
-       u64 starting_runtime = remaining;
+       u64 runtime, remaining = 1;
 
        rcu_read_lock();
        list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
                                throttled_list) {
                struct rq *rq = rq_of(cfs_rq);
                struct rq_flags rf;

                rq_lock_irqsave(rq, &rf);
                if (!cfs_rq_throttled(cfs_rq))
                        goto next;

                /* By the above check, this should never be true */
                SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
 
+               raw_spin_lock(&cfs_b->lock);
                runtime = -cfs_rq->runtime_remaining + 1;
-               if (runtime > remaining)
-                       runtime = remaining;
-               remaining -= runtime;
+               if (runtime > cfs_b->runtime)
+                       runtime = cfs_b->runtime;
+               cfs_b->runtime -= runtime;
+               remaining = cfs_b->runtime;
+               raw_spin_unlock(&cfs_b->lock);
 
                cfs_rq->runtime_remaining += runtime;
 
                /* we check whether we're throttled above */
                if (cfs_rq->runtime_remaining > 0)
                        unthrottle_cfs_rq(cfs_rq);

 next:
                rq_unlock_irqrestore(rq, &rf);

                if (!remaining)
                        break;
        }
        rcu_read_unlock();
-
-       return starting_runtime - remaining;
 }
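
Why the per-group locking matters: the old scheme snapshotted cfs_b->runtime,
dropped cfs_b->lock for the whole distribution pass, and only subtracted the
distributed amount afterwards (the removed lsub_positive() calls below). A
runqueue calling into the global pool during that window could draw against
the same snapshot, so more runtime could be promised than the pool held.
Debiting cfs_b->runtime under cfs_b->lock for each grant closes that window.
The userspace sketch below (plain pthreads, illustrative names only, not
kernel code) models the invariant the new scheme preserves: grants plus the
remaining pool always equal what the pool started with, no matter how the
two pullers interleave.

/* grant_sketch.c — build: gcc -O2 -pthread grant_sketch.c -o grant_sketch */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t pool = 1000;	/* stands in for cfs_b->runtime */
static int64_t granted;		/* sum of all grants handed out */

/* Debit the pool under the lock, as distribute_cfs_runtime() now does. */
static int64_t grant(int64_t want)
{
	int64_t got;

	pthread_mutex_lock(&pool_lock);
	got = want < pool ? want : pool;
	pool -= got;
	pthread_mutex_unlock(&pool_lock);
	return got;
}

/* Both the "distributor" and a concurrent "assigner" pull from the pool. */
static void *puller(void *arg)
{
	for (int i = 0; i < 5000; i++)
		__atomic_add_fetch(&granted, grant(1), __ATOMIC_RELAXED);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, puller, NULL);
	pthread_create(&b, NULL, puller, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* sum is always 1000: grants never exceed what the pool held. */
	printf("granted=%lld pool=%lld sum=%lld\n", (long long)granted,
	       (long long)pool, (long long)(granted + pool));
	return 0;
}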
 
 /*
  * Responsible for refilling a task_group's bandwidth and unthrottling its
  * cfs_rqs as appropriate. If there has been no activity within the last
  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
  * used to track this state.
  */
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
 {
-       u64 runtime;
        int throttled;
 
        /* no need to continue the timer with no bandwidth constraint */
        if (cfs_b->quota == RUNTIME_INF)
                goto out_deactivate;

        throttled = !list_empty(&cfs_b->throttled_cfs_rq);
        /*
         * idle depends on !throttled (for the case of a large deficit), and
         * if we're going inside the period then we don't need to do anything
         */
        if (cfs_b->idle && !throttled)
                goto out_deactivate;

        __refill_cfs_bandwidth_runtime(cfs_b);

        if (!throttled) {
                /* mark as potentially idle for the upcoming period */
                cfs_b->idle = 1;
                return 0;
        }

        /* account preceding periods in which throttling occurred */
        cfs_b->nr_throttled += overrun;
 
        /*
-        * This check is repeated as we are holding onto the new bandwidth while
-        * we unthrottle. This can potentially race with an unthrottled group
-        * trying to acquire new bandwidth from the global pool. This can result
-        * in us over-using our runtime if it is all used during this loop, but
-        * only by limited amounts in that extreme case.
+        * This check is repeated as we release cfs_b->lock while we unthrottle.
         */
        while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
-               runtime = cfs_b->runtime;
                cfs_b->distribute_running = 1;
                raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
                /* we can't nest cfs_b->lock while distributing bandwidth */
-               runtime = distribute_cfs_runtime(cfs_b, runtime);
+               distribute_cfs_runtime(cfs_b);
                raw_spin_lock_irqsave(&cfs_b->lock, flags);
 
                cfs_b->distribute_running = 0;
                throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-
-               lsub_positive(&cfs_b->runtime, runtime);
        }
 
        /*
         * While we are ensured activity in the period following an
         * unthrottle, this also covers the case in which the new bandwidth is
         * insufficient to cover the existing bandwidth deficit. (Forcing the
         * timer to remain active while there are any throttled entities.)
         */
        cfs_b->idle = 0;

        return 0;

 out_deactivate:
        return 1;
 }

@@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
        if (!runtime)
                return;
 
-       runtime = distribute_cfs_runtime(cfs_b, runtime);
+       distribute_cfs_runtime(cfs_b);
 
        raw_spin_lock_irqsave(&cfs_b->lock, flags);
-       lsub_positive(&cfs_b->runtime, runtime);
        cfs_b->distribute_running = 0;
        raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 }
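
Both call sites keep the same shape after this change: set distribute_running
under cfs_b->lock, drop the lock (distribution now takes it per throttled
group, so the caller must not hold it across the call), then retake it only
to clear the flag. What disappears is the caller-side lsub_positive()
bookkeeping: cfs_b->runtime is debited inside distribution and is only ever
touched under cfs_b->lock. A condensed sketch of that handoff, with stand-in
types and names (struct bw, timer_body — not the kernel's):

/* handoff_sketch.c — build: gcc -O2 -pthread handoff_sketch.c -o handoff */
#include <pthread.h>
#include <stdio.h>

struct bw {
	pthread_mutex_t lock;	/* stands in for cfs_b->lock          */
	int distribute_running;	/* serializes the two timer paths     */
};

static void timer_body(struct bw *b)
{
	pthread_mutex_lock(&b->lock);
	if (b->distribute_running) {	/* another path is already at it */
		pthread_mutex_unlock(&b->lock);
		return;
	}
	b->distribute_running = 1;
	pthread_mutex_unlock(&b->lock);

	/* distribute_cfs_runtime() runs here, taking b->lock per group,
	 * which is why the caller cannot hold the lock across the call. */

	pthread_mutex_lock(&b->lock);
	b->distribute_running = 0;
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct bw b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	timer_body(&b);
	puts("handoff done");
	return 0;
}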