return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
-static void update_cfs_shares(struct cfs_rq *cfs_rq);
-
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
 
        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);
-
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-       cfs_rq->load_unacc_exec_time += delta_exec;
-#endif
 }
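
The load_unacc_exec_time accumulation removed above fed the old windowed shares-update machinery. With per-entity load tracking, the equivalent history lives in each entity's sched_avg and is folded in by update_entity_load_avg(); for orientation only (not part of this diff, field layout as introduced earlier in this series, shown approximately):

struct sched_avg {
        /* runnable history as a geometric series with a ~32ms half-life */
        u32 runnable_avg_sum, runnable_avg_period;
        u64 last_runnable_update;
        s64 decay_count;                /* decay owed after sleeping */
        unsigned long load_avg_contrib; /* share charged to the cfs_rq sums */
};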
 
 static void update_curr(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-/* we need this in update_cfs_load and load-balance functions below */
-static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 # ifdef CONFIG_SMP
-static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
-                                           int global_update)
-{
-       struct task_group *tg = cfs_rq->tg;
-       long load_avg;
-
-       load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-       load_avg -= cfs_rq->load_contribution;
-
-       if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
-               atomic_add(load_avg, &tg->load_weight);
-               cfs_rq->load_contribution += load_avg;
-       }
-}
-
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
-{
-       u64 period = sysctl_sched_shares_window;
-       u64 now, delta;
-       unsigned long load = cfs_rq->load.weight;
-
-       if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
-               return;
-
-       now = rq_of(cfs_rq)->clock_task;
-       delta = now - cfs_rq->load_stamp;
-
-       /* truncate load history at 4 idle periods */
-       if (cfs_rq->load_stamp > cfs_rq->load_last &&
-           now - cfs_rq->load_last > 4 * period) {
-               cfs_rq->load_period = 0;
-               cfs_rq->load_avg = 0;
-               delta = period - 1;
-       }
-
-       cfs_rq->load_stamp = now;
-       cfs_rq->load_unacc_exec_time = 0;
-       cfs_rq->load_period += delta;
-       if (load) {
-               cfs_rq->load_last = now;
-               cfs_rq->load_avg += delta * load;
-       }
-
-       /* consider updating load contribution on each fold or truncate */
-       if (global_update || cfs_rq->load_period > period
-           || !cfs_rq->load_period)
-               update_cfs_rq_load_contribution(cfs_rq, global_update);
-
-       while (cfs_rq->load_period > period) {
-               /*
-                * Inline assembly required to prevent the compiler
-                * optimising this loop into a divmod call.
-                * See __iter_div_u64_rem() for another example of this.
-                */
-               asm("" : "+rm" (cfs_rq->load_period));
-               cfs_rq->load_period /= 2;
-               cfs_rq->load_avg /= 2;
-       }
-
-       if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
-               list_del_leaf_cfs_rq(cfs_rq);
-}
-
 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
 {
        long tg_weight;
         * to gain a more accurate current total weight. See
-        * update_cfs_rq_load_contribution().
+        * __update_cfs_rq_tg_load_contrib().
         */
-       tg_weight = atomic_read(&tg->load_weight);
-       tg_weight -= cfs_rq->load_contribution;
+       tg_weight = atomic64_read(&tg->load_avg);
+       tg_weight -= cfs_rq->tg_load_contrib;
        tg_weight += cfs_rq->load.weight;
 
        return tg_weight;
 
        return shares;
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-       if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-               update_cfs_load(cfs_rq, 0);
-               update_cfs_shares(cfs_rq);
-       }
-}
 # else /* CONFIG_SMP */
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
-{
-}
-
 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
        return tg->shares;
 }
-
-static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-}
 # endif /* CONFIG_SMP */
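
With the hunk above, calc_tg_weight() reads the tracked group total (tg->load_avg, the sum of every cfs_rq's tg_load_contrib) and substitutes this CPU's instantaneous queue weight for its own, possibly stale, contribution. calc_cfs_shares() then hands out tg->shares in proportion to this CPU's part of that total; its shape is roughly the following (shown for context only, not modified by this patch):

        tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;

        shares = tg->shares * load;
        if (tg_weight)
                shares /= tg_weight;

        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
        if (shares > tg->shares)
                shares = tg->shares;
        return shares;
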
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
                account_entity_enqueue(cfs_rq, se);
 }
 
+static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
+
 static void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
        struct task_group *tg;
        reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
-{
-}
-
 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 }
-
-static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
-       update_cfs_load(cfs_rq, 0);
        enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
        account_entity_enqueue(cfs_rq, se);
        update_cfs_shares(cfs_rq);
        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        se->on_rq = 0;
-       update_cfs_load(cfs_rq, 0);
        account_entity_dequeue(cfs_rq, se);
 
        /*
        update_entity_load_avg(curr, 1);
        update_cfs_rq_blocked_load(cfs_rq, 1);
 
-       /*
-        * Update share accounting for long-running entities.
-        */
-       update_entity_shares_tick(cfs_rq);
-
 #ifdef CONFIG_SCHED_HRTICK
        /*
         * queued ticks are scheduled to match the slice, so don't bother
        cfs_rq->throttle_count--;
 #ifdef CONFIG_SMP
        if (!cfs_rq->throttle_count) {
-               u64 delta = rq->clock_task - cfs_rq->load_stamp;
-
-               /* leaving throttled state, advance shares averaging windows */
-               cfs_rq->load_stamp += delta;
-               cfs_rq->load_last += delta;
-
                /* adjust cfs_rq_clock_task() */
                cfs_rq->throttled_clock_task_time += rq->clock_task -
                                             cfs_rq->throttled_clock_task;
-
-               /* update entity weight now that we are on_rq again */
-               update_cfs_shares(cfs_rq);
        }
 #endif
 
        struct rq *rq = data;
        struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
-       /* group is entering throttled state, record last load */
-       if (!cfs_rq->throttle_count) {
-               update_cfs_load(cfs_rq, 0);
+       /* group is entering throttled state, stop time */
+       if (!cfs_rq->throttle_count)
                cfs_rq->throttled_clock_task = rq->clock_task;
-       }
        cfs_rq->throttle_count++;
 
        return 0;
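
With update_cfs_load() gone, throttling no longer needs to slide a shares-averaging window on exit or re-weight on entry; the two hunks above only stop and restart the task clock that the per-entity averages integrate over. The accessor they feed, cfs_rq_clock_task() (added earlier in this series, reproduced approximately for context), returns a clock frozen at the throttle point and otherwise discounts total throttled time:

static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
        if (unlikely(cfs_rq->throttle_count))
                return cfs_rq->throttled_clock_task;

        return rq_of(cfs_rq)->clock_task - cfs_rq->throttled_clock_task_time;
}
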
                if (cfs_rq_throttled(cfs_rq))
                        break;
 
-               update_cfs_load(cfs_rq, 0);
                update_cfs_shares(cfs_rq);
                update_entity_load_avg(se, 1);
        }
                if (cfs_rq_throttled(cfs_rq))
                        break;
 
-               update_cfs_load(cfs_rq, 0);
                update_cfs_shares(cfs_rq);
                update_entity_load_avg(se, 1);
        }
  */
 static int update_shares_cpu(struct task_group *tg, int cpu)
 {
+       struct sched_entity *se;
        struct cfs_rq *cfs_rq;
        unsigned long flags;
        struct rq *rq;
 
-       if (!tg->se[cpu])
-               return 0;
-
        rq = cpu_rq(cpu);
+       se = tg->se[cpu];
        cfs_rq = tg->cfs_rq[cpu];
 
        raw_spin_lock_irqsave(&rq->lock, flags);
 
        update_rq_clock(rq);
-       update_cfs_load(cfs_rq, 1);
        update_cfs_rq_blocked_load(cfs_rq, 1);
 
-       /*
-        * We need to update shares after updating tg->load_weight in
-        * order to adjust the weight of groups with long running tasks.
-        */
-       update_cfs_shares(cfs_rq);
+       if (se) {
+               update_entity_load_avg(se, 1);
+               /*
+                * We pivot on our runnable average having decayed to zero for
+                * list removal.  This generally implies that all our children
+                * have also been removed (modulo rounding error or bandwidth
+                * control); however, such cases are rare and we can fix these
+                * at enqueue.
+                *
+                * TODO: fix up out-of-order children on enqueue.
+                */
+               if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
+                       list_del_leaf_cfs_rq(cfs_rq);
+       } else {
+               update_rq_runnable_avg(rq, rq->nr_running);
+       }
 
        raw_spin_unlock_irqrestore(&rq->lock, flags);
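
The rewritten update_shares_cpu() above no longer redistributes weight under the lock; it decays this group's blocked load and drops the cfs_rq from the leaf list once the group entity's runnable average has fully decayed (the root cfs_rq, which has no entity, is folded into the rq-wide runnable average instead). A minimal sketch of the per-CPU driver, assuming the existing update_shares()/for_each_leaf_cfs_rq() walk that calls into it under RCU:

static void update_shares(int cpu)
{
        struct cfs_rq *cfs_rq;
        struct rq *rq = cpu_rq(cpu);

        rcu_read_lock();
        /* bottom-up walk of this CPU's leaf cfs_rqs */
        for_each_leaf_cfs_rq(rq, cfs_rq)
                update_shares_cpu(cfs_rq->tg, cpu);
        rcu_read_unlock();
}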
 
 
        cfs_rq->tg = tg;
        cfs_rq->rq = rq;
-#ifdef CONFIG_SMP
-       /* allow initial update_cfs_load() to truncate */
-       cfs_rq->load_stamp = 1;
-#endif
        init_cfs_rq_runtime(cfs_rq);
 
        tg->cfs_rq[cpu] = cfs_rq;