        trace_sched_stat_runtime(curr, delta_exec, 0);
 
-       curr->se.sum_exec_runtime += delta_exec;
-       account_group_exec_runtime(curr, delta_exec);
-
-       curr->se.exec_start = now;
-       cgroup_account_cputime(curr, delta_exec);
+       update_current_exec_runtime(curr, now, delta_exec);
 
        if (dl_entity_is_special(dl_se))
                return;
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
 
        trace_sched_stat_runtime(curr, delta_exec, 0);
 
-       curr->se.sum_exec_runtime += delta_exec;
-       account_group_exec_runtime(curr, delta_exec);
-
-       curr->se.exec_start = now;
-       cgroup_account_cputime(curr, delta_exec);
+       update_current_exec_runtime(curr, now, delta_exec);
 
        if (!rt_bandwidth_enabled())
                return;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
 extern void sched_dynamic_update(int mode);
 #endif
 
+static inline void update_current_exec_runtime(struct task_struct *curr,
+                                               u64 now, u64 delta_exec)
+{
+       curr->se.sum_exec_runtime += delta_exec;
+       account_group_exec_runtime(curr, delta_exec);
+
+       curr->se.exec_start = now;
+       cgroup_account_cputime(curr, delta_exec);
+}
+
 #endif /* _KERNEL_SCHED_SCHED_H */
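
Read on its own, the new helper just groups the four accounting updates that the three call sites above used to open-code. A commented copy follows for orientation; the comments are editorial and are not part of the patch:

static inline void update_current_exec_runtime(struct task_struct *curr,
                                                u64 now, u64 delta_exec)
{
        /* Fold the elapsed slice into the task's total on-CPU time. */
        curr->se.sum_exec_runtime += delta_exec;
        /* Charge the same slice to the thread group's cputime (POSIX CPU timers). */
        account_group_exec_runtime(curr, delta_exec);

        /* Start the next measurement window from the timestamp the caller read. */
        curr->se.exec_start = now;
        /* Charge the slice to the task's cgroup CPU accounting. */
        cgroup_account_cputime(curr, delta_exec);
}
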
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
        struct task_struct *curr = rq->curr;
-       u64 delta_exec;
+       u64 now, delta_exec;
 
-       delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+       now = rq_clock_task(rq);
+       delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;
 
        schedstat_set(curr->stats.exec_max,
                      max(curr->stats.exec_max, delta_exec));
 
-       curr->se.sum_exec_runtime += delta_exec;
-       account_group_exec_runtime(curr, delta_exec);
-
-       curr->se.exec_start = rq_clock_task(rq);
-       cgroup_account_cputime(curr, delta_exec);
+       update_current_exec_runtime(curr, now, delta_exec);
 }
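
With the hunk applied, put_prev_task_stop() reads as reconstructed below. A side effect of passing now explicitly is that rq_clock_task() is read once instead of twice, so exec_start is reset to the same timestamp that delta_exec was measured against:

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
        struct task_struct *curr = rq->curr;
        u64 now, delta_exec;

        now = rq_clock_task(rq);
        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->stats.exec_max,
                      max(curr->stats.exec_max, delta_exec));

        update_current_exec_runtime(curr, now, delta_exec);
}
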
 
 /*