rq_unlock(env->dst_rq, &rf);
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 {
        if (cfs_rq->avg.load_avg)
                return true;
 
        if (cfs_rq->avg.util_avg)
                return true;
 
        return false;
 }
 
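+/*
+ * Record when blocked load was last updated and, when no blocked load
+ * remains, clear has_blocked_load so NOHZ idle balancing can skip
+ * decaying this rq's averages.
+ */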
+static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
+{
+       rq->last_blocked_load_update_tick = jiffies;
+
+       if (!has_blocked)
+               rq->has_blocked_load = 0;
+}
+#else
+static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
+static inline bool others_have_blocked(struct rq *rq) { return false; }
+static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
        if (others_have_blocked(rq))
                done = false;
 
-#ifdef CONFIG_NO_HZ_COMMON
-       rq->last_blocked_load_update_tick = jiffies;
-       if (done)
-               rq->has_blocked_load = 0;
-#endif
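+       /* !done: some blocked load (cfs, rt, dl or irq) has yet to decay */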
+       update_blocked_load_status(rq, !done);
        rq_unlock_irqrestore(rq, &rf);
 }
 
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
-#ifdef CONFIG_NO_HZ_COMMON
-       rq->last_blocked_load_update_tick = jiffies;
-       if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
-               rq->has_blocked_load = 0;
-#endif
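+       /* Blocked load remains if the cfs_rq or rt/dl/irq averages are non-zero */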
+       update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
        rq_unlock_irqrestore(rq, &rf);
 }