{
        struct util_est ue = READ_ONCE(p->se.avg.util_est);
 
-       return max(ue.ewma, ue.enqueued);
+       return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
 }
 
 static inline unsigned long task_util_est(struct task_struct *p)
 
        /* Update root cfs_rq's estimated utilization */
        enqueued  = cfs_rq->avg.util_est.enqueued;
-       enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
+       enqueued += _task_util_est(p);
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
 }
 
 
        /* Update root cfs_rq's estimated utilization */
        ue.enqueued  = cfs_rq->avg.util_est.enqueued;
-       ue.enqueued -= min_t(unsigned int, ue.enqueued,
-                            (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+       ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
        /*
                 */
                if (unlikely(task_on_rq_queued(p) || current == p)) {
                        estimated -= min_t(unsigned int, estimated,
-                                          (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+                                          _task_util_est(p));
                }
                util = max(util, estimated);
        }
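
Taken together, these hunks move the UTIL_AVG_UNCHANGED masking into _task_util_est() itself: the helper now returns the estimate with the flag already OR-ed into its least significant bit, so the util_est enqueue/dequeue updates and the queued-task utilization check above no longer need the explicit `| UTIL_AVG_UNCHANGED` at each call site. Below is a minimal, stand-alone user-space sketch of that idea; the flag value, the pared-down struct util_est, and the helper name are re-declared here purely for illustration and are not the kernel's definitions.

/* Stand-alone sketch: centralize the UTIL_AVG_UNCHANGED masking in the helper. */
#include <stdio.h>

/* Assumed flag value for this sketch; in the kernel the flag occupies the
 * least significant bit of util_est.enqueued. */
#define UTIL_AVG_UNCHANGED 0x1

/* Pared-down stand-in for the kernel's struct util_est. */
struct util_est {
	unsigned int enqueued;
	unsigned int ewma;
};

/* Mirrors _task_util_est() after the change: the flag is OR-ed in once here,
 * so callers can use the returned value directly. */
static unsigned long task_util_est_sketch(const struct util_est *ue)
{
	unsigned long max_est = (ue->ewma > ue->enqueued) ? ue->ewma : ue->enqueued;

	return max_est | UTIL_AVG_UNCHANGED;
}

int main(void)
{
	struct util_est ue = { .enqueued = 400, .ewma = 380 };
	unsigned int rq_enqueued = 0;

	/* Enqueue path: no "(... | UTIL_AVG_UNCHANGED)" needed at the call site. */
	rq_enqueued += task_util_est_sketch(&ue);

	/* Dequeue path: clamp the subtraction, as the patch does with min_t(). */
	unsigned int contrib = task_util_est_sketch(&ue);
	rq_enqueued -= (contrib < rq_enqueued) ? contrib : rq_enqueued;

	printf("root enqueued estimate after enqueue+dequeue: %u\n", rq_enqueued);
	return 0;
}

Because the flag occupies only the least significant bit, the value the helper returns differs from the raw estimate by at most 1, which is negligible at the scale these utilization values are used; that is what makes it safe to fold the masking into the helper and drop it from every caller.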