static int
 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 {
-       unsigned long best_cap = 0;
+       unsigned long task_util, best_cap = 0;
        int cpu, best_cpu = -1;
        struct cpumask *cpus;
 
-       sync_entity_load_avg(&p->se);
-
        cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
        cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
+       task_util = uclamp_task_util(p);
+
        for_each_cpu_wrap(cpu, cpus, target) {
                unsigned long cpu_cap = capacity_of(cpu);
 
                if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
                        continue;
-               if (task_fits_capacity(p, cpu_cap))
+               if (fits_capacity(task_util, cpu_cap))
                        return cpu;
 
                if (cpu_cap > best_cap) {
                        best_cap = cpu_cap;
                        best_cpu = cpu;
                }
        }
 
        return best_cpu;
 }
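
[Note: fits_capacity() used above is the existing headroom check in
kernel/sched/fair.c; at the time of this change it is defined as:

	#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

i.e. a task fits a CPU only if roughly 20% of the capacity is left
over, so a CPU at the full scale of 1024 accepts a task_util of up to
about 819.]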
 
+static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
+{
+       if (static_branch_unlikely(&sched_asym_cpucapacity))
+               return fits_capacity(task_util, capacity_of(cpu));
+
+       return true;
+}
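
[Note: on symmetric systems the sched_asym_cpucapacity key stays
disabled, so the static branch is patched out and this helper
effectively reduces to:

	static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
	{
		return true;
	}

making the capacity checks added below free on the common path.]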
+
 /*
  * Try and locate an idle core/thread in the LLC cache domain.
  */
 static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
        struct sched_domain *sd;
+       unsigned long task_util = 0;
        int i, recent_used_cpu;
 
        /*
-        * For asymmetric CPU capacity systems, our domain of interest is
-        * sd_asym_cpucapacity rather than sd_llc.
+        * On asymmetric systems, update the task utilization up front, since
+        * we will check below whether the task fits each CPU's capacity.
         */
        if (static_branch_unlikely(&sched_asym_cpucapacity)) {
-               sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
-               /*
-                * On an asymmetric CPU capacity system where an exclusive
-                * cpuset defines a symmetric island (i.e. one unique
-                * capacity_orig value through the cpuset), the key will be set
-                * but the CPUs within that cpuset will not have a domain with
-                * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
-                * capacity path.
-                */
-               if (!sd)
-                       goto symmetric;
-
-               i = select_idle_capacity(p, sd, target);
-               return ((unsigned)i < nr_cpumask_bits) ? i : target;
+               sync_entity_load_avg(&p->se);
+               task_util = uclamp_task_util(p);
        }
 
-symmetric:
-       if (available_idle_cpu(target) || sched_idle_cpu(target))
+       if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
+           asym_fits_capacity(task_util, target))
                return target;
 
        /*
         * If the previous CPU is cache affine and idle, don't be stupid:
         */
        if (prev != target && cpus_share_cache(prev, target) &&
-           (available_idle_cpu(prev) || sched_idle_cpu(prev)))
+           (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
+           asym_fits_capacity(task_util, prev))
                return prev;
 
        /*
         * Check a recently used CPU as a potential idle candidate:
         */
        recent_used_cpu = p->recent_used_cpu;
        if (recent_used_cpu != prev &&
            recent_used_cpu != target &&
            cpus_share_cache(recent_used_cpu, target) &&
            (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
-           cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
+           cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
+           asym_fits_capacity(task_util, recent_used_cpu)) {
                /*
                 * Replace recent_used_cpu with prev as it is a potential
                 * candidate for the next wake:
                 */
                p->recent_used_cpu = prev;
                return recent_used_cpu;
        }
 
+       /*
+        * For asymmetric CPU capacity systems, our domain of interest is
+        * sd_asym_cpucapacity rather than sd_llc.
+        */
+       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+               sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
+               /*
+                * On an asymmetric CPU capacity system where an exclusive
+                * cpuset defines a symmetric island (i.e. one unique
+                * capacity_orig value through the cpuset), the key will be set
+                * but the CPUs within that cpuset will not have a domain with
+                * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
+                * capacity path.
+                */
+               if (sd) {
+                       i = select_idle_capacity(p, sd, target);
+                       return ((unsigned)i < nr_cpumask_bits) ? i : target;
+               }
+       }
+
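[Note: this is the same sd_asym_cpucapacity block that used to sit at
the top of the function; moving it below the target/prev/recent_used_cpu
checks means an asymmetric system now tries those cache-hot candidates
first, gated by asym_fits_capacity(), and only falls back to the full
select_idle_capacity() scan when none of them fits.]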
        sd = rcu_dereference(per_cpu(sd_llc, target));
        if (!sd)
                return target;