 #endif
 
 /********** Helpers for find_busiest_group ************************/
+
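+/*
+ * Group classification, ordered from least to most in need of balancing;
+ * update_sd_pick_busiest() always prefers a group with a higher
+ * group_type over one with a lower group_type.
+ */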
+enum group_type {
+       group_other = 0,
+       group_imbalanced,
+       group_overloaded,
+};
+
 /*
  * sg_lb_stats - stats of a sched_group required for load_balancing
  */
        unsigned int group_capacity_factor;
        unsigned int idle_cpus;
        unsigned int group_weight;
-       int group_imb; /* Is there an imbalance in the group ? */
+       enum group_type group_type;
        int group_has_free_capacity;
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
                .total_capacity = 0UL,
                .busiest_stat = {
                        .avg_load = 0UL,
+                       .sum_nr_running = 0,
+                       .group_type = group_other,
                },
        };
 }
        return capacity_factor;
 }
 
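+/*
+ * group_classify() returns the most severe classification that applies:
+ * a group that is both overloaded and imbalanced is reported as
+ * group_overloaded.
+ */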
+static enum group_type
+group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
+{
+       if (sgs->sum_nr_running > sgs->group_capacity_factor)
+               return group_overloaded;
+
+       if (sg_imbalanced(group))
+               return group_imbalanced;
+
+       return group_other;
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
                sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
        sgs->group_weight = group->group_weight;
-
-       sgs->group_imb = sg_imbalanced(group);
        sgs->group_capacity_factor = sg_capacity_factor(env, group);
+       sgs->group_type = group_classify(group, sgs);
 
        if (sgs->group_capacity_factor > sgs->sum_nr_running)
                sgs->group_has_free_capacity = 1;
                                   struct sched_group *sg,
                                   struct sg_lb_stats *sgs)
 {
-       if (sgs->avg_load <= sds->busiest_stat.avg_load)
-               return false;
+       struct sg_lb_stats *busiest = &sds->busiest_stat;
 
-       if (sgs->sum_nr_running > sgs->group_capacity_factor)
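+       /*
+        * group_type is compared before avg_load: an imbalanced or
+        * overloaded group is picked over a merely more loaded one.
+        */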
+       if (sgs->group_type > busiest->group_type)
                return true;
 
-       if (sgs->group_imb)
+       if (sgs->group_type < busiest->group_type)
+               return false;
+
+       if (sgs->avg_load <= busiest->avg_load)
+               return false;
+
+       /* This is the busiest node in its class. */
+       if (!(env->sd->flags & SD_ASYM_PACKING))
                return true;
 
        /*
         * numbered CPUs in the group, therefore mark all groups
         * higher than ourself as busy.
         */
-       if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
-           env->dst_cpu < group_first_cpu(sg)) {
+       if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
                if (!sds->busiest)
                        return true;
 
        local = &sds->local_stat;
        busiest = &sds->busiest_stat;
 
-       if (busiest->group_imb) {
+       if (busiest->group_type == group_imbalanced) {
                /*
                 * In the group_imb case we cannot rely on group-wide averages
                 * to ensure cpu-load equilibrium, look at wider averages. XXX
                return fix_small_imbalance(env, sds);
        }
 
-       if (busiest->sum_nr_running > busiest->group_capacity_factor) {
+       if (busiest->group_type == group_overloaded) {
                /*
                 * Don't want to pull so many tasks that a group would go idle.
                 * Except of course for the group_imb case, since then we might
         * work because they assume all things are equal, which typically
         * isn't true due to cpus_allowed constraints and the like.
         */
-       if (busiest->group_imb)
+       if (busiest->group_type == group_imbalanced)
                goto force_balance;
 
        /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */