goto retry;
 }
 
+/*
+ * Return the number of CPUs in the same LLC domain as @cpu (or zero if the LLC
+ * domain is not defined).
+ */
+static unsigned int llc_weight(s32 cpu)
+{
+       struct sched_domain *sd;
+
+       sd = rcu_dereference(per_cpu(sd_llc, cpu));
+       if (!sd)
+               return 0;
+
+       return sd->span_weight;
+}
+
+/*
+ * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
+ * domain is not defined).
+ */
+static struct cpumask *llc_span(s32 cpu)
+{
+       struct sched_domain *sd;
+
+       sd = rcu_dereference(per_cpu(sd_llc, cpu));
+       if (!sd)
+               return NULL;
+
+       return sched_domain_span(sd);
+}
+
+/*
+ * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
+ * NUMA domain is not defined).
+ */
+static unsigned int numa_weight(s32 cpu)
+{
+       struct sched_domain *sd;
+       struct sched_group *sg;
+
+       sd = rcu_dereference(per_cpu(sd_numa, cpu));
+       if (!sd)
+               return 0;
+       sg = sd->groups;
+       if (!sg)
+               return 0;
+
+       return sg->group_weight;
+}
+
+/*
+ * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
+ * domain is not defined).
+ */
+static struct cpumask *numa_span(s32 cpu)
+{
+       struct sched_domain *sd;
+       struct sched_group *sg;
+
+       sd = rcu_dereference(per_cpu(sd_numa, cpu));
+       if (!sd)
+               return NULL;
+       sg = sd->groups;
+       if (!sg)
+               return NULL;
+
+       return sched_group_span(sg);
+}
+
 /*
  * Return true if the LLC domains do not perfectly overlap with the NUMA
  * domains, false otherwise.
         * overlapping, which is incorrect (as NUMA 1 has two distinct LLC
         * domains).
         */
-       for_each_online_cpu(cpu) {
-               const struct cpumask *numa_cpus;
-               struct sched_domain *sd;
-
-               sd = rcu_dereference(per_cpu(sd_llc, cpu));
-               if (!sd)
+       for_each_online_cpu(cpu)
+               if (llc_weight(cpu) != numa_weight(cpu))
                        return true;
 
-               numa_cpus = cpumask_of_node(cpu_to_node(cpu));
-               if (sd->span_weight != cpumask_weight(numa_cpus))
-                       return true;
-       }
-
        return false;
 }
 
 static void update_selcpu_topology(void)
 {
        bool enable_llc = false, enable_numa = false;
-       struct sched_domain *sd;
-       const struct cpumask *cpus;
+       unsigned int nr_cpus;
        s32 cpu = cpumask_first(cpu_online_mask);
 
        /*
         * CPUs.
         */
        rcu_read_lock();
-       sd = rcu_dereference(per_cpu(sd_llc, cpu));
-       if (sd) {
-               if (sd->span_weight < num_online_cpus())
+       nr_cpus = llc_weight(cpu);
+       if (nr_cpus > 0) {
+               if (nr_cpus < num_online_cpus())
                        enable_llc = true;
+               pr_debug("sched_ext: LLC=%*pb weight=%u\n",
+                        cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
        }
 
        /*
         * enabling both NUMA and LLC optimizations is unnecessary, as checking
         * for an idle CPU in the same domain twice is redundant.
         */
-       cpus = cpumask_of_node(cpu_to_node(cpu));
-       if ((cpumask_weight(cpus) < num_online_cpus()) && llc_numa_mismatch())
-               enable_numa = true;
+       nr_cpus = numa_weight(cpu);
+       if (nr_cpus > 0) {
+               if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+                       enable_numa = true;
+               pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+                        cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+       }
        rcu_read_unlock();
 
        pr_debug("sched_ext: LLC idle selection %s\n",
 
        *found = false;
 
-
        /*
         * This is necessary to protect llc_cpus.
         */
         */
        if (p->nr_cpus_allowed >= num_possible_cpus()) {
                if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
-                       numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu));
-
-               if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
-                       struct sched_domain *sd;
+                       numa_cpus = numa_span(prev_cpu);
 
-                       sd = rcu_dereference(per_cpu(sd_llc, prev_cpu));
-                       if (sd)
-                               llc_cpus = sched_domain_span(sd);
-               }
+               if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
+                       llc_cpus = llc_span(prev_cpu);
        }
 
        /*