#endif
 }
 
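+/*
+ * Per-CPU runqueue declaration and accessors. Kept ahead of the
+ * CONFIG_SCHED_CORE block below so that sched_group_cookie_match()
+ * can use cpu_rq().
+ */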
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+#define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
+#define this_rq()              this_cpu_ptr(&runqueues)
+#define task_rq(p)             cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
+#define raw_rq()               raw_cpu_ptr(&runqueues)
+
 struct sched_group;
 #ifdef CONFIG_SCHED_CORE
 static inline struct cpumask *sched_group_span(struct sched_group *sg);
                return true;
 
        for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
-               if (sched_core_cookie_match(rq, p))
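+               /* Match the cookie against each candidate CPU's rq, not the caller-supplied rq. */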
+               if (sched_core_cookie_match(cpu_rq(cpu), p))
                        return true;
        }
        return false;
 static inline void update_idle_core(struct rq *rq) { }
 #endif
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-#define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
-#define this_rq()              this_cpu_ptr(&runqueues)
-#define task_rq(p)             cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
-#define raw_rq()               raw_cpu_ptr(&runqueues)
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static inline struct task_struct *task_of(struct sched_entity *se)
 {