}
 
        for (;;) {
-               lock = rq_lockp(rq);
+               lock = __rq_lockp(rq);
                raw_spin_lock_nested(lock, subclass);
-               if (likely(lock == rq_lockp(rq))) {
+               if (likely(lock == __rq_lockp(rq))) {
                        /* preempt_count *MUST* be > 1 */
                        preempt_enable_no_resched();
                        return;
        }
 
        for (;;) {
-               lock = rq_lockp(rq);
+               lock = __rq_lockp(rq);
                ret = raw_spin_trylock(lock);
-               if (!ret || (likely(lock == rq_lockp(rq)))) {
+               if (!ret || (likely(lock == __rq_lockp(rq)))) {
                        preempt_enable();
                        return ret;
                }
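/*
 * Both raw_spin_rq_lock_nested() and raw_spin_rq_trylock() use the same
 * pattern to cope with a lock pointer that can change until the lock is
 * actually held: sample __rq_lockp(), acquire, then re-check that the rq
 * still resolves to the lock we took; if the rq was re-pointed at a
 * different (core-wide) lock in the meantime, drop it and retry.
 * As a rough sketch (acquire()/release() stand in for the raw_spin_*
 * calls, this is not kernel API):
 *
 *	again:
 *		lock = __rq_lockp(rq);
 *		acquire(lock);
 *		if (unlikely(lock != __rq_lockp(rq))) {
 *			release(lock);
 *			goto again;
 *		}
 */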
                swap(rq1, rq2);
 
        raw_spin_rq_lock(rq1);
-       if (rq_lockp(rq1) == rq_lockp(rq2))
+       if (__rq_lockp(rq1) == __rq_lockp(rq2))
                return;
 
        raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
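/*
 * With core scheduling enabled, rq1 and rq2 can resolve to the same
 * core-wide lock (SMT siblings of one core share it). Taking that lock
 * twice would deadlock, hence the pointer comparison and early return
 * before the nested acquire.
 */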
         * task_rq_lock().
         */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-                                     lockdep_is_held(rq_lockp(task_rq(p)))));
+                                     lockdep_is_held(__rq_lockp(task_rq(p)))));
 #endif
        /*
         * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
         * do an early lockdep release here:
         */
        rq_unpin_lock(rq, rf);
-       spin_release(&rq_lockp(rq)->dep_map, _THIS_IP_);
+       spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
 #ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq_lockp(rq)->owner = next;
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
-       spin_acquire(&rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
+       spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
        __balance_callbacks(rq);
        raw_spin_rq_unlock_irq(rq);
 }
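/*
 * prepare_lock_switch() hands the rq lock's lockdep state over to the next
 * task (which is the one that will eventually unlock the rq), and
 * finish_lock_switch() re-acquires that state before really unlocking.
 * Both run with the rq lock held, so the value __rq_lockp() returns here
 * is stable for the duration of the annotation.
 */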
 
        return !static_branch_unlikely(&__sched_core_enabled);
 }
 
+/*
+ * Be careful with this function; not for general use. The return value isn't
+ * stable unless you actually hold a relevant rq->__lock.
+ */
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
        if (sched_core_enabled(rq))
                return &rq->core->__lock;
 
        return &rq->__lock;
 }
 
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+       if (rq->core_enabled)
+               return &rq->core->__lock;
+
+       return &rq->__lock;
+}
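/*
 * __rq_lockp() differs from rq_lockp() above only in that it tests
 * rq->core_enabled directly instead of going through sched_core_enabled()
 * and its static branch, which makes it cheaper for callers that already
 * hold the rq lock or that re-validate the pointer after acquiring it
 * (see raw_spin_rq_lock_nested()). The same stability caveat applies.
 */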
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline bool sched_core_enabled(struct rq *rq)
        return &rq->__lock;
 }
 
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+       return &rq->__lock;
+}
+
 #endif /* CONFIG_SCHED_CORE */
 
 static inline void lockdep_assert_rq_held(struct rq *rq)
 {
-       lockdep_assert_held(rq_lockp(rq));
+       lockdep_assert_held(__rq_lockp(rq));
 }
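/*
 * Asserting on __rq_lockp() is sound: rq->core_enabled is only flipped
 * with the relevant rq locks held, so if lockdep confirms the returned
 * lock is held, the pointer we just read cannot have been stale.
 */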
 
 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
  */
 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
 {
-       rf->cookie = lockdep_pin_lock(rq_lockp(rq));
+       rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
 
 #ifdef CONFIG_SCHED_DEBUG
        rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
                rf->clock_update_flags = RQCF_UPDATED;
 #endif
 
-       lockdep_unpin_lock(rq_lockp(rq), rf->cookie);
+       lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
 }
 
 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
 {
-       lockdep_repin_lock(rq_lockp(rq), rf->cookie);
+       lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
 
 #ifdef CONFIG_SCHED_DEBUG
        /*
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
-       if (rq_lockp(this_rq) == rq_lockp(busiest))
+       if (__rq_lockp(this_rq) == __rq_lockp(busiest))
                return 0;
 
        if (likely(raw_spin_rq_trylock(busiest)))
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
 {
-       if (rq_lockp(this_rq) != rq_lockp(busiest))
+       if (__rq_lockp(this_rq) != __rq_lockp(busiest))
                raw_spin_rq_unlock(busiest);
-       lock_set_subclass(&rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
+       lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
 }
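/*
 * The pointer comparisons in the balance helpers follow from the same
 * core-wide lock sharing: the "second" runqueue may be guarded by the very
 * lock we already hold, in which case there is nothing extra to take or
 * drop.
 */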
 
 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
        __releases(rq1->lock)
        __releases(rq2->lock)
 {
-       if (rq_lockp(rq1) != rq_lockp(rq2))
+       if (__rq_lockp(rq1) != __rq_lockp(rq2))
                raw_spin_rq_unlock(rq2);
        else
                __release(rq2->lock);
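/*
 * As in double_unlock_balance(), when both runqueues share one lock there
 * is only one underlying lock to really drop; the __release() annotation
 * merely keeps sparse's context tracking balanced for rq2.
 */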