static int i915_frequency_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;
 
        intel_runtime_pm_get(dev_priv);
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 
                seq_printf(m, "current GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+                          intel_gpu_freq(dev_priv, rps->cur_freq));
 
                seq_printf(m, "max GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                          intel_gpu_freq(dev_priv, rps->max_freq));
 
                seq_printf(m, "min GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+                          intel_gpu_freq(dev_priv, rps->min_freq));
 
                seq_printf(m, "idle GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+                          intel_gpu_freq(dev_priv, rps->idle_freq));
 
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+                          intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
-                          dev_priv->rps.pm_intrmsk_mbz);
+                          rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
-               seq_printf(m, "Up threshold: %d%%\n",
-                          dev_priv->rps.up_threshold);
+               seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
 
                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
-               seq_printf(m, "Down threshold: %d%%\n",
-                          dev_priv->rps.down_threshold);
+               seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
 
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                          intel_gpu_freq(dev_priv, rps->max_freq));
 
                seq_printf(m, "Current freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+                          intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+                          intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+                          intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+                          intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                          intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+                          intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;
        int gpu_freq, ia_freq;
        unsigned int max_gpu_freq, min_gpu_freq;
 
        if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                /* Convert GT frequency to 50 MHz units */
-               min_gpu_freq =
-                       dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
-               max_gpu_freq =
-                       dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
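+               /*
+                * Illustration: a softlimit of 33 (33 * 16.66 MHz = 550 MHz)
+                * becomes 11 (11 * 50 MHz = 550 MHz), GEN9_FREQ_SCALER being 3.
+                */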
+               min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
+               max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
        } else {
-               min_gpu_freq = dev_priv->rps.min_freq_softlimit;
-               max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+               min_gpu_freq = rps->min_freq_softlimit;
+               max_gpu_freq = rps->max_freq_softlimit;
        }
 
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct drm_file *file;
 
-       seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+       seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s [%d requests]\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Boosts outstanding? %d\n",
-                  atomic_read(&dev_priv->rps.num_waiters));
+                  atomic_read(&rps->num_waiters));
        seq_printf(m, "Frequency requested %d\n",
-                  intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+                  intel_gpu_freq(dev_priv, rps->cur_freq));
        seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-                  intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                  intel_gpu_freq(dev_priv, rps->min_freq),
+                  intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
+                  intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
+                  intel_gpu_freq(dev_priv, rps->max_freq));
        seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
-                  intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+                  intel_gpu_freq(dev_priv, rps->idle_freq),
+                  intel_gpu_freq(dev_priv, rps->efficient_freq),
+                  intel_gpu_freq(dev_priv, rps->boost_freq));
 
        mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                seq_printf(m, "%s [%d]: %d boosts\n",
                           task ? task->comm : "<unknown>",
                           task ? task->pid : -1,
-                          atomic_read(&file_priv->rps.boosts));
+                          atomic_read(&file_priv->rps_client.boosts));
                rcu_read_unlock();
        }
        seq_printf(m, "Kernel (anonymous) boosts: %d\n",
-                  atomic_read(&dev_priv->rps.boosts));
+                  atomic_read(&rps->boosts));
        mutex_unlock(&dev->filelist_mutex);
 
        if (INTEL_GEN(dev_priv) >= 6 &&
-           dev_priv->rps.enabled &&
+           rps->enabled &&
            dev_priv->gt.active_requests) {
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
-                          rps_power_to_str(dev_priv->rps.power));
+                          rps_power_to_str(rps->power));
                seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
                           rpup && rpupei ? 100 * rpup / rpupei : 0,
-                          dev_priv->rps.up_threshold);
+                          rps->up_threshold);
                seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
                           rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
-                          dev_priv->rps.down_threshold);
+                          rps->down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
        }
        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+       *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
        return 0;
 }
 
 i915_max_freq_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 hw_max, hw_min;
        int ret;
 
         */
        val = intel_freq_opcode(dev_priv, val);
 
-       hw_max = dev_priv->rps.max_freq;
-       hw_min = dev_priv->rps.min_freq;
+       hw_max = rps->max_freq;
+       hw_min = rps->min_freq;
 
-       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
+       if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
                mutex_unlock(&dev_priv->pcu_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.max_freq_softlimit = val;
+       rps->max_freq_softlimit = val;
 
        if (intel_set_rps(dev_priv, val))
                DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+       *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
        return 0;
 }
 
 i915_min_freq_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 hw_max, hw_min;
        int ret;
 
         */
        val = intel_freq_opcode(dev_priv, val);
 
-       hw_max = dev_priv->rps.max_freq;
-       hw_min = dev_priv->rps.min_freq;
+       hw_max = rps->max_freq;
+       hw_min = rps->min_freq;
 
        if (val < hw_min ||
-           val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+           val > hw_max || val > rps->max_freq_softlimit) {
                mutex_unlock(&dev_priv->pcu_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_freq_softlimit = val;
+       rps->min_freq_softlimit = val;
 
        if (intel_set_rps(dev_priv, val))
                DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
 
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
-       if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+       if (WARN_ON_ONCE(!(dev_priv->gt_pm.rps.enabled && intel_enable_rc6())))
                return -ENODEV;
 
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
 
 
        struct intel_rps_client {
                atomic_t boosts;
-       } rps;
+       } rps_client;
 
        unsigned int bsd_engine;
 
        u32 media_c0;
 };
 
-struct intel_gen6_power_mgmt {
+struct intel_rps {
        /*
         * work, interrupts_enabled and pm_iir are protected by
         * dev_priv->irq_lock
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
        bool enabled;
-       struct delayed_work autoenable_work;
        atomic_t num_waiters;
        atomic_t boosts;
 
        struct intel_rps_ei ei;
 };
 
+struct intel_gen6_power_mgmt {
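+       /*
+        * gen6+ GT power management: RPS frequency state plus the worker
+        * used to defer enabling powersaving (see
+        * intel_autoenable_gt_powersave()).
+        */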
+       struct intel_rps rps;
+       struct delayed_work autoenable_work;
+};
+
 /* defined intel_pm.c */
 extern spinlock_t mchdev_lock;
 
         */
        struct mutex pcu_lock;
 
-       /* gen6+ rps state */
-       struct intel_gen6_power_mgmt rps;
+       /* gen6+ GT PM state */
+       struct intel_gen6_power_mgmt gt_pm;
 
        /* ilk-only ips/rps state. Everything in here is protected by the global
         * mchdev_lock in intel_pm.c */
 
 i915_gem_object_wait_fence(struct dma_fence *fence,
                           unsigned int flags,
                           long timeout,
-                          struct intel_rps_client *rps)
+                          struct intel_rps_client *rps_client)
 {
        struct drm_i915_gem_request *rq;
 
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
-       if (rps) {
+       if (rps_client) {
                if (INTEL_GEN(rq->i915) >= 6)
-                       gen6_rps_boost(rq, rps);
+                       gen6_rps_boost(rq, rps_client);
                else
-                       rps = NULL;
+                       rps_client = NULL;
        }
 
        timeout = i915_wait_request(rq, flags, timeout);
 i915_gem_object_wait_reservation(struct reservation_object *resv,
                                 unsigned int flags,
                                 long timeout,
-                                struct intel_rps_client *rps)
+                                struct intel_rps_client *rps_client)
 {
        unsigned int seq = __read_seqcount_begin(&resv->seq);
        struct dma_fence *excl;
                for (i = 0; i < count; i++) {
                        timeout = i915_gem_object_wait_fence(shared[i],
                                                             flags, timeout,
-                                                            rps);
+                                                            rps_client);
                        if (timeout < 0)
                                break;
 
        }
 
        if (excl && timeout >= 0) {
-               timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+               timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+                                                    rps_client);
                prune_fences = timeout >= 0;
        }
 
 i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     unsigned int flags,
                     long timeout,
-                    struct intel_rps_client *rps)
+                    struct intel_rps_client *rps_client)
 {
        might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
 
        timeout = i915_gem_object_wait_reservation(obj->resv,
                                                   flags, timeout,
-                                                  rps);
+                                                  rps_client);
        return timeout < 0 ? timeout : 0;
 }
 
 {
        struct drm_i915_file_private *fpriv = file->driver_priv;
 
-       return &fpriv->rps;
+       return &fpriv->rps_client;
 }
 
 static int
 
 
        spin_lock_irq(&request->lock);
        if (request->waitboost)
-               atomic_dec(&request->i915->rps.num_waiters);
+               atomic_dec(&request->i915->gt_pm.rps.num_waiters);
        dma_fence_signal_locked(&request->fence);
        spin_unlock_irq(&request->lock);
 
 
 
 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int irqs;
         * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
         * result in the register bit being left SET!
         */
-       dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
-       dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+       rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+       rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 }
 
 static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int irqs;
        I915_WRITE(GUC_VCS2_VCS1_IER, 0);
        I915_WRITE(GUC_WD_VECS_IER, 0);
 
-       dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-       dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+       rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+       rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
 }
 
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 
 {
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
-       dev_priv->rps.pm_iir = 0;
+       dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-       if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       if (READ_ONCE(rps->interrupts_enabled))
                return;
 
        spin_lock_irq(&dev_priv->irq_lock);
-       WARN_ON_ONCE(dev_priv->rps.pm_iir);
+       WARN_ON_ONCE(rps->pm_iir);
        WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-       dev_priv->rps.interrupts_enabled = true;
+       rps->interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
        spin_unlock_irq(&dev_priv->irq_lock);
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-       if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       if (!READ_ONCE(rps->interrupts_enabled))
                return;
 
        spin_lock_irq(&dev_priv->irq_lock);
-       dev_priv->rps.interrupts_enabled = false;
+       rps->interrupts_enabled = false;
 
        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
-       cancel_work_sync(&dev_priv->rps.work);
+       cancel_work_sync(&rps->work);
        gen6_reset_rps_interrupts(dev_priv);
 }
 
 
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-       memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+       memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
-       const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       const struct intel_rps_ei *prev = &rps->ei;
        struct intel_rps_ei now;
        u32 events = 0;
 
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
 
-               if (c0 > time * dev_priv->rps.up_threshold)
+               if (c0 > time * rps->up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
-               else if (c0 < time * dev_priv->rps.down_threshold)
+               else if (c0 < time * rps->down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }
 
-       dev_priv->rps.ei = now;
+       rps->ei = now;
        return events;
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, rps.work);
+               container_of(work, struct drm_i915_private, gt_pm.rps.work);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        bool client_boost = false;
        int new_delay, adj, min, max;
        u32 pm_iir = 0;
 
        spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->rps.interrupts_enabled) {
-               pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
-               client_boost = atomic_read(&dev_priv->rps.num_waiters);
+       if (rps->interrupts_enabled) {
+               pm_iir = fetch_and_zero(&rps->pm_iir);
+               client_boost = atomic_read(&rps->num_waiters);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
 
 
        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
 
-       adj = dev_priv->rps.last_adj;
-       new_delay = dev_priv->rps.cur_freq;
-       min = dev_priv->rps.min_freq_softlimit;
-       max = dev_priv->rps.max_freq_softlimit;
+       adj = rps->last_adj;
+       new_delay = rps->cur_freq;
+       min = rps->min_freq_softlimit;
+       max = rps->max_freq_softlimit;
        if (client_boost)
-               max = dev_priv->rps.max_freq;
-       if (client_boost && new_delay < dev_priv->rps.boost_freq) {
-               new_delay = dev_priv->rps.boost_freq;
+               max = rps->max_freq;
+       if (client_boost && new_delay < rps->boost_freq) {
+               new_delay = rps->boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
 
-               if (new_delay >= dev_priv->rps.max_freq_softlimit)
+               if (new_delay >= rps->max_freq_softlimit)
                        adj = 0;
        } else if (client_boost) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-               if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
-                       new_delay = dev_priv->rps.efficient_freq;
-               else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
-                       new_delay = dev_priv->rps.min_freq_softlimit;
+               if (rps->cur_freq > rps->efficient_freq)
+                       new_delay = rps->efficient_freq;
+               else if (rps->cur_freq > rps->min_freq_softlimit)
+                       new_delay = rps->min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
 
-               if (new_delay <= dev_priv->rps.min_freq_softlimit)
+               if (new_delay <= rps->min_freq_softlimit)
                        adj = 0;
        } else { /* unknown event */
                adj = 0;
        }
 
-       dev_priv->rps.last_adj = adj;
+       rps->last_adj = adj;
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
 
        if (intel_set_rps(dev_priv, new_delay)) {
                DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-               dev_priv->rps.last_adj = 0;
+               rps->last_adj = 0;
        }
 
        mutex_unlock(&dev_priv->pcu_lock);
 out:
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->rps.interrupts_enabled)
+       if (rps->interrupts_enabled)
                gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
 }
  * the work queue. */
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
                gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
-               if (dev_priv->rps.interrupts_enabled) {
-                       dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-                       schedule_work(&dev_priv->rps.work);
+               if (rps->interrupts_enabled) {
+                       rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+                       schedule_work(&rps->work);
                }
                spin_unlock(&dev_priv->irq_lock);
        }
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int i;
 
        intel_hpd_init_work(dev_priv);
 
-       INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+       INIT_WORK(&rps->work, gen6_pm_rps_work);
 
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-       dev_priv->rps.pm_intrmsk_mbz = 0;
+       rps->pm_intrmsk_mbz = 0;
 
        /*
         * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_GEN(dev_priv) <= 7)
-               dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+               rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
        if (INTEL_GEN(dev_priv) >= 8)
-               dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+               rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
        if (IS_GEN2(dev_priv)) {
                /* Gen2 doesn't have a hardware frame counter */
 
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.cur_freq));
+                                      dev_priv->gt_pm.rps.cur_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.boost_freq));
+                                      dev_priv->gt_pm.rps.boost_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
                                       const char *buf, size_t count)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
        ssize_t ret;
 
 
        /* Validate against (static) hardware limits */
        val = intel_freq_opcode(dev_priv, val);
-       if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+       if (val < rps->min_freq || val > rps->max_freq)
                return -EINVAL;
 
        mutex_lock(&dev_priv->pcu_lock);
-       dev_priv->rps.boost_freq = val;
+       rps->boost_freq = val;
        mutex_unlock(&dev_priv->pcu_lock);
 
        return count;
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.efficient_freq));
+                                      dev_priv->gt_pm.rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.max_freq_softlimit));
+                                      dev_priv->gt_pm.rps.max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     const char *buf, size_t count)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
        ssize_t ret;
 
 
        val = intel_freq_opcode(dev_priv, val);
 
-       if (val < dev_priv->rps.min_freq ||
-           val > dev_priv->rps.max_freq ||
-           val < dev_priv->rps.min_freq_softlimit) {
+       if (val < rps->min_freq ||
+           val > rps->max_freq ||
+           val < rps->min_freq_softlimit) {
                mutex_unlock(&dev_priv->pcu_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }
 
-       if (val > dev_priv->rps.rp0_freq)
+       if (val > rps->rp0_freq)
                DRM_DEBUG("User requested overclocking to %d\n",
                          intel_gpu_freq(dev_priv, val));
 
-       dev_priv->rps.max_freq_softlimit = val;
+       rps->max_freq_softlimit = val;
 
-       val = clamp_t(int, dev_priv->rps.cur_freq,
-                     dev_priv->rps.min_freq_softlimit,
-                     dev_priv->rps.max_freq_softlimit);
+       val = clamp_t(int, rps->cur_freq,
+                     rps->min_freq_softlimit,
+                     rps->max_freq_softlimit);
 
        /* We still need *_set_rps to process the new max_delay and
         * update the interrupt limits and PMINTRMSK even though
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.min_freq_softlimit));
+                                      dev_priv->gt_pm.rps.min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     const char *buf, size_t count)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
        ssize_t ret;
 
 
        val = intel_freq_opcode(dev_priv, val);
 
-       if (val < dev_priv->rps.min_freq ||
-           val > dev_priv->rps.max_freq ||
-           val > dev_priv->rps.max_freq_softlimit) {
+       if (val < rps->min_freq ||
+           val > rps->max_freq ||
+           val > rps->max_freq_softlimit) {
                mutex_unlock(&dev_priv->pcu_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_freq_softlimit = val;
+       rps->min_freq_softlimit = val;
 
-       val = clamp_t(int, dev_priv->rps.cur_freq,
-                     dev_priv->rps.min_freq_softlimit,
-                     dev_priv->rps.max_freq_softlimit);
+       val = clamp_t(int, rps->cur_freq,
+                     rps->min_freq_softlimit,
+                     rps->max_freq_softlimit);
 
        /* We still need *_set_rps to process the new min_delay and
         * update the interrupt limits and PMINTRMSK even though
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
 
        if (attr == &dev_attr_gt_RP0_freq_mhz)
-               val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+               val = intel_gpu_freq(dev_priv, rps->rp0_freq);
        else if (attr == &dev_attr_gt_RP1_freq_mhz)
-               val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+               val = intel_gpu_freq(dev_priv, rps->rp1_freq);
        else if (attr == &dev_attr_gt_RPn_freq_mhz)
-               val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+               val = intel_gpu_freq(dev_priv, rps->min_freq);
        else
                BUG();
 
 
 static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
                                            u32 mask)
 {
-       return mask & ~i915->rps.pm_intrmsk_mbz;
+       return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
 }
 
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
 
  */
 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 limits;
 
        /* Only set the down limit when we've reached the lowest level to avoid
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
        if (INTEL_GEN(dev_priv) >= 9) {
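+               /*
+                * Gen9 widened the limit fields to 9 bits, moving the
+                * shifts from 24/16 down to 23/14.
+                */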
-               limits = (dev_priv->rps.max_freq_softlimit) << 23;
-               if (val <= dev_priv->rps.min_freq_softlimit)
-                       limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+               limits = (rps->max_freq_softlimit) << 23;
+               if (val <= rps->min_freq_softlimit)
+                       limits |= (rps->min_freq_softlimit) << 14;
        } else {
-               limits = dev_priv->rps.max_freq_softlimit << 24;
-               if (val <= dev_priv->rps.min_freq_softlimit)
-                       limits |= dev_priv->rps.min_freq_softlimit << 16;
+               limits = rps->max_freq_softlimit << 24;
+               if (val <= rps->min_freq_softlimit)
+                       limits |= rps->min_freq_softlimit << 16;
        }
 
        return limits;
 
 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int new_power;
        u32 threshold_up = 0, threshold_down = 0; /* in % */
        u32 ei_up = 0, ei_down = 0;
 
-       new_power = dev_priv->rps.power;
-       switch (dev_priv->rps.power) {
+       new_power = rps->power;
+       switch (rps->power) {
        case LOW_POWER:
-               if (val > dev_priv->rps.efficient_freq + 1 &&
-                   val > dev_priv->rps.cur_freq)
+               if (val > rps->efficient_freq + 1 &&
+                   val > rps->cur_freq)
                        new_power = BETWEEN;
                break;
 
        case BETWEEN:
-               if (val <= dev_priv->rps.efficient_freq &&
-                   val < dev_priv->rps.cur_freq)
+               if (val <= rps->efficient_freq &&
+                   val < rps->cur_freq)
                        new_power = LOW_POWER;
-               else if (val >= dev_priv->rps.rp0_freq &&
-                        val > dev_priv->rps.cur_freq)
+               else if (val >= rps->rp0_freq &&
+                        val > rps->cur_freq)
                        new_power = HIGH_POWER;
                break;
 
        case HIGH_POWER:
-               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
-                   val < dev_priv->rps.cur_freq)
+               if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+                   val < rps->cur_freq)
                        new_power = BETWEEN;
                break;
        }
        /* Max/min bins are special */
-       if (val <= dev_priv->rps.min_freq_softlimit)
+       if (val <= rps->min_freq_softlimit)
                new_power = LOW_POWER;
-       if (val >= dev_priv->rps.max_freq_softlimit)
+       if (val >= rps->max_freq_softlimit)
                new_power = HIGH_POWER;
-       if (new_power == dev_priv->rps.power)
+       if (new_power == rps->power)
                return;
 
        /* Note the units here are not exactly 1us, but 1280ns. */
                   GEN6_RP_DOWN_IDLE_AVG);
 
 skip_hw_write:
-       dev_priv->rps.power = new_power;
-       dev_priv->rps.up_threshold = threshold_up;
-       dev_priv->rps.down_threshold = threshold_down;
-       dev_priv->rps.last_adj = 0;
+       rps->power = new_power;
+       rps->up_threshold = threshold_up;
+       rps->down_threshold = threshold_down;
+       rps->last_adj = 0;
 }
 
 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 mask = 0;
 
        /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
-       if (val > dev_priv->rps.min_freq_softlimit)
+       if (val > rps->min_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
-       if (val < dev_priv->rps.max_freq_softlimit)
+       if (val < rps->max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
        mask &= dev_priv->pm_rps_events;
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* min/max delay may still have been modified so be sure to
         * write the limits value.
         */
-       if (val != dev_priv->rps.cur_freq) {
+       if (val != rps->cur_freq) {
                gen6_set_rps_thresholds(dev_priv, val);
 
                if (INTEL_GEN(dev_priv) >= 9)
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
        I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-       dev_priv->rps.cur_freq = val;
+       rps->cur_freq = val;
        trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 
        return 0;
 
        I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-       if (val != dev_priv->rps.cur_freq) {
+       if (val != dev_priv->gt_pm.rps.cur_freq) {
                err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
                if (err)
                        return err;
                gen6_set_rps_thresholds(dev_priv, val);
        }
 
-       dev_priv->rps.cur_freq = val;
+       dev_priv->gt_pm.rps.cur_freq = val;
        trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 
        return 0;
 */
 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 {
-       u32 val = dev_priv->rps.idle_freq;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       u32 val = rps->idle_freq;
        int err;
 
-       if (dev_priv->rps.cur_freq <= val)
+       if (rps->cur_freq <= val)
                return;
 
        /* The punit delays the write of the frequency and voltage until it
 
 void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        mutex_lock(&dev_priv->pcu_lock);
-       if (dev_priv->rps.enabled) {
+       if (rps->enabled) {
                u8 freq;
 
                if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
                        gen6_rps_reset_ei(dev_priv);
                I915_WRITE(GEN6_PMINTRMSK,
-                          gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+                          gen6_rps_pm_mask(dev_priv, rps->cur_freq));
 
                gen6_enable_rps_interrupts(dev_priv);
 
                /* Use the user's desired frequency as a guide, but for better
                 * performance, jump directly to RPe as our starting frequency.
                 */
-               freq = max(dev_priv->rps.cur_freq,
-                          dev_priv->rps.efficient_freq);
+               freq = max(rps->cur_freq,
+                          rps->efficient_freq);
 
                if (intel_set_rps(dev_priv,
                                  clamp(freq,
-                                       dev_priv->rps.min_freq_softlimit,
-                                       dev_priv->rps.max_freq_softlimit)))
+                                       rps->min_freq_softlimit,
+                                       rps->max_freq_softlimit)))
                        DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
        }
        mutex_unlock(&dev_priv->pcu_lock);
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* Flush our bottom-half so that it does not race with us
         * setting the idle frequency and so that it is bounded by
         * our rpm wakeref. And then disable the interrupts to stop any
        gen6_disable_rps_interrupts(dev_priv);
 
        mutex_lock(&dev_priv->pcu_lock);
-       if (dev_priv->rps.enabled) {
+       if (rps->enabled) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                        vlv_set_rps_idle(dev_priv);
                else
-                       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
-               dev_priv->rps.last_adj = 0;
+                       gen6_set_rps(dev_priv, rps->idle_freq);
+               rps->last_adj = 0;
                I915_WRITE(GEN6_PMINTRMSK,
                           gen6_sanitize_rps_pm_mask(dev_priv, ~0));
        }
 }
 
 void gen6_rps_boost(struct drm_i915_gem_request *rq,
-                   struct intel_rps_client *rps)
+                   struct intel_rps_client *rps_client)
 {
-       struct drm_i915_private *i915 = rq->i915;
+       struct intel_rps *rps = &rq->i915->gt_pm.rps;
        unsigned long flags;
        bool boost;
 
        /* This is intentionally racy! We peek at the state here, then
         * validate inside the RPS worker.
         */
-       if (!i915->rps.enabled)
+       if (!rps->enabled)
                return;
 
        boost = false;
        spin_lock_irqsave(&rq->lock, flags);
        if (!rq->waitboost && !i915_gem_request_completed(rq)) {
-               atomic_inc(&i915->rps.num_waiters);
+               atomic_inc(&rps->num_waiters);
                rq->waitboost = true;
                boost = true;
        }
        if (!boost)
                return;
 
-       if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
-               schedule_work(&i915->rps.work);
+       if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
+               schedule_work(&rps->work);
 
-       atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
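+       /*
+        * Attribute the boost to the requesting client when known;
+        * otherwise it is reported as an anonymous kernel boost.
+        */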
+       atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
 }
 
 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int err;
 
        lockdep_assert_held(&dev_priv->pcu_lock);
-       GEM_BUG_ON(val > dev_priv->rps.max_freq);
-       GEM_BUG_ON(val < dev_priv->rps.min_freq);
+       GEM_BUG_ON(val > rps->max_freq);
+       GEM_BUG_ON(val < rps->min_freq);
 
-       if (!dev_priv->rps.enabled) {
-               dev_priv->rps.cur_freq = val;
+       if (!rps->enabled) {
+               rps->cur_freq = val;
                return 0;
        }
 
 
 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* All of these values are in units of 50MHz */
 
        /* static values from HW: RP0 > RP1 > RPn (min_freq) */
        if (IS_GEN9_LP(dev_priv)) {
                u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
-               dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
-               dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
-               dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
+               rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+               rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
+               rps->min_freq = (rp_state_cap >>  0) & 0xff;
        } else {
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
-               dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
-               dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+               rps->rp0_freq = (rp_state_cap >>  0) & 0xff;
+               rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
+               rps->min_freq = (rp_state_cap >> 16) & 0xff;
        }
        /* hw_max = RP0 until we check for overclocking */
-       dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+       rps->max_freq = rps->rp0_freq;
 
-       dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+       rps->efficient_freq = rps->rp1_freq;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
            IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                u32 ddcc_status = 0;
                if (sandybridge_pcode_read(dev_priv,
                                           HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
                                           &ddcc_status) == 0)
-                       dev_priv->rps.efficient_freq =
+                       rps->efficient_freq =
                                clamp_t(u8,
                                        ((ddcc_status >> 8) & 0xff),
-                                       dev_priv->rps.min_freq,
-                                       dev_priv->rps.max_freq);
+                                       rps->min_freq,
+                                       rps->max_freq);
        }
 
        if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                /* Store the frequency values in 16.66 MHZ units, which is
                 * the natural hardware unit for SKL
                 */
-               dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
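+               /*
+                * e.g. an RP0 of 11 (11 * 50 MHz = 550 MHz) is stored as
+                * 33, GEN9_FREQ_SCALER being 3.
+                */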
+               rps->rp0_freq *= GEN9_FREQ_SCALER;
+               rps->rp1_freq *= GEN9_FREQ_SCALER;
+               rps->min_freq *= GEN9_FREQ_SCALER;
+               rps->max_freq *= GEN9_FREQ_SCALER;
+               rps->efficient_freq *= GEN9_FREQ_SCALER;
        }
 }
 
 static void reset_rps(struct drm_i915_private *dev_priv,
                      int (*set)(struct drm_i915_private *, u8))
 {
-       u8 freq = dev_priv->rps.cur_freq;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       u8 freq = rps->cur_freq;
 
        /* force a reset */
-       dev_priv->rps.power = -1;
-       dev_priv->rps.cur_freq = -1;
+       rps->power = -1;
+       rps->cur_freq = -1;
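+       /*
+        * -1 matches neither a valid power state nor a valid frequency,
+        * so both gen6_set_rps_thresholds() and the cur_freq check in
+        * the set() callback are forced to reprogram the hardware.
+        */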
 
        if (set(dev_priv, freq))
                DRM_ERROR("Failed to reset RPS to initial values\n");
 
        /* Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RC_VIDEO_FREQ,
-               GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+               GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
 
        /* 1 second timeout*/
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
 
 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* 1 Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RPNSWREQ,
-                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+                  HSW_FREQUENCY(rps->rp1_freq));
        I915_WRITE(GEN6_RC_VIDEO_FREQ,
-                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+                  HSW_FREQUENCY(rps->rp1_freq));
        /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
        /* Docs recommend 900MHz, and 300 MHz respectively */
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  dev_priv->rps.max_freq_softlimit << 24 |
-                  dev_priv->rps.min_freq_softlimit << 16);
+                  rps->max_freq_softlimit << 24 |
+                  rps->min_freq_softlimit << 16);
 
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
 
 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int min_freq = 15;
        unsigned int gpu_freq;
        unsigned int max_ia_freq, min_ring_freq;
 
        if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                /* Convert GT frequency to 50 MHz units */
-               min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
-               max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+               min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER;
+               max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER;
        } else {
-               min_gpu_freq = dev_priv->rps.min_freq;
-               max_gpu_freq = dev_priv->rps.max_freq;
+               min_gpu_freq = rps->min_freq;
+               max_gpu_freq = rps->max_freq;
        }
 
        /*
 
 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
 {
-       dev_priv->rps.gpll_ref_freq =
+       dev_priv->gt_pm.rps.gpll_ref_freq =
                vlv_get_cck_clock(dev_priv, "GPLL ref",
                                  CCK_GPLL_CLOCK_CONTROL,
                                  dev_priv->czclk_freq);
 
        DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
-                        dev_priv->rps.gpll_ref_freq);
+                        dev_priv->gt_pm.rps.gpll_ref_freq);
 }
 
 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
 
        valleyview_setup_pctx(dev_priv);
        }
        DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
-       dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
-       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       rps->max_freq = valleyview_rps_max_freq(dev_priv);
+       rps->rp0_freq = rps->max_freq;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-                        dev_priv->rps.max_freq);
+                        intel_gpu_freq(dev_priv, rps->max_freq),
+                        rps->max_freq);
 
-       dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+       rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+                        intel_gpu_freq(dev_priv, rps->efficient_freq),
+                        rps->efficient_freq);
 
-       dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+       rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
        DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
-                        dev_priv->rps.rp1_freq);
+                        intel_gpu_freq(dev_priv, rps->rp1_freq),
+                        rps->rp1_freq);
 
-       dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+       rps->min_freq = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                        dev_priv->rps.min_freq);
+                        intel_gpu_freq(dev_priv, rps->min_freq),
+                        rps->min_freq);
 }
 
 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
 
        cherryview_setup_pctx(dev_priv);
        }
        DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
-       dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
-       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       rps->max_freq = cherryview_rps_max_freq(dev_priv);
+       rps->rp0_freq = rps->max_freq;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-                        dev_priv->rps.max_freq);
+                        intel_gpu_freq(dev_priv, rps->max_freq),
+                        rps->max_freq);
 
-       dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+       rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+                        intel_gpu_freq(dev_priv, rps->efficient_freq),
+                        rps->efficient_freq);
 
-       dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+       rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
        DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
-                        dev_priv->rps.rp1_freq);
+                        intel_gpu_freq(dev_priv, rps->rp1_freq),
+                        rps->rp1_freq);
 
-       dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+       rps->min_freq = cherryview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                        dev_priv->rps.min_freq);
+                        intel_gpu_freq(dev_priv, rps->min_freq),
+                        rps->min_freq);
 
-       WARN_ONCE((dev_priv->rps.max_freq |
-                  dev_priv->rps.efficient_freq |
-                  dev_priv->rps.rp1_freq |
-                  dev_priv->rps.min_freq) & 1,
+       WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+                  rps->min_freq) & 1,
                  "Odd GPU freq values\n");
 }
 
 
        lockdep_assert_held(&mchdev_lock);
 
-       pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
+       pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);
 
 
 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /*
         * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
         * requirement.
                gen6_init_rps_frequencies(dev_priv);
 
        /* Derive initial user preferences/limits from the hardware limits */
-       dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-       dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+       rps->idle_freq = rps->min_freq;
+       rps->cur_freq = rps->idle_freq;
 
-       dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-       dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+       rps->max_freq_softlimit = rps->max_freq;
+       rps->min_freq_softlimit = rps->min_freq;
 
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               dev_priv->rps.min_freq_softlimit =
+               rps->min_freq_softlimit =
                        max_t(int,
-                             dev_priv->rps.efficient_freq,
+                             rps->efficient_freq,
                              intel_freq_opcode(dev_priv, 450));
 
        /* After setting max-softlimit, find the overclock max freq */
                sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
                if (params & BIT(31)) { /* OC supported */
                        DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
-                                        (dev_priv->rps.max_freq & 0xff) * 50,
+                                        (rps->max_freq & 0xff) * 50,
                                         (params & 0xff) * 50);
-                       dev_priv->rps.max_freq = params & 0xff;
+                       rps->max_freq = params & 0xff;
                }
        }
 
        /* Finally allow us to boost to max by default */
-       dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
+       rps->boost_freq = rps->max_freq;
 
        mutex_unlock(&dev_priv->pcu_lock);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        if (INTEL_GEN(dev_priv) < 6)
                return;
 
-       if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
+       if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
                intel_runtime_pm_put(dev_priv);
 
        /* gen6_rps_idle() will be called later to disable interrupts */
 
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       dev_priv->rps.enabled = true; /* force disabling */
+       dev_priv->gt_pm.rps.enabled = true; /* force disabling */
        intel_disable_gt_powersave(dev_priv);
 
        gen6_reset_rps_interrupts(dev_priv);
 
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       if (!READ_ONCE(dev_priv->rps.enabled))
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       if (!READ_ONCE(rps->enabled))
                return;
 
        mutex_lock(&dev_priv->pcu_lock);
                ironlake_disable_drps(dev_priv);
        }
 
-       dev_priv->rps.enabled = false;
+       rps->enabled = false;
        mutex_unlock(&dev_priv->pcu_lock);
 }
 
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* We shouldn't be disabling as we submit, so this should be less
         * racy than it appears!
         */
-       if (READ_ONCE(dev_priv->rps.enabled))
+       if (READ_ONCE(rps->enabled))
                return;
 
        /* Powersaving is controlled by the host when inside a VM */
                intel_init_emon(dev_priv);
        }
 
-       WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
-       WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+       WARN_ON(rps->max_freq < rps->min_freq);
+       WARN_ON(rps->idle_freq > rps->max_freq);
 
-       WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
-       WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+       WARN_ON(rps->efficient_freq < rps->min_freq);
+       WARN_ON(rps->efficient_freq > rps->max_freq);
 
-       dev_priv->rps.enabled = true;
+       rps->enabled = true;
        mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void __intel_autoenable_gt_powersave(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
+               container_of(work,
+                            typeof(*dev_priv),
+                            gt_pm.autoenable_work.work);
        struct intel_engine_cs *rcs;
        struct drm_i915_gem_request *req;
 
-       if (READ_ONCE(dev_priv->rps.enabled))
+       if (READ_ONCE(dev_priv->gt_pm.rps.enabled))
                goto out;
 
        rcs = dev_priv->engine[RCS];
 
 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       if (READ_ONCE(dev_priv->rps.enabled))
+       if (READ_ONCE(dev_priv->gt_pm.rps.enabled))
                return;
 
        if (IS_IRONLAKE_M(dev_priv)) {
                 * runtime resume it's necessary).
                 */
                if (queue_delayed_work(dev_priv->wq,
-                                      &dev_priv->rps.autoenable_work,
+                                      &dev_priv->gt_pm.autoenable_work,
                                       round_jiffies_up_relative(HZ)))
                        intel_runtime_pm_get_noresume(dev_priv);
        }
 
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /*
         * N = val - 0xb7
         * Slow = Fast = GPLL ref * N
         */
-       return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
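+       /*
+        * gpll_ref_freq is tracked in kHz (see vlv_init_gpll_ref_freq()),
+        * so the division by 1000 yields MHz.
+        */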
+       return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
 }
 
 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
 }
 
 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /*
         * N = val / 2
         * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
         */
-       return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
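+       /*
+        * One factor of 2 recovers N from the even-encoded val, the other
+        * halves the 2x fast clock; dividing by 1000 converts kHz to MHz.
+        */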
+       return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
 }
 
 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* CHV needs even values */
-       return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
+       return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
 }
 
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
        mutex_init(&dev_priv->pcu_lock);
 
-       INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
+       INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
                          __intel_autoenable_gt_powersave);
-       atomic_set(&dev_priv->rps.num_waiters, 0);
+       atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
 
        dev_priv->runtime_pm.suspended = false;
        atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);