{
        struct intel_gt *gt = rps_to_gt(rps);
 
+       GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
+                rps->pm_events, rps_pm_mask(rps, rps->last_freq));
+
        rps_reset_ei(rps);
 
        spin_lock_irq(&gt->irq_lock);
        cancel_work_sync(&rps->work);
 
        rps_reset_interrupts(rps);
+       GT_TRACE(gt, "interrupts:off\n");
 }
 
 static const struct cparams {
        if (IS_VALLEYVIEW(i915))
                goto skip_hw_write;
 
+       GT_TRACE(rps_to_gt(rps),
+                "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
+                new_power, threshold_up, ei_up, threshold_down, ei_down);
+
        set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
        set(uncore, GEN6_RP_UP_THRESHOLD,
            GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
 
 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
 {
+       GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));
+
        mutex_lock(&rps->power.mutex);
        if (interactive) {
                if (!rps->power.interactive++ && READ_ONCE(rps->active))
                         GEN6_AGGRESSIVE_TURBO);
        set(uncore, GEN6_RPNSWREQ, swreq);
 
+       GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
+                val, intel_gpu_freq(rps, val), swreq);
+
        return 0;
 }
 
        err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
        vlv_punit_put(i915);
 
+       GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
+                val, intel_gpu_freq(rps, val));
+
        return err;
 }
 
        if (!rps->enabled)
                return;
 
+       GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
+
        /*
         * Use the user's desired frequency as a guide, but for better
         * performance, jump directly to RPe as our starting frequency.
         */
        rps->cur_freq =
                max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
+
+       GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
 }
 
 void intel_rps_boost(struct i915_request *rq)
            !dma_fence_is_signaled_locked(&rq->fence)) {
                set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 
+               GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+                        rq->fence.context, rq->fence.seqno);
+
                if (!atomic_fetch_inc(&rps->num_waiters) &&
                    READ_ONCE(rps->cur_freq) < rps->boost_freq)
                        schedule_work(&rps->work);
 static bool rps_reset(struct intel_rps *rps)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
+
        /* force a reset */
        rps->power.mode = -1;
        rps->last_freq = -1;
        if (!rps->enabled)
                return;
 
-       drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
-       drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
+       GT_TRACE(rps_to_gt(rps),
+                "min:%x, max:%x, freq:[%d, %d]\n",
+                rps->min_freq, rps->max_freq,
+                intel_gpu_freq(rps, rps->min_freq),
+                intel_gpu_freq(rps, rps->max_freq));
 
-       drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
-       drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
+       GEM_BUG_ON(rps->max_freq < rps->min_freq);
+       GEM_BUG_ON(rps->idle_freq > rps->max_freq);
+
+       GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
+       GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
 }
 
 static void gen6_rps_disable(struct intel_rps *rps)
        max = rps->max_freq_softlimit;
        if (client_boost)
                max = rps->max_freq;
+
+       GT_TRACE(gt,
+                "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
+                pm_iir, yesno(client_boost),
+                adj, new_freq, min, max);
+
        if (client_boost && new_freq < rps->boost_freq) {
                new_freq = rps->boost_freq;
                adj = 0;
        if (unlikely(!events))
                return;
 
+       GT_TRACE(gt, "irq events:%x\n", events);
+
        gen6_gt_pm_mask_irq(gt, events);
 
        rps->pm_iir |= events;
        if (events) {
                spin_lock(&gt->irq_lock);
 
+               GT_TRACE(gt, "irq events:%x\n", events);
+
                gen6_gt_pm_mask_irq(gt, events);
                rps->pm_iir |= events;