max_freq * GT_FREQUENCY_MULTIPLIER);
 
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
-                          dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+                          dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
        } else if (IS_VALLEYVIEW(dev)) {
                u32 freq_sts, val;
 
 
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-       for (gpu_freq = dev_priv->rps.min_delay;
-            gpu_freq <= dev_priv->rps.max_delay;
+       for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+            gpu_freq <= dev_priv->rps.max_freq_softlimit;
             gpu_freq++) {
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        else
-               *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+               *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
                do_div(val, GT_FREQUENCY_MULTIPLIER);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq;
                hw_min = (rp_state_cap >> 16) & 0xff;
        }
 
-       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.max_delay = val;
+       dev_priv->rps.max_freq_softlimit = val;
 
        if (IS_VALLEYVIEW(dev))
                valleyview_set_rps(dev, val);
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        else
-               *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+               *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
                do_div(val, GT_FREQUENCY_MULTIPLIER);
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq;
                hw_min = (rp_state_cap >> 16) & 0xff;
        }
 
-       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_delay = val;
+       dev_priv->rps.min_freq_softlimit = val;
 
        if (IS_VALLEYVIEW(dev))
                valleyview_set_rps(dev, val);
 
                             u32 pm_iir, int new_delay)
 {
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               if (new_delay >= dev_priv->rps.max_delay) {
+               if (new_delay >= dev_priv->rps.max_freq_softlimit) {
                        /* Mask UP THRESHOLD Interrupts */
                        I915_WRITE(GEN6_PMINTRMSK,
                                   I915_READ(GEN6_PMINTRMSK) |
                        dev_priv->rps.rp_down_masked = false;
                }
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
-               if (new_delay <= dev_priv->rps.min_delay) {
+               if (new_delay <= dev_priv->rps.min_freq_softlimit) {
                        /* Mask DOWN THRESHOLD Interrupts */
                        I915_WRITE(GEN6_PMINTRMSK,
                                   I915_READ(GEN6_PMINTRMSK) |
                        adj *= 2;
                else
                        adj = 1;
-               new_delay = dev_priv->rps.cur_delay + adj;
+               new_delay = dev_priv->rps.cur_freq + adj;
 
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
-               if (new_delay < dev_priv->rps.rpe_delay)
-                       new_delay = dev_priv->rps.rpe_delay;
+               if (new_delay < dev_priv->rps.efficient_freq)
+                       new_delay = dev_priv->rps.efficient_freq;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-               if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-                       new_delay = dev_priv->rps.rpe_delay;
+               if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
+                       new_delay = dev_priv->rps.efficient_freq;
                else
-                       new_delay = dev_priv->rps.min_delay;
+                       new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
-               new_delay = dev_priv->rps.cur_delay + adj;
+               new_delay = dev_priv->rps.cur_freq + adj;
        } else { /* unknown event */
-               new_delay = dev_priv->rps.cur_delay;
+               new_delay = dev_priv->rps.cur_freq;
        }
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay = clamp_t(int, new_delay,
-                           dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+                           dev_priv->rps.min_freq_softlimit,
+                           dev_priv->rps.max_freq_softlimit);
 
        gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
-       dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+       dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
 
        if (IS_VALLEYVIEW(dev_priv->dev))
                valleyview_set_rps(dev_priv->dev, new_delay);
 
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
        } else {
-               ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+               ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
-                       vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
+                       vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
-               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        else
-               ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+               ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
                val /= GT_FREQUENCY_MULTIPLIER;
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq;
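+               /* max_freq includes any pcode overclock; non_oc_max does not */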
                non_oc_max = (rp_state_cap & 0xff);
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }
 
        if (val < hw_min || val > hw_max ||
-           val < dev_priv->rps.min_delay) {
+           val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
                DRM_DEBUG("User requested overclocking to %d\n",
                          val * GT_FREQUENCY_MULTIPLIER);
 
-       dev_priv->rps.max_delay = val;
+       dev_priv->rps.max_freq_softlimit = val;
 
-       if (dev_priv->rps.cur_delay > val) {
+       if (dev_priv->rps.cur_freq > val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);
                else
                /* We still need gen6_set_rps to process the new max_delay and
                 * update the interrupt limits even though frequency request is
                 * unchanged. */
-               gen6_set_rps(dev, dev_priv->rps.cur_delay);
+               gen6_set_rps(dev, dev_priv->rps.cur_freq);
        }
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
-               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        else
-               ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+               ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
                val /= GT_FREQUENCY_MULTIPLIER;
 
                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               hw_max = dev_priv->rps.hw_max;
+               hw_max = dev_priv->rps.max_freq;
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }
 
-       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_delay = val;
+       dev_priv->rps.min_freq_softlimit = val;
 
-       if (dev_priv->rps.cur_delay < val) {
+       if (dev_priv->rps.cur_freq < val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);
                else
                /* We still need gen6_set_rps to process the new min_delay and
                 * update the interrupt limits even though frequency request is
                 * unchanged. */
-               gen6_set_rps(dev, dev_priv->rps.cur_delay);
+               gen6_set_rps(dev, dev_priv->rps.cur_freq);
        }
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
         * the hw runs at the minimal clock before selecting the desired
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
-       limits = dev_priv->rps.max_delay << 24;
-       if (val <= dev_priv->rps.min_delay)
-               limits |= dev_priv->rps.min_delay << 16;
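+       /* max limit goes in bits 31:24, min limit in bits 23:16 */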
+       limits = dev_priv->rps.max_freq_softlimit << 24;
+       if (val <= dev_priv->rps.min_freq_softlimit)
+               limits |= dev_priv->rps.min_freq_softlimit << 16;
 
        return limits;
 }
        new_power = dev_priv->rps.power;
        switch (dev_priv->rps.power) {
        case LOW_POWER:
-               if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+               if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
 
        case BETWEEN:
-               if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+               if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
                        new_power = LOW_POWER;
-               else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+               else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
                        new_power = HIGH_POWER;
                break;
 
        case HIGH_POWER:
-               if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
        }
        /* Max/min bins are special */
-       if (val == dev_priv->rps.min_delay)
+       if (val == dev_priv->rps.min_freq_softlimit)
                new_power = LOW_POWER;
-       if (val == dev_priv->rps.max_delay)
+       if (val == dev_priv->rps.max_freq_softlimit)
                new_power = HIGH_POWER;
        if (new_power == dev_priv->rps.power)
                return;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-       WARN_ON(val > dev_priv->rps.max_delay);
-       WARN_ON(val < dev_priv->rps.min_delay);
+       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-       if (val == dev_priv->rps.cur_delay) {
+       if (val == dev_priv->rps.cur_freq) {
                /* min/max delay may still have been modified so be sure to
                 * write the limits value */
                I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 
        POSTING_READ(GEN6_RPNSWREQ);
 
-       dev_priv->rps.cur_delay = val;
+       dev_priv->rps.cur_freq = val;
 
        trace_intel_gpu_freq_change(val * 50);
 }
         * When we are idle.  Drop to min voltage state.
         */
 
-       if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+       if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
                return;
 
        /* Mask turbo interrupt so that they will not come in between */
                return;
        }
 
-       dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+       dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
        vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
-                                       dev_priv->rps.min_delay);
+                                       dev_priv->rps.min_freq_softlimit);
 
        if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
                                & GENFREQSTATUS) == 0, 5))
        /* Unmask Up interrupts */
        dev_priv->rps.rp_up_masked = true;
        gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
-                                               dev_priv->rps.min_delay);
+                                               dev_priv->rps.min_freq_softlimit);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
                if (IS_VALLEYVIEW(dev))
                        vlv_set_rps_idle(dev_priv);
                else
-                       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
                dev_priv->rps.last_adj = 0;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
                if (IS_VALLEYVIEW(dev))
-                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
                else
-                       gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
                dev_priv->rps.last_adj = 0;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-       WARN_ON(val > dev_priv->rps.max_delay);
-       WARN_ON(val < dev_priv->rps.min_delay);
+       WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+       WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
        DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-                        dev_priv->rps.cur_delay,
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq,
                         vlv_gpu_freq(dev_priv, val), val);
 
-       if (val == dev_priv->rps.cur_delay)
+       if (val == dev_priv->rps.cur_freq)
                return;
 
        vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
-       dev_priv->rps.cur_delay = val;
+       dev_priv->rps.cur_freq = val;
 
        trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
 
        /* Docs recommend 900MHz, and 300 MHz respectively */
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  dev_priv->rps.max_delay << 24 |
-                  dev_priv->rps.min_delay << 16);
+                  dev_priv->rps.max_freq_softlimit << 24 |
+                  dev_priv->rps.min_freq_softlimit << 16);
 
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
-       /* In units of 50MHz */
-       dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+       /* All of these values are in units of 50MHz */
+       dev_priv->rps.cur_freq = 0;
+       /* hw_max = RP0 until we check for overclocking */
+       dev_priv->rps.max_freq = hw_max = rp_state_cap & 0xff;
+       /* static values from HW: RP0 > RP1 > RPn (min_freq) */
+       dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+       dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
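+       /* no separate RPe in RP_STATE_CAP; use RP1 as the efficient freq */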
+       dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
        dev_priv->rps.min_freq = hw_min = (rp_state_cap >> 16) & 0xff;
-       dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
-       dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
-       dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
-       dev_priv->rps.cur_delay = 0;
 
        /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_delay == 0)
-               dev_priv->rps.max_delay = hw_max;
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = hw_max;
 
-       if (dev_priv->rps.min_delay == 0)
-               dev_priv->rps.min_delay = hw_min;
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = hw_min;
 
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
        ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
        if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
                DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-                                (dev_priv->rps.max_delay & 0xff) * 50,
+                                (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
                                 (pcu_mbox & 0xff) * 50);
-               dev_priv->rps.hw_max = pcu_mbox & 0xff;
+               dev_priv->rps.max_freq = pcu_mbox & 0xff;
        }
 
        dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
        gen6_enable_rps_interrupts(dev);
 
         * to use for memory access.  We do this by specifying the IA frequency
         * the PCU should use as a reference to determine the ring frequency.
         */
-       for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+       for (gpu_freq = dev_priv->rps.max_freq_softlimit;
+            gpu_freq >= dev_priv->rps.min_freq_softlimit;
             gpu_freq--) {
-               int diff = dev_priv->rps.max_delay - gpu_freq;
+               int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
                unsigned int ia_freq = 0, ring_freq = 0;
 
                if (INTEL_INFO(dev)->gen >= 8) {
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-       dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
        DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-                        dev_priv->rps.cur_delay);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq);
 
-       dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
+       dev_priv->rps.max_freq = hw_max = valleyview_rps_max_freq(dev_priv);
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, hw_max),
                         hw_max);
 
-       dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+       dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-                        dev_priv->rps.rpe_delay);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
 
        hw_min = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
                         hw_min);
 
        /* Preserve min/max settings in case of re-init */
-       if (dev_priv->rps.max_delay == 0)
-               dev_priv->rps.max_delay = hw_max;
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = hw_max;
 
-       if (dev_priv->rps.min_delay == 0)
-               dev_priv->rps.min_delay = hw_min;
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = hw_min;
 
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-                        dev_priv->rps.rpe_delay);
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
 
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
        dev_priv->rps.rp_up_masked = false;
        dev_priv->rps.rp_down_masked = false;
 
        assert_spin_locked(&mchdev_lock);
 
-       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);