struct drm_device *dev = error_priv->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error = error_priv->error;
+       struct intel_ring_buffer *ring;
        int i, j, page, offset, elt;
 
        if (!error) {
                return 0;
        }
 
-
        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
                seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }
 
-       i915_ring_error_state(m, dev, error, RCS);
-       if (HAS_BLT(dev))
-               i915_ring_error_state(m, dev, error, BCS);
-       if (HAS_BSD(dev))
-               i915_ring_error_state(m, dev, error, VCS);
+       for_each_ring(ring, dev_priv, i)
+               i915_ring_error_state(m, dev, error, i);
 
        if (error->active_bo)
                print_error_buffers(m, "Active",
 
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->mm.suspended) {
+               struct intel_ring_buffer *ring;
+               int i;
+
                dev_priv->mm.suspended = 0;
 
                i915_gem_init_swizzling(dev);
 
-               dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
-               if (HAS_BSD(dev))
-                   dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
-               if (HAS_BLT(dev))
-                   dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+               for_each_ring(ring, dev_priv, i)
+                       ring->init(ring);
 
                i915_gem_init_ppgtt(dev);
 
 
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
        struct timer_list hangcheck_timer;
        int hangcheck_count;
-       uint32_t last_acthd;
-       uint32_t last_acthd_bsd;
-       uint32_t last_acthd_blt;
+       uint32_t last_acthd[I915_NUM_RINGS];
        uint32_t last_instdone;
        uint32_t last_instdone1;
 
        struct drm_property *force_audio_property;
 } drm_i915_private_t;
 
+/*
+ * Iterate over initialised rings only: each pass binds ring__ to
+ * &dev_priv__->ring[i__] and the body runs iff intel_ring_initialized()
+ * reports the ring's backing object has been allocated.
+ *
+ * NOTE: expands to a for-loop whose body is an unbraced if — beware the
+ * dangling-else hazard if this macro is itself the body of an outer
+ * unbraced if/else.
+ */
+#define for_each_ring(ring__, dev_priv__, i__) \
+       for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+               if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
 enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
        HDMI_AUDIO_OFF,                 /* force turn off HDMI audio */
 
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
+       struct intel_ring_buffer *ring;
        int i;
 
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
+       for_each_ring(ring, dev_priv, i)
+               i915_gem_reset_ring_lists(dev_priv, ring);
 
        /* Remove anything from the flushing lists. The GPU cache is likely
         * to be lost on reset along with the data, so simply move the
 i915_gem_retire_requests(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
        int i;
 
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+       for_each_ring(ring, dev_priv, i)
+               i915_gem_retire_requests_ring(ring);
 }
 
 static void
 {
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;
+       struct intel_ring_buffer *ring;
        bool idle;
        int i;
 
         * objects indefinitely.
         */
        idle = true;
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
+       for_each_ring(ring, dev_priv, i) {
                if (!list_empty(&ring->gpu_write_list)) {
                        struct drm_i915_gem_request *request;
                        int ret;
 int i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
        int ret, i;
 
        /* Flush everything onto the inactive list. */
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               ret = i915_ring_idle(&dev_priv->ring[i]);
+       for_each_ring(ring, dev_priv, i) {
+               ret = i915_ring_idle(ring);
                if (ret)
                        return ret;
+
+               /* Is the device fubar? */
+               if (WARN_ON(!list_empty(&ring->gpu_write_list)))
+                       return -EBUSY;
        }
 
        return 0;
                /* GFX_MODE is per-ring on gen7+ */
        }
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               ring = &dev_priv->ring[i];
-
+       for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 7)
                        I915_WRITE(RING_MODE_GEN7(ring),
                                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
        int i;
 
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+       for_each_ring(ring, dev_priv, i)
+               intel_cleanup_ring_buffer(ring);
 }
 
 int
                       struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret, i;
+       int ret;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
        BUG_ON(!list_empty(&dev_priv->mm.active_list));
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
-               BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
-       }
        mutex_unlock(&dev->struct_mutex);
 
        ret = drm_irq_install(dev);
 
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
        bool lists_empty;
-       int ret,i;
+       int ret;
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
 
        trace_i915_gem_evict_everything(dev, purgeable_only);
 
-       ret = i915_gpu_idle(dev);
-       if (ret)
-               return ret;
-
        /* The gpu_idle will flush everything in the write domain to the
         * active list. Then we must move everything off the active list
         * with retire requests.
         */
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list)))
-                       return -EBUSY;
+       ret = i915_gpu_idle(dev);
+       if (ret)
+               return ret;
 
        i915_gem_retire_requests(dev);
 
                }
        }
 
-       return ret;
+       return 0;
 }
 
                                  struct drm_i915_error_state *error)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
        struct drm_i915_gem_request *request;
        int i, count;
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
-               if (ring->obj == NULL)
-                       continue;
-
+       for_each_ring(ring, dev_priv, i) {
                i915_record_ring_state(dev, error, ring);
 
                error->ring[i].batchbuffer =
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
 
        i915_capture_error_state(dev);
        i915_report_and_clear_eir(dev);
                /*
                 * Wakeup waiting processes so they don't hang
                 */
-               wake_up_all(&dev_priv->ring[RCS].irq_queue);
-               if (HAS_BSD(dev))
-                       wake_up_all(&dev_priv->ring[VCS].irq_queue);
-               if (HAS_BLT(dev))
-                       wake_up_all(&dev_priv->ring[BCS].irq_queue);
+               for_each_ring(ring, dev_priv, i)
+                       wake_up_all(&ring->irq_queue);
        }
 
        queue_work(dev_priv->wq, &dev_priv->error_work);
 
 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
 {
-       /* We don't check whether the ring even exists before calling this
-        * function. Hence check whether it's initialized. */
-       if (ring->obj == NULL)
-               return true;
-
        if (list_empty(&ring->request_list) ||
            i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
                /* Issue a wake-up to catch stuck h/w. */
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (dev_priv->hangcheck_count++ > 1) {
+               bool hung = true;
+
                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
                i915_handle_error(dev, true);
 
                if (!IS_GEN2(dev)) {
+                       struct intel_ring_buffer *ring;
+                       int i;
+
                        /* Is the chip hanging on a WAIT_FOR_EVENT?
                         * If so we can simply poke the RB_WAIT bit
                         * and break the hang. This should work on
                         * all but the second generation chipsets.
                         */
-                       if (kick_ring(&dev_priv->ring[RCS]))
-                               return false;
-
-                       if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
-                               return false;
-
-                       if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
-                               return false;
+                       for_each_ring(ring, dev_priv, i)
+                               hung &= !kick_ring(ring);
                }
 
-               return true;
+               return hung;
        }
 
        return false;
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
-       bool err = false;
+       uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+       struct intel_ring_buffer *ring;
+       bool err = false, idle;
+       int i;
 
        if (!i915_enable_hangcheck)
                return;
 
+       memset(acthd, 0, sizeof(acthd));
+       idle = true;
+       /* Fold per-ring idleness and sample ACTHD for all active rings. */
+       for_each_ring(ring, dev_priv, i) {
+               idle &= i915_hangcheck_ring_idle(ring, &err);
+               acthd[i] = intel_ring_get_active_head(ring);
+       }
+
        /* If all work is done then ACTHD clearly hasn't advanced. */
-       if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
-           i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
-           i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+       if (idle) {
                if (err) {
                        if (i915_hangcheck_hung(dev))
                                return;
                instdone = I915_READ(INSTDONE_I965);
                instdone1 = I915_READ(INSTDONE1);
        }
-       acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
-       acthd_bsd = HAS_BSD(dev) ?
-               intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
-       acthd_blt = HAS_BLT(dev) ?
-               intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
 
-       if (dev_priv->last_acthd == acthd &&
-           dev_priv->last_acthd_bsd == acthd_bsd &&
-           dev_priv->last_acthd_blt == acthd_blt &&
+       if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
            dev_priv->last_instdone == instdone &&
            dev_priv->last_instdone1 == instdone1) {
                if (i915_hangcheck_hung(dev))
        } else {
                dev_priv->hangcheck_count = 0;
 
-               dev_priv->last_acthd = acthd;
-               dev_priv->last_acthd_bsd = acthd_bsd;
-               dev_priv->last_acthd_blt = acthd_blt;
+               memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
                dev_priv->last_instdone = instdone;
                dev_priv->last_instdone1 = instdone1;
        }
 
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
+       struct intel_ring_buffer *ring;
        u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
        u32 pcu_mbox, rc6_mask = 0;
        I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
        I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
        I915_WRITE(GEN6_RC_SLEEP, 0);
        I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
 
        void *private;
 };
 
+/*
+ * A ring counts as initialised once its backing buffer object has been
+ * allocated (ring->obj is set during ring init, cleared on cleanup).
+ */
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+       return ring->obj != NULL;
+}
+
 static inline unsigned
 intel_ring_flag(struct intel_ring_buffer *ring)
 {