* has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                     \
-       if (LP_RING(dev->dev_private)->obj == NULL)                     \
+       if (LP_RING(dev->dev_private)->buffer->obj == NULL)             \
                LOCK_TEST_WITH_RETURN(dev, file);                       \
 } while (0)
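For orientation (not part of the patch): this macro guards the legacy DRI1 ioctl paths, returning -EINVAL unless the caller holds the hardware lock, but only once a ring buffer object actually exists. A hedged sketch of a caller follows; the function name is made up, though i915_flush_ioctl() in i915_dma.c is a real user of the macro.

/* Illustrative caller only: every name below is hypothetical except the
 * macro itself. Legacy ioctls bail out when KMS is in use and then verify
 * the hardware lock before touching the ring.
 */
static int i915_example_legacy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        /* ... poke the legacy (LP) ring here ... */
        return 0;
}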
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
-       ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-       ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-       ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
-       if (ring->space < 0)
-               ring->space += ring->size;
+       ring->buffer->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+       ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+       ring->buffer->space = ring->buffer->head -
+                             (ring->buffer->tail + I915_RING_FREE_SPACE);
+       if (ring->buffer->space < 0)
+               ring->buffer->space += ring->buffer->size;
 
        if (!dev->primary->master)
                return;
 
        master_priv = dev->primary->master->driver_priv;
-       if (ring->head == ring->tail && master_priv->sarea_priv)
+       if (ring->buffer->head == ring->buffer->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
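The rewritten lines above keep the driver's usual free-space rule: the space available to the CPU is whatever lies between head and tail, less an I915_RING_FREE_SPACE safety margin, wrapping by the ring size when the subtraction goes negative. A standalone restatement of that rule (mirroring the open-coded version above and __ring_space() further down; the helper name is illustrative):

/* Sketch of the ring free-space rule, as open-coded in
 * i915_kernel_lost_context() above and used by __ring_space(): keep
 * I915_RING_FREE_SPACE bytes of slack so the tail never catches up with
 * the head, and wrap modulo the ring size.
 */
static int example_ring_free_space(u32 head, u32 tail, int size)
{
        int space = head - (tail + I915_RING_FREE_SPACE);

        if (space < 0)
                space += size;
        return space;
}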
 
        }
 
        if (init->ring_size != 0) {
-               if (LP_RING(dev_priv)->obj != NULL) {
+               if (LP_RING(dev_priv)->buffer->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
 
        DRM_DEBUG_DRIVER("%s\n", __func__);
 
-       if (ring->virtual_start == NULL) {
+       if (ring->buffer->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i, ret;
 
-       if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+       if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
                return -EINVAL;
 
        for (i = 0; i < dwords;) {
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;
 
-       if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+       if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
 
 
 static inline int ring_space(struct intel_engine_cs *ring)
 {
-       return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
+       return __ring_space(ring->buffer->head & HEAD_ADDR,
+                           ring->buffer->tail, ring->buffer->size);
 }
 
 static bool intel_ring_stopped(struct intel_engine_cs *ring)
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
-       ring->tail &= ring->size - 1;
+       ring->buffer->tail &= ring->buffer->size - 1;
        if (intel_ring_stopped(ring))
                return;
-       ring->write_tail(ring, ring->tail);
+       ring->write_tail(ring, ring->buffer->tail);
 }
 
 static int
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj = ring->obj;
+       struct drm_i915_gem_object *obj = ring->buffer->obj;
        int ret = 0;
 
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
         * register values. */
        I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
-                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+                       ((ring->buffer->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
-               ring->head = I915_READ_HEAD(ring);
-               ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-               ring->space = ring_space(ring);
-               ring->last_retired_head = -1;
+               ring->buffer->head = I915_READ_HEAD(ring);
+               ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+               ring->buffer->space = ring_space(ring);
+               ring->buffer->last_retired_head = -1;
        }
 
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
        struct drm_i915_gem_object *obj;
        int ret;
 
-       if (ring->obj)
+       if (ring->buffer->obj)
                return 0;
 
        obj = NULL;
        if (!HAS_LLC(dev))
-               obj = i915_gem_object_create_stolen(dev, ring->size);
+               obj = i915_gem_object_create_stolen(dev, ring->buffer->size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, ring->size);
+               obj = i915_gem_alloc_object(dev, ring->buffer->size);
        if (obj == NULL)
                return -ENOMEM;
 
        if (ret)
                goto err_unpin;
 
-       ring->virtual_start =
+       ring->buffer->virtual_start =
                ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-                          ring->size);
-       if (ring->virtual_start == NULL) {
+                          ring->buffer->size);
+       if (ring->buffer->virtual_start == NULL) {
                ret = -EINVAL;
                goto err_unpin;
        }
 
-       ring->obj = obj;
+       ring->buffer->obj = obj;
        return 0;
 
 err_unpin:
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
-       ring->size = 32 * PAGE_SIZE;
+       ring->buffer->size = 32 * PAGE_SIZE;
        memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
        init_waitqueue_head(&ring->irq_queue);
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
-       ring->effective_size = ring->size;
+       ring->buffer->effective_size = ring->buffer->size;
        if (IS_I830(dev) || IS_845G(dev))
-               ring->effective_size -= 2 * CACHELINE_BYTES;
+               ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
        ret = i915_cmd_parser_init_ring(ring);
        if (ret)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-       if (ring->obj == NULL)
+       if (ring->buffer->obj == NULL)
                return;
 
        intel_stop_ring_buffer(ring);
        WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-       iounmap(ring->virtual_start);
+       iounmap(ring->buffer->virtual_start);
 
-       i915_gem_object_ggtt_unpin(ring->obj);
-       drm_gem_object_unreference(&ring->obj->base);
-       ring->obj = NULL;
+       i915_gem_object_ggtt_unpin(ring->buffer->obj);
+       drm_gem_object_unreference(&ring->buffer->obj->base);
+       ring->buffer->obj = NULL;
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;
 
        u32 seqno = 0;
        int ret;
 
-       if (ring->last_retired_head != -1) {
-               ring->head = ring->last_retired_head;
-               ring->last_retired_head = -1;
+       if (ring->buffer->last_retired_head != -1) {
+               ring->buffer->head = ring->buffer->last_retired_head;
+               ring->buffer->last_retired_head = -1;
 
-               ring->space = ring_space(ring);
-               if (ring->space >= n)
+               ring->buffer->space = ring_space(ring);
+               if (ring->buffer->space >= n)
                        return 0;
        }
 
        list_for_each_entry(request, &ring->request_list, list) {
-               if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
+               if (__ring_space(request->tail, ring->buffer->tail,
+                                ring->buffer->size) >= n) {
                        seqno = request->seqno;
                        break;
                }
                return ret;
 
        i915_gem_retire_requests_ring(ring);
-       ring->head = ring->last_retired_head;
-       ring->last_retired_head = -1;
+       ring->buffer->head = ring->buffer->last_retired_head;
+       ring->buffer->last_retired_head = -1;
 
-       ring->space = ring_space(ring);
+       ring->buffer->space = ring_space(ring);
        return 0;
 }
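As a reminder of how last_retired_head (now living on the ringbuffer) gets filled in: the real update is not in this file, so the following is only a hedged sketch of the idea behind the fast path above. When a request is retired, the ring position recorded at its tail is known to have been consumed by the GPU and becomes a safe new head; the wait path reads it back and resets it to -1.

/* Sketch only; the actual bookkeeping lives in
 * i915_gem_retire_requests_ring(). The helper name is hypothetical.
 */
static void example_note_retired_request(struct intel_engine_cs *ring,
                                         struct drm_i915_gem_request *request)
{
        /* The GPU has finished this request, so its recorded tail is a
         * position the head may safely be advanced to.
         */
        ring->buffer->last_retired_head = request->tail;
}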
 
 
        trace_i915_ring_wait_begin(ring);
        do {
-               ring->head = I915_READ_HEAD(ring);
-               ring->space = ring_space(ring);
-               if (ring->space >= n) {
+               ring->buffer->head = I915_READ_HEAD(ring);
+               ring->buffer->space = ring_space(ring);
+               if (ring->buffer->space >= n) {
                        ret = 0;
                        break;
                }
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
        uint32_t __iomem *virt;
-       int rem = ring->size - ring->tail;
+       int rem = ring->buffer->size - ring->buffer->tail;
 
-       if (ring->space < rem) {
+       if (ring->buffer->space < rem) {
                int ret = ring_wait_for_space(ring, rem);
                if (ret)
                        return ret;
        }
 
-       virt = ring->virtual_start + ring->tail;
+       virt = ring->buffer->virtual_start + ring->buffer->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);
 
-       ring->tail = 0;
-       ring->space = ring_space(ring);
+       ring->buffer->tail = 0;
+       ring->buffer->space = ring_space(ring);
 
        return 0;
 }
 {
        int ret;
 
-       if (unlikely(ring->tail + bytes > ring->effective_size)) {
+       if (unlikely(ring->buffer->tail + bytes > ring->buffer->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
                        return ret;
        }
 
-       if (unlikely(ring->space < bytes)) {
+       if (unlikely(ring->buffer->space < bytes)) {
                ret = ring_wait_for_space(ring, bytes);
                if (unlikely(ret))
                        return ret;
        if (ret)
                return ret;
 
-       ring->space -= num_dwords * sizeof(uint32_t);
+       ring->buffer->space -= num_dwords * sizeof(uint32_t);
        return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
-       int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+       int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
 
        if (num_dwords == 0)
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
 
-       ring->size = size;
-       ring->effective_size = ring->size;
+       ring->buffer->size = size;
+       ring->buffer->effective_size = ring->buffer->size;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
-               ring->effective_size -= 2 * CACHELINE_BYTES;
+               ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
-       ring->virtual_start = ioremap_wc(start, size);
-       if (ring->virtual_start == NULL) {
+       ring->buffer->virtual_start = ioremap_wc(start, size);
+       if (ring->buffer->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                ret = -ENOMEM;
        return 0;
 
 err_vstart:
-       iounmap(ring->virtual_start);
+       iounmap(ring->buffer->virtual_start);
 err_ringbuf:
        kfree(ringbuf);
        ring->buffer = NULL;
 
 #define I915_NUM_RINGS 5
 #define LAST_USER_RING (VECS + 1)
        u32             mmio_base;
-       void            __iomem *virtual_start;
        struct          drm_device *dev;
-       struct          drm_i915_gem_object *obj;
        struct intel_ringbuffer *buffer;
 
-       u32             head;
-       u32             tail;
-       int             space;
-       int             size;
-       int             effective_size;
        struct intel_hw_status_page status_page;
 
-       /** We track the position of the requests in the ring buffer, and
-        * when each is retired we increment last_retired_head as the GPU
-        * must have finished processing the request and so we know we
-        * can advance the ringbuffer up to that position.
-        *
-        * last_retired_head is set to -1 after the value is consumed so
-        * we can detect new retirements.
-        */
-       u32             last_retired_head;
-
        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
        u32             trace_irq_seqno;
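The members deleted from struct intel_engine_cs above are not dropped; they migrate into struct intel_ringbuffer, reached through the engine's buffer pointer. A sketch of the receiving structure, assuming the removed fields and their comment carry over unchanged (the real struct may hold additional members):

/* Sketch of struct intel_ringbuffer after this patch, assuming the members
 * removed from intel_engine_cs move across verbatim.
 */
struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        void __iomem *virtual_start;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};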
 static inline bool
 intel_ring_initialized(struct intel_engine_cs *ring)
 {
-       return ring->buffer && ring->obj;
+       return ring->buffer && ring->buffer->obj;
 }
 
 static inline unsigned
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
 {
-       iowrite32(data, ring->virtual_start + ring->tail);
-       ring->tail += 4;
+       iowrite32(data, ring->buffer->virtual_start + ring->buffer->tail);
+       ring->buffer->tail += 4;
 }
 static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
-       ring->tail &= ring->size - 1;
+       ring->buffer->tail &= ring->buffer->size - 1;
 }
 void __intel_ring_advance(struct intel_engine_cs *ring);
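Command emission is unaffected by the field relocation because callers only go through these helpers. A hedged, illustrative sequence (the function name and the choice of MI_NOOP are arbitrary): intel_ring_begin() reserves space, intel_ring_emit() writes dwords at buffer->tail, and intel_ring_advance() merely masks the software tail; the hardware tail register is only written later via __intel_ring_advance().

/* Illustrative emitter, not taken from this patch. */
static int example_emit_two_noops(struct intel_engine_cs *ring)
{
        int ret;

        ret = intel_ring_begin(ring, 2);        /* wait/wrap until 2 dwords fit */
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_NOOP);         /* write at buffer->tail */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);               /* mask the software tail */

        return 0;
}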
 
 
 static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
 {
-       return ring->tail;
+       return ring->buffer->tail;
 }
 
 static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)