return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_ring_buffer *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *ring,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
 {
        return ret;
 }
 
-static bool validate_regs_sorted(struct intel_ring_buffer *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *ring)
 {
        return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
                check_sorted(ring->id, ring->master_reg_table,
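(For reference, not part of this patch: both validators above lean on a helper that
walks a table and confirms it is ascending, so later lookups can binary-search it.
A sketch, with the diagnostic message illustrative:)

static bool check_sorted(int ring_id, const u32 *table, int table_count)
{
        int i;
        u32 previous = 0;
        bool ret = true;

        for (i = 0; i < table_count; i++) {
                u32 curr = table[i];

                /* one out-of-order entry disqualifies the whole table */
                if (curr < previous) {
                        DRM_ERROR("CMD: table not sorted ring=%d entry=%d\n",
                                  ring_id, i);
                        ret = false;
                }

                previous = curr;
        }

        return ret;
}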
  */
 #define CMD_HASH_MASK STD_MI_OPCODE_MASK
 
-static int init_hash_table(struct intel_ring_buffer *ring,
+static int init_hash_table(struct intel_engine_cs *ring,
                           const struct drm_i915_cmd_table *cmd_tables,
                           int cmd_table_count)
 {
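(The body elided by this hunk buckets each command descriptor by its masked opcode,
so a lookup only scans descriptors that share the same MI opcode bits. The insertion
step, roughly, using the <linux/hashtable.h> helper:)

        desc_node->desc = desc;
        hash_add(ring->cmd_hash, &desc_node->node,
                 desc->cmd.value & CMD_HASH_MASK);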
        return 0;
 }
 
-static void fini_hash_table(struct intel_ring_buffer *ring)
+static void fini_hash_table(struct intel_engine_cs *ring)
 {
        struct hlist_node *tmp;
        struct cmd_node *desc_node;
  * @ring: the ringbuffer to initialize
  *
  * Optionally initializes fields related to batch buffer command parsing in the
- * struct intel_ring_buffer based on whether the platform requires software
+ * struct intel_engine_cs based on whether the platform requires software
  * command parsing.
  *
  * Return: non-zero if initialization fails
  */
-int i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 {
        const struct drm_i915_cmd_table *cmd_tables;
        int cmd_table_count;
  * Releases any resources related to command parsing that may have been
  * initialized for the specified ring.
  */
-void i915_cmd_parser_fini_ring(struct intel_ring_buffer *ring)
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
 {
        if (!ring->needs_cmd_parser)
                return;
 }
 
 static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_ring_buffer *ring,
+find_cmd_in_table(struct intel_engine_cs *ring,
                  u32 cmd_header)
 {
        struct cmd_node *desc_node;
  * ring's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_ring_buffer *ring,
+find_cmd(struct intel_engine_cs *ring,
         u32 cmd_header,
         struct drm_i915_cmd_descriptor *default_desc)
 {
  *
  * Return: true if the ring requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        return (i915.enable_cmd_parser == 1);
 }
 
-static bool check_cmd(const struct intel_ring_buffer *ring,
+static bool check_cmd(const struct intel_engine_cs *ring,
                      const struct drm_i915_cmd_descriptor *desc,
                      const u32 *cmd,
                      const bool is_master,
  *
  * Return: non-zero if the parser finds violations or otherwise fails
  */
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
                    u32 batch_start_offset,
                    bool is_master)
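(On the caller side, in the execbuffer path, the two functions above pair up roughly
as follows; error handling abbreviated, label hypothetical:)

        if (i915_needs_cmd_parser(ring)) {
                ret = i915_parse_cmds(ring, batch_obj,
                                      batch_start_offset, is_master);
                if (ret)
                        goto err;       /* rejected batches never reach the GPU */
        }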
 
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        struct drm_i915_gem_request *gem_request;
        int ret, count, i;
 
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-                                struct intel_ring_buffer *ring)
+                                struct intel_engine_cs *ring)
 {
        if (ring->get_seqno) {
                seq_printf(m, "Current sequence (%s): %u\n",
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int ret, i;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int ret, i, pipe;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        const u32 *hws;
        int i;
 
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        struct i915_hw_context *ctx;
        int ret, i;
 
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int unused, i;
 
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        struct drm_file *file;
        int i;
 
 
 static void i915_free_hws(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+       struct intel_engine_cs *ring = LP_RING(dev_priv);
 
        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
-       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+       struct intel_engine_cs *ring = LP_RING(dev_priv);
 
        /*
         * We should never lose context on the ring with modesetting
 static int i915_dma_resume(struct drm_device * dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+       struct intel_engine_cs *ring = LP_RING(dev_priv);
 
        DRM_DEBUG_DRIVER("%s\n", __func__);
 
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
-       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+       struct intel_engine_cs *ring = LP_RING(dev_priv);
 
        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;
 
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct drm_i915_gem_object *obj,
-                         struct intel_ring_buffer *ring,
+                         struct intel_engine_cs *ring,
                          uint32_t flags);
        void (*update_primary_plane)(struct drm_crtc *crtc,
                                     struct drm_framebuffer *fb,
        bool is_initialized;
        uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
-       struct intel_ring_buffer *last_ring;
+       struct intel_engine_cs *last_ring;
        struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;
        struct i915_address_space *vm;
        wait_queue_head_t gmbus_wait_queue;
 
        struct pci_dev *bridge_dev;
-       struct intel_ring_buffer ring[I915_NUM_RINGS];
+       struct intel_engine_cs ring[I915_NUM_RINGS];
        uint32_t last_seqno, next_seqno;
 
        drm_dma_handle_t *status_page_dmah;
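(Every for_each_ring() loop touched by this patch walks this array; the iterator, as
i915_drv.h defined it around this time -- a sketch -- skips engines that were never
initialized:)

#define for_each_ring(ring__, dev_priv__, i__) \
        for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
                if (((ring__) = &(dev_priv__)->ring[(i__)]), \
                    intel_ring_initialized((ring__)))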
        void *dma_buf_vmapping;
        int vmapping_count;
 
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
 
        /** Breadcrumb of last rendering to the buffer. */
        uint32_t last_read_seqno;
  */
 struct drm_i915_gem_request {
        /** On Which ring this request was generated */
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
 
        /** GEM sequence number associated with this request. */
        uint32_t seqno;
 
        struct i915_hw_context *private_default_ctx;
        atomic_t rps_wait_boost;
-       struct  intel_ring_buffer *bsd_ring;
+       struct  intel_engine_cs *bsd_ring;
 };
 
 /*
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                        struct intel_ring_buffer *to);
+                        struct intel_engine_cs *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_ring_buffer *ring);
+                            struct intel_engine_cs *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring);
+i915_gem_find_active_request(struct intel_engine_cs *ring);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
                       struct drm_i915_gem_object *batch_obj,
                       u32 *seqno);
 #define i915_add_request(ring, seqno) \
        __i915_add_request(ring, NULL, NULL, seqno)
-int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
                                 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
-                                    struct intel_ring_buffer *pipelined);
+                                    struct intel_engine_cs *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_ring_buffer *ring,
+int i915_switch_context(struct intel_engine_cs *ring,
                        struct i915_hw_context *to);
 struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
                                   struct drm_file *file);
 
 /* i915_gem_render_state.c */
-int i915_gem_render_state_init(struct intel_ring_buffer *ring);
+int i915_gem_render_state_init(struct intel_engine_cs *ring);
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
                                          struct i915_address_space *vm,
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
-void i915_cmd_parser_fini_ring(struct intel_ring_buffer *ring);
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
                    u32 batch_start_offset,
                    bool is_master);
 
  * equal.
  */
 static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
 {
        int ret;
 
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_ring_buffer *ring)
+                      struct intel_engine_cs *ring)
 {
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
  * Returns 0 if the seqno was found within the allotted time. Otherwise returns
  * the errno, with the remaining time filled in via the timeout argument.
  */
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        unsigned reset_counter,
                        bool interruptible,
                        struct timespec *timeout,
  * request and object lists appropriately for that event.
  */
 int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
 static int
 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-                                    struct intel_ring_buffer *ring)
+                                    struct intel_engine_cs *ring)
 {
        if (!obj->active)
                return 0;
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
 {
-       struct intel_ring_buffer *ring = obj->ring;
+       struct intel_engine_cs *ring = obj->ring;
        u32 seqno;
        int ret;
 
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = obj->ring;
+       struct intel_engine_cs *ring = obj->ring;
        unsigned reset_counter;
        u32 seqno;
        int ret;
 
 static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                              struct intel_ring_buffer *ring)
+                              struct intel_engine_cs *ring)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_ring_buffer *ring)
+                            struct intel_engine_cs *ring)
 {
        list_move_tail(&vma->mm_list, &vma->vm->active_list);
        return i915_gem_object_move_to_active(vma->obj, ring);
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj)
 {
-       struct intel_ring_buffer *ring = obj->ring;
+       struct intel_engine_cs *ring = obj->ring;
 
        if (ring == NULL)
                return;
 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int ret, i, j;
 
        /* Carefully retire all requests without writing to the rings */
        return 0;
 }
 
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
                       struct drm_i915_gem_object *obj,
                       u32 *out_seqno)
 }
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring)
+i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_request *request;
        u32 completed_seqno;
 }
 
 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-                                      struct intel_ring_buffer *ring)
+                                      struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_request *request;
        bool ring_hung;
 }
 
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-                                       struct intel_ring_buffer *ring)
+                                       struct intel_engine_cs *ring)
 {
        while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj;
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
 
        /*
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
        uint32_t seqno;
 
 i915_gem_retire_requests(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        bool idle = true;
        int i;
 
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
-       struct intel_ring_buffer *ring = NULL;
+       struct intel_engine_cs *ring = NULL;
        struct timespec timeout_stack, *timeout = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct intel_ring_buffer *to)
+                    struct intel_engine_cs *to)
 {
-       struct intel_ring_buffer *from = obj->ring;
+       struct intel_engine_cs *from = obj->ring;
        u32 seqno;
        int ret, idx;
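(In outline, the elided body -- a sketch; the semaphore members had just been moved
into ring->semaphore around this time: same-ring or CPU-only access needs no sync,
otherwise the waiter ring blocks on the signaller's seqno:)

        if (from == NULL || to == from)
                return 0;

        if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
                return i915_gem_object_wait_rendering(obj, false);

        idx = intel_ring_sync_index(from, to);

        seqno = obj->last_read_seqno;
        if (seqno <= from->semaphore.sync_seqno[idx])
                return 0;       /* already synced to this or a later seqno */

        ret = i915_gem_check_olr(obj->ring, seqno);
        if (ret)
                return ret;

        ret = to->semaphore.sync_to(to, from, seqno);
        if (!ret)
                from->semaphore.sync_seqno[idx] = seqno;

        return ret;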
 
 int i915_gpu_idle(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int ret, i;
 
        /* Flush everything onto the inactive list. */
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
-                                    struct intel_ring_buffer *pipelined)
+                                    struct intel_engine_cs *pipelined)
 {
        u32 old_read_domains, old_write_domain;
        bool was_pin_display;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
        struct drm_i915_gem_request *request;
-       struct intel_ring_buffer *ring = NULL;
+       struct intel_engine_cs *ring = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
        int ret;
 i915_gem_stop_ringbuffers(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
 
        for_each_ring(ring, dev_priv, i)
        return ret;
 }
 
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
 
        for_each_ring(ring, dev_priv, i)
 }
 
 static void
-init_ring_lists(struct intel_ring_buffer *ring)
+init_ring_lists(struct intel_engine_cs *ring)
 {
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
 
        /* Prevent the hardware from restoring the last context (which hung) on
         * the next switch */
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_ring_buffer *ring = &dev_priv->ring[i];
+               struct intel_engine_cs *ring = &dev_priv->ring[i];
                struct i915_hw_context *dctx = ring->default_context;
 
                /* Do a fake switch to the default context */
        }
 
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_ring_buffer *ring = &dev_priv->ring[i];
+               struct intel_engine_cs *ring = &dev_priv->ring[i];
 
                if (ring->last_context)
                        i915_gem_context_unreference(ring->last_context);
 
 int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 {
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int ret, i;
 
        /* This is the only place the aliasing PPGTT gets enabled, which means
 }
 
 static inline int
-mi_set_context(struct intel_ring_buffer *ring,
+mi_set_context(struct intel_engine_cs *ring,
               struct i915_hw_context *new_context,
               u32 hw_flags)
 {
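(The heart of the elided body is the MI_SET_CONTEXT emission; roughly, with the
surrounding workaround flushes omitted:)

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
        intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
                        hw_flags);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return ret;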
        return ret;
 }
 
-static int do_switch(struct intel_ring_buffer *ring,
+static int do_switch(struct intel_engine_cs *ring,
                     struct i915_hw_context *to)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
  * it will have a refcount > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
  */
-int i915_switch_context(struct intel_ring_buffer *ring,
+int i915_switch_context(struct intel_engine_cs *ring,
                        struct i915_hw_context *to)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-                               struct intel_ring_buffer *ring,
+                               struct intel_engine_cs *ring,
                                bool *need_reloc)
 {
        struct drm_i915_gem_object *obj = vma->obj;
 }
 
 static int
-i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                            struct list_head *vmas,
                            bool *need_relocs)
 {
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
-                                 struct intel_ring_buffer *ring,
+                                 struct intel_engine_cs *ring,
                                  struct eb_vmas *eb,
                                  struct drm_i915_gem_exec_object2 *exec)
 {
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
                                struct list_head *vmas)
 {
        struct i915_vma *vma;
 
 static struct i915_hw_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-                         struct intel_ring_buffer *ring, const u32 ctx_id)
+                         struct intel_engine_cs *ring, const u32 ctx_id)
 {
        struct i915_hw_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-                                  struct intel_ring_buffer *ring)
+                                  struct intel_engine_cs *ring)
 {
        struct i915_vma *vma;
 
 static void
 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
-                                   struct intel_ring_buffer *ring,
+                                   struct intel_engine_cs *ring,
                                    struct drm_i915_gem_object *obj)
 {
        /* Unconditionally force add_request to emit a full flush. */
 
 static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
-                           struct intel_ring_buffer *ring)
+                           struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret, i;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        struct i915_hw_context *ctx;
        struct i915_address_space *vm;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
+static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
                           uint64_t val, bool synchronous)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 }
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_ring_buffer *ring,
+                         struct intel_engine_cs *ring,
                          bool synchronous)
 {
        int i, ret;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                        struct intel_ring_buffer *ring,
+                        struct intel_engine_cs *ring,
                         bool synchronous)
 {
        struct drm_device *dev = ppgtt->base.dev;
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_ring_buffer *ring,
+                         struct intel_engine_cs *ring,
                          bool synchronous)
 {
        struct drm_device *dev = ppgtt->base.dev;
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_ring_buffer *ring,
+                         struct intel_engine_cs *ring,
                          bool synchronous)
 {
        struct drm_device *dev = ppgtt->base.dev;
 {
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int j, ret;
 
        for_each_ring(ring, dev_priv, j) {
 {
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        uint32_t ecochk, ecobits;
        int i;
 
 {
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        uint32_t ecochk, gab_ctl, ecobits;
        int i;
 
 void i915_check_and_clear_faults(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
 
        if (INTEL_INFO(dev)->gen < 6)
 
 
        int (*enable)(struct i915_hw_ppgtt *ppgtt);
        int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-                        struct intel_ring_buffer *ring,
+                        struct intel_engine_cs *ring,
                         bool synchronous);
        void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
        return 0;
 }
 
-int i915_gem_render_state_init(struct intel_ring_buffer *ring)
+int i915_gem_render_state_init(struct intel_engine_cs *ring)
 {
        const int gen = INTEL_INFO(ring->dev)->gen;
        struct i915_render_state *so;
 
 }
 
 static void i915_record_ring_state(struct drm_device *dev,
-                                  struct intel_ring_buffer *ring,
+                                  struct intel_engine_cs *ring,
                                   struct drm_i915_error_ring *ering)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
                                           struct drm_i915_error_state *error,
                                           struct drm_i915_error_ring *ering)
 {
        int i, count;
 
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_ring_buffer *ring = &dev_priv->ring[i];
+               struct intel_engine_cs *ring = &dev_priv->ring[i];
 
                if (ring->dev == NULL)
                        continue;
 
 }
 
 static void notify_ring(struct drm_device *dev,
-                       struct intel_ring_buffer *ring)
+                       struct intel_engine_cs *ring)
 {
        if (ring->obj == NULL)
                return;
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
                               bool reset_completed)
 {
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
 
        /*
 }
 
 static u32
-ring_last_seqno(struct intel_ring_buffer *ring)
+ring_last_seqno(struct intel_engine_cs *ring)
 {
        return list_entry(ring->request_list.prev,
                          struct drm_i915_gem_request, list)->seqno;
 }
 
 static bool
-ring_idle(struct intel_ring_buffer *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
        return (list_empty(&ring->request_list) ||
                i915_seqno_passed(seqno, ring_last_seqno(ring)));
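(For reference: ring_idle() relies on the driver's wraparound-safe seqno comparison
from i915_drv.h:)

static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        /* signed difference makes the test tolerant of u32 wraparound */
        return (int32_t)(seq1 - seq2) >= 0;
}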
        }
 }
 
-static struct intel_ring_buffer *
-semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct intel_ring_buffer *signaller;
+       struct intel_engine_cs *signaller;
        int i;
 
        if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
        return NULL;
 }
 
-static struct intel_ring_buffer *
-semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 cmd, ipehr, head;
        return semaphore_wait_to_signaller_ring(ring, ipehr);
 }
 
-static int semaphore_passed(struct intel_ring_buffer *ring)
+static int semaphore_passed(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct intel_ring_buffer *signaller;
+       struct intel_engine_cs *signaller;
        u32 seqno, ctl;
 
        ring->hangcheck.deadlock = true;
 
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
 
        for_each_ring(ring, dev_priv, i)
 }
 
 static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 {
        struct drm_device *dev = (struct drm_device *)data;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
        int busy_count = 0, rings_hung = 0;
        bool stuck[I915_NUM_RINGS] = { 0 };
 
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-           TP_PROTO(struct intel_ring_buffer *from,
-                    struct intel_ring_buffer *to,
+           TP_PROTO(struct intel_engine_cs *from,
+                    struct intel_engine_cs *to,
                     u32 seqno),
            TP_ARGS(from, to, seqno),
 
 );
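(The event bodies are untouched by this rename; for orientation, i915_gem_ring_sync_to
records roughly the following -- a sketch of its TP_STRUCT__entry/TP_fast_assign pair:)

            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, sync_from)
                             __field(u32, sync_to)
                             __field(u32, seqno)
                             ),

            TP_fast_assign(
                           __entry->dev = from->dev->primary->index;
                           __entry->sync_from = from->id;
                           __entry->sync_to = to->id;
                           __entry->seqno = seqno;
                           ),

            TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
                      __entry->dev, __entry->sync_from,
                      __entry->sync_to, __entry->seqno)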
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+           TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
            TP_ARGS(ring, seqno, flags),
 
            TP_STRUCT__entry(
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+           TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
            TP_ARGS(ring, invalidate, flush),
 
            TP_STRUCT__entry(
 );
 
 DECLARE_EVENT_CLASS(i915_gem_request,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
            TP_ARGS(ring, seqno),
 
            TP_STRUCT__entry(
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
            TP_ARGS(ring, seqno)
 );
 
 TRACE_EVENT(i915_gem_request_complete,
-           TP_PROTO(struct intel_ring_buffer *ring),
+           TP_PROTO(struct intel_engine_cs *ring),
            TP_ARGS(ring),
 
            TP_STRUCT__entry(
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
            TP_ARGS(ring, seqno)
 );
 
 TRACE_EVENT(i915_gem_request_wait_begin,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
            TP_ARGS(ring, seqno),
 
            TP_STRUCT__entry(
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
            TP_ARGS(ring, seqno)
 );
 
 DECLARE_EVENT_CLASS(i915_ring,
-           TP_PROTO(struct intel_ring_buffer *ring),
+           TP_PROTO(struct intel_engine_cs *ring),
            TP_ARGS(ring),
 
            TP_STRUCT__entry(
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-           TP_PROTO(struct intel_ring_buffer *ring),
+           TP_PROTO(struct intel_engine_cs *ring),
            TP_ARGS(ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-           TP_PROTO(struct intel_ring_buffer *ring),
+           TP_PROTO(struct intel_engine_cs *ring),
            TP_ARGS(ring)
 );
 
 
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
                           struct drm_i915_gem_object *obj,
-                          struct intel_ring_buffer *pipelined)
+                          struct intel_engine_cs *pipelined)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 alignment;
 }
 
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-                       struct intel_ring_buffer *ring)
+                       struct intel_engine_cs *ring)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_crtc *crtc;
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_ring_buffer *ring,
+                                struct intel_engine_cs *ring,
                                 uint32_t flags)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_ring_buffer *ring,
+                                struct intel_engine_cs *ring,
                                 uint32_t flags)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_ring_buffer *ring,
+                                struct intel_engine_cs *ring,
                                 uint32_t flags)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_ring_buffer *ring,
+                                struct intel_engine_cs *ring,
                                 uint32_t flags)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj,
-                                struct intel_ring_buffer *ring,
+                                struct intel_engine_cs *ring,
                                 uint32_t flags)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj,
-                                   struct intel_ring_buffer *ring,
+                                   struct intel_engine_cs *ring,
                                    uint32_t flags)
 {
        return -ENODEV;
        struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        unsigned long flags;
        int ret;
 
 
 int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-                       struct intel_ring_buffer *ring);
+                       struct intel_engine_cs *ring);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
                                    struct intel_load_detect_pipe *old);
 int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                               struct drm_i915_gem_object *obj,
-                              struct intel_ring_buffer *pipelined);
+                              struct intel_engine_cs *pipelined);
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 
 {
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        int ret;
 
        BUG_ON(overlay->last_flip_req);
 {
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        int ret;
 
        BUG_ON(overlay->active);
 {
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        u32 flip_addr = overlay->flip_addr;
        u32 tmp;
        int ret;
 {
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        u32 flip_addr = overlay->flip_addr;
        int ret;
 
 {
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        int ret;
 
        if (overlay->last_flip_req == 0)
 {
        struct drm_device *dev = overlay->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        int ret;
 
        /* Only wait if there is actually an old frame to release to
 
 static void gen8_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        uint32_t rc6_mask = 0, rp_state_cap;
        int unused;
 
 static void gen6_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        u32 rp_state_cap;
        u32 gt_perf_status;
        u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
 static void valleyview_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        u32 gtfifodbg, val, rc6_mode = 0;
        int i;
 
 static void ironlake_enable_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        bool was_interruptible;
        int ret;
 
 bool i915_gpu_busy(void)
 {
        struct drm_i915_private *dev_priv;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        bool ret = false;
        int i;
 
 
        return space;
 }
 
-static inline int ring_space(struct intel_ring_buffer *ring)
+static inline int ring_space(struct intel_engine_cs *ring)
 {
        return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
 }
 
-static bool intel_ring_stopped(struct intel_ring_buffer *ring)
+static bool intel_ring_stopped(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
 }
 
-void __intel_ring_advance(struct intel_ring_buffer *ring)
+void __intel_ring_advance(struct intel_engine_cs *ring)
 {
        ring->tail &= ring->size - 1;
        if (intel_ring_stopped(ring))
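(For reference, the __ring_space() helper wrapped by ring_space() above computes the
free bytes between tail and head, modulo the power-of-two ring size; approximately:)

static inline int __ring_space(int head, int tail, int size)
{
        /* keep some slack so a full ring is never mistaken for an empty one */
        int space = head - (tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += size;
        return space;
}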
 }
 
 static int
-gen2_render_ring_flush(struct intel_ring_buffer *ring,
+gen2_render_ring_flush(struct intel_engine_cs *ring,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
 }
 
 static int
-gen4_render_ring_flush(struct intel_ring_buffer *ring,
+gen4_render_ring_flush(struct intel_engine_cs *ring,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
 {
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 }
 
 static int
-gen6_render_ring_flush(struct intel_ring_buffer *ring,
+gen6_render_ring_flush(struct intel_engine_cs *ring,
                          u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
 {
        int ret;
 
        return 0;
 }
 
-static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
+static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
 {
        int ret;
 
 }
 
 static int
-gen7_render_ring_flush(struct intel_ring_buffer *ring,
+gen7_render_ring_flush(struct intel_engine_cs *ring,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
 }
 
 static int
-gen8_render_ring_flush(struct intel_ring_buffer *ring,
+gen8_render_ring_flush(struct intel_engine_cs *ring,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
 
 }
 
-static void ring_write_tail(struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_engine_cs *ring,
                            u32 value)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u64 acthd;
        return acthd;
 }
 
-static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 addr;
        I915_WRITE(HWS_PGA, addr);
 }
 
-static bool stop_ring(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
        return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
 }
 
-static int init_ring_common(struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static int
-init_pipe_control(struct intel_ring_buffer *ring)
+init_pipe_control(struct intel_engine_cs *ring)
 {
        int ret;
 
        return ret;
 }
 
-static int init_render_ring(struct intel_ring_buffer *ring)
+static int init_render_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        return ret;
 }
 
-static void render_ring_cleanup(struct intel_ring_buffer *ring)
+static void render_ring_cleanup(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
 
        ring->scratch.obj = NULL;
 }
 
-static int gen6_signal(struct intel_ring_buffer *signaller,
+static int gen6_signal(struct intel_engine_cs *signaller,
                       unsigned int num_dwords)
 {
        struct drm_device *dev = signaller->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *useless;
+       struct intel_engine_cs *useless;
        int i, ret;
 
        /* NB: In order to be able to do semaphore MBOX updates for varying
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_ring_buffer *ring)
+gen6_add_request(struct intel_engine_cs *ring)
 {
        int ret;
 
  * @seqno - seqno which the waiter will block on
  */
 static int
-gen6_ring_sync(struct intel_ring_buffer *waiter,
-              struct intel_ring_buffer *signaller,
+gen6_ring_sync(struct intel_engine_cs *waiter,
+              struct intel_engine_cs *signaller,
               u32 seqno)
 {
        u32 dw1 = MI_SEMAPHORE_MBOX |
 } while (0)
 
 static int
-pc_render_add_request(struct intel_ring_buffer *ring)
+pc_render_add_request(struct intel_engine_cs *ring)
 {
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 }
 
 static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
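(The comment is cut off by the hunk; the body it describes does, roughly, a posted
read of ACTHD to flush the seqno write before sampling the status page:)

        if (!lazy_coherency) {
                struct drm_i915_private *dev_priv = ring->dev->dev_private;
                POSTING_READ(RING_ACTHD(ring->mmio_base));
        }

        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);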
 }
 
 static u32
-ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static void
-ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
 static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
        return ring->scratch.cpu_page[0];
 }
 
 static void
-pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
        ring->scratch.cpu_page[0] = seqno;
 }
 
 static bool
-gen5_ring_get_irq(struct intel_ring_buffer *ring)
+gen5_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static void
-gen5_ring_put_irq(struct intel_ring_buffer *ring)
+gen5_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static bool
-i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static void
-i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static bool
-i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static void
-i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 }
 
 static int
-bsd_ring_flush(struct intel_ring_buffer *ring,
+bsd_ring_flush(struct intel_engine_cs *ring,
               u32     invalidate_domains,
               u32     flush_domains)
 {
 }
 
 static int
-i9xx_add_request(struct intel_ring_buffer *ring)
+i9xx_add_request(struct intel_engine_cs *ring)
 {
        int ret;
 
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring)
+gen6_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring)
+gen6_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static bool
-hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static void
-hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static bool
-gen8_ring_get_irq(struct intel_ring_buffer *ring)
+gen8_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static void
-gen8_ring_put_irq(struct intel_ring_buffer *ring)
+gen8_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i965_dispatch_execbuffer(struct intel_engine_cs *ring,
                         u64 offset, u32 length,
                         unsigned flags)
 {
 /* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
 #define I830_BATCH_LIMIT (256*1024)
 static int
-i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                                u64 offset, u32 len,
                                unsigned flags)
 {
 }
 
 static int
-i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_engine_cs *ring,
                         u64 offset, u32 len,
                         unsigned flags)
 {
        return 0;
 }
 
-static void cleanup_status_page(struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_object *obj;
 
        ring->status_page.obj = NULL;
 }
 
-static int init_status_page(struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_object *obj;
 
        return 0;
 }
 
-static int init_phys_status_page(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        return 0;
 }
 
-static int allocate_ring_buffer(struct intel_ring_buffer *ring)
+static int allocate_ring_buffer(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
-                                 struct intel_ring_buffer *ring)
+                                 struct intel_engine_cs *ring)
 {
        int ret;
 
        return ring->init(ring);
 }
 
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
        i915_cmd_parser_fini_ring(ring);
 }
 
-static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 {
        struct drm_i915_gem_request *request;
        u32 seqno = 0;
        return 0;
 }
 
-static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        return ret;
 }
 
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
        uint32_t __iomem *virt;
        int rem = ring->size - ring->tail;
        return 0;
 }
 
-int intel_ring_idle(struct intel_ring_buffer *ring)
+int intel_ring_idle(struct intel_engine_cs *ring)
 {
        u32 seqno;
        int ret;
 }
 
 static int
-intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+intel_ring_alloc_seqno(struct intel_engine_cs *ring)
 {
        if (ring->outstanding_lazy_seqno)
                return 0;
        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
-static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+static int __intel_ring_prepare(struct intel_engine_cs *ring,
                                int bytes)
 {
        int ret;
        return 0;
 }
 
-int intel_ring_begin(struct intel_ring_buffer *ring,
+int intel_ring_begin(struct intel_engine_cs *ring,
                     int num_dwords)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 }
 
 /* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
        int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
        return 0;
 }
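 
 /*
  * The alignment itself amounts to padding the ring with no-ops up to the
  * next cacheline boundary, using the usual begin/emit/advance pattern. A
  * sketch of the elided padding loop (the early-out when the tail is
  * already aligned is not shown):
  *
  *	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
  *	ret = intel_ring_begin(ring, num_dwords);
  *	if (ret)
  *		return ret;
  *	while (num_dwords--)
  *		intel_ring_emit(ring, MI_NOOP);
  *	intel_ring_advance(ring);
  */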
 
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        ring->hangcheck.seqno = seqno;
 }
 
-static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
                                     u32 value)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
                   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
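 
 /*
  * GEN6_BSD_SLEEP_PSMI_CONTROL is a masked register: the high 16 bits of a
  * write select which of the low 16 bits actually take effect. The helpers
  * used above come from i915_reg.h:
  *
  *	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
  *	#define _MASKED_BIT_DISABLE(a)	((a) << 16)
  *
  * so _MASKED_BIT_DISABLE() writes the mask with the value bit cleared,
  * re-enabling the BSD ring's idle messaging after the tail update.
  */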
 
-static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
                               u32 invalidate, u32 flush)
 {
        uint32_t cmd;
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                              u64 offset, u32 len,
                              unsigned flags)
 {
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                              u64 offset, u32 len,
                              unsigned flags)
 {
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                              u64 offset, u32 len,
                              unsigned flags)
 {
 }
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_engine_cs *ring,
                           u32 invalidate, u32 flush)
 {
        struct drm_device *dev = ring->dev;
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 
        ring->name = "render ring";
        ring->id = RCS;
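 
 /*
  * The remainder of this init function (elided here) is also where the
  * per-generation dispatch hooks above are selected; roughly (a sketch,
  * with the full gen ladder abridged):
  *
  *	if (IS_HASWELL(dev))
  *		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
  *	else if (INTEL_INFO(dev)->gen >= 8)
  *		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
  *	else if (INTEL_INFO(dev)->gen >= 6)
  *		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
  *	else if (IS_I830(dev) || IS_845G(dev))
  *		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
  *	else
  *		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
  */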
 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        int ret;
 
        ring->name = "render ring";
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[VCS];
 
        ring->name = "bsd ring";
        ring->id = VCS;
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[VCS2];
+       struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
 	if (INTEL_INFO(dev)->gen != 8) {
                DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[BCS];
 
        ring->name = "blitter ring";
        ring->id = BCS;
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+       struct intel_engine_cs *ring = &dev_priv->ring[VECS];
 
        ring->name = "video enhancement ring";
        ring->id = VECS;
 }
 
 int
-intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+intel_ring_flush_all_caches(struct intel_engine_cs *ring)
 {
        int ret;
 
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
 {
        uint32_t flush_domains;
        int ret;
 }
 
 void
-intel_stop_ring_buffer(struct intel_ring_buffer *ring)
+intel_stop_ring_buffer(struct intel_engine_cs *ring)
 {
        int ret;
 
 
        bool deadlock;
 };
 
-struct  intel_ring_buffer {
+struct intel_engine_cs {
        const char      *name;
        enum intel_ring_id {
                RCS = 0x0,
        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
        u32             trace_irq_seqno;
-       bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
-       void            (*irq_put)(struct intel_ring_buffer *ring);
+       bool __must_check (*irq_get)(struct intel_engine_cs *ring);
+       void            (*irq_put)(struct intel_engine_cs *ring);
 
-       int             (*init)(struct intel_ring_buffer *ring);
+       int             (*init)(struct intel_engine_cs *ring);
 
-       void            (*write_tail)(struct intel_ring_buffer *ring,
+       void            (*write_tail)(struct intel_engine_cs *ring,
                                      u32 value);
-       int __must_check (*flush)(struct intel_ring_buffer *ring,
+       int __must_check (*flush)(struct intel_engine_cs *ring,
                                  u32   invalidate_domains,
                                  u32   flush_domains);
-       int             (*add_request)(struct intel_ring_buffer *ring);
+       int             (*add_request)(struct intel_engine_cs *ring);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
 	 * seen value is good enough. Note that the seqno will always be
 	 * monotonic, even if not coherent. (See the get_seqno sketch after
 	 * this struct definition.)
 	 */
-       u32             (*get_seqno)(struct intel_ring_buffer *ring,
+       u32             (*get_seqno)(struct intel_engine_cs *ring,
                                     bool lazy_coherency);
-       void            (*set_seqno)(struct intel_ring_buffer *ring,
+       void            (*set_seqno)(struct intel_engine_cs *ring,
                                     u32 seqno);
-       int             (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+       int             (*dispatch_execbuffer)(struct intel_engine_cs *ring,
                                               u64 offset, u32 length,
                                               unsigned flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
-       void            (*cleanup)(struct intel_ring_buffer *ring);
+       void            (*cleanup)(struct intel_engine_cs *ring);
 
        struct {
                u32     sync_seqno[I915_NUM_RINGS-1];
                } mbox;
 
                /* AKA wait() */
-               int     (*sync_to)(struct intel_ring_buffer *ring,
-                                  struct intel_ring_buffer *to,
+               int     (*sync_to)(struct intel_engine_cs *ring,
+                                  struct intel_engine_cs *to,
                                   u32 seqno);
-               int     (*signal)(struct intel_ring_buffer *signaller,
+               int     (*signal)(struct intel_engine_cs *signaller,
                                  /* num_dwords needed by caller */
                                  unsigned int num_dwords);
        } semaphore;
 };
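 
 /*
  * As the comment in the struct notes, get_seqno()'s lazy_coherency flag
  * decides whether the "expensive kick" is issued. A sketch of the gen6+
  * callback, where the kick is a posted mmio read that flushes the
  * hardware's seqno write before the status page is sampled:
  *
  *	static u32
  *	gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
  *	{
  *		if (!lazy_coherency) {
  *			struct drm_i915_private *dev_priv =
  *				ring->dev->dev_private;
  *			POSTING_READ(RING_ACTHD(ring->mmio_base));
  *		}
  *		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
  *	}
  */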
 
 static inline bool
-intel_ring_initialized(struct intel_ring_buffer *ring)
+intel_ring_initialized(struct intel_engine_cs *ring)
 {
        return ring->obj != NULL;
 }
 
 static inline unsigned
-intel_ring_flag(struct intel_ring_buffer *ring)
+intel_ring_flag(struct intel_engine_cs *ring)
 {
        return 1 << ring->id;
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_ring_buffer *ring,
-                     struct intel_ring_buffer *other)
+intel_ring_sync_index(struct intel_engine_cs *ring,
+                     struct intel_engine_cs *other)
 {
        int idx;
 
 }
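 
 /*
  * The elided index math maps "other" onto its slot in this ring's
  * semaphore mailbox arrays, derived from the two rings' positions in
  * dev_priv->ring[]. A sketch of the computation:
  *
  *	idx = (other - ring) - 1;
  *	if (idx < 0)
  *		idx += I915_NUM_RINGS;
  *	return idx;
  */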
 
 static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
+intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
 {
 	/* Ensure that the compiler doesn't optimize away the load. */
 	barrier();
 	return ring->status_page.page_addr[reg];
 }
 
 static inline void
-intel_write_status_page(struct intel_ring_buffer *ring,
+intel_write_status_page(struct intel_engine_cs *ring,
                        int reg, u32 value)
 {
        ring->status_page.page_addr[reg] = value;
 #define I915_GEM_HWS_SCRATCH_INDEX     0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
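 
 /*
  * The status-page helpers above back the seqno callbacks; e.g. the default
  * set_seqno implementation, used to force a known seqno after reset, is
  * essentially (sketch):
  *
  *	static void ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
  *	{
  *		intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
  *	}
  *
  * where I915_GEM_HWS_INDEX (0x20) is the hardware status dword that
  * add_request targets.
  */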
 
-void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+void intel_stop_ring_buffer(struct intel_engine_cs *ring);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
-int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
-static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
 {
        iowrite32(data, ring->virtual_start + ring->tail);
        ring->tail += 4;
 }
-static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
        ring->tail &= ring->size - 1;
 }
-void __intel_ring_advance(struct intel_ring_buffer *ring);
+void __intel_ring_advance(struct intel_engine_cs *ring);
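 
 /*
  * The canonical emission sequence built from these helpers (sketch; two
  * no-ops as a stand-in for real commands):
  *
  *	ret = intel_ring_begin(ring, 2);
  *	if (ret)
  *		return ret;
  *	intel_ring_emit(ring, MI_NOOP);
  *	intel_ring_emit(ring, MI_NOOP);
  *	intel_ring_advance(ring);
  */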
 
-int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
-int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_engine_cs *ring);
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
+int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
-static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
 {
        return ring->tail;
 }
 
-static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
 {
        BUG_ON(ring->outstanding_lazy_seqno == 0);
        return ring->outstanding_lazy_seqno;
 }
 
-static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
 {
        if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
                ring->trace_irq_seqno = seqno;