struct intel_guc guc;
        struct i915_guc_client client = {};
        struct intel_engine_cs *engine;
-       enum intel_ring_id i;
+       enum intel_engine_id i;
        u64 total = 0;
 
        if (!HAS_GUC_SCHED(dev_priv->dev))
 
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
-               value = intel_ring_initialized(&dev_priv->engine[VCS]);
+               value = intel_engine_initialized(&dev_priv->engine[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
-               value = intel_ring_initialized(&dev_priv->engine[BCS]);
+               value = intel_engine_initialized(&dev_priv->engine[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
-               value = intel_ring_initialized(&dev_priv->engine[VECS]);
+               value = intel_engine_initialized(&dev_priv->engine[VECS]);
                break;
        case I915_PARAM_HAS_BSD2:
-               value = intel_ring_initialized(&dev_priv->engine[VCS2]);
+               value = intel_engine_initialized(&dev_priv->engine[VCS2]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
 
 cleanup_gem:
        mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_ringbuffer(dev);
+       i915_gem_cleanup_engines(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
 
        intel_guc_ucode_fini(dev);
        mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_ringbuffer(dev);
+       i915_gem_cleanup_engines(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        intel_fbc_cleanup_cfb(dev_priv);
 
                int (*execbuf_submit)(struct i915_execbuffer_params *params,
                                      struct drm_i915_gem_execbuffer2 *args,
                                      struct list_head *vmas);
-               int (*init_rings)(struct drm_device *dev);
-               void (*cleanup_ring)(struct intel_engine_cs *ring);
-               void (*stop_ring)(struct intel_engine_cs *ring);
+               int (*init_engines)(struct drm_device *dev);
+               void (*cleanup_engine)(struct intel_engine_cs *engine);
+               void (*stop_engine)(struct intel_engine_cs *engine);
        } gt;
 
        struct intel_context *kernel_context;
-/* Iterate over initialised rings */
+/* Iterate over initialised engines */
 #define for_each_engine(ring__, dev_priv__, i__) \
        for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
-               for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_ring_initialized((ring__))))
+               for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((ring__))))
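
/*
 * Editor's sketch (illustrative, not part of this patch): the macro
 * yields only the engines the platform actually has, so callers can
 * walk the array without checking each slot themselves:
 *
 *      struct intel_engine_cs *engine;
 *      int i;
 *
 *      for_each_engine(engine, dev_priv, i)
 *              DRM_DEBUG("%s is initialised\n", engine->name);
 */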
 
 enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
        struct drm_mm_node *stolen;
        struct list_head global_list;
 
-       struct list_head ring_list[I915_NUM_ENGINES];
+       struct list_head engine_list[I915_NUM_ENGINES];
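        /*
         * Editor's note: one engine_list node per engine means an
         * object can sit on several engines' active_list at once;
         * the node is indexed by engine->id when the object is moved
         * to or retired from an engine (see the hunks below).
         */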
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
 
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_rings(struct drm_device *dev);
+int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
 
                drm_gem_object_reference(&obj->base);
        obj->active |= intel_engine_flag(engine);
 
-       list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
+       list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
        i915_gem_request_assign(&obj->last_read_req[engine->id], req);
 
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
        RQ_BUG_ON(obj->last_read_req[ring] == NULL);
        RQ_BUG_ON(!(obj->active & (1 << ring)));
 
-       list_del_init(&obj->ring_list[ring]);
+       list_del_init(&obj->engine_list[ring]);
        i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
        if (obj->last_write_req && obj->last_write_req->engine->id == ring)
 
                obj = list_first_entry(&engine->active_list,
                                       struct drm_i915_gem_object,
-                                      ring_list[engine->id]);
+                                      engine_list[engine->id]);
 
                i915_gem_object_retire__read(obj, engine->id);
        }
 
                obj = list_first_entry(&engine->active_list,
                                       struct drm_i915_gem_object,
-                                      ring_list[engine->id]);
+                                      engine_list[engine->id]);
 
                if (!list_empty(&obj->last_read_req[engine->id]->list))
                        break;
 
        INIT_LIST_HEAD(&obj->global_list);
        for (i = 0; i < I915_NUM_ENGINES; i++)
-               INIT_LIST_HEAD(&obj->ring_list[i]);
+               INIT_LIST_HEAD(&obj->engine_list[i]);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
 }
 
 static void
-i915_gem_stop_ringbuffers(struct drm_device *dev)
+i915_gem_stop_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int i;
 
        for_each_engine(engine, dev_priv, i)
-               dev_priv->gt.stop_ring(engine);
+               dev_priv->gt.stop_engine(engine);
 }
 
 int
 
        i915_gem_retire_requests(dev);
 
-       i915_gem_stop_ringbuffers(dev);
+       i915_gem_stop_engines(dev);
        mutex_unlock(&dev->struct_mutex);
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        }
 }
 
-int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        return 0;
 
 cleanup_vebox_ring:
-       intel_cleanup_ring_buffer(&dev_priv->engine[VECS]);
+       intel_cleanup_engine(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-       intel_cleanup_ring_buffer(&dev_priv->engine[BCS]);
+       intel_cleanup_engine(&dev_priv->engine[BCS]);
 cleanup_bsd_ring:
-       intel_cleanup_ring_buffer(&dev_priv->engine[VCS]);
+       intel_cleanup_engine(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-       intel_cleanup_ring_buffer(&dev_priv->engine[RCS]);
+       intel_cleanup_engine(&dev_priv->engine[RCS]);
 
        return ret;
 }
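/*
 * Editor's note: the cleanup labels above unwind in reverse order of
 * initialisation, so a failure part-way through tears down only the
 * engines that were successfully set up.
 */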
                req = i915_gem_request_alloc(engine, NULL);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
                if (ret && ret != -EIO) {
                        DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
                        i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
                if (ret && ret != -EIO) {
                        DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
                        i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
 
        if (!i915.enable_execlists) {
                dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-               dev_priv->gt.init_rings = i915_gem_init_rings;
-               dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
-               dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+               dev_priv->gt.init_engines = i915_gem_init_engines;
+               dev_priv->gt.cleanup_engine = intel_cleanup_engine;
+               dev_priv->gt.stop_engine = intel_stop_engine;
        } else {
                dev_priv->gt.execbuf_submit = intel_execlists_submission;
-               dev_priv->gt.init_rings = intel_logical_rings_init;
-               dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
-               dev_priv->gt.stop_ring = intel_logical_ring_stop;
+               dev_priv->gt.init_engines = intel_logical_rings_init;
+               dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+               dev_priv->gt.stop_engine = intel_logical_ring_stop;
        }
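        /*
         * Editor's sketch (illustrative): with the gt vtable filled
         * in, the rest of the driver dispatches through it and never
         * needs to know whether legacy ringbuffer or execlists
         * submission is active, e.g.:
         *
         *      ret = dev_priv->gt.init_engines(dev);
         *      if (ret)
         *              return ret;
         */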
 
        /* This is just a security blanket to placate dragons.
        if (ret)
                goto out_unlock;
 
-       ret = dev_priv->gt.init_rings(dev);
+       ret = dev_priv->gt.init_engines(dev);
        if (ret)
                goto out_unlock;
 
 }
 
 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int i;
 
        for_each_engine(engine, dev_priv, i)
-               dev_priv->gt.cleanup_ring(engine);
+               dev_priv->gt.cleanup_engine(engine);
 
        if (i915.enable_execlists)
                /*
 
 
        for_each_engine(engine, dev_priv, i) {
                list_for_each_entry(obj, &engine->active_list,
-                                   ring_list[engine->id]) {
+                                   engine_list[engine->id]) {
                        if (obj->base.dev != dev ||
                            !atomic_read(&obj->base.refcount.refcount)) {
                                DRM_ERROR("%s: freed active obj %p\n",
 
 
 #define I915_USER_RINGS (4)
 
-static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
        [I915_EXEC_DEFAULT]     = RCS,
        [I915_EXEC_RENDER]      = RCS,
        [I915_EXEC_BLT]         = BCS,
                *ring = &dev_priv->engine[user_ring_map[user_ring_id]];
        }
 
-       if (!intel_ring_initialized(*ring)) {
+       if (!intel_engine_initialized(*ring)) {
                DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
                return -EINVAL;
        }
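        /*
         * Editor's sketch (the bounds check is assumed from the
         * execbuffer uapi, not shown in this hunk): the full selection
         * path validates the user's ring id before the table lookup
         * above:
         *
         *      unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
         *
         *      if (user_ring_id > I915_USER_RINGS)
         *              return -EINVAL;
         */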
 
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-       if (!intel_ring_initialized(engine))
+       if (!intel_engine_initialized(engine))
                return;
 
        trace_i915_gem_request_notify(engine);
 
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance(ringbuf);
 
-       if (intel_ring_stopped(engine))
+       if (intel_engine_stopped(engine))
                return 0;
 
        if (engine->last_context != request->ctx) {
        struct drm_i915_private *dev_priv = engine->dev->dev_private;
        int ret;
 
-       if (!intel_ring_initialized(engine))
+       if (!intel_engine_initialized(engine))
                return;
 
        ret = intel_engine_idle(engine);
 {
        struct drm_i915_private *dev_priv;
 
-       if (!intel_ring_initialized(engine))
+       if (!intel_engine_initialized(engine))
                return;
 
        dev_priv = engine->dev->dev_private;
  * @dev: DRM device.
  *
  * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
+ * legacy ringbuffer submission world would be i915_gem_init_engines). It does this only for
  * those engines that are present in the hardware.
  *
  * Return: non-zero if the initialization failed.
 
        return result;
 }
 
-static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
 {
        switch (ring) {
        case RCS:
  */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
                                   const struct drm_i915_mocs_table *table,
-                                  enum intel_ring_id ring)
+                                  enum intel_engine_id ring)
 {
        struct intel_ringbuffer *ringbuf = req->ringbuf;
        unsigned int index;
        if (get_mocs_settings(req->engine->dev, &t)) {
                struct drm_i915_private *dev_priv = req->i915;
                struct intel_engine_cs *engine;
-               enum intel_ring_id ring_id;
+               enum intel_engine_id ring_id;
 
                /* Program the control registers */
                for_each_engine(engine, dev_priv, ring_id) {
 
        return ringbuf->space;
 }
 
-bool intel_ring_stopped(struct intel_engine_cs *engine)
+bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->dev->dev_private;
        return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 {
        struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
-       if (intel_ring_stopped(engine))
+       if (intel_engine_stopped(engine))
                return;
        engine->write_tail(engine, ringbuf->tail);
 }
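/*
 * Editor's note: intel_engine_flag(engine) is the engine's bit
 * (1 << engine->id) in the gpu_error.stop_rings debug mask, so a
 * "stopped" engine silently skips the tail write above instead of
 * submitting to the hardware.
 */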
        return 0;
 
 error:
-       intel_cleanup_ring_buffer(engine);
+       intel_cleanup_engine(engine);
        return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_engine_cs *engine)
+void intel_cleanup_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv;
 
-       if (!intel_ring_initialized(engine))
+       if (!intel_engine_initialized(engine))
                return;
 
        dev_priv = to_i915(engine->dev);
 
        if (engine->buffer) {
-               intel_stop_ring_buffer(engine);
+               intel_stop_engine(engine);
                WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
                intel_unpin_ringbuffer_obj(engine->buffer);
 }
 
 void
-intel_stop_ring_buffer(struct intel_engine_cs *engine)
+intel_stop_engine(struct intel_engine_cs *engine)
 {
        int ret;
 
-       if (!intel_ring_initialized(engine))
+       if (!intel_engine_initialized(engine))
                return;
 
        ret = intel_engine_idle(engine);
 
 
 struct  intel_engine_cs {
        const char      *name;
-       enum intel_ring_id {
+       enum intel_engine_id {
                RCS = 0,
                BCS,
                VCS,
 };
 
 static inline bool
-intel_ring_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(struct intel_engine_cs *engine)
 {
        return engine->dev != NULL;
 }
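/*
 * Editor's note: engine->dev is only assigned once the engine has been
 * set up, so the NULL test doubles as "does this engine exist on this
 * platform"; a sketch of the guard used throughout this patch:
 *
 *      if (!intel_engine_initialized(engine))
 *              return;
 */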
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
-void intel_stop_ring_buffer(struct intel_engine_cs *engine);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *engine);
+void intel_stop_engine(struct intel_engine_cs *engine);
+void intel_cleanup_engine(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *engine);
+bool intel_engine_stopped(struct intel_engine_cs *engine);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);