return -ENOMEM;
 }
 
+static void i915_engines_cleanup(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, i915, id)
+               kfree(engine);
+}
+
 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 {
        destroy_workqueue(dev_priv->hotplug.dp_wq);
        mutex_init(&dev_priv->pps_mutex);
 
        intel_uc_init_early(dev_priv);
-
        i915_memcpy_init_early(dev_priv);
 
+       ret = intel_engines_init_early(dev_priv);
+       if (ret)
+               return ret;
+
        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
-               return ret;
+               goto err_engines;
 
        ret = intel_gvt_init(dev_priv);
        if (ret < 0)
        intel_gvt_cleanup(dev_priv);
 err_workqueues:
        i915_workqueues_cleanup(dev_priv);
+err_engines:
+       i915_engines_cleanup(dev_priv);
        return ret;
 }
 
        i915_perf_fini(dev_priv);
        i915_gem_load_cleanup(dev_priv);
        i915_workqueues_cleanup(dev_priv);
+       i915_engines_cleanup(dev_priv);
 }
 
 static int i915_mmio_setup(struct drm_i915_private *dev_priv)
 
 }
 
 /**
- * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * intel_engines_init_early() - allocate the Engine Command Streamers
  * @dev_priv: i915 device private
  *
  * Return: non-zero if the initialization failed.
  */
-int intel_engines_init(struct drm_i915_private *dev_priv)
+int intel_engines_init_early(struct drm_i915_private *dev_priv)
 {
        struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
        unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
        unsigned int mask = 0;
-       int (*init)(struct intel_engine_cs *engine);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int i;
-       int ret;
+       int err;
 
        WARN_ON(ring_mask == 0);
        WARN_ON(ring_mask &
                if (!HAS_ENGINE(dev_priv, i))
                        continue;
 
+               err = intel_engine_setup(dev_priv, i);
+               if (err)
+                       goto cleanup;
+
+               mask |= ENGINE_MASK(i);
+       }
+
+       /*
+        * Catch failures to update intel_engines table when the new engines
+        * are added to the driver by a warning and disabling the forgotten
+        * engines.
+        */
+       if (WARN_ON(mask != ring_mask))
+               device_info->ring_mask = mask;
+
+       device_info->num_rings = hweight32(mask);
+
+       return 0;
+
+cleanup:
+       for_each_engine(engine, dev_priv, id)
+               kfree(engine);
+       return err;
+}
+
+/**
+ * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * @dev_priv: i915 device private
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_engines_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id, err_id;
+       unsigned int mask = 0;
+       int err = 0;
+
+       for_each_engine(engine, dev_priv, id) {
+               int (*init)(struct intel_engine_cs *engine);
+
                if (i915.enable_execlists)
-                       init = intel_engines[i].init_execlists;
+                       init = intel_engines[id].init_execlists;
                else
-                       init = intel_engines[i].init_legacy;
-
-               if (!init)
+                       init = intel_engines[id].init_legacy;
+               if (!init) {
+                       kfree(engine);
+                       dev_priv->engine[id] = NULL;
                        continue;
+               }
 
-               ret = intel_engine_setup(dev_priv, i);
-               if (ret)
-                       goto cleanup;
-
-               ret = init(dev_priv->engine[i]);
-               if (ret)
+               err = init(engine);
+               if (err) {
+                       err_id = id;
                        goto cleanup;
+               }
 
-               mask |= ENGINE_MASK(i);
+               mask |= ENGINE_MASK(id);
        }
 
        /*
         * are added to the driver by a warning and disabling the forgotten
         * engines.
         */
-       if (WARN_ON(mask != ring_mask))
+       if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
                device_info->ring_mask = mask;
 
        device_info->num_rings = hweight32(mask);
 
        return 0;
 
 cleanup:
        for_each_engine(engine, dev_priv, id) {
-               if (i915.enable_execlists)
+               if (id >= err_id)
+                       kfree(engine);
+               else if (i915.enable_execlists)
                        intel_logical_ring_cleanup(engine);
                else
                        intel_engine_cleanup(engine);
        }
-
-       return ret;
+       return err;
 }
 
 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)