}
 
 static void
-reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));
 
        wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
-       gt_init_workarounds(i915, &lists->gt_wa_list);
+       gt_init_workarounds(gt->i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);
 
-       for_each_engine(engine, i915, id) {
+       for_each_engine(engine, gt->i915, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;
 
                wa_init_start(wal, "REF", engine->name);

 }
 
 static void
-reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       for_each_engine(engine, i915, id)
+       for_each_engine(engine, gt->i915, id)
                intel_wa_list_free(&lists->engine[id].wa_list);
 
        intel_wa_list_free(&lists->gt_wa_list);
 
        err = 0;
        i915_gem_object_lock(results);
-       intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
+       intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        i915_gem_object_unlock(results);
-       if (intel_gt_is_wedged(&ctx->i915->gt))
+       if (intel_gt_is_wedged(engine->gt))
                err = -EIO;
        if (err)
                goto out_put;

                if (err) {
                        pr_err("%s: Futzing %x timedout; cancelling test\n",
                               engine->name, reg);
-                       intel_gt_set_wedged(&ctx->i915->gt);
+                       intel_gt_set_wedged(engine->gt);
                        goto out_batch;
                }
 
 
 static int live_dirty_whitelist(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
 
        /* Can the user write to the whitelisted registers? */
 
-       if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+       if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;
 
-       file = mock_file(i915);
+       file = mock_file(gt->i915);
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       ctx = live_context(i915, file);
+       ctx = live_context(gt->i915, file);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out_file;
        }
 
-       for_each_engine(engine, i915, id) {
+       for_each_engine(engine, gt->i915, id) {
                if (engine->whitelist.count == 0)
                        continue;
 
        }
 
 out_file:
-       mock_file_free(i915, file);
+       mock_file_free(gt->i915, file);
        return err;
 }
 
 static int live_reset_whitelist(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
-       struct intel_engine_cs *engine = i915->engine[RCS0];
+       struct intel_gt *gt = arg;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        int err = 0;
 
        /* If we reset the gpu, we should not lose the RING_NONPRIV */
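+       /* Prevent concurrent resets while we exercise each engine */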
+       igt_global_reset_lock(gt);
 
-       if (!engine || engine->whitelist.count == 0)
-               return 0;
-
-       igt_global_reset_lock(&i915->gt);
+       for_each_engine(engine, gt->i915, id) {
+               if (engine->whitelist.count == 0)
+                       continue;
 
-       if (intel_has_reset_engine(&i915->gt)) {
-               err = check_whitelist_across_reset(engine,
-                                                  do_engine_reset,
-                                                  "engine");
-               if (err)
-                       goto out;
-       }
+               if (intel_has_reset_engine(gt)) {
+                       err = check_whitelist_across_reset(engine,
+                                                          do_engine_reset,
+                                                          "engine");
+                       if (err)
+                               goto out;
+               }
 
-       if (intel_has_gpu_reset(&i915->gt)) {
-               err = check_whitelist_across_reset(engine,
-                                                  do_device_reset,
-                                                  "device");
-               if (err)
-                       goto out;
+               if (intel_has_gpu_reset(gt)) {
+                       err = check_whitelist_across_reset(engine,
+                                                          do_device_reset,
+                                                          "device");
+                       if (err)
+                               goto out;
+               }
        }
 
 out:
-       igt_global_reset_unlock(&i915->gt);
+       igt_global_reset_unlock(gt);
        return err;
 }
 
 
 static int live_isolated_whitelist(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct intel_gt *gt = arg;
        struct {
                struct i915_gem_context *ctx;
                struct i915_vma *scratch[2];

         * invisible to a second context.
         */
 
-       if (!intel_engines_has_context_isolation(i915))
-               return 0;
-
-       if (!i915->kernel_context->vm)
+       if (!intel_engines_has_context_isolation(gt->i915))
                return 0;
 
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                struct i915_address_space *vm;
                struct i915_gem_context *c;
 
-               c = kernel_context(i915);
+               c = kernel_context(gt->i915);
                if (IS_ERR(c)) {
                        err = PTR_ERR(c);
                        goto err;

                i915_vm_put(vm);
        }
 
-       for_each_engine(engine, i915, id) {
+       for_each_engine(engine, gt->i915, id) {
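+               /* Isolation between clients needs a separate vm (full ppGTT) */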
+               if (!engine->kernel_context->vm)
+                       continue;
+
                if (!whitelist_writable_count(engine))
                        continue;
 
                kernel_context_close(client[i].ctx);
        }
 
-       if (igt_flush_test(i915))
+       if (igt_flush_test(gt->i915))
                err = -EIO;
 
        return err;

 static int
 live_gpu_reset_workarounds(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct intel_gt *gt = arg;
        struct i915_gem_context *ctx;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;
 
-       if (!intel_has_gpu_reset(&i915->gt))
+       if (!intel_has_gpu_reset(gt))
                return 0;
 
-       ctx = kernel_context(i915);
+       ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
 
        pr_info("Verifying after GPU reset...\n");
 
-       igt_global_reset_lock(&i915->gt);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+       igt_global_reset_lock(gt);
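+       /* Hold a runtime-pm wakeref so the device stays awake for the reset */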
+       wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
-       reference_lists_init(i915, &lists);
+       reference_lists_init(gt, &lists);
 
        ok = verify_wa_lists(ctx, &lists, "before reset");
        if (!ok)
                goto out;
 
-       intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");
+       intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
 
        ok = verify_wa_lists(ctx, &lists, "after reset");
 
 out:
        i915_gem_context_unlock_engines(ctx);
        kernel_context_close(ctx);
-       reference_lists_fini(i915, &lists);
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       igt_global_reset_unlock(&i915->gt);
+       reference_lists_fini(gt, &lists);
+       intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+       igt_global_reset_unlock(gt);
 
        return ok ? 0 : -ESRCH;
 }

 static int
 live_engine_reset_workarounds(void *arg)
 {
-       struct drm_i915_private *i915 = arg;
+       struct intel_gt *gt = arg;
        struct i915_gem_engines_iter it;
        struct i915_gem_context *ctx;
        struct intel_context *ce;
        struct wa_lists lists;
        int ret = 0;
 
-       if (!intel_has_reset_engine(&i915->gt))
+       if (!intel_has_reset_engine(gt))
                return 0;
 
-       ctx = kernel_context(i915);
+       ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       igt_global_reset_lock(&i915->gt);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+       igt_global_reset_lock(gt);
+       wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
-       reference_lists_init(i915, &lists);
+       reference_lists_init(gt, &lists);
 
        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct intel_engine_cs *engine = ce->engine;

        }
 err:
        i915_gem_context_unlock_engines(ctx);
-       reference_lists_fini(i915, &lists);
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       igt_global_reset_unlock(&i915->gt);
+       reference_lists_fini(gt, &lists);
+       intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+       igt_global_reset_unlock(gt);
        kernel_context_close(ctx);
 
-       igt_flush_test(i915);
+       igt_flush_test(gt->i915);
 
        return ret;
 }

        if (intel_gt_is_wedged(&i915->gt))
                return 0;
 
-       return i915_subtests(tests, i915);
+       return intel_gt_live_subtests(tests, &i915->gt);
 }