};
        struct drm_file *file;
        struct i915_gem_context *ctx;
+       intel_wakeref_t wakeref;
        int err;
 
        if (!HAS_PPGTT(dev_priv)) {
                return PTR_ERR(file);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        ctx = live_context(dev_priv, file);
        if (IS_ERR(ctx)) {
        err = i915_subtests(tests, ctx);
 
 out_unlock:
-       intel_runtime_pm_put_unchecked(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        mock_file_free(dev_priv, file);
 
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = 0;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        for_each_engine(engine, i915, id) {
                struct i915_request *rq;
                i915_request_add(rq);
        }
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        return err;
 }
 
 static void simulate_hibernate(struct drm_i915_private *i915)
 {
-       intel_runtime_pm_get(i915);
+       intel_wakeref_t wakeref;
+
+       wakeref = intel_runtime_pm_get(i915);
 
        /*
         * As a final sting in the tail, invalidate stolen. Under a real S4,
         */
        trash_stolen(i915);
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
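
(Illustrative aside, not part of the patch: every hunk in this series applies the same shape of change. A minimal sketch of that pattern, using only the intel_runtime_pm_* calls already shown in the diff; the function name is invented for illustration:)

	/*
	 * Sketch of the conversion applied throughout: the wakeref cookie
	 * returned by intel_runtime_pm_get() is kept in a local and handed
	 * back to intel_runtime_pm_put(), replacing the untracked
	 * intel_runtime_pm_put_unchecked() call.
	 */
	static void example_with_tracked_wakeref(struct drm_i915_private *i915)
	{
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(i915);

		/* ... hardware access that requires the device to be awake ... */

		intel_runtime_pm_put(i915, wakeref);
	}
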
 
 static int pm_prepare(struct drm_i915_private *i915)
 
 static void pm_suspend(struct drm_i915_private *i915)
 {
-       intel_runtime_pm_get(i915);
+       intel_wakeref_t wakeref;
+
+       wakeref = intel_runtime_pm_get(i915);
 
        i915_gem_suspend_gtt_mappings(i915);
        i915_gem_suspend_late(i915);
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 static void pm_hibernate(struct drm_i915_private *i915)
 {
-       intel_runtime_pm_get(i915);
+       intel_wakeref_t wakeref;
+
+       wakeref = intel_runtime_pm_get(i915);
 
        i915_gem_suspend_gtt_mappings(i915);
 
        i915_gem_freeze(i915);
        i915_gem_freeze_late(i915);
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 static void pm_resume(struct drm_i915_private *i915)
 {
+       intel_wakeref_t wakeref;
+
        /*
         * Both suspend and hibernate follow the same wakeup path and assume
         * that runtime-pm just works.
         */
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        intel_engines_sanitize(i915, false);
        i915_gem_sanitize(i915);
        i915_gem_resume(i915);
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 static int igt_gem_suspend(void *arg)
 
        struct drm_i915_private *i915 = arg;
        const struct igt_coherency_mode *read, *write, *over;
        struct drm_i915_gem_object *obj;
+       intel_wakeref_t wakeref;
        unsigned long count, n;
        u32 *offsets, *values;
        int err = 0;
        values = offsets + ncachelines;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        for (over = igt_coherency_mode; over->name; over++) {
                if (!over->set)
                        continue;
                }
        }
 unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        kfree(offsets);
        return err;
 
        struct intel_engine_cs *engine;
        struct i915_gem_context **ctx;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        struct drm_file *file;
        struct live_test t;
        unsigned long n;
                return PTR_ERR(file);
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
        }
 
 out_unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        mock_file_free(i915, file);
        return err;
                }
 
                for_each_engine(engine, i915, id) {
+                       intel_wakeref_t wakeref;
+
                        if (!engine->context_size)
                                continue; /* No logical context support in HW */
 
                                }
                        }
 
-                       intel_runtime_pm_get(i915);
+                       wakeref = intel_runtime_pm_get(i915);
                        err = gpu_fill(obj, ctx, engine, dw);
-                       intel_runtime_pm_put_unchecked(i915);
+                       intel_runtime_pm_put(i915, wakeref);
                        if (err) {
                                pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
                                       ndwords, dw, max_dwords(obj),
                unsigned int id;
 
                for_each_engine(engine, i915, id) {
+                       intel_wakeref_t wakeref;
+
                        if (!intel_engine_can_store_dword(engine))
                                continue;
 
                                        i915_gem_object_set_readonly(obj);
                        }
 
-                       intel_runtime_pm_get(i915);
+                       wakeref = intel_runtime_pm_get(i915);
                        err = gpu_fill(obj, ctx, engine, dw);
-                       intel_runtime_pm_put_unchecked(i915);
+                       intel_runtime_pm_put(i915, wakeref);
                        if (err) {
                                pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
                                       ndwords, dw, max_dwords(obj),
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_a, *ctx_b;
        struct intel_engine_cs *engine;
+       intel_wakeref_t wakeref;
        struct drm_file *file;
        I915_RND_STATE(prng);
        unsigned long count;
        GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
        vm_total -= I915_GTT_PAGE_SIZE;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        count = 0;
        for_each_engine(engine, i915, id) {
                count, RUNTIME_INFO(i915)->num_rings);
 
 out_rpm:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 out_unlock:
        if (end_live_test(&t))
                err = -EIO;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err;
 
        /*
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        ctx = kernel_context(i915);
        if (IS_ERR(ctx)) {
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
 
        kernel_context_close(ctx);
 
                struct drm_mm_node node;
                struct reserved *next;
        } *reserved = NULL;
+       intel_wakeref_t wakeref;
        struct drm_mm_node hole;
        unsigned long count;
        int err;
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        /* Reserve a block so that we know we have enough to fit a few rq */
        memset(&hole, 0, sizeof(hole));
                struct drm_file *file;
 
                file = mock_file(i915);
-               if (IS_ERR(file))
-                       return PTR_ERR(file);
+               if (IS_ERR(file)) {
+                       err = PTR_ERR(file);
+                       break;
+               }
 
                count = 0;
                mutex_lock(&i915->drm.struct_mutex);
        }
        if (drm_mm_node_allocated(&hole))
                drm_mm_remove_node(&hole);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
 
        return err;
                SUBTEST(igt_overcommit),
        };
        struct drm_i915_private *i915;
+       intel_wakeref_t wakeref;
        int err;
 
        i915 = mock_gem_device();
                return -ENOMEM;
 
        mutex_lock(&i915->drm.struct_mutex);
+       wakeref = intel_runtime_pm_get(i915);
+
        err = i915_subtests(tests, i915);
+
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
 
        drm_dev_put(&i915->drm);
 
 
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
+                       intel_wakeref_t wakeref;
 
                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
 
                        mock_vma.node.size = BIT_ULL(size);
                        mock_vma.node.start = addr;
 
-                       intel_runtime_pm_get(i915);
+                       wakeref = intel_runtime_pm_get(i915);
                        vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
-                       intel_runtime_pm_put_unchecked(i915);
+                       intel_runtime_pm_put(i915, wakeref);
                }
                count = n;
 
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
+       intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;
        if (err)
                goto out_unpin;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;
        kfree(order);
 out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        drm_mm_remove_node(&tmp);
 out_unpin:
        i915_gem_object_unpin_pages(obj);
 
        const unsigned int nreal = 1 << 12; /* largest tile row x2 */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
+       intel_wakeref_t wakeref;
        int tiling;
        int err;
 
        }
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (1) {
                IGT_TIMEOUT(end);
        }
 
 out_unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        i915_gem_object_unpin_pages(obj);
 out:
 
        mutex_lock(&i915->drm.struct_mutex);
        if (!i915->gt.active_requests++) {
-               intel_runtime_pm_get(i915);
+               intel_wakeref_t wakeref;
+
+               wakeref = intel_runtime_pm_get(i915);
                i915_gem_unpark(i915);
-               intel_runtime_pm_put_unchecked(i915);
+               intel_runtime_pm_put(i915, wakeref);
        }
        mutex_unlock(&i915->drm.struct_mutex);
+
        cancel_delayed_work_sync(&i915->gt.retire_work);
        cancel_delayed_work_sync(&i915->gt.idle_work);
 }
 
        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
+               intel_wakeref_t wakeref;
+
                if (i915_terminally_wedged(&i915->gpu_error))
                        break;
 
                }
 
                mutex_lock(&i915->drm.struct_mutex);
-               intel_runtime_pm_get(i915);
+               wakeref = intel_runtime_pm_get(i915);
                err = make_obj_busy(obj);
-               intel_runtime_pm_put_unchecked(i915);
+               intel_runtime_pm_put(i915, wakeref);
                mutex_unlock(&i915->drm.struct_mutex);
                if (err) {
                        pr_err("[loop %d] Failed to busy the object\n", loop);
 
                SUBTEST(igt_request_rewind),
        };
        struct drm_i915_private *i915;
+       intel_wakeref_t wakeref;
        int err;
 
        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;
 
+       wakeref = intel_runtime_pm_get(i915);
+
        err = i915_subtests(tests, i915);
+
+       intel_runtime_pm_put(i915, wakeref);
        drm_dev_put(&i915->drm);
 
        return err;
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
+       intel_wakeref_t wakeref;
        struct live_test t;
        unsigned int id;
        int err = -ENODEV;
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        for_each_engine(engine, i915, id) {
                struct i915_request *request = NULL;
        }
 
 out_unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
-       struct live_test t;
+       intel_wakeref_t wakeref;
        struct i915_vma *batch;
+       struct live_test t;
        unsigned int id;
        int err = 0;
 
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        batch = empty_batch(i915);
        if (IS_ERR(batch)) {
        i915_vma_unpin(batch);
        i915_vma_put(batch);
 out_unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_request *request[I915_NUM_ENGINES];
+       intel_wakeref_t wakeref;
        struct i915_vma *batch;
        struct live_test t;
        unsigned int id;
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        err = begin_live_test(&t, i915, __func__, "");
        if (err)
        i915_vma_unpin(batch);
        i915_vma_put(batch);
 out_unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
        struct i915_request *request[I915_NUM_ENGINES] = {};
        struct i915_request *prev = NULL;
        struct intel_engine_cs *engine;
+       intel_wakeref_t wakeref;
        struct live_test t;
        unsigned int id;
        int err;
         */
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        err = begin_live_test(&t, i915, __func__, "");
        if (err)
                i915_request_put(request[id]);
        }
 out_unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
 static int igt_guc_clients(void *args)
 {
        struct drm_i915_private *dev_priv = args;
+       intel_wakeref_t wakeref;
        struct intel_guc *guc;
        int err = 0;
 
        GEM_BUG_ON(!HAS_GUC(dev_priv));
        mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        guc = &dev_priv->guc;
        if (!guc) {
        guc_clients_create(guc);
        guc_clients_enable(guc);
 unlock:
-       intel_runtime_pm_put_unchecked(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
 }
 static int igt_guc_doorbells(void *arg)
 {
        struct drm_i915_private *dev_priv = arg;
+       intel_wakeref_t wakeref;
        struct intel_guc *guc;
        int i, err = 0;
        u16 db_id;
 
        GEM_BUG_ON(!HAS_GUC(dev_priv));
        mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        guc = &dev_priv->guc;
        if (!guc) {
                        guc_client_free(clients[i]);
                }
 unlock:
-       intel_runtime_pm_put_unchecked(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
 }
 
 static int igt_wedged_reset(void *arg)
 {
        struct drm_i915_private *i915 = arg;
+       intel_wakeref_t wakeref;
 
        /* Check that we can recover a wedged device with a GPU reset */
 
        igt_global_reset_lock(i915);
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        i915_gem_set_wedged(i915);
        GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
        i915_reset(i915, ALL_ENGINES, NULL);
        GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        igt_global_reset_unlock(i915);
 
                { }
        };
        struct drm_i915_private *i915 = arg;
+       intel_wakeref_t wakeref;
        int err = 0;
 
        /* Check that the resets are usable from atomic context */
 
        igt_global_reset_lock(i915);
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        /* Flush any requests before we get started and check basics */
        force_reset(i915);
        force_reset(i915);
 
 unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        igt_global_reset_unlock(i915);
 
                SUBTEST(igt_handle_error),
                SUBTEST(igt_atomic_reset),
        };
+       intel_wakeref_t wakeref;
        bool saved_hangcheck;
        int err;
 
        if (i915_terminally_wedged(&i915->gpu_error))
                return -EIO; /* we're long past hope of a successful reset */
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
 
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
        i915_modparams.enable_hangcheck = saved_hangcheck;
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        return err;
 }
 
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct igt_spinner spin;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin, i915))
                goto err_unlock;
        igt_spinner_fini(&spin);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;
        igt_spinner_fini(&spin_hi);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {};
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;
        igt_spinner_fini(&spin_hi);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
 
        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;
 
        mutex_lock(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;
        igt_spinner_fini(&spin_hi);
 err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
                .ncontext = 1024,
        };
        const unsigned int phase[] = { 0, BATCH };
+       intel_wakeref_t wakeref;
        int err = -ENOMEM;
        u32 *cs;
        int n;
                return -ENOMEM;
 
        mutex_lock(&smoke.i915->drm.struct_mutex);
-       intel_runtime_pm_get(smoke.i915);
+       wakeref = intel_runtime_pm_get(smoke.i915);
 
        smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
        if (IS_ERR(smoke.batch)) {
 err_batch:
        i915_gem_object_put(smoke.batch);
 err_unlock:
-       intel_runtime_pm_put_unchecked(smoke.i915);
+       intel_runtime_pm_put(smoke.i915, wakeref);
        mutex_unlock(&smoke.i915->drm.struct_mutex);
        kfree(smoke.contexts);
 
 
 static struct drm_i915_gem_object *
 read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 {
+       const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
+       intel_wakeref_t wakeref;
        struct i915_request *rq;
        struct i915_vma *vma;
-       const u32 base = engine->mmio_base;
        u32 srm, *cs;
        int err;
        int i;
        if (err)
                goto err_obj;
 
-       intel_runtime_pm_get(engine->i915);
+       wakeref = intel_runtime_pm_get(engine->i915);
        rq = i915_request_alloc(engine, ctx);
-       intel_runtime_pm_put_unchecked(engine->i915);
+       intel_runtime_pm_put(engine->i915, wakeref);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
 {
        struct i915_gem_context *ctx;
        struct i915_request *rq;
+       intel_wakeref_t wakeref;
        int err = 0;
 
        ctx = kernel_context(engine->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       intel_runtime_pm_get(engine->i915);
+       wakeref = intel_runtime_pm_get(engine->i915);
 
        if (spin)
                rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
        else
                rq = i915_request_alloc(engine, ctx);
 
-       intel_runtime_pm_put_unchecked(engine->i915);
+       intel_runtime_pm_put(engine->i915, wakeref);
 
        kernel_context_close(ctx);
 
        bool want_spin = reset == do_engine_reset;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
+       intel_wakeref_t wakeref;
        int err;
 
        pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
        if (err)
                goto out;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        err = reset(engine);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        if (want_spin) {
                igt_spinner_end(&spin);
 {
        struct drm_i915_private *i915 = arg;
        struct i915_gpu_error *error = &i915->gpu_error;
+       intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;
 
        pr_info("Verifying after GPU reset...\n");
 
        igt_global_reset_lock(i915);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
+
        reference_lists_init(i915, &lists);
 
        ok = verify_gt_engine_wa(i915, &lists, "before reset");
 
 out:
        reference_lists_fini(i915, &lists);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);
 
        return ok ? 0 : -ESRCH;
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
+       intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;
 
                return PTR_ERR(ctx);
 
        igt_global_reset_lock(i915);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
+
        reference_lists_init(i915, &lists);
 
        for_each_engine(engine, i915, id) {
 
 err:
        reference_lists_fini(i915, &lists);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);
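
(Illustrative aside, not part of the patch: the selftests above keep the get/put pair balanced across early exits by funnelling every error path through a single unlock label. A hedged sketch of that structure; do_something() and do_something_else() are hypothetical helpers, not functions from the tree:)

	static int example_selftest(void *arg)
	{
		struct drm_i915_private *i915 = arg;
		intel_wakeref_t wakeref;
		int err;

		mutex_lock(&i915->drm.struct_mutex);
		wakeref = intel_runtime_pm_get(i915);

		err = do_something(i915);		/* hypothetical helper */
		if (err)
			goto out_unlock;

		err = do_something_else(i915);		/* hypothetical helper */

	out_unlock:
		/* the tracked put always receives the cookie from the matching get */
		intel_runtime_pm_put(i915, wakeref);
		mutex_unlock(&i915->drm.struct_mutex);
		return err;
	}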