#define WA_SET_FIELD_MASKED(addr, mask, value) \
        wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
 
-static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                     struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
-
        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
        /* WaDisableAsyncFlipPerfMode:bdw,chv */
                            GEN6_WIZ_HASHING_16x4);
 }
 
-static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-       gen8_ctx_workarounds_init(engine);
+       gen8_ctx_workarounds_init(engine, wal);
 
        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
                          (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 }
 
-static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-       gen8_ctx_workarounds_init(engine);
+       gen8_ctx_workarounds_init(engine, wal);
 
        /* WaDisableThreadStallDopClockGating:chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
        WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
 }
 
-static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                     struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
 
        if (HAS_LLC(i915)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
 }
 
-static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
+                               struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;
 
                            GEN9_IZ_HASHING(0, vals[0]));
 }
 
-static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
-       gen9_ctx_workarounds_init(engine);
-       skl_tune_iz_hashing(engine);
+       gen9_ctx_workarounds_init(engine, wal);
+       skl_tune_iz_hashing(engine, wal);
 }
 
-static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-       gen9_ctx_workarounds_init(engine);
+       gen9_ctx_workarounds_init(engine, wal);
 
        /* WaDisableThreadStallDopClockGating:bxt */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 }
 
-static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-       gen9_ctx_workarounds_init(engine);
+       gen9_ctx_workarounds_init(engine, wal);
 
        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
                          GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 }
 
-static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-       gen9_ctx_workarounds_init(engine);
+       gen9_ctx_workarounds_init(engine, wal);
 
        /* WaToEnableHwFixForPushConstHWBug:glk */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 }
 
-static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-       gen9_ctx_workarounds_init(engine);
+       gen9_ctx_workarounds_init(engine, wal);
 
        /* WaToEnableHwFixForPushConstHWBug:cfl */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 }
 
-static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
 
        /* WaForceContextSaveRestoreNonCoherent:cnl */
        WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
 }
 
-static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
+                                    struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
 
        /* Wa_1604370585:icl (pre-prod)
         * Formerly known as WaPushConstantDereferenceHoldDisable
                            GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
 }
 
-void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+static void
+__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
+                          struct i915_wa_list *wal,
+                          const char *name)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-       wa_init_start(wal, "context");
+       if (engine->class != RENDER_CLASS)
+               return;
+
+       wa_init_start(wal, name);
 
        if (IS_GEN(i915, 11))
-               icl_ctx_workarounds_init(engine);
+               icl_ctx_workarounds_init(engine, wal);
        else if (IS_CANNONLAKE(i915))
-               cnl_ctx_workarounds_init(engine);
+               cnl_ctx_workarounds_init(engine, wal);
        else if (IS_COFFEELAKE(i915))
-               cfl_ctx_workarounds_init(engine);
+               cfl_ctx_workarounds_init(engine, wal);
        else if (IS_GEMINILAKE(i915))
-               glk_ctx_workarounds_init(engine);
+               glk_ctx_workarounds_init(engine, wal);
        else if (IS_KABYLAKE(i915))
-               kbl_ctx_workarounds_init(engine);
+               kbl_ctx_workarounds_init(engine, wal);
        else if (IS_BROXTON(i915))
-               bxt_ctx_workarounds_init(engine);
+               bxt_ctx_workarounds_init(engine, wal);
        else if (IS_SKYLAKE(i915))
-               skl_ctx_workarounds_init(engine);
+               skl_ctx_workarounds_init(engine, wal);
        else if (IS_CHERRYVIEW(i915))
-               chv_ctx_workarounds_init(engine);
+               chv_ctx_workarounds_init(engine, wal);
        else if (IS_BROADWELL(i915))
-               bdw_ctx_workarounds_init(engine);
+               bdw_ctx_workarounds_init(engine, wal);
        else if (INTEL_GEN(i915) < 8)
                return;
        else
        wa_init_finish(wal);
 }
 
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+{
+       __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
+}
+
 int intel_engine_emit_ctx_wa(struct i915_request *rq)
 {
        struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
        return 0;
 }
 
-static int engine_wa_list_verify(struct intel_engine_cs *engine,
+static int engine_wa_list_verify(struct intel_context *ce,
                                 const struct i915_wa_list * const wal,
                                 const char *from)
 {
        if (!wal->count)
                return 0;
 
-       vma = create_scratch(&engine->i915->ggtt.vm, wal->count);
+       vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
-       rq = i915_request_create(engine->kernel_context);
+       rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_vma;
 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
                                    const char *from)
 {
-       return engine_wa_list_verify(engine, &engine->wa_list, from);
+       return engine_wa_list_verify(engine->kernel_context,
+                                    &engine->wa_list,
+                                    from);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 
        { INTEL_GEMINILAKE, 0x731c }
 };
 
-#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
 struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                char name[REF_NAME_MAX];
                struct i915_wa_list wa_list;
+               struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
 };
 
                wa_init_start(wal, name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);
+
+               /*
+                * NOTE(review): do not snprintf() into name[] here --
+                * wa_init_start() above retains a pointer to that shared
+                * buffer for the engine wa_list, so overwriting it would
+                * relabel the list just registered. Use a literal instead.
+                */
+               __intel_engine_init_ctx_wa(engine,
+                                          &lists->engine[id].ctx_wa_list,
+                                          "CTX_REF");
        }
 }
 
        return err;
 }
 
-static bool verify_gt_engine_wa(struct drm_i915_private *i915,
-                               struct wa_lists *lists, const char *str)
+static bool
+verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+               const char *str)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;
        bool ok = true;
 
        ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
 
-       for_each_engine(engine, i915, id) {
-               ok &= engine_wa_list_verify(engine,
+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+               enum intel_engine_id id = ce->engine->id;
+
+               ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;
+
+               ok &= engine_wa_list_verify(ce,
+                                           &lists->engine[id].ctx_wa_list,
+                                           str) == 0;
        }
+       i915_gem_context_unlock_engines(ctx);
 
        return ok;
 }
 
 static int
-live_gpu_reset_gt_engine_workarounds(void *arg)
+live_gpu_reset_workarounds(void *arg)
 {
        struct drm_i915_private *i915 = arg;
+       struct i915_gem_context *ctx;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;
        if (!intel_has_gpu_reset(i915))
                return 0;
 
+       ctx = kernel_context(i915);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+
        pr_info("Verifying after GPU reset...\n");
 
        igt_global_reset_lock(i915);
 
        reference_lists_init(i915, &lists);
 
-       ok = verify_gt_engine_wa(i915, &lists, "before reset");
+       ok = verify_wa_lists(ctx, &lists, "before reset");
        if (!ok)
                goto out;
 
        i915_reset(i915, ALL_ENGINES, "live_workarounds");
 
-       ok = verify_gt_engine_wa(i915, &lists, "after reset");
+       ok = verify_wa_lists(ctx, &lists, "after reset");
 
 out:
+       kernel_context_close(ctx);
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);
 }
 
 static int
-live_engine_reset_gt_engine_workarounds(void *arg)
+live_engine_reset_workarounds(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
 
                pr_info("Verifying after %s reset...\n", engine->name);
 
-               ok = verify_gt_engine_wa(i915, &lists, "before reset");
+               ok = verify_wa_lists(ctx, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
 
                i915_reset_engine(engine, "live_workarounds");
 
-               ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
+               ok = verify_wa_lists(ctx, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);
 
-               ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
+               ok = verify_wa_lists(ctx, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
-               SUBTEST(live_gpu_reset_gt_engine_workarounds),
-               SUBTEST(live_engine_reset_gt_engine_workarounds),
+               SUBTEST(live_gpu_reset_workarounds),
+               SUBTEST(live_engine_reset_workarounds),
        };
        int err;