wa_write_masked_or(wal, reg, val, val);
 }
 
-static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
        /* WaDisableKillLogic:bxt,skl,kbl */
        if (!IS_COFFEELAKE(i915))
                wa_write_or(wal,
                    BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaDisableGafsUnitClkGating:skl */
        wa_write_or(wal,
                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaInPlaceDecompressionHang:bxt */
        wa_write_or(wal,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaDisableDynamicCreditSharing:kbl */
        if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       gen9_gt_workarounds_init(i915);
+       /* GLK currently needs only the common gen9 GT workarounds */
+       gen9_gt_workarounds_init(i915, wal);
 }
 
-static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       gen9_gt_workarounds_init(i915);
+       gen9_gt_workarounds_init(i915, wal);
 
        /* WaDisableGafsUnitClkGating:cfl */
        wa_write_or(wal,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void wa_init_mcr(struct drm_i915_private *dev_priv)
+static void
+wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
 {
        const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       struct i915_wa_list *wal = &dev_priv->gt_wa_list;
        u32 mcr_slice_subslice_mask;
 
        /*
                           intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       wa_init_mcr(i915);
+       wa_init_mcr(i915, wal);
 
        /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
        if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       wa_init_mcr(i915);
+       wa_init_mcr(i915, wal);
 
        /* WaInPlaceDecompressionHang:icl */
        wa_write_or(wal,
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+static void
+gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       struct i915_wa_list *wal = &i915->gt_wa_list;
-
-       wa_init_start(wal, "GT");
-
        if (INTEL_GEN(i915) < 8)
                return;
        else if (IS_BROADWELL(i915))
        else if (IS_CHERRYVIEW(i915))
                return;
        else if (IS_SKYLAKE(i915))
-               skl_gt_workarounds_init(i915);
+               skl_gt_workarounds_init(i915, wal);
        else if (IS_BROXTON(i915))
-               bxt_gt_workarounds_init(i915);
+               bxt_gt_workarounds_init(i915, wal);
        else if (IS_KABYLAKE(i915))
-               kbl_gt_workarounds_init(i915);
+               kbl_gt_workarounds_init(i915, wal);
        else if (IS_GEMINILAKE(i915))
-               glk_gt_workarounds_init(i915);
+               glk_gt_workarounds_init(i915, wal);
        else if (IS_COFFEELAKE(i915))
-               cfl_gt_workarounds_init(i915);
+               cfl_gt_workarounds_init(i915, wal);
        else if (IS_CANNONLAKE(i915))
-               cnl_gt_workarounds_init(i915);
+               cnl_gt_workarounds_init(i915, wal);
        else if (IS_ICELAKE(i915))
-               icl_gt_workarounds_init(i915);
+               icl_gt_workarounds_init(i915, wal);
        else
                MISSING_CASE(INTEL_GEN(i915));
+}
+
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
+{
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
+       wa_init_start(wal, "GT");
+       gt_init_workarounds(i915, wal);
        wa_init_finish(wal);
 }
 
                           i915_mmio_reg_offset(RING_NOPID(base)));
 }
 
-static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->wa_list;
 
        if (IS_ICELAKE(i915)) {
                /* This is not an Wa. Enable for better image quality */
        }
 }
 
-static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
-       struct i915_wa_list *wal = &engine->wa_list;
 
        /* WaKBLVECSSemaphoreWaitPoll:kbl */
        if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
        }
 }
 
+/*
+ * Populate @wal with the workaround list for @engine: the render engine
+ * gets the RCS-specific set, every other engine the common XCS set.
+ * Split out from intel_engine_init_workarounds() so selftests can build
+ * a reference list into their own storage.
+ */
+static void
+engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+       /*
+        * NOTE(review): I915_SELFTEST_ONLY() means this gen < 8 bail-out
+        * is only evaluated in selftest builds — confirm that is intended.
+        */
+       if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+               return;
+
+       if (engine->id == RCS)
+               rcs_engine_wa_init(engine, wal);
+       else
+               xcs_engine_wa_init(engine, wal);
+}
+
 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
 {
        struct i915_wa_list *wal = &engine->wa_list;
                return;
 
        wa_init_start(wal, engine->name);
-
-       if (engine->id == RCS)
-               rcs_engine_wa_init(engine);
-       else
-               xcs_engine_wa_init(engine);
-
+       engine_init_workarounds(engine, wal);
        wa_init_finish(wal);
 }
 
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
-                                           const char *from)
-{
-       return wa_list_verify(engine->i915, &engine->wa_list, from);
-}
-
 #include "selftests/intel_workarounds.c"
 #endif
 
 #include "igt_wedge_me.h"
 #include "mock_context.h"
 
+/* Engine name plus the "_REF" suffix used for reference list names below. */
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
+/*
+ * Freshly built reference copies of the GT and per-engine workaround
+ * lists, kept separate from the lists the driver applied at load time
+ * so the selftests can verify hardware state against them.
+ */
+struct wa_lists {
+       struct i915_wa_list gt_wa_list;
+       struct {
+               char name[REF_NAME_MAX];
+               struct i915_wa_list wa_list;
+       } engine[I915_NUM_ENGINES];
+};
+
+/*
+ * Build reference GT and per-engine workaround lists into @lists.
+ * Must be paired with reference_lists_fini() to release the lists.
+ */
+static void
+reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       memset(lists, 0, sizeof(*lists));
+
+       wa_init_start(&lists->gt_wa_list, "GT_REF");
+       gt_init_workarounds(i915, &lists->gt_wa_list);
+       wa_init_finish(&lists->gt_wa_list);
+
+       for_each_engine(engine, i915, id) {
+               struct i915_wa_list *wal = &lists->engine[id].wa_list;
+               char *name = lists->engine[id].name;
+
+               /* e.g. "rcs0_REF" — distinguishes these from the live lists */
+               snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
+
+               wa_init_start(wal, name);
+               engine_init_workarounds(engine, wal);
+               wa_init_finish(wal);
+       }
+}
+
+/* Release every workaround list allocated by reference_lists_init(). */
+static void
+reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, i915, id)
+               intel_wa_list_free(&lists->engine[id].wa_list);
+
+       intel_wa_list_free(&lists->gt_wa_list);
+}
+
 static struct drm_i915_gem_object *
 read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 {
        return err;
 }
 
-static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+/*
+ * Verify hardware state against the reference GT list and every engine's
+ * reference list in @lists.  @str labels the verification point (e.g.
+ * "before reset") as used by the callers.  Returns true only if all
+ * lists verify.
+ */
+static bool verify_gt_engine_wa(struct drm_i915_private *i915,
+                               struct wa_lists *lists, const char *str)
 {
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
         bool ok = true;
 
-       ok &= intel_gt_verify_workarounds(i915, str);
+       ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
+
         for_each_engine(engine, i915, id)
-               ok &= intel_engine_verify_workarounds(engine, str);
+               ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
 
         return ok;
 }
 {
        struct drm_i915_private *i915 = arg;
        struct i915_gpu_error *error = &i915->gpu_error;
+       struct wa_lists lists;
        bool ok;
 
        if (!intel_has_gpu_reset(i915))
 
        igt_global_reset_lock(i915);
        intel_runtime_pm_get(i915);
+       reference_lists_init(i915, &lists);
 
-       ok = verify_gt_engine_wa(i915, "before reset");
+       ok = verify_gt_engine_wa(i915, &lists, "before reset");
        if (!ok)
                goto out;
 
        set_bit(I915_RESET_HANDOFF, &error->flags);
        i915_reset(i915, ALL_ENGINES, "live_workarounds");
 
-       ok = verify_gt_engine_wa(i915, "after reset");
+       ok = verify_gt_engine_wa(i915, &lists, "after reset");
 
 out:
+       reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915);
        igt_global_reset_unlock(i915);
 
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
+       struct wa_lists lists;
        int ret = 0;
 
        if (!intel_has_reset_engine(i915))
 
        igt_global_reset_lock(i915);
        intel_runtime_pm_get(i915);
+       reference_lists_init(i915, &lists);
 
        for_each_engine(engine, i915, id) {
                bool ok;
 
                pr_info("Verifying after %s reset...\n", engine->name);
 
-               ok = verify_gt_engine_wa(i915, "before reset");
+               ok = verify_gt_engine_wa(i915, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
 
                i915_reset_engine(engine, "live_workarounds");
 
-               ok = verify_gt_engine_wa(i915, "after idle reset");
+               ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);
 
-               ok = verify_gt_engine_wa(i915, "after busy reset");
+               ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
        }
 
 err:
+       reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);