        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 
        intel_device_info_dump_flags(info, &p);
-       intel_device_info_dump_runtime(info, &p);
+       intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        intel_driver_caps_print(&dev_priv->caps, &p);
 
        kernel_param_lock(THIS_MODULE);
        seq_printf(m, "Global active requests: %d\n",
                   dev_priv->gt.active_requests);
        seq_printf(m, "CS timestamp frequency: %u kHz\n",
-                  dev_priv->info.cs_timestamp_frequency_khz);
+                  RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 
        p = drm_seq_file_printer(m);
        for_each_engine(engine, dev_priv, id)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_printer p = drm_seq_file_printer(m);
 
-       intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+       intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
 
        return 0;
 }
                                     struct sseu_dev_info *sseu)
 {
 #define SS_MAX 6
-       const struct intel_device_info *info = INTEL_INFO(dev_priv);
+       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;
 
                                    struct sseu_dev_info *sseu)
 {
 #define SS_MAX 3
-       const struct intel_device_info *info = INTEL_INFO(dev_priv);
+       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;
 
 
                if (IS_GEN9_BC(dev_priv))
                        sseu->subslice_mask[s] =
-                               INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 
                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;
 
        if (sseu->slice_mask) {
                sseu->eu_per_subslice =
-                               INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+                       RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        sseu->subslice_mask[s] =
-                               INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
                }
                sseu->eu_total = sseu->eu_per_subslice *
                                 sseu_subslice_total(sseu);
                /* subtract fused off EU(s) from enabled slice(s) */
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        u8 subslice_7eu =
-                               INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
+                               RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
 
                        sseu->eu_total -= hweight8(subslice_7eu);
                }
                return -ENODEV;
 
        seq_puts(m, "SSEU Device Info\n");
-       i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
+       i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
 
        seq_puts(m, "SSEU Device Status\n");
        memset(&sseu, 0, sizeof(sseu));
-       sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
-       sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+       sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+       sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
        sseu.max_eus_per_subslice =
-               INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
+               RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
 
        intel_runtime_pm_get(dev_priv);
 
 
                value = i915_cmd_parser_get_version(dev_priv);
                break;
        case I915_PARAM_SUBSLICE_TOTAL:
-               value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
+               value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
-               value = INTEL_INFO(dev_priv)->sseu.eu_total;
+               value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
                if (!value)
                        return -ENODEV;
                break;
                value = HAS_POOLED_EU(dev_priv);
                break;
        case I915_PARAM_MIN_EU_IN_POOL:
-               value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+               value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
                break;
        case I915_PARAM_HUC_STATUS:
                value = intel_huc_check_status(&dev_priv->huc);
                value = intel_engines_has_context_isolation(dev_priv);
                break;
        case I915_PARAM_SLICE_MASK:
-               value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+               value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_SUBSLICE_MASK:
-               value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
+               value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
-               value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+               value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
                break;
        case I915_PARAM_MMAP_GTT_COHERENT:
                value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
                struct drm_printer p = drm_debug_printer("i915 device info:");
 
                intel_device_info_dump(&dev_priv->info, &p);
-               intel_device_info_dump_runtime(&dev_priv->info, &p);
+               intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        }
 
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
        /* Setup the write-once "constant" device info */
        device_info = mkwrite_device_info(i915);
        memcpy(device_info, match_info, sizeof(*device_info));
-       device_info->device_id = pdev->device;
+       RUNTIME_INFO(i915)->device_id = pdev->device;
 
        BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
                     BITS_PER_TYPE(device_info->platform_mask));
 
        struct kmem_cache *priorities;
 
        const struct intel_device_info info;
+       struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
 
        /**
 }
 
 #define INTEL_INFO(dev_priv)   intel_info((dev_priv))
+#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
 #define DRIVER_CAPS(dev_priv)  (&(dev_priv)->caps)
 
 #define INTEL_GEN(dev_priv)    ((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv)  ((dev_priv)->info.device_id)
+#define INTEL_DEVID(dev_priv)  (RUNTIME_INFO(dev_priv)->device_id)
 
 #define REVID_FOREVER          0xff
 #define INTEL_REVID(dev_priv)  ((dev_priv)->drm.pdev->revision)
 
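A minimal usage sketch of the split these macros implement (illustrative only; example_dump() is a made-up function and is not part of the patch): write-once platform constants keep going through INTEL_INFO(), while anything discovered at probe time, such as the PCI device id or the number of usable rings, is read through RUNTIME_INFO().

	static void example_dump(struct drm_i915_private *i915)
	{
		/* Write-once, per-platform constant. */
		if (!INTEL_INFO(i915)->num_pipes)
			return;

		/* Probed at load time; may differ between otherwise identical SKUs. */
		DRM_DEBUG_DRIVER("device 0x%04x has %u rings\n",
				 INTEL_DEVID(i915),
				 RUNTIME_INFO(i915)->num_rings);
	}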
 
 static void err_print_capabilities(struct drm_i915_error_state_buf *m,
                                   const struct intel_device_info *info,
+                                  const struct intel_runtime_info *runtime,
                                   const struct intel_driver_caps *caps)
 {
        struct drm_printer p = i915_error_printer(m);
 
        intel_device_info_dump_flags(info, &p);
        intel_driver_caps_print(caps, &p);
-       intel_device_info_dump_topology(&info->sseu, &p);
+       intel_device_info_dump_topology(&runtime->sseu, &p);
 }
 
 static void err_print_params(struct drm_i915_error_state_buf *m,
        if (error->display)
                intel_display_print_error_state(m, error->display);
 
-       err_print_capabilities(m, &error->device_info, &error->driver_caps);
+       err_print_capabilities(m, &error->device_info, &error->runtime_info,
+                              &error->driver_caps);
        err_print_params(m, &error->params);
        err_print_uc(m, &error->uc);
 }
        memcpy(&error->device_info,
               INTEL_INFO(i915),
               sizeof(error->device_info));
+       memcpy(&error->runtime_info,
+              RUNTIME_INFO(i915),
+              sizeof(error->runtime_info));
        error->driver_caps = i915->caps;
 }
 
 
        u32 reset_count;
        u32 suspend_count;
        struct intel_device_info device_info;
+       struct intel_runtime_info runtime_info;
        struct intel_driver_caps driver_caps;
        struct i915_params params;
 
 
 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
 {
        return div64_u64(1000000000ULL * (2ULL << exponent),
-                        1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+                        1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
 }
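
For reference, the helper above sets the OA timer period to 2^(exponent + 1) ticks of the command-streamer timestamp clock, i.e. period_ns = 10^9 * 2^(exponent + 1) / (cs_timestamp_frequency_khz * 1000). As a worked example (clock value illustrative): with a 12000 kHz timestamp clock and exponent = 5, this yields 10^9 * 64 / 12,000,000 ≈ 5333 ns.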
 
 /**
                spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
 
                oa_sample_rate_hard_limit = 1000 *
-                       (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
+                       (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
                dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
 
                mutex_init(&dev_priv->perf.metrics_lock);
 
 static int query_topology_info(struct drm_i915_private *dev_priv,
                               struct drm_i915_query_item *query_item)
 {
-       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        struct drm_i915_query_topology_info topo;
        u32 slice_length, subslice_length, eu_length, total_length;
 
 
        drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
 }
 
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
                                    struct drm_printer *p)
 {
        sseu_dump(&info->sseu, p);
 
 static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u8 s_en;
        u32 ss_en, ss_en_mask;
        u8 eu_en;
 
 static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        const u32 fuse2 = I915_READ(GEN8_FUSE2);
        int s, ss;
        const int eu_mask = 0xff;
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u32 fuse;
 
        fuse = I915_READ(CHV_FUSE_GT);
 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 {
        struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       struct sseu_dev_info *sseu = &info->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        int s, ss;
        u32 fuse2, eu_disable, subslice_mask;
        const u8 eu_mask = 0xff;
 
 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        int s, ss;
        u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
 
 
 static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       struct sseu_dev_info *sseu = &info->sseu;
+       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u32 fuse1;
        int s, ss;
 
         * There isn't a register to tell us how many slices/subslices. We
         * work off the PCI-ids here.
         */
-       switch (info->gt) {
+       switch (INTEL_INFO(dev_priv)->gt) {
        default:
-               MISSING_CASE(info->gt);
+               MISSING_CASE(INTEL_INFO(dev_priv)->gt);
                /* fall through */
        case 1:
                sseu->slice_mask = BIT(0);
 {
        struct drm_i915_private *dev_priv =
                container_of(info, struct drm_i915_private, info);
+       struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
        enum pipe pipe;
 
        if (INTEL_GEN(dev_priv) >= 10) {
                for_each_pipe(dev_priv, pipe)
-                       info->num_scalers[pipe] = 2;
+                       runtime->num_scalers[pipe] = 2;
        } else if (IS_GEN(dev_priv, 9)) {
-               info->num_scalers[PIPE_A] = 2;
-               info->num_scalers[PIPE_B] = 2;
-               info->num_scalers[PIPE_C] = 1;
+               runtime->num_scalers[PIPE_A] = 2;
+               runtime->num_scalers[PIPE_B] = 2;
+               runtime->num_scalers[PIPE_C] = 1;
        }
 
        BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
 
        if (IS_GEN(dev_priv, 11))
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 6;
+                       runtime->num_sprites[pipe] = 6;
        else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 3;
+                       runtime->num_sprites[pipe] = 3;
        else if (IS_BROXTON(dev_priv)) {
                /*
                 * Skylake and Broxton currently don't expose the topmost plane as its
                 * down the line.
                 */
 
-               info->num_sprites[PIPE_A] = 2;
-               info->num_sprites[PIPE_B] = 2;
-               info->num_sprites[PIPE_C] = 1;
+               runtime->num_sprites[PIPE_A] = 2;
+               runtime->num_sprites[PIPE_B] = 2;
+               runtime->num_sprites[PIPE_C] = 1;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 2;
+                       runtime->num_sprites[pipe] = 2;
        } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
                for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 1;
+                       runtime->num_sprites[pipe] = 1;
        }
 
        if (i915_modparams.disable_display) {
        }
 
        /* Initialize command stream timestamp frequency */
-       info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+       runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
 }
 
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
 
        media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
 
-       info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-       info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-                            GEN11_GT_VEBOX_DISABLE_SHIFT;
+       RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+       RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+               GEN11_GT_VEBOX_DISABLE_SHIFT;
 
-       DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
+       DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
        for (i = 0; i < I915_MAX_VCS; i++) {
                if (!HAS_ENGINE(dev_priv, _VCS(i)))
                        continue;
 
-               if (!(BIT(i) & info->vdbox_enable)) {
+               if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
                        info->ring_mask &= ~ENGINE_MASK(_VCS(i));
                        DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
                        continue;
                 * hooked up to an SFC (Scaler & Format Converter) unit.
                 */
                if (logical_vdbox++ % 2 == 0)
-                       info->vdbox_sfc_access |= BIT(i);
+                       RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
        }
 
-       DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
+       DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
        for (i = 0; i < I915_MAX_VECS; i++) {
                if (!HAS_ENGINE(dev_priv, _VECS(i)))
                        continue;
 
-               if (!(BIT(i) & info->vebox_enable)) {
+               if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
                        info->ring_mask &= ~ENGINE_MASK(_VECS(i));
                        DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
                }
 
 typedef u8 intel_ring_mask_t;
 
 struct intel_device_info {
-       u16 device_id;
        u16 gen_mask;
 
        u8 gen;
        u8 gt; /* GT number, 0 if undefined */
-       u8 num_rings;
        intel_ring_mask_t ring_mask; /* Rings supported by the HW */
 
        enum intel_platform platform;
        u32 display_mmio_offset;
 
        u8 num_pipes;
-       u8 num_sprites[I915_MAX_PIPES];
-       u8 num_scalers[I915_MAX_PIPES];
 
 #define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
        int trans_offsets[I915_MAX_TRANSCODERS];
        int cursor_offsets[I915_MAX_PIPES];
 
+       struct color_luts {
+               u16 degamma_lut_size;
+               u16 gamma_lut_size;
+       } color;
+};
+
+struct intel_runtime_info {
+       u16 device_id;
+
+       u8 num_sprites[I915_MAX_PIPES];
+       u8 num_scalers[I915_MAX_PIPES];
+
+       u8 num_rings;
+
        /* Slice/subslice/EU info */
        struct sseu_dev_info sseu;
 
 
        /* Media engine access to SFC per instance */
        u8 vdbox_sfc_access;
-
-       struct color_luts {
-               u16 degamma_lut_size;
-               u16 gamma_lut_size;
-       } color;
 };
 
 struct intel_driver_caps {
                            struct drm_printer *p);
 void intel_device_info_dump_flags(const struct intel_device_info *info,
                                  struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
                                    struct drm_printer *p);
 void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
                                     struct drm_printer *p);
 
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int i;
 
-       crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
+       crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
        if (!crtc->num_scalers)
                return;
 
 
 };
 
 #define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
 
 /*
  * Per-pipe plane identifier.
 
 #define for_each_universal_plane(__dev_priv, __pipe, __p)              \
        for ((__p) = 0;                                                 \
-            (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+            (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;       \
             (__p)++)
 
 #define for_each_sprite(__dev_priv, __p, __s)                          \
        for ((__s) = 0;                                                 \
-            (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];        \
+            (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)];      \
             (__s)++)
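
As a concrete reading of the two macros above (plane counts illustrative): on a platform where RUNTIME_INFO(dev_priv)->num_sprites[] is 2 for every pipe, sprite_name(PIPE_B, 0) evaluates to 1 * 2 + 0 + 'A', i.e. 'C', so the first sprite on pipe B is logged as sprite C; for_each_sprite(dev_priv, PIPE_B, __s) then walks __s over 0 and 1, while for_each_universal_plane iterates one step further to also cover the primary plane.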
 
 #define for_each_port_masked(__port, __ports_mask) \
 
                goto cleanup;
        }
 
-       device_info->num_rings = hweight32(mask);
+       RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
 
        i915_check_and_clear_faults(dev_priv);
 
 
 u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
 {
-       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        u32 mcr_s_ss_select;
        u32 slice = fls(sseu->slice_mask);
        u32 subslice = fls(sseu->subslice_mask[slice]);
 
 static u32
 make_rpcs(struct drm_i915_private *dev_priv)
 {
-       bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
-       u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
-       u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
+       bool subslice_pg = RUNTIME_INFO(dev_priv)->sseu.has_subslice_pg;
+       u8 slices = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+       u8 subslices = hweight8(RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0]);
        u32 rpcs = 0;
 
        /*
         * must make an explicit request through RPCS for full
         * enablement.
        */
-       if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+       if (RUNTIME_INFO(dev_priv)->sseu.has_slice_pg) {
                u32 mask, val = slices;
 
                if (INTEL_GEN(dev_priv) >= 11) {
                rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
        }
 
-       if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+       if (RUNTIME_INFO(dev_priv)->sseu.has_eu_pg) {
                u32 val;
 
-               val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+               val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
                      GEN8_RPCS_EU_MIN_SHIFT;
                GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
                val &= GEN8_RPCS_EU_MIN_MASK;
 
                rpcs |= val;
 
-               val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+               val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
                      GEN8_RPCS_EU_MAX_SHIFT;
                GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
                val &= GEN8_RPCS_EU_MAX_MASK;
 
 
        val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-       switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
+       switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
        case 8:
                /* (2 * 4) config */
                rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
 
        struct intel_engine_cs *engine = rq->engine;
        enum intel_engine_id id;
        const int num_rings =
-               IS_HSW_GT1(i915) ? INTEL_INFO(i915)->num_rings - 1 : 0;
+               IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
        bool force_restore = false;
        int len;
        u32 *cs;
 
 
 #define instdone_slice_mask(dev_priv__) \
        (IS_GEN(dev_priv__, 7) ? \
-        1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+        1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
 
 #define instdone_subslice_mask(dev_priv__) \
        (IS_GEN(dev_priv__, 7) ? \
-        1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
+        1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
 
 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
        for ((slice__) = 0, (subslice__) = 0; \
 
 static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
                          struct intel_engine_cs *engine)
 {
-       u8 vdbox_sfc_access = INTEL_INFO(dev_priv)->vdbox_sfc_access;
+       u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
        i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
        u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
        i915_reg_t sfc_usage;
 static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
                             struct intel_engine_cs *engine)
 {
-       u8 vdbox_sfc_access = INTEL_INFO(dev_priv)->vdbox_sfc_access;
+       u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
        i915_reg_t sfc_forced_lock;
        u32 sfc_forced_lock_bit;
 
 
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
-               if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
+               if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
                        continue;
 
                /*
                 *
                 * ->    0 <= ss <= 3;
                 */
-               ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+               ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }
 
 
 static void wa_init_mcr(struct drm_i915_private *dev_priv)
 {
-       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
        struct i915_wa_list *wal = &dev_priv->gt_wa_list;
        u32 mcr_slice_subslice_mask;
 
 
                ncontexts++;
        }
        pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
-               ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+               ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
 
        dw = 0;
        list_for_each_entry(obj, &objects, st_link) {
                }
        }
        pr_info("Submitted %lu dwords (across %u engines)\n",
-               ndwords, INTEL_INFO(i915)->num_rings);
+               ndwords, RUNTIME_INFO(i915)->num_rings);
 
        dw = 0;
        list_for_each_entry(obj, &objects, st_link) {
                count += this;
        }
        pr_info("Checked %lu scratch offsets across %d engines\n",
-               count, INTEL_INFO(i915)->num_rings);
+               count, RUNTIME_INFO(i915)->num_rings);
 
 out_rpm:
        intel_runtime_pm_put(i915);
 
 
        pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
                count, flags,
-               INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+               RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
        return 0;
 }
 
 
        pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
                count, flags,
-               INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+               RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
        return 0;
 }