gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
 
        rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
-       if (INTEL_GEN(i915) >= 9) {
+       if (GRAPHICS_VER(i915) >= 9) {
                gen9_powergate_enable =
                        intel_uncore_read(uncore, GEN9_PG_ENABLE);
                gen9_powergate_status =
                        intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
        }
 
-       if (INTEL_GEN(i915) <= 7)
+       if (GRAPHICS_VER(i915) <= 7)
                sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
                                       &rc6vids, NULL);
 
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
-       if (INTEL_GEN(i915) >= 9) {
+       if (GRAPHICS_VER(i915) >= 9) {
                seq_printf(m, "Render Well Gating Enabled: %s\n",
                           yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
                seq_printf(m, "Media Well Gating Enabled: %s\n",
 
        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
-       if (INTEL_GEN(i915) >= 9) {
+       if (GRAPHICS_VER(i915) >= 9) {
                seq_printf(m, "Render Power Well: %s\n",
                           (gen9_powergate_status &
                            GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
        print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
        print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
 
-       if (INTEL_GEN(i915) <= 7) {
+       if (GRAPHICS_VER(i915) <= 7) {
                seq_printf(m, "RC6   voltage: %dmV\n",
                           GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
                seq_printf(m, "RC6+  voltage: %dmV\n",
 
        wakeref = intel_runtime_pm_get(uncore->rpm);
 
-       if (IS_GEN(i915, 5)) {
+       if (GRAPHICS_VER(i915) == 5) {
                u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
                u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
 
 
                seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(rps, rps->efficient_freq));
-       } else if (INTEL_GEN(i915) >= 6) {
+       } else if (GRAPHICS_VER(i915) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
                reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
-               if (INTEL_GEN(i915) >= 9) {
+               if (GRAPHICS_VER(i915) >= 9) {
                        reqf >>= 23;
                } else {
                        reqf &= ~GEN6_TURBO_DISABLE;
 
                intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 
-               if (INTEL_GEN(i915) >= 11) {
+               if (GRAPHICS_VER(i915) >= 11) {
                        pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         */
                        pm_isr = 0;
                        pm_iir = 0;
-               } else if (INTEL_GEN(i915) >= 8) {
+               } else if (GRAPHICS_VER(i915) >= 8) {
                        pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
                        pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
                        pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
 
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
-               if (INTEL_GEN(i915) <= 10)
+               if (GRAPHICS_VER(i915) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
-                          (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+                          (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(i915) ||
-                            INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
 
                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(i915) ||
-                            INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
 
                max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(i915) ||
-                            INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
 {
        struct intel_gt *gt = m->private;
        struct drm_i915_private *i915 = gt->i915;
-       const bool edram = INTEL_GEN(i915) > 8;
+       const bool edram = GRAPHICS_VER(i915) > 8;
        struct intel_rps *rps = &gt->rps;
        unsigned int max_gpu_freq, min_gpu_freq;
        intel_wakeref_t wakeref;
 
        min_gpu_freq = rps->min_freq;
        max_gpu_freq = rps->max_freq;
-       if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+       if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq /= GEN9_FREQ_SCALER;
                max_gpu_freq /= GEN9_FREQ_SCALER;
                           intel_gpu_freq(rps,
                                          (gpu_freq *
                                           (IS_GEN9_BC(i915) ||
-                                           INTEL_GEN(i915) >= 10 ?
+                                           GRAPHICS_VER(i915) >= 10 ?
                                            GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
 
        seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
 
-       if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
+       if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
                struct intel_uncore *uncore = gt->uncore;
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;
 
        cmd = MI_FLUSH;
        if (mode & EMIT_INVALIDATE) {
                cmd |= MI_EXE_FLUSH;
-               if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
+               if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
                        cmd |= MI_INVALIDATE_ISP;
        }
 
 
                 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
                 * pipe control.
                 */
-               if (IS_GEN(rq->engine->i915, 9))
+               if (GRAPHICS_VER(rq->engine->i915) == 9)
                        vf_flush_wa = true;
 
                /* WaForGAMHang:kbl */
 
         *
         * Gen12 has inherited the same read-only fault issue from gen11.
         */
-       ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+       ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
 
        if (HAS_LMEM(gt->i915))
                ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
 
 {
        int ret;
 
-       GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+       GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
 
        ret = intel_context_lock_pinned(ce);
        if (ret)
 
         * Though they added more rings on g4x/ilk, they did not add
         * per-engine HWSTAM until gen6.
         */
-       if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
+       if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
                return;
 
-       if (INTEL_GEN(engine->i915) >= 3)
+       if (GRAPHICS_VER(engine->i915) >= 3)
                ENGINE_WRITE(engine, RING_HWSTAM, mask);
        else
                ENGINE_WRITE16(engine, RING_HWSTAM, mask);
                CONFIG_DRM_I915_TIMESLICE_DURATION;
 
        /* Override to uninterruptible for OpenCL workloads. */
-       if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+       if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
                engine->props.preempt_timeout_ms = 0;
 
        engine->defaults = engine->props; /* never to change again */
                 * HEVC support is present on first engine instance
                 * before Gen11 and on all instances afterwards.
                 */
-               if (INTEL_GEN(i915) >= 11 ||
-                   (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+               if (GRAPHICS_VER(i915) >= 11 ||
+                   (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
                        engine->uabi_capabilities |=
                                I915_VIDEO_CLASS_CAPABILITY_HEVC;
 
                 * SFC block is present only on even logical engine
                 * instances.
                 */
-               if ((INTEL_GEN(i915) >= 11 &&
+               if ((GRAPHICS_VER(i915) >= 11 &&
                     (engine->gt->info.vdbox_sfc_access &
                      BIT(engine->instance))) ||
-                   (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+                   (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
                        engine->uabi_capabilities |=
                                I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
        } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
-               if (INTEL_GEN(i915) >= 9)
+               if (GRAPHICS_VER(i915) >= 9)
                        engine->uabi_capabilities |=
                                I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
        }
 
        info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
 
-       if (INTEL_GEN(i915) < 11)
+       if (GRAPHICS_VER(i915) < 11)
                return info->engine_mask;
 
        media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
                 * hooked up to an SFC (Scaler & Format Converter) unit.
                 * In TGL each VDBOX has access to an SFC.
                 */
-               if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+               if (GRAPHICS_VER(i915) >= 12 || logical_vdbox++ % 2 == 0)
                        gt->info.vdbox_sfc_access |= BIT(i);
        }
        drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
        intel_engine_init_whitelist(engine);
        intel_engine_init_ctx_wa(engine);
 
-       if (INTEL_GEN(engine->i915) >= 12)
+       if (GRAPHICS_VER(engine->i915) >= 12)
                engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
 
        return 0;
 
        u64 acthd;
 
-       if (INTEL_GEN(i915) >= 8)
+       if (GRAPHICS_VER(i915) >= 8)
                acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
-       else if (INTEL_GEN(i915) >= 4)
+       else if (GRAPHICS_VER(i915) >= 4)
                acthd = ENGINE_READ(engine, RING_ACTHD);
        else
                acthd = ENGINE_READ(engine, ACTHD);
 {
        u64 bbaddr;
 
-       if (INTEL_GEN(engine->i915) >= 8)
+       if (GRAPHICS_VER(engine->i915) >= 8)
                bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
        else
                bbaddr = ENGINE_READ(engine, RING_BBADDR);
 {
        int err = 0;
 
-       if (INTEL_GEN(engine->i915) < 3)
+       if (GRAPHICS_VER(engine->i915) < 3)
                return -ENODEV;
 
        ENGINE_TRACE(engine, "\n");
        u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
        enum forcewake_domains fw_domains;
 
-       if (INTEL_GEN(i915) >= 11) {
+       if (GRAPHICS_VER(i915) >= 11) {
                mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
                mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
        } else {
 
        memset(instdone, 0, sizeof(*instdone));
 
-       switch (INTEL_GEN(i915)) {
+       switch (GRAPHICS_VER(i915)) {
        default:
                instdone->instdone =
                        intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
 
                instdone->slice_common =
                        intel_uncore_read(uncore, GEN7_SC_INSTDONE);
-               if (INTEL_GEN(i915) >= 12) {
+               if (GRAPHICS_VER(i915) >= 12) {
                        instdone->slice_common_extra[0] =
                                intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
                        instdone->slice_common_extra[1] =
                idle = false;
 
        /* No bit for gen2, so assume the CS parser is idle */
-       if (INTEL_GEN(engine->i915) > 2 &&
+       if (GRAPHICS_VER(engine->i915) > 2 &&
            !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
                idle = false;
 
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
-       switch (INTEL_GEN(engine->i915)) {
+       switch (GRAPHICS_VER(engine->i915)) {
        case 2:
                return false; /* uses physical not virtual addresses */
        case 3:
        struct intel_engine_execlists * const execlists = &engine->execlists;
        u64 addr;
 
-       if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
+       if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
                drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
        if (HAS_EXECLISTS(dev_priv)) {
                drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
        drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
                   ENGINE_READ(engine, RING_CTL),
                   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
-       if (INTEL_GEN(engine->i915) > 2) {
+       if (GRAPHICS_VER(engine->i915) > 2) {
                drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
                           ENGINE_READ(engine, RING_MI_MODE),
                           ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
        }
 
-       if (INTEL_GEN(dev_priv) >= 6) {
+       if (GRAPHICS_VER(dev_priv) >= 6) {
                drm_printf(m, "\tRING_IMR:   0x%08x\n",
                           ENGINE_READ(engine, RING_IMR));
                drm_printf(m, "\tRING_ESR:   0x%08x\n",
        addr = intel_engine_get_last_batch_head(engine);
        drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
                   upper_32_bits(addr), lower_32_bits(addr));
-       if (INTEL_GEN(dev_priv) >= 8)
+       if (GRAPHICS_VER(dev_priv) >= 8)
                addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
-       else if (INTEL_GEN(dev_priv) >= 4)
+       else if (GRAPHICS_VER(dev_priv) >= 4)
                addr = ENGINE_READ(engine, RING_DMA_FADD);
        else
                addr = ENGINE_READ(engine, DMA_FADD_I8XX);
        drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
                   upper_32_bits(addr), lower_32_bits(addr));
-       if (INTEL_GEN(dev_priv) >= 4) {
+       if (GRAPHICS_VER(dev_priv) >= 4) {
                drm_printf(m, "\tIPEIR: 0x%08x\n",
                           ENGINE_READ(engine, RING_IPEIR));
                drm_printf(m, "\tIPEHR: 0x%08x\n",
                }
                rcu_read_unlock();
                execlists_active_unlock_bh(execlists);
-       } else if (INTEL_GEN(dev_priv) > 6) {
+       } else if (GRAPHICS_VER(dev_priv) > 6) {
                drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
                           ENGINE_READ(engine, RING_PP_DIR_BASE));
                drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
 
                ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
                             head, upper_32_bits(csb), lower_32_bits(csb));
 
-               if (INTEL_GEN(engine->i915) >= 12)
+               if (GRAPHICS_VER(engine->i915) >= 12)
                        promote = gen12_csb_parse(csb);
                else
                        promote = gen8_csb_parse(csb);
 
        intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
 
-       if (INTEL_GEN(engine->i915) >= 11)
+       if (GRAPHICS_VER(engine->i915) >= 11)
                mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
        else
                mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
 
 static bool can_preempt(struct intel_engine_cs *engine)
 {
-       if (INTEL_GEN(engine->i915) > 8)
+       if (GRAPHICS_VER(engine->i915) > 8)
                return true;
 
        /* GPGPU on bdw requires extra w/a; not implemented */
        engine->emit_flush = gen8_emit_flush_xcs;
        engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
        engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
-       if (INTEL_GEN(engine->i915) >= 12) {
+       if (GRAPHICS_VER(engine->i915) >= 12) {
                engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
                engine->emit_flush = gen12_emit_flush_xcs;
        }
        engine->set_default_submission = execlists_set_default_submission;
 
-       if (INTEL_GEN(engine->i915) < 11) {
+       if (GRAPHICS_VER(engine->i915) < 11) {
                engine->irq_enable = gen8_logical_ring_enable_irq;
                engine->irq_disable = gen8_logical_ring_disable_irq;
        } else {
 {
        unsigned int shift = 0;
 
-       if (INTEL_GEN(engine->i915) < 11) {
+       if (GRAPHICS_VER(engine->i915) < 11) {
                const u8 irq_shifts[] = {
                        [RCS0]  = GEN8_RCS_IRQ_SHIFT,
                        [BCS0]  = GEN8_BCS_IRQ_SHIFT,
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
 {
-       switch (INTEL_GEN(engine->i915)) {
+       switch (GRAPHICS_VER(engine->i915)) {
        case 12:
                engine->emit_flush = gen12_emit_flush_rcs;
                engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
        execlists->csb_write =
                &engine->status_page.addr[intel_hws_csb_write_index(i915)];
 
-       if (INTEL_GEN(i915) < 11)
+       if (GRAPHICS_VER(i915) < 11)
                execlists->csb_size = GEN8_CSB_ENTRIES;
        else
                execlists->csb_size = GEN11_CSB_ENTRIES;
 
        engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-       if (INTEL_GEN(engine->i915) >= 11) {
+       if (GRAPHICS_VER(engine->i915) >= 11) {
                execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
                execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
        }
 
        if (!intel_vtd_active())
                return false;
 
-       if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+       if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
                return true;
 
-       if (IS_GEN(i915, 12))
+       if (GRAPHICS_VER(i915) == 12)
                return true; /* XXX DMAR fault reason 7 */
 
        return false;
 
        gen8_ggtt_invalidate(ggtt);
 
-       if (INTEL_GEN(i915) >= 12)
+       if (GRAPHICS_VER(i915) >= 12)
                intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
                                      GEN12_GUC_TLB_INV_CR_INVALIDATE);
        else
         * resort to an uncached mapping. The WC issue is easily caught by the
         * readback check when writing GTT PTE entries.
         */
-       if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+       if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 10)
                ggtt->gsm = ioremap(phys_addr, size);
        else
                ggtt->gsm = ioremap_wc(phys_addr, size);
                ggtt->vm.pte_encode = hsw_pte_encode;
        else if (IS_VALLEYVIEW(i915))
                ggtt->vm.pte_encode = byt_pte_encode;
-       else if (INTEL_GEN(i915) >= 7)
+       else if (GRAPHICS_VER(i915) >= 7)
                ggtt->vm.pte_encode = ivb_pte_encode;
        else
                ggtt->vm.pte_encode = snb_pte_encode;
        ggtt->vm.dma = i915->drm.dev;
        dma_resv_init(&ggtt->vm._resv);
 
-       if (INTEL_GEN(i915) <= 5)
+       if (GRAPHICS_VER(i915) <= 5)
                ret = i915_gmch_probe(ggtt);
-       else if (INTEL_GEN(i915) < 8)
+       else if (GRAPHICS_VER(i915) < 8)
                ret = gen6_gmch_probe(ggtt);
        else
                ret = gen8_gmch_probe(ggtt);
 
 int i915_ggtt_enable_hw(struct drm_i915_private *i915)
 {
-       if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+       if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
                return -EIO;
 
        return 0;
        if (flush)
                wbinvd_on_all_cpus();
 
-       if (INTEL_GEN(ggtt->vm.i915) >= 8)
+       if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
                setup_private_pat(ggtt->vm.gt->uncore);
 
        intel_ggtt_restore_fences(ggtt);
 
        int fence_pitch_shift;
        u64 val;
 
-       if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
+       if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
                fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
                fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
                fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
         * and explicitly managed for internal users.
         */
 
-       if (IS_GEN(i915, 2))
+       if (GRAPHICS_VER(i915) == 2)
                i830_write_fence_reg(fence);
-       else if (IS_GEN(i915, 3))
+       else if (GRAPHICS_VER(i915) == 3)
                i915_write_fence_reg(fence);
        else
                i965_write_fence_reg(fence);
 
 static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
 {
-       return INTEL_GEN(fence_to_i915(fence)) < 4;
+       return GRAPHICS_VER(fence_to_i915(fence)) < 4;
 }
 
 static int fence_update(struct i915_fence_reg *fence,
        u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
        u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-       if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
+       if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
                /*
                 * On BDW+, swizzling is not used. We leave the CPU memory
                 * controller in charge of optimizing memory accesses without
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-       } else if (INTEL_GEN(i915) >= 6) {
+       } else if (GRAPHICS_VER(i915) >= 6) {
                if (i915->preserve_bios_swizzle) {
                        if (intel_uncore_read(uncore, DISP_ARB_CTL) &
                            DISP_TILE_SURFACE_SWIZZLING) {
                                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        }
                }
-       } else if (IS_GEN(i915, 5)) {
+       } else if (GRAPHICS_VER(i915) == 5) {
                /*
                 * On Ironlake whatever DRAM config, GPU always do
                 * same swizzling setup.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
-       } else if (IS_GEN(i915, 2)) {
+       } else if (GRAPHICS_VER(i915) == 2) {
                /*
                 * As far as we know, the 865 doesn't have these bit 6
                 * swizzling issues.
                }
 
                /* check for L-shaped memory aka modified enhanced addressing */
-               if (IS_GEN(i915, 4) &&
+               if (GRAPHICS_VER(i915) == 4 &&
                    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
        if (!i915_ggtt_has_aperture(ggtt))
                num_fences = 0;
-       else if (INTEL_GEN(i915) >= 7 &&
+       else if (GRAPHICS_VER(i915) >= 7 &&
                 !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
                num_fences = 32;
-       else if (INTEL_GEN(i915) >= 4 ||
+       else if (GRAPHICS_VER(i915) >= 4 ||
                 IS_I945G(i915) || IS_I945GM(i915) ||
                 IS_G33(i915) || IS_PINEVIEW(i915))
                num_fences = 16;
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
 
-       if (INTEL_GEN(i915) < 5 ||
+       if (GRAPHICS_VER(i915) < 5 ||
            i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
                return;
 
        intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
 
-       if (IS_GEN(i915, 5))
+       if (GRAPHICS_VER(i915) == 5)
                return;
 
        intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
 
-       if (IS_GEN(i915, 6))
+       if (GRAPHICS_VER(i915) == 6)
                intel_uncore_write(uncore,
                                   ARB_MODE,
                                   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-       else if (IS_GEN(i915, 7))
+       else if (GRAPHICS_VER(i915) == 7)
                intel_uncore_write(uncore,
                                   ARB_MODE,
                                   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-       else if (IS_GEN(i915, 8))
+       else if (GRAPHICS_VER(i915) == 8)
                intel_uncore_write(uncore,
                                   GAMTARBMODE,
                                   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
        else
-               MISSING_CASE(INTEL_GEN(i915));
+               MISSING_CASE(GRAPHICS_VER(i915));
 }
 
                init_unused_ring(gt, SRB1_BASE);
                init_unused_ring(gt, SRB2_BASE);
                init_unused_ring(gt, SRB3_BASE);
-       } else if (IS_GEN(i915, 2)) {
+       } else if (GRAPHICS_VER(i915) == 2) {
                init_unused_ring(gt, SRB0_BASE);
                init_unused_ring(gt, SRB1_BASE);
-       } else if (IS_GEN(i915, 3)) {
+       } else if (GRAPHICS_VER(i915) == 3) {
                init_unused_ring(gt, PRB1_BASE);
                init_unused_ring(gt, PRB2_BASE);
        }
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
-       if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+       if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
                intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
 
        if (IS_HASWELL(i915))
        struct intel_uncore *uncore = gt->uncore;
        u32 eir;
 
-       if (!IS_GEN(i915, 2))
+       if (GRAPHICS_VER(i915) != 2)
                clear_register(uncore, PGTBL_ER);
 
-       if (INTEL_GEN(i915) < 4)
+       if (GRAPHICS_VER(i915) < 4)
                clear_register(uncore, IPEIR(RENDER_RING_BASE));
        else
                clear_register(uncore, IPEIR_I965);
                                   I915_MASTER_ERROR_INTERRUPT);
        }
 
-       if (INTEL_GEN(i915) >= 12) {
+       if (GRAPHICS_VER(i915) >= 12) {
                rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
                intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
-       } else if (INTEL_GEN(i915) >= 8) {
+       } else if (GRAPHICS_VER(i915) >= 8) {
                rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
                intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
-       } else if (INTEL_GEN(i915) >= 6) {
+       } else if (GRAPHICS_VER(i915) >= 6) {
                struct intel_engine_cs *engine;
                enum intel_engine_id id;
 
        i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
        u32 fault;
 
-       if (INTEL_GEN(gt->i915) >= 12) {
+       if (GRAPHICS_VER(gt->i915) >= 12) {
                fault_reg = GEN12_RING_FAULT_REG;
                fault_data0_reg = GEN12_FAULT_TLB_DATA0;
                fault_data1_reg = GEN12_FAULT_TLB_DATA1;
        struct drm_i915_private *i915 = gt->i915;
 
        /* From GEN8 onwards we only have one 'All Engine Fault Register' */
-       if (INTEL_GEN(i915) >= 8)
+       if (GRAPHICS_VER(i915) >= 8)
                gen8_check_faults(gt);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                gen6_check_faults(gt);
        else
                return;
 void intel_gt_chipset_flush(struct intel_gt *gt)
 {
        wmb();
-       if (INTEL_GEN(gt->i915) < 6)
+       if (GRAPHICS_VER(gt->i915) < 6)
                intel_gtt_chipset_flush();
 }
 
         */
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 
-       err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+       err = intel_gt_init_scratch(gt,
+                                   GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
        if (err)
                goto out_fw;
 
 
        u32 f19_2_mhz = 19200000;
        u32 f24_mhz = 24000000;
 
-       if (INTEL_GEN(uncore->i915) <= 4) {
+       if (GRAPHICS_VER(uncore->i915) <= 4) {
                /*
                 * PRMs say:
                 *
                 *      (“CLKCFG”) MCHBAR register)
                 */
                return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
-       } else if (INTEL_GEN(uncore->i915) <= 8) {
+       } else if (GRAPHICS_VER(uncore->i915) <= 8) {
                /*
                 * PRMs say:
                 *
                 *      rolling over every 1.5 hours).
                 */
                return f12_5_mhz;
-       } else if (INTEL_GEN(uncore->i915) <= 9) {
+       } else if (GRAPHICS_VER(uncore->i915) <= 9) {
                u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
                u32 freq = 0;
 
                }
 
                return freq;
-       } else if (INTEL_GEN(uncore->i915) <= 12) {
+       } else if (GRAPHICS_VER(uncore->i915) <= 12) {
                u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
                u32 freq = 0;
 
                } else {
                        u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
 
-                       if (INTEL_GEN(uncore->i915) <= 10)
+                       if (GRAPHICS_VER(uncore->i915) <= 10)
                                freq = gen10_get_crystal_clock_freq(uncore, c0);
                        else
                                freq = gen11_get_crystal_clock_freq(uncore, c0);
         * frozen machine.
         */
        val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
-       if (IS_GEN(gt->i915, 6))
+       if (GRAPHICS_VER(gt->i915) == 6)
                val = div_u64_roundup(val, 25) * 25;
 
        return val;
 
        struct intel_uncore *uncore = gt->uncore;
 
        GEN3_IRQ_RESET(uncore, GT);
-       if (INTEL_GEN(gt->i915) >= 6)
+       if (GRAPHICS_VER(gt->i915) >= 6)
                GEN3_IRQ_RESET(uncore, GEN6_PM);
 }
 
        }
 
        gt_irqs |= GT_RENDER_USER_INTERRUPT;
-       if (IS_GEN(gt->i915, 5))
+       if (GRAPHICS_VER(gt->i915) == 5)
                gt_irqs |= ILK_BSD_USER_INTERRUPT;
        else
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
 
        GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
 
-       if (INTEL_GEN(gt->i915) >= 6) {
+       if (GRAPHICS_VER(gt->i915) >= 6) {
                /*
                 * RPS interrupts will get enabled/disabled on demand when RPS
                 * itself is enabled/disabled.
 
        u32 mask = gt->pm_imr;
        i915_reg_t reg;
 
-       if (INTEL_GEN(i915) >= 11) {
+       if (GRAPHICS_VER(i915) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
                mask <<= 16; /* pm is in upper half */
-       } else if (INTEL_GEN(i915) >= 8) {
+       } else if (GRAPHICS_VER(i915) >= 8) {
                reg = GEN8_GT_IMR(2);
        } else {
                reg = GEN6_PMIMR;
 void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
 {
        struct intel_uncore *uncore = gt->uncore;
-       i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+       i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 
        lockdep_assert_held(>->irq_lock);
 
        u32 mask = gt->pm_ier;
        i915_reg_t reg;
 
-       if (INTEL_GEN(i915) >= 11) {
+       if (GRAPHICS_VER(i915) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
                mask <<= 16; /* pm is in upper half */
-       } else if (INTEL_GEN(i915) >= 8) {
+       } else if (GRAPHICS_VER(i915) >= 8) {
                reg = GEN8_GT_IER(2);
        } else {
                reg = GEN6_PMIER;
 
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
-       else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+       else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
         * driver.
         */
        if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
-           INTEL_GEN(i915) <= 10)
+           GRAPHICS_VER(i915) <= 10)
                intel_uncore_rmw(uncore,
                                 GEN8_GAMW_ECO_DEV_RW_IA,
                                 0,
                                 GAMW_ECO_ENABLE_64K_IPS_FIELD);
 
-       if (IS_GEN_RANGE(i915, 8, 11)) {
+       if (IS_GRAPHICS_VER(i915, 8, 11)) {
                bool can_use_gtt_cache = true;
 
                /*
              GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
        /* for scanout with eLLC */
-       if (INTEL_GEN(i915) >= 9)
+       if (GRAPHICS_VER(i915) >= 9)
                pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
        else
                pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
 {
        struct drm_i915_private *i915 = uncore->i915;
 
-       GEM_BUG_ON(INTEL_GEN(i915) < 8);
+       GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
 
-       if (INTEL_GEN(i915) >= 12)
+       if (GRAPHICS_VER(i915) >= 12)
                tgl_setup_private_ppat(uncore);
-       else if (INTEL_GEN(i915) >= 10)
+       else if (GRAPHICS_VER(i915) >= 10)
                cnl_setup_private_ppat(uncore);
        else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
                chv_setup_private_ppat(uncore);
 
 
        consts->min_gpu_freq = rps->min_freq;
        consts->max_gpu_freq = rps->max_freq;
-       if (INTEL_GEN(i915) >= 9) {
+       if (GRAPHICS_VER(i915) >= 9) {
                /* Convert GT frequency to 50 HZ units */
                consts->min_gpu_freq /= GEN9_FREQ_SCALER;
                consts->max_gpu_freq /= GEN9_FREQ_SCALER;
        const int diff = consts->max_gpu_freq - gpu_freq;
        unsigned int ia_freq = 0, ring_freq = 0;
 
-       if (INTEL_GEN(i915) >= 9) {
+       if (GRAPHICS_VER(i915) >= 9) {
                /*
                 * ring_freq = 2 * GT. ring_freq is in 100MHz units
                 * No floor required for ring frequency on SKL.
                 */
                ring_freq = gpu_freq;
-       } else if (INTEL_GEN(i915) >= 8) {
+       } else if (GRAPHICS_VER(i915) >= 8) {
                /* max(2 * GT, DDR). NB: GT is 50MHz units */
                ring_freq = max(consts->min_ring_freq, gpu_freq);
        } else if (IS_HASWELL(i915)) {
 
                *regs = MI_LOAD_REGISTER_IMM(count);
                if (flags & POSTED)
                        *regs |= MI_LRI_FORCE_POSTED;
-               if (INTEL_GEN(engine->i915) >= 11)
+               if (GRAPHICS_VER(engine->i915) >= 11)
                        *regs |= MI_LRI_LRM_CS_MMIO;
                regs++;
 
        if (close) {
                /* Close the batch; used mainly by live_lrc_layout() */
                *regs = MI_BATCH_BUFFER_END;
-               if (INTEL_GEN(engine->i915) >= 10)
+               if (GRAPHICS_VER(engine->i915) >= 10)
                        *regs |= BIT(0);
        }
 }
         * addressing to automatic fixup the register state between the
         * physical engines for virtual engine.
         */
-       GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+       GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
                   !intel_engine_has_relative_mmio(engine));
 
        if (engine->class == RENDER_CLASS) {
-               if (INTEL_GEN(engine->i915) >= 12)
+               if (GRAPHICS_VER(engine->i915) >= 12)
                        return gen12_rcs_offsets;
-               else if (INTEL_GEN(engine->i915) >= 11)
+               else if (GRAPHICS_VER(engine->i915) >= 11)
                        return gen11_rcs_offsets;
-               else if (INTEL_GEN(engine->i915) >= 9)
+               else if (GRAPHICS_VER(engine->i915) >= 9)
                        return gen9_rcs_offsets;
                else
                        return gen8_rcs_offsets;
        } else {
-               if (INTEL_GEN(engine->i915) >= 12)
+               if (GRAPHICS_VER(engine->i915) >= 12)
                        return gen12_xcs_offsets;
-               else if (INTEL_GEN(engine->i915) >= 9)
+               else if (GRAPHICS_VER(engine->i915) >= 9)
                        return gen9_xcs_offsets;
                else
                        return gen8_xcs_offsets;
 
 static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 {
-       if (INTEL_GEN(engine->i915) >= 12)
+       if (GRAPHICS_VER(engine->i915) >= 12)
                return 0x60;
-       else if (INTEL_GEN(engine->i915) >= 9)
+       else if (GRAPHICS_VER(engine->i915) >= 9)
                return 0x54;
        else if (engine->class == RENDER_CLASS)
                return 0x58;
 
 static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
 {
-       if (INTEL_GEN(engine->i915) >= 12)
+       if (GRAPHICS_VER(engine->i915) >= 12)
                return 0x74;
-       else if (INTEL_GEN(engine->i915) >= 9)
+       else if (GRAPHICS_VER(engine->i915) >= 9)
                return 0x68;
        else if (engine->class == RENDER_CLASS)
                return 0xd8;
 
 static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
 {
-       if (INTEL_GEN(engine->i915) >= 12)
+       if (GRAPHICS_VER(engine->i915) >= 12)
                return 0x12;
-       else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+       else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)
                return 0x18;
        else
                return -1;
        if (engine->class != RENDER_CLASS)
                return -1;
 
-       if (INTEL_GEN(engine->i915) >= 12)
+       if (GRAPHICS_VER(engine->i915) >= 12)
                return 0xb6;
-       else if (INTEL_GEN(engine->i915) >= 11)
+       else if (GRAPHICS_VER(engine->i915) >= 11)
                return 0xaa;
        else
                return -1;
 static u32
 lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
 {
-       switch (INTEL_GEN(engine->i915)) {
+       switch (GRAPHICS_VER(engine->i915)) {
        default:
-               MISSING_CASE(INTEL_GEN(engine->i915));
+               MISSING_CASE(GRAPHICS_VER(engine->i915));
                fallthrough;
        case 12:
                return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
        ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
        if (inhibit)
                ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
-       if (INTEL_GEN(engine->i915) < 11)
+       if (GRAPHICS_VER(engine->i915) < 11)
                ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
                                           CTX_CTRL_RS_CTX_ENABLE);
        regs[CTX_CONTEXT_CONTROL] = ctl;
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                context_size += I915_GTT_PAGE_SIZE; /* for redzone */
 
-       if (INTEL_GEN(engine->i915) == 12) {
+       if (GRAPHICS_VER(engine->i915) == 12) {
                ce->wa_bb_page = context_size / PAGE_SIZE;
                context_size += PAGE_SIZE;
        }
        desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
        desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-       if (IS_GEN(ce->vm->i915, 8))
+       if (GRAPHICS_VER(ce->vm->i915) == 8)
                desc |= GEN8_CTX_L3LLC_COHERENT;
 
        return i915_ggtt_offset(ce->state) | desc;
        if (engine->class != RENDER_CLASS)
                return;
 
-       switch (INTEL_GEN(engine->i915)) {
+       switch (GRAPHICS_VER(engine->i915)) {
        case 12:
        case 11:
                return;
                wa_bb_fn[1] = NULL;
                break;
        default:
-               MISSING_CASE(INTEL_GEN(engine->i915));
+               MISSING_CASE(GRAPHICS_VER(engine->i915));
                return;
        }
 
 
                table->size = ARRAY_SIZE(dg1_mocs_table);
                table->table = dg1_mocs_table;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-       } else if (INTEL_GEN(i915) >= 12) {
+       } else if (GRAPHICS_VER(i915) >= 12) {
                table->size  = ARRAY_SIZE(tgl_mocs_table);
                table->table = tgl_mocs_table;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-       } else if (IS_GEN(i915, 11)) {
+       } else if (GRAPHICS_VER(i915) == 11) {
                table->size  = ARRAY_SIZE(icl_mocs_table);
                table->table = icl_mocs_table;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->table = broxton_mocs_table;
        } else {
-               drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
+               drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9,
                              "Platform that should have a MOCS table does not.\n");
                return 0;
        }
                return 0;
 
        /* WaDisableSkipCaching:skl,bxt,kbl,glk */
-       if (IS_GEN(i915, 9)) {
+       if (GRAPHICS_VER(i915) == 9) {
                int i;
 
                for (i = 0; i < table->size; i++)
 
 
        gtt_write_workarounds(gt);
 
-       if (IS_GEN(i915, 6))
+       if (GRAPHICS_VER(i915) == 6)
                gen6_ppgtt_enable(gt);
-       else if (IS_GEN(i915, 7))
+       else if (GRAPHICS_VER(i915) == 7)
                gen7_ppgtt_enable(gt);
 
        return 0;
 static struct i915_ppgtt *
 __ppgtt_create(struct intel_gt *gt)
 {
-       if (INTEL_GEN(gt->i915) < 8)
+       if (GRAPHICS_VER(gt->i915) < 8)
                return gen6_ppgtt_create(gt);
        else
                return gen8_ppgtt_create(gt);
 
                GEN9_MEDIA_PG_ENABLE |
                GEN11_MEDIA_SAMPLER_PG_ENABLE;
 
-       if (INTEL_GEN(gt->i915) >= 12) {
+       if (GRAPHICS_VER(gt->i915) >= 12) {
                for (i = 0; i < I915_MAX_VCS; i++)
                        if (HAS_ENGINE(gt, _VCS(i)))
                                pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
        enum intel_engine_id id;
 
        /* 2b: Program RC6 thresholds.*/
-       if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+       if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 10) {
                set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
                set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
        } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
        rc6vids = 0;
        ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
                                     &rc6vids, NULL);
-       if (IS_GEN(i915, 6) && ret) {
+       if (GRAPHICS_VER(i915) == 6 && ret) {
                drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
-       } else if (IS_GEN(i915, 6) &&
+       } else if (GRAPHICS_VER(i915) == 6 &&
                   (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
                drm_dbg(&i915->drm,
                        "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
        struct intel_uncore *uncore = rc6_to_uncore(rc6);
 
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-       if (INTEL_GEN(i915) >= 9)
+       if (GRAPHICS_VER(i915) >= 9)
                set(uncore, GEN9_PG_ENABLE, 0);
        set(uncore, GEN6_RC_CONTROL, 0);
        set(uncore, GEN6_RC_STATE, 0);
                chv_rc6_enable(rc6);
        else if (IS_VALLEYVIEW(i915))
                vlv_rc6_enable(rc6);
-       else if (INTEL_GEN(i915) >= 11)
+       else if (GRAPHICS_VER(i915) >= 11)
                gen11_rc6_enable(rc6);
-       else if (INTEL_GEN(i915) >= 9)
+       else if (GRAPHICS_VER(i915) >= 9)
                gen9_rc6_enable(rc6);
        else if (IS_BROADWELL(i915))
                gen8_rc6_enable(rc6);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                gen6_rc6_enable(rc6);
 
        rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
 
        if (engine->class != RENDER_CLASS)
                return NULL;
 
-       switch (INTEL_GEN(engine->i915)) {
+       switch (GRAPHICS_VER(engine->i915)) {
        case 6:
                return &gen6_null_state;
        case 7:
 
                struct intel_engine_cs *paired_vecs;
 
                if (engine->class != VIDEO_DECODE_CLASS ||
-                   !IS_GEN(engine->i915, 12))
+                   GRAPHICS_VER(engine->i915) != 12)
                        return 0;
 
                /*
                 */
        }
 
-       if (INTEL_GEN(gt->i915) >= 11)
+       if (GRAPHICS_VER(gt->i915) >= 11)
                ret = gen11_reset_engines(gt, engine_mask, retry);
        else
                ret = gen6_reset_engines(gt, engine_mask, retry);
 
        if (is_mock_gt(gt))
                return mock_reset;
-       else if (INTEL_GEN(i915) >= 8)
+       else if (GRAPHICS_VER(i915) >= 8)
                return gen8_reset_engines;
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                return gen6_reset_engines;
-       else if (INTEL_GEN(i915) >= 5)
+       else if (GRAPHICS_VER(i915) >= 5)
                return ilk_do_reset;
        else if (IS_G4X(i915))
                return g4x_do_reset;
        else if (IS_G33(i915) || IS_PINEVIEW(i915))
                return g33_do_reset;
-       else if (INTEL_GEN(i915) >= 3)
+       else if (GRAPHICS_VER(i915) >= 3)
                return i915_do_reset;
        else
                return NULL;
 int intel_reset_guc(struct intel_gt *gt)
 {
        u32 guc_domain =
-               INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+               GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
        int ret;
 
        GEM_BUG_ON(!HAS_GT_UC(gt->i915));
 
         * lost interrupts following a reset.
         */
        if (engine->class == RENDER_CLASS) {
-               if (INTEL_GEN(engine->i915) >= 6)
+               if (GRAPHICS_VER(engine->i915) >= 6)
                        mask &= ~BIT(0);
                else
                        mask &= ~I915_USER_INTERRUPT;
        u32 addr;
 
        addr = lower_32_bits(phys);
-       if (INTEL_GEN(engine->i915) >= 4)
+       if (GRAPHICS_VER(engine->i915) >= 4)
                addr |= (phys >> 28) & 0xf0;
 
        intel_uncore_write(engine->uncore, HWS_PGA, addr);
         * The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
-       if (IS_GEN(engine->i915, 7)) {
+       if (GRAPHICS_VER(engine->i915) == 7) {
                switch (engine->id) {
                /*
                 * No more rings exist on Gen7. Default case is only to shut up
                        hwsp = VEBOX_HWS_PGA_GEN7;
                        break;
                }
-       } else if (IS_GEN(engine->i915, 6)) {
+       } else if (GRAPHICS_VER(engine->i915) == 6) {
                hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
                hwsp = RING_HWS_PGA(engine->mmio_base);
 
 static void flush_cs_tlb(struct intel_engine_cs *engine)
 {
-       if (!IS_GEN_RANGE(engine->i915, 6, 7))
+       if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
                return;
 
        /* ring should be idle before issuing a sync flush*/
        ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
        ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
 
-       if (INTEL_GEN(engine->i915) >= 7) {
+       if (GRAPHICS_VER(engine->i915) >= 7) {
                ENGINE_WRITE_FW(engine,
                                RING_MODE_GEN7,
                                _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
                                         5000, 0, NULL))
                goto err;
 
-       if (INTEL_GEN(engine->i915) > 2)
+       if (GRAPHICS_VER(engine->i915) > 2)
                ENGINE_WRITE_FW(engine,
                                RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 
        u32 *cs;
 
        len = 4;
-       if (IS_GEN(i915, 7))
+       if (GRAPHICS_VER(i915) == 7)
                len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
-       else if (IS_GEN(i915, 5))
+       else if (GRAPHICS_VER(i915) == 5)
                len += 2;
        if (flags & MI_FORCE_RESTORE) {
                GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
                return PTR_ERR(cs);
 
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-       if (IS_GEN(i915, 7)) {
+       if (GRAPHICS_VER(i915) == 7) {
                *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
                if (num_engines) {
                        struct intel_engine_cs *signaller;
                                                GEN6_PSMI_SLEEP_MSG_DISABLE);
                        }
                }
-       } else if (IS_GEN(i915, 5)) {
+       } else if (GRAPHICS_VER(i915) == 5) {
                /*
                 * This w/a is only listed for pre-production ilk a/b steppings,
                 * but is also mentioned for programming the powerctx. To be
         */
        *cs++ = MI_NOOP;
 
-       if (IS_GEN(i915, 7)) {
+       if (GRAPHICS_VER(i915) == 7) {
                if (num_engines) {
                        struct intel_engine_cs *signaller;
                        i915_reg_t last_reg = {}; /* keep gcc quiet */
                        *cs++ = MI_NOOP;
                }
                *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-       } else if (IS_GEN(i915, 5)) {
+       } else if (GRAPHICS_VER(i915) == 5) {
                *cs++ = MI_SUSPEND_FLUSH;
        }
 
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
-       drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
+       drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
                    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
        intel_engine_cleanup_common(engine);
 
        intel_engine_set_irq_handler(engine, irq_handler);
 
-       if (INTEL_GEN(i915) >= 6) {
+       if (GRAPHICS_VER(i915) >= 6) {
                engine->irq_enable = gen6_irq_enable;
                engine->irq_disable = gen6_irq_disable;
-       } else if (INTEL_GEN(i915) >= 5) {
+       } else if (GRAPHICS_VER(i915) >= 5) {
                engine->irq_enable = gen5_irq_enable;
                engine->irq_disable = gen5_irq_disable;
-       } else if (INTEL_GEN(i915) >= 3) {
+       } else if (GRAPHICS_VER(i915) >= 3) {
                engine->irq_enable = gen3_irq_enable;
                engine->irq_disable = gen3_irq_disable;
        } else {
        struct drm_i915_private *i915 = engine->i915;
 
        /* gen8+ are only supported with execlists */
-       GEM_BUG_ON(INTEL_GEN(i915) >= 8);
+       GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);
 
        setup_irq(engine);
 
         * engine->emit_init_breadcrumb().
         */
        engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
-       if (IS_GEN(i915, 5))
+       if (GRAPHICS_VER(i915) == 5)
                engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
 
        engine->set_default_submission = i9xx_set_default_submission;
 
-       if (INTEL_GEN(i915) >= 6)
+       if (GRAPHICS_VER(i915) >= 6)
                engine->emit_bb_start = gen6_emit_bb_start;
-       else if (INTEL_GEN(i915) >= 4)
+       else if (GRAPHICS_VER(i915) >= 4)
                engine->emit_bb_start = gen4_emit_bb_start;
        else if (IS_I830(i915) || IS_I845G(i915))
                engine->emit_bb_start = i830_emit_bb_start;
 
        engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 
-       if (INTEL_GEN(i915) >= 7) {
+       if (GRAPHICS_VER(i915) >= 7) {
                engine->emit_flush = gen7_emit_flush_rcs;
                engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
-       } else if (IS_GEN(i915, 6)) {
+       } else if (GRAPHICS_VER(i915) == 6) {
                engine->emit_flush = gen6_emit_flush_rcs;
                engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
-       } else if (IS_GEN(i915, 5)) {
+       } else if (GRAPHICS_VER(i915) == 5) {
                engine->emit_flush = gen4_emit_flush_rcs;
        } else {
-               if (INTEL_GEN(i915) < 4)
+               if (GRAPHICS_VER(i915) < 4)
                        engine->emit_flush = gen2_emit_flush;
                else
                        engine->emit_flush = gen4_emit_flush_rcs;
 {
        struct drm_i915_private *i915 = engine->i915;
 
-       if (INTEL_GEN(i915) >= 6) {
+       if (GRAPHICS_VER(i915) >= 6) {
                /* gen6 bsd needs a special wa for tail updates */
-               if (IS_GEN(i915, 6))
+               if (GRAPHICS_VER(i915) == 6)
                        engine->set_default_submission = gen6_bsd_set_default_submission;
                engine->emit_flush = gen6_emit_flush_vcs;
                engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 
-               if (IS_GEN(i915, 6))
+               if (GRAPHICS_VER(i915) == 6)
                        engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
                else
                        engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
        } else {
                engine->emit_flush = gen4_emit_flush_vcs;
-               if (IS_GEN(i915, 5))
+               if (GRAPHICS_VER(i915) == 5)
                        engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
                else
                        engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
        engine->emit_flush = gen6_emit_flush_xcs;
        engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
-       if (IS_GEN(i915, 6))
+       if (GRAPHICS_VER(i915) == 6)
                engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
        else
                engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
 {
        struct drm_i915_private *i915 = engine->i915;
 
-       GEM_BUG_ON(INTEL_GEN(i915) < 7);
+       GEM_BUG_ON(GRAPHICS_VER(i915) < 7);
 
        engine->emit_flush = gen6_emit_flush_xcs;
        engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
        struct i915_vma *vma;
        int size, err;
 
-       if (!IS_GEN(engine->i915, 7) || engine->class != RENDER_CLASS)
+       if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
                return 0;
 
        err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
 
        struct intel_gt *gt = rps_to_gt(rps);
 
        spin_lock_irq(>->irq_lock);
-       if (INTEL_GEN(gt->i915) >= 11)
+       if (GRAPHICS_VER(gt->i915) >= 11)
                gen11_rps_reset_interrupts(rps);
        else
                gen6_rps_reset_interrupts(rps);
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt.
         */
-       if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
+       if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
                limits = rps->max_freq_softlimit << 23;
                if (val <= rps->min_freq_softlimit)
                        limits |= rps->min_freq_softlimit << 14;
            intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
 
        set(uncore, GEN6_RP_CONTROL,
-           (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+           (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
            GEN6_RP_MEDIA_HW_NORMAL_MODE |
            GEN6_RP_MEDIA_IS_GFX |
            GEN6_RP_ENABLE |
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 swreq;
 
-       if (INTEL_GEN(i915) >= 9)
+       if (GRAPHICS_VER(i915) >= 9)
                swreq = GEN9_FREQUENCY(val);
        else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
                swreq = HSW_FREQUENCY(val);
 
        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                err = vlv_rps_set(rps, val);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                err = gen6_rps_set(rps, val);
        else
                err = gen5_rps_set(rps, val);
        if (err)
                return err;
 
-       if (update && INTEL_GEN(i915) >= 6)
+       if (update && GRAPHICS_VER(i915) >= 6)
                gen6_rps_set_thresholds(rps, val);
        rps->last_freq = val;
 
        if (intel_rps_uses_timer(rps))
                rps_start_timer(rps);
 
-       if (IS_GEN(rps_to_i915(rps), 5))
+       if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
                gen5_rps_update(rps);
 }
 
 
        rps->efficient_freq = rps->rp1_freq;
        if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
-           IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+           IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
                u32 ddcc_status = 0;
 
                if (sandybridge_pcode_read(i915,
                                        rps->max_freq);
        }
 
-       if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+       if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
                /* Store the frequency values in 16.66 MHZ units, which is
                 * the natural hardware unit for SKL
                 */
        struct intel_uncore *uncore = gt->uncore;
 
        /* Program defaults and thresholds for RPS */
-       if (IS_GEN(gt->i915, 9))
+       if (GRAPHICS_VER(gt->i915) == 9)
                intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
                                      GEN9_FREQUENCY(rps->rp1_freq));
 
                enabled = chv_rps_enable(rps);
        else if (IS_VALLEYVIEW(i915))
                enabled = vlv_rps_enable(rps);
-       else if (INTEL_GEN(i915) >= 9)
+       else if (GRAPHICS_VER(i915) >= 9)
                enabled = gen9_rps_enable(rps);
-       else if (INTEL_GEN(i915) >= 8)
+       else if (GRAPHICS_VER(i915) >= 8)
                enabled = gen8_rps_enable(rps);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                enabled = gen6_rps_enable(rps);
        else if (IS_IRONLAKE_M(i915))
                enabled = gen5_rps_enable(rps);
        else
-               MISSING_CASE(INTEL_GEN(i915));
+               MISSING_CASE(GRAPHICS_VER(i915));
        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
        if (!enabled)
                return;
 
        if (has_busy_stats(rps))
                intel_rps_set_timer(rps);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                intel_rps_set_interrupts(rps);
        else
                /* Ironlake currently uses intel_ips.ko */ {}
        intel_rps_clear_interrupts(rps);
        intel_rps_clear_timer(rps);
 
-       if (INTEL_GEN(i915) >= 6)
+       if (GRAPHICS_VER(i915) >= 6)
                gen6_rps_disable(rps);
        else if (IS_IRONLAKE_M(i915))
                gen5_rps_disable(rps);
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
 
-       if (INTEL_GEN(i915) >= 9)
+       if (GRAPHICS_VER(i915) >= 9)
                return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
                                         GEN9_FREQ_SCALER);
        else if (IS_CHERRYVIEW(i915))
                return chv_gpu_freq(rps, val);
        else if (IS_VALLEYVIEW(i915))
                return byt_gpu_freq(rps, val);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                return val * GT_FREQUENCY_MULTIPLIER;
        else
                return val;
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
 
-       if (INTEL_GEN(i915) >= 9)
+       if (GRAPHICS_VER(i915) >= 9)
                return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
                                         GT_FREQUENCY_MULTIPLIER);
        else if (IS_CHERRYVIEW(i915))
                return chv_freq_opcode(rps, val);
        else if (IS_VALLEYVIEW(i915))
                return byt_freq_opcode(rps, val);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
        else
                return val;
                spin_unlock(>->irq_lock);
        }
 
-       if (INTEL_GEN(gt->i915) >= 8)
+       if (GRAPHICS_VER(gt->i915) >= 8)
                return;
 
        if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                chv_rps_init(rps);
        else if (IS_VALLEYVIEW(i915))
                vlv_rps_init(rps);
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                gen6_rps_init(rps);
        else if (IS_IRONLAKE_M(i915))
                gen5_rps_init(rps);
        rps->min_freq_softlimit = rps->min_freq;
 
        /* After setting max-softlimit, find the overclock max freq */
-       if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+       if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
                u32 params = 0;
 
                sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
-       if (INTEL_GEN(i915) <= 7)
+       if (GRAPHICS_VER(i915) <= 7)
                rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
-       if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
+       if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 }
 
 void intel_rps_sanitize(struct intel_rps *rps)
 {
-       if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+       if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
                rps_disable_interrupts(rps);
 }
 
 
        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                cagf = (rpstat >> 8) & 0xff;
-       else if (INTEL_GEN(i915) >= 9)
+       else if (GRAPHICS_VER(i915) >= 9)
                cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
        else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
                cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
-       else if (INTEL_GEN(i915) >= 6)
+       else if (GRAPHICS_VER(i915) >= 6)
                cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
        else
                cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
                vlv_punit_get(i915);
                freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
                vlv_punit_put(i915);
-       } else if (INTEL_GEN(i915) >= 6) {
+       } else if (GRAPHICS_VER(i915) >= 6) {
                freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
        } else {
                freq = intel_uncore_read(uncore, MEMSTAT_ILK);
         * We only register the i915 ips part with intel-ips once everything is
         * set up, to avoid intel-ips sneaking in and reading bogus values.
         */
-       if (IS_GEN(gt->i915, 5)) {
+       if (GRAPHICS_VER(gt->i915) == 5) {
                GEM_BUG_ON(ips_mchdev);
                rcu_assign_pointer(ips_mchdev, gt->i915);
                ips_ping_for_i915_load();
 
                cherryview_sseu_info_init(gt);
        else if (IS_BROADWELL(i915))
                bdw_sseu_info_init(gt);
-       else if (IS_GEN(i915, 9))
+       else if (GRAPHICS_VER(i915) == 9)
                gen9_sseu_info_init(gt);
-       else if (IS_GEN(i915, 10))
+       else if (GRAPHICS_VER(i915) == 10)
                gen10_sseu_info_init(gt);
-       else if (IS_GEN(i915, 11))
+       else if (GRAPHICS_VER(i915) == 11)
                gen11_sseu_info_init(gt);
-       else if (INTEL_GEN(i915) >= 12)
+       else if (GRAPHICS_VER(i915) >= 12)
                gen12_sseu_info_init(gt);
 }
 
         * No explicit RPCS request is needed to ensure full
         * slice/subslice/EU enablement prior to Gen9.
         */
-       if (INTEL_GEN(i915) < 9)
+       if (GRAPHICS_VER(i915) < 9)
                return 0;
 
        /*
         * subslices are enabled, or a count between one and four on the first
         * slice.
         */
-       if (IS_GEN(i915, 11) &&
+       if (GRAPHICS_VER(i915) == 11 &&
            slices == 1 &&
            subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
                GEM_BUG_ON(subslices & 1);
        if (sseu->has_slice_pg) {
                u32 mask, val = slices;
 
-               if (INTEL_GEN(i915) >= 11) {
+               if (GRAPHICS_VER(i915) >= 11) {
                        mask = GEN11_RPCS_S_CNT_MASK;
                        val <<= GEN11_RPCS_S_CNT_SHIFT;
                } else {
 
 
        if (IS_DG1(i915))
                dg1_ctx_workarounds_init(engine, wal);
-       else if (IS_GEN(i915, 12))
+       else if (GRAPHICS_VER(i915) == 12)
                gen12_ctx_workarounds_init(engine, wal);
-       else if (IS_GEN(i915, 11))
+       else if (GRAPHICS_VER(i915) == 11)
                icl_ctx_workarounds_init(engine, wal);
        else if (IS_CANNONLAKE(i915))
                cnl_ctx_workarounds_init(engine, wal);
                chv_ctx_workarounds_init(engine, wal);
        else if (IS_BROADWELL(i915))
                bdw_ctx_workarounds_init(engine, wal);
-       else if (IS_GEN(i915, 7))
+       else if (GRAPHICS_VER(i915) == 7)
                gen7_ctx_workarounds_init(engine, wal);
-       else if (IS_GEN(i915, 6))
+       else if (GRAPHICS_VER(i915) == 6)
                gen6_ctx_workarounds_init(engine, wal);
-       else if (INTEL_GEN(i915) < 8)
+       else if (GRAPHICS_VER(i915) < 8)
                ;
        else
-               MISSING_CASE(INTEL_GEN(i915));
+               MISSING_CASE(GRAPHICS_VER(i915));
 
        wa_init_finish(wal);
 }
        unsigned int slice, subslice;
        u32 l3_en, mcr, mcr_mask;
 
-       GEM_BUG_ON(INTEL_GEN(i915) < 10);
+       GEM_BUG_ON(GRAPHICS_VER(i915) < 10);
 
        /*
         * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
         * of every MMIO read.
         */
 
-       if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
+       if (GRAPHICS_VER(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
                u32 l3_fuse =
                        intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
                        GEN10_L3BANK_MASK;
        }
        subslice--;
 
-       if (INTEL_GEN(i915) >= 11) {
+       if (GRAPHICS_VER(i915) >= 11) {
                mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
                mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
        } else {
                dg1_gt_workarounds_init(i915, wal);
        else if (IS_TIGERLAKE(i915))
                tgl_gt_workarounds_init(i915, wal);
-       else if (IS_GEN(i915, 12))
+       else if (GRAPHICS_VER(i915) == 12)
                gen12_gt_workarounds_init(i915, wal);
-       else if (IS_GEN(i915, 11))
+       else if (GRAPHICS_VER(i915) == 11)
                icl_gt_workarounds_init(i915, wal);
        else if (IS_CANNONLAKE(i915))
                cnl_gt_workarounds_init(i915, wal);
                vlv_gt_workarounds_init(i915, wal);
        else if (IS_IVYBRIDGE(i915))
                ivb_gt_workarounds_init(i915, wal);
-       else if (IS_GEN(i915, 6))
+       else if (GRAPHICS_VER(i915) == 6)
                snb_gt_workarounds_init(i915, wal);
-       else if (IS_GEN(i915, 5))
+       else if (GRAPHICS_VER(i915) == 5)
                ilk_gt_workarounds_init(i915, wal);
        else if (IS_G4X(i915))
                g4x_gt_workarounds_init(i915, wal);
-       else if (IS_GEN(i915, 4))
+       else if (GRAPHICS_VER(i915) == 4)
                gen4_gt_workarounds_init(i915, wal);
-       else if (INTEL_GEN(i915) <= 8)
+       else if (GRAPHICS_VER(i915) <= 8)
                ;
        else
-               MISSING_CASE(INTEL_GEN(i915));
+               MISSING_CASE(GRAPHICS_VER(i915));
 }
 
 void intel_gt_init_workarounds(struct drm_i915_private *i915)
 
        if (IS_DG1(i915))
                dg1_whitelist_build(engine);
-       else if (IS_GEN(i915, 12))
+       else if (GRAPHICS_VER(i915) == 12)
                tgl_whitelist_build(engine);
-       else if (IS_GEN(i915, 11))
+       else if (GRAPHICS_VER(i915) == 11)
                icl_whitelist_build(engine);
        else if (IS_CANNONLAKE(i915))
                cnl_whitelist_build(engine);
                bxt_whitelist_build(engine);
        else if (IS_SKYLAKE(i915))
                skl_whitelist_build(engine);
-       else if (INTEL_GEN(i915) <= 8)
+       else if (GRAPHICS_VER(i915) <= 8)
                ;
        else
-               MISSING_CASE(INTEL_GEN(i915));
+               MISSING_CASE(GRAPHICS_VER(i915));
 
        wa_init_finish(w);
 }
                             ENABLE_SMALLPL);
        }
 
-       if (IS_GEN(i915, 11)) {
+       if (GRAPHICS_VER(i915) == 11) {
                /* This is not an Wa. Enable for better image quality */
                wa_masked_en(wal,
                             _3D_CHICKEN3,
                             FF_DOP_CLOCK_GATE_DISABLE);
        }
 
-       if (IS_GEN_RANGE(i915, 9, 12)) {
+       if (IS_GRAPHICS_VER(i915, 9, 12)) {
                /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
                wa_masked_en(wal,
                             GEN7_FF_SLICE_CS_CHICKEN1,
                             GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
        }
 
-       if (IS_GEN(i915, 9)) {
+       if (GRAPHICS_VER(i915) == 9) {
                /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
                wa_masked_en(wal,
                             GEN9_CSFE_CHICKEN1_RCS,
                                     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
        }
 
-       if (IS_GEN(i915, 7)) {
+       if (GRAPHICS_VER(i915) == 7) {
                /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
                wa_masked_en(wal,
                             GFX_MODE_GEN7,
                       GEN6_WIZ_HASHING_16x4);
        }
 
-       if (IS_GEN_RANGE(i915, 6, 7))
+       if (IS_GRAPHICS_VER(i915, 6, 7))
                /*
                 * We need to disable the AsyncFlip performance optimisations in
                 * order to use MI_WAIT_FOR_EVENT within the CS. It should
                             MI_MODE,
                             ASYNC_FLIP_PERF_DISABLE);
 
-       if (IS_GEN(i915, 6)) {
+       if (GRAPHICS_VER(i915) == 6) {
                /*
                 * Required for the hardware to program scanline values for
                 * waiting
                              CM0_STC_EVICT_DISABLE_LRA_SNB);
        }
 
-       if (IS_GEN_RANGE(i915, 4, 6))
+       if (IS_GRAPHICS_VER(i915, 4, 6))
                /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
                wa_add(wal, MI_MODE,
                       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
                       /* XXX bit doesn't stick on Broadwater */
                       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
 
-       if (IS_GEN(i915, 4))
+       if (GRAPHICS_VER(i915) == 4)
                /*
                 * Disable CONSTANT_BUFFER before it is loaded from the context
                 * image. For as it is loaded, it is executed and the stored
 static void
 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
-       if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
+       if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
                return;
 
        if (engine->class == RENDER_CLASS)
 {
        struct i915_wa_list *wal = &engine->wa_list;
 
-       if (INTEL_GEN(engine->i915) < 4)
+       if (GRAPHICS_VER(engine->i915) < 4)
                return;
 
        wa_init_start(wal, "engine", engine->name);
        const struct mcr_range *mcr_ranges;
        int i;
 
-       if (INTEL_GEN(i915) >= 12)
+       if (GRAPHICS_VER(i915) >= 12)
                mcr_ranges = mcr_ranges_gen12;
-       else if (INTEL_GEN(i915) >= 8)
+       else if (GRAPHICS_VER(i915) >= 8)
                mcr_ranges = mcr_ranges_gen8;
        else
                return false;
        u32 srm, *cs;
 
        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
-       if (INTEL_GEN(i915) >= 8)
+       if (GRAPHICS_VER(i915) >= 8)
                srm++;
 
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
 
                return PTR_ERR(cs);
 
        cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
-       if (INTEL_GEN(rq->engine->i915) >= 8)
+       if (GRAPHICS_VER(rq->engine->i915) >= 8)
                cmd++;
        *cs++ = cmd;
        *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
        enum intel_engine_id id;
        int err = 0;
 
-       if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+       if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
                return 0;
 
        perf_begin(gt);
        enum intel_engine_id id;
        int err = 0;
 
-       if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+       if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
                return 0;
 
        perf_begin(gt);
 
         * the same CS clock.
         */
 
-       if (INTEL_GEN(gt->i915) < 8)
+       if (GRAPHICS_VER(gt->i915) < 8)
                return 0;
 
        for_each_engine(engine, gt, id) {
 
                if (!intel_engine_has_preemption(engine))
                        continue;
 
-               if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+               if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
                        continue; /* we need per-context GPR */
 
                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
                return 0;
 
        /* As we use CS_GPR we cannot run before they existed on all engines. */
-       if (INTEL_GEN(gt->i915) < 9)
+       if (GRAPHICS_VER(gt->i915) < 9)
                return 0;
 
        for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
 
                return 0;
        }
 
-       if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
+       if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
                return 0;
 
-       if (IS_GEN(gt->i915, 5))
+       if (GRAPHICS_VER(gt->i915) == 5)
                /*
                 * XXX CS_TIMESTAMP low dword is dysfunctional?
                 *
                 */
                return 0;
 
-       if (IS_GEN(gt->i915, 4))
+       if (GRAPHICS_VER(gt->i915) == 4)
                /*
                 * XXX CS_TIMESTAMP appears gibberish
                 *
                u64 time;
                u64 dt;
 
-               if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
+               if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
                        continue;
 
                measure_clocks(engine, &cycles, &dt);
 
                goto cancel_rq;
 
        batch = h->batch;
-       if (INTEL_GEN(gt->i915) >= 8) {
+       if (GRAPHICS_VER(gt->i915) >= 8) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = lower_32_bits(hws_address(hws, rq));
                *batch++ = upper_32_bits(hws_address(hws, rq));
                *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
                *batch++ = lower_32_bits(vma->node.start);
                *batch++ = upper_32_bits(vma->node.start);
-       } else if (INTEL_GEN(gt->i915) >= 6) {
+       } else if (GRAPHICS_VER(gt->i915) >= 6) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = 0;
                *batch++ = lower_32_bits(hws_address(hws, rq));
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
                *batch++ = lower_32_bits(vma->node.start);
-       } else if (INTEL_GEN(gt->i915) >= 4) {
+       } else if (GRAPHICS_VER(gt->i915) >= 4) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *batch++ = 0;
                *batch++ = lower_32_bits(hws_address(hws, rq));
        }
 
        flags = 0;
-       if (INTEL_GEN(gt->i915) <= 5)
+       if (GRAPHICS_VER(gt->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
                if (found != ia_freq) {
                        pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
                               gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
-                              intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+                              intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
                               found, ia_freq);
                        err = -EINVAL;
                        break;
                if (found != ring_freq) {
                        pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
                               gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
-                              intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+                              intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
                               found, ring_freq);
                        err = -EINVAL;
                        break;
 
        int err;
        int n;
 
-       if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
+       if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
                return 0; /* GPR only on rcs0 for gen8 */
 
        err = gpr_make_dirty(engine->kernel_context);
 
 static bool skip_isolation(const struct intel_engine_cs *engine)
 {
-       if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+       if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
                return true;
 
-       if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+       if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
                return true;
 
        return false;
        /* We use the already reserved extra page in context state */
        if (!a->wa_bb_page) {
                GEM_BUG_ON(b->wa_bb_page);
-               GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
+               GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);
                goto unpin_b;
        }
 
 
         * which only controls CPU initiated MMIO. Routing does not
         * work for CS access so we cannot verify them on this path.
         */
-       return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+       return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
 }
 
 static int check_l3cc_table(struct intel_engine_cs *engine,
 
        }
 
        cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
-       if (INTEL_GEN(rq->engine->i915) >= 8)
+       if (GRAPHICS_VER(rq->engine->i915) >= 8)
                cmd++;
 
        *cs++ = cmd;
        int err = 0;
 
        /* A read of CTX_INFO upsets rc6. Poke the bear! */
-       if (INTEL_GEN(gt->i915) < 8)
+       if (GRAPHICS_VER(gt->i915) < 8)
                return 0;
 
        engines = randomised_engines(gt, &prng, &count);
 
                return ERR_CAST(cs);
        }
 
-       if (INTEL_GEN(engine->i915) >= 6) {
+       if (GRAPHICS_VER(engine->i915) >= 6) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4;
                *cs++ = 0;
-       } else if (INTEL_GEN(engine->i915) >= 4) {
+       } else if (GRAPHICS_VER(engine->i915) >= 4) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = 0;
        } else {
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
-               if (IS_GEN_RANGE(gt->i915, 4, 5))
+               if (IS_GRAPHICS_VER(gt->i915, 4, 5))
                        continue; /* MI_STORE_DWORD is privileged! */
 
                saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
 
                        i915_mmio_reg_offset(BXT_RP_STATE_CAP),
                        intel_uncore_read(rps_to_uncore(rps),
                                          BXT_RP_STATE_CAP));
-       } else if (IS_GEN(i915, 9)) {
+       } else if (GRAPHICS_VER(i915) == 9) {
                pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
                        i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
                        intel_uncore_read(rps_to_uncore(rps),
        struct igt_spinner spin;
        int err = 0;
 
-       if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
+       if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
                return 0;
 
        if (igt_spinner_init(&spin, gt))
 
        min_gpu_freq = rps->min_freq;
        max_gpu_freq = rps->max_freq;
-       if (INTEL_GEN(i915) >= 9) {
+       if (GRAPHICS_VER(i915) >= 9) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq /= GEN9_FREQ_SCALER;
                max_gpu_freq /= GEN9_FREQ_SCALER;
        if (!intel_rps_is_enabled(rps))
                return 0;
 
-       if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+       if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
                return 0;
 
        if (CPU_LATENCY >= 0)
        if (!intel_rps_is_enabled(rps))
                return 0;
 
-       if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+       if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
                return 0;
 
        if (CPU_LATENCY >= 0)
         * First, let's check whether or not we are receiving interrupts.
         */
 
-       if (!intel_rps_has_interrupts(rps) || INTEL_GEN(gt->i915) < 6)
+       if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
                return 0;
 
        intel_gt_pm_get(gt);
         * that theory.
         */
 
-       if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
+       if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
                return 0;
 
        if (!librapl_supported(gt->i915))
         * moving parts into dynamic reclocking based on load.
         */
 
-       if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
+       if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
                return 0;
 
        if (igt_spinner_init(&spin, gt))
 
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       if (INTEL_GEN(rq->engine->i915) >= 8) {
+       if (GRAPHICS_VER(rq->engine->i915) >= 8) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = addr;
                *cs++ = 0;
                *cs++ = value;
-       } else if (INTEL_GEN(rq->engine->i915) >= 4) {
+       } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = 0;
                *cs++ = addr;
         * even across multiple wraps.
         */
 
-       if (INTEL_GEN(gt->i915) < 8) /* CS convenience [SRM/LRM] */
+       if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */
                return 0;
 
        tl = intel_timeline_create(gt);
 
                goto err_req;
 
        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
-       if (INTEL_GEN(engine->i915) >= 8)
+       if (GRAPHICS_VER(engine->i915) >= 8)
                srm++;
 
        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
 
                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
-               if (INTEL_GEN(engine->i915) >= 8)
+               if (GRAPHICS_VER(engine->i915) >= 8)
                        lrm++, srm++;
 
                pr_debug("%s: Writing garbage to %x\n",
 
        /* Can the user write to the whitelisted registers? */
 
-       if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+       if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;
 
        for_each_engine(engine, gt, id) {
                goto err_req;
 
        srm = MI_STORE_REGISTER_MEM;
-       if (INTEL_GEN(engine->i915) >= 8)
+       if (GRAPHICS_VER(engine->i915) >= 8)
                srm++;
 
        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
 
 
        mutex_init(&guc->send_mutex);
        spin_lock_init(&guc->irq_lock);
-       if (INTEL_GEN(i915) >= 11) {
+       if (GRAPHICS_VER(i915) >= 11) {
                guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
                guc->interrupts.reset = gen11_reset_guc_interrupts;
                guc->interrupts.enable = gen11_enable_guc_interrupts;
 
        blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] =
                gt->info.vdbox_sfc_access;
 
-       if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) {
+       if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
                u32 distdbreg = intel_uncore_read(gt->uncore,
                                                  GEN12_DIST_DBS_POPULATED);
                blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] =
 
        else
                intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
 
-       if (IS_GEN(uncore->i915, 9)) {
+       if (GRAPHICS_VER(uncore->i915) == 9) {
                /* DOP Clock Gating Enable for GuC clocks */
                intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
                                 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
 
        engine->emit_flush = gen8_emit_flush_xcs;
        engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
        engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
-       if (INTEL_GEN(engine->i915) >= 12) {
+       if (GRAPHICS_VER(engine->i915) >= 12) {
                engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
                engine->emit_flush = gen12_emit_flush_xcs;
        }
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
 {
-       switch (INTEL_GEN(engine->i915)) {
+       switch (GRAPHICS_VER(engine->i915)) {
        case 12:
                engine->emit_flush = gen12_emit_flush_rcs;
                engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
         * The setup relies on several assumptions (e.g. irqs always enabled)
         * that are only valid on gen11+
         */
-       GEM_BUG_ON(INTEL_GEN(i915) < 11);
+       GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
 
        tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
 
 
 
        intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
 
-       if (INTEL_GEN(i915) >= 11) {
+       if (GRAPHICS_VER(i915) >= 11) {
                huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
                huc->status.mask = HUC_LOAD_SUCCESSFUL;
                huc->status.value = HUC_LOAD_SUCCESSFUL;
 
                return;
 
        /* Don't enable GuC/HuC on pre-Gen12 */
-       if (INTEL_GEN(i915) < 12) {
+       if (GRAPHICS_VER(i915) < 12) {
                i915->params.enable_guc = 0;
                return;
        }
 
        /* WaEnableuKernelHeaderValidFix:skl */
        /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
-       if (IS_GEN(i915, 9))
+       if (GRAPHICS_VER(i915) == 9)
                attempts = 3;
        else
                attempts = 1;