                fallthrough;
        case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-               hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
-                               (idx - INTEL_PMC_IDX_FIXED);
+               hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED);
                hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
                                        INTEL_PMC_FIXED_RDPMC_BASE;
                break;
        for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
                if (fixed_counter_disabled(idx, cpuc->pmu))
                        continue;
-               rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
+               rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count);
 
                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                        if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
                                continue;
 
-                       wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+                       wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
                } else {
                        wrmsrl(x86_pmu_event_addr(i), 0);
                }
 
        for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
                if (fixed_counter_disabled(idx, cpuc->pmu))
                        continue;
-               wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+               wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
        }
 
        if (ds)
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
+       .fixedctr               = MSR_ARCH_PERFMON_FIXED_CTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
+       .fixedctr               = MSR_ARCH_PERFMON_FIXED_CTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        }
 }
 
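+/*
+ * In the V6+ aliased MSR layout each counter's registers are strided
+ * by MSR_IA32_PMC_V6_STEP: counter N's CTR sits at base + 4 * N and
+ * its CFG_A at base + 1 + 4 * N, so the offset is the same whether
+ * @eventsel is set or not.
+ */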
+static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
+{
+       return MSR_IA32_PMC_V6_STEP * index;
+}
+
 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
        { hybrid_small, "cpu_atom" },
        { hybrid_big, "cpu_core" },
                pr_cont("full-width counters, ");
        }
 
+       /* Support V6+ MSR Aliasing */
+       if (x86_pmu.version >= 6) {
+               x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
+               x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
+               x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
+               x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
+       }
+
        if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
                x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
 
 
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
+       unsigned        fixedctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
                                  x86_pmu.addr_offset(index, false) : index);
 }
 
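+/*
+ * Address of fixed counter @index: base plus the PMU-specific offset,
+ * falling back to the legacy contiguous layout (fixedctr + index)
+ * when no addr_offset() callback is set.
+ */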
+static inline unsigned int x86_pmu_fixed_ctr_addr(int index)
+{
+       return x86_pmu.fixedctr + (x86_pmu.addr_offset ?
+                                  x86_pmu.addr_offset(index, false) : index);
+}
+
 static inline int x86_pmu_rdpmc_index(int index)
 {
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 
 #define MSR_RELOAD_PMC0                        0x000014c1
 #define MSR_RELOAD_FIXED_CTR0          0x00001309
 
+/* V6 PMON MSR range */
+#define MSR_IA32_PMC_V6_GP0_CTR                0x1900
+#define MSR_IA32_PMC_V6_GP0_CFG_A      0x1901
+#define MSR_IA32_PMC_V6_FX0_CTR                0x1980
+#define MSR_IA32_PMC_V6_STEP           4
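+/*
+ * Counter N's MSRs live at base + N * MSR_IA32_PMC_V6_STEP, e.g. GP
+ * counter 2: CTR at 0x1908, CFG_A at 0x1909; fixed counter 2: 0x1988.
+ */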
+
 /* KeyID partitioning between MKTME and TDX */
 #define MSR_IA32_MKTME_KEYID_PARTITIONING      0x00000087
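
For reference, a minimal user-space sketch (not part of the patch) of the
address arithmetic the new helpers perform. It reuses the V6 constants added
above; MSR_ARCH_PERFMON_FIXED_CTR0 (0x309) is supplied locally since the
patch only references it, and v6_addr_offset() is a hypothetical stand-in
for the kernel's intel_pmu_v6_addr_offset():

#include <stdio.h>
#include <stdbool.h>

#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define MSR_IA32_PMC_V6_FX0_CTR		0x1980
#define MSR_IA32_PMC_V6_STEP		4

/* Stand-in for intel_pmu_v6_addr_offset(): stride ignores @eventsel. */
static int v6_addr_offset(int index, bool eventsel)
{
	(void)eventsel;
	return MSR_IA32_PMC_V6_STEP * index;
}

int main(void)
{
	for (int idx = 0; idx < 4; idx++) {
		/* Legacy layout: fixed counters contiguous from FIXED_CTR0. */
		unsigned int legacy = MSR_ARCH_PERFMON_FIXED_CTR0 + idx;
		/* V6 layout: what x86_pmu_fixed_ctr_addr() computes. */
		unsigned int v6 = MSR_IA32_PMC_V6_FX0_CTR +
				  v6_addr_offset(idx, false);

		printf("fixed%d: legacy 0x%03x, v6 0x%04x\n", idx, legacy, v6);
	}
	return 0;
}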