        return &counters[array_index_nospec(idx, num_counters)];
 }
 
-static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
+static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
 {
        if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
-               return false;
+               return 0;
 
-       return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
+       return vcpu->arch.perf_capabilities;
+}
+
+static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
+{
+       return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
 }
 
 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
-       vcpu->arch.perf_capabilities = 0;
 
        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;
 
        perf_get_x86_pmu_capability(&x86_pmu);
-       if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
-               vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
 
        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         x86_pmu.num_counters_gp);
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
                pmu->fixed_counters[i].current_config = 0;
        }
+
+       vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
 }
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)