return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
 }
 
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
-{
-       struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
-
-       return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
-}
-
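The out-of-line helper is dropped because, after this patch, intel_pmu_refresh() populates lbr_desc->records only when the vCPU's PERF_CAPABILITIES advertises an LBR format (next hunk). records.nr != 0 therefore implies PMU_CAP_LBR_FMT, so the enable check collapses to a plain nr test and can live in the header as an inline; a self-contained model of this equivalence follows the header hunk at the end.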
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
        struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
-       if (cpuid_model_is_consistent(vcpu))
+       perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+       if (cpuid_model_is_consistent(vcpu) &&
+           (perf_capabilities & PMU_CAP_LBR_FMT))
                x86_perf_get_lbr(&lbr_desc->records);
        else
                lbr_desc->records.nr = 0;

        if (lbr_desc->records.nr)
                bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
 
-       perf_capabilities = vcpu_get_perf_capabilities(vcpu);
        if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
                if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
                        pmu->pebs_enable_mask = counter_mask;
 
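Note the hoist of vcpu_get_perf_capabilities(): it is now read once before the LBR block, so a single perf_capabilities value gates both the LBR record population above and the PEBS setup here, rather than being fetched only after the LBR records had already been populated.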
        return &vcpu_to_lbr_desc(vcpu)->records;
 }
 
-void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
+static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+       return !!vcpu_to_lbr_records(vcpu)->nr;
+}
 
+void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
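For illustration, here is a minimal userspace model of the new gating logic, showing why the simplified nr check is safe. The struct field and the PMU_CAP_LBR_FMT value mirror the kernel's (bits 5:0 of IA32_PERF_CAPABILITIES encode the LBR format); model_refresh(), model_lbr_is_enabled() and the test harness are hypothetical stand-ins for intel_pmu_refresh() and the new inline, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 5:0 of IA32_PERF_CAPABILITIES encode the LBR format; this mirrors
 * the kernel's PMU_CAP_LBR_FMT. */
#define PMU_CAP_LBR_FMT 0x3fULL

struct x86_pmu_lbr {
	unsigned int nr; /* number of LBR record MSR pairs */
};

/*
 * Model of the refresh-time gating: records are populated only when the
 * CPU model is consistent AND the guest's PERF_CAPABILITIES advertises
 * an LBR format. host_nr stands in for what x86_perf_get_lbr() reports.
 */
static void model_refresh(struct x86_pmu_lbr *records,
			  uint64_t perf_capabilities,
			  bool model_consistent, unsigned int host_nr)
{
	if (model_consistent && (perf_capabilities & PMU_CAP_LBR_FMT))
		records->nr = host_nr;
	else
		records->nr = 0;
}

/* The new inline check: nr != 0 now implies the LBR format bits were set. */
static bool model_lbr_is_enabled(const struct x86_pmu_lbr *records)
{
	return !!records->nr;
}

int main(void)
{
	struct x86_pmu_lbr lbr = { 0 };

	model_refresh(&lbr, 0, true, 32);    /* no LBR format advertised */
	printf("fmt clear -> enabled=%d\n", model_lbr_is_enabled(&lbr));

	model_refresh(&lbr, 0x5, true, 32);  /* LBR format 5 advertised */
	printf("fmt set   -> enabled=%d\n", model_lbr_is_enabled(&lbr));

	model_refresh(&lbr, 0x5, false, 32); /* inconsistent CPU model */
	printf("bad model -> enabled=%d\n", model_lbr_is_enabled(&lbr));

	return 0;
}

The old predicate, lbr->nr && (perf_capabilities & PMU_CAP_LBR_FMT), and the new !!records->nr agree in every reachable state, because the refresh path above is the only writer of records.nr shown in this patch and it clears nr whenever the format bits are clear.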