Move CR0 caching into the standard register caching mechanism in order
to take advantage of the availability checks provided by regs_avail.
This avoids multiple VMREADs in the (uncommon) case where kvm_read_cr0()
is called multiple times in a single VM-Exit, and more importantly
eliminates a kvm_x86_ops hook, saves a retpoline on SVM when reading
CR0, and squashes the confusing naming discrepancy of "cache_reg" vs.
"decache_cr0_guest_bits".
No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200502043234.12481-8-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
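
[For reference, not part of the patch: the regs_avail plumbing this change
builds on lives in arch/x86/kvm/kvm_cache_regs.h. A minimal sketch of the
two helpers used below, paraphrased from the kernel tree of this era:]

    static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                                                 enum kvm_reg reg)
    {
            return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
    }

    static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
                                                   enum kvm_reg reg)
    {
            __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
    }

[regs_avail is a per-vCPU bitmap; a set bit means the value cached in
vcpu->arch is in sync with the hardware/VMCS state.]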
 
        NR_VCPU_REGS,
 
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+       VCPU_EXREG_CR0,
        VCPU_EXREG_CR3,
        VCPU_EXREG_CR4,
        VCPU_EXREG_RFLAGS,
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
-       void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
-       if (tmask & vcpu->arch.cr0_guest_owned_bits)
-               kvm_x86_ops.decache_cr0_guest_bits(vcpu);
+       if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
+           !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
+               kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
        return vcpu->arch.cr0 & mask;
 }
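
[Context, unchanged by this patch: kvm_read_cr0() is a thin wrapper that
passes a full mask, and KVM_POSSIBLE_CR0_GUEST_BITS is just X86_CR0_TS, so
only a read that touches CR0.TS can ever take the cache_reg() path.
Paraphrased from kvm_cache_regs.h and arch/x86/kvm/x86.h of this era:]

    #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS

    static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
    {
            return kvm_read_cr0_bits(vcpu, ~0UL);
    }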
 
 
        mark_dirty(svm->vmcb, VMCB_DT);
 }
 
-static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
-{
-}
-
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
        ulong gcr0 = svm->vcpu.arch.cr0;
        .set_segment = svm_set_segment,
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
-       .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
 
                if (enable_ept)
                        ept_save_pdptrs(vcpu);
                break;
+       case VCPU_EXREG_CR0:
+               guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+
+               vcpu->arch.cr0 &= ~guest_owned_bits;
+               vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
+               break;
        case VCPU_EXREG_CR3:
                if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
                        vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
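
[Only the guest-owned bits need refreshing from the VMCS: a guest write to
an intercepted CR0 bit always exits to KVM, which updates vcpu->arch.cr0
via the set_cr0 hook, so those bits never go stale. For example,
is_paging() in the hunk above resolves without a VMREAD because X86_CR0_PG
is never guest-owned; paraphrased from arch/x86/kvm/x86.h:]

    static inline int is_paging(struct kvm_vcpu *vcpu)
    {
            return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
    }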
        vpid_sync_context(to_vmx(vcpu)->vpid);
 }
 
-static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
-{
-       ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
-
-       vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
-       vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
-}
-
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0, hw_cr0);
        vcpu->arch.cr0 = cr0;
+       kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
 
        /* depends on vcpu->arch.cr0 to be set to a new value */
        vmx->emulation_required = emulation_required(vcpu);
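
[Marking VCPU_EXREG_CR0 available right after the VMCS writes keeps the
cache warm: vcpu->arch.cr0 is in sync with the value just written, so the
cached copy stays valid until the guest next gets a chance to modify a
guest-owned bit, i.e. until the next VM-Entry.]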
        .set_segment = vmx_set_segment,
        .get_cpl = vmx_get_cpl,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
-       .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
        .set_cr0 = vmx_set_cr0,
        .set_cr4 = vmx_set_cr4,
        .set_efer = vmx_set_efer,
 
                                  | (1 << VCPU_EXREG_RFLAGS)
                                  | (1 << VCPU_EXREG_PDPTR)
                                  | (1 << VCPU_EXREG_SEGMENTS)
+                                 | (1 << VCPU_EXREG_CR0)
                                  | (1 << VCPU_EXREG_CR3)
                                  | (1 << VCPU_EXREG_CR4)
                                  | (1 << VCPU_EXREG_EXIT_INFO_1)
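
[Clearing VCPU_EXREG_CR0 on VM-Exit is what makes the caching lazy: the
first read of a guest-owned bit after an exit takes a single VMREAD through
vmx_cache_reg(), which re-marks the register available, and any further
reads in the same exit hit vcpu->arch.cr0. A hypothetical back-to-back read
to illustrate the effect:]

    ulong ts;

    ts = kvm_read_cr0_bits(vcpu, X86_CR0_TS); /* one VMREAD(GUEST_CR0) */
    ts = kvm_read_cr0_bits(vcpu, X86_CR0_TS); /* served from the cache  */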