} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
        { .index = MSR_STAR,                            .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
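+       /*
+        * Listed with .always = false: these MSRs stay intercepted until
+        * the CPUID-update path below explicitly passes them through for
+        * non-Intel guests.
+        */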
+       { .index = MSR_IA32_SYSENTER_EIP,               .always = false },
+       { .index = MSR_IA32_SYSENTER_ESP,               .always = false },
 #ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                         .always = true  },
        { .index = MSR_FS_BASE,                         .always = true  },
        if (kvm_vcpu_apicv_active(vcpu))
                avic_init_vmcb(svm);
 
-       /*
-        * If hardware supports Virtual VMLOAD VMSAVE then enable it
-        * in VMCB and clear intercepts to avoid #VMEXIT.
-        */
-       if (vls) {
-               svm_clr_intercept(svm, INTERCEPT_VMLOAD);
-               svm_clr_intercept(svm, INTERCEPT_VMSAVE);
-               svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
-       }
-
        if (vgif) {
                svm_clr_intercept(svm, INTERCEPT_STGI);
                svm_clr_intercept(svm, INTERCEPT_CLGI);
 
        ret = kvm_skip_emulated_instruction(vcpu);
 
-       if (vmload)
+       if (vmload) {
                nested_svm_vmloadsave(vmcb12, svm->vmcb);
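+               /*
+                * The vmcb12 save area holds only 32-bit SYSENTER values,
+                * so the high halves tracked for Intel guests are now stale.
+                */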
-       else
+               svm->sysenter_eip_hi = 0;
+               svm->sysenter_esp_hi = 0;
+       } else {
                nested_svm_vmloadsave(svm->vmcb, vmcb12);
+       }
 
        kvm_vcpu_unmap(vcpu, &map, true);
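
The new handling is keyed off guest_cpuid_is_intel() throughout. As a minimal
sketch of that check (assuming the kvm_find_cpuid_entry() helper; the real
implementation lives in arch/x86/kvm/cpuid.h), the guest counts as Intel when
its CPUID leaf 0 vendor string spells "GenuineIntel":

static bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 0, 0);

        return best &&
               best->ebx == 0x756e6547 &&   /* "Genu" */
               best->edx == 0x49656e69 &&   /* "ineI" */
               best->ecx == 0x6c65746e;     /* "ntel" */
}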
                msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
                break;
        case MSR_IA32_SYSENTER_EIP:
-               msr_info->data = svm->sysenter_eip;
+               msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
+               if (guest_cpuid_is_intel(vcpu))
+                       msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
                break;
        case MSR_IA32_SYSENTER_ESP:
-               msr_info->data = svm->sysenter_esp;
+               msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_esp;
+               if (guest_cpuid_is_intel(vcpu))
+                       msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
                break;
        case MSR_TSC_AUX:
                if (!boot_cpu_has(X86_FEATURE_RDTSCP))
                svm->vmcb01.ptr->save.sysenter_cs = data;
                break;
        case MSR_IA32_SYSENTER_EIP:
-               svm->sysenter_eip = data;
-               svm->vmcb01.ptr->save.sysenter_eip = data;
+               svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
+               /*
+                * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
+                * when we spoof an Intel vendor ID (for cross-vendor
+                * migration). In that case we use this intercept to track
+                * the high 32 bits of these MSRs and so support Intel's
+                * implementation of SYSENTER/SYSEXIT.
+                */
+               svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
                break;
        case MSR_IA32_SYSENTER_ESP:
-               svm->sysenter_esp = data;
-               svm->vmcb01.ptr->save.sysenter_esp = data;
+               svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
+               svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
                break;
        case MSR_TSC_AUX:
                if (!boot_cpu_has(X86_FEATURE_RDTSCP))
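
To see the split concretely: on WRMSR the 64-bit value is divided between the
32-bit VMCB save-area field and the new sysenter_*_hi shadow, and on RDMSR the
two halves are recombined. A standalone illustration (plain C with hypothetical
variable names, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t wrmsr_val = 0xffffffff81000000ULL; /* 64-bit guest value */

        /* Write path: the VMCB field keeps only the low 32 bits ... */
        uint32_t vmcb_save = (uint32_t)wrmsr_val;
        /* ... and KVM shadows the high half (sysenter_*_hi). */
        uint32_t hi = wrmsr_val >> 32;

        /* Read path: recombine the halves for an Intel-flavored guest. */
        uint64_t rdmsr_val = (uint64_t)hi << 32 | vmcb_save;

        assert(rdmsr_val == wrmsr_val);
        return 0;
}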
                        vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
        }
 
-       if (!kvm_vcpu_apicv_active(vcpu))
-               return;
+       if (kvm_vcpu_apicv_active(vcpu)) {
+               /*
+                * AVIC does not work with an x2APIC-mode guest. If the
+                * X2APIC feature is exposed to the guest, disable AVIC.
+                */
+               if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
+                       kvm_request_apicv_update(vcpu->kvm, false,
+                                                APICV_INHIBIT_REASON_X2APIC);
 
-       /*
-        * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
-        * is exposed to the guest, disable AVIC.
-        */
-       if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
-               kvm_request_apicv_update(vcpu->kvm, false,
-                                        APICV_INHIBIT_REASON_X2APIC);
+               /*
+                * Currently, AVIC does not work with nested virtualization,
+                * so disable AVIC when the SVM CPUID feature is exposed to
+                * the L1 guest.
+                */
+               if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+                       kvm_request_apicv_update(vcpu->kvm, false,
+                                                APICV_INHIBIT_REASON_NESTED);
+       }
 
-       /*
-        * Currently, AVIC does not work with nested virtualization.
-        * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
-        */
-       if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-               kvm_request_apicv_update(vcpu->kvm, false,
-                                        APICV_INHIBIT_REASON_NESTED);
+       if (guest_cpuid_is_intel(vcpu)) {
+               /*
+                * We must intercept SYSENTER_EIP and SYSENTER_ESP
+                * accesses because the AMD processor stores only the low
+                * 32 bits of these MSRs. For the same reason we cannot
+                * use virtual VMLOAD/VMSAVE.
+                */
+               svm_set_intercept(svm, INTERCEPT_VMLOAD);
+               svm_set_intercept(svm, INTERCEPT_VMSAVE);
+               svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+
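+               /* 0 = intercept: trap both reads and writes of these MSRs. */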
+               set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
+               set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
+       } else {
+               /*
+                * If hardware supports Virtual VMLOAD VMSAVE then enable it
+                * in VMCB and clear intercepts to avoid #VMEXIT.
+                */
+               if (vls) {
+                       svm_clr_intercept(svm, INTERCEPT_VMLOAD);
+                       svm_clr_intercept(svm, INTERCEPT_VMSAVE);
+                       svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+               }
+               /* No need to intercept these MSRs */
+               set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
+               set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
+       }
 }
 
 static bool svm_has_wbinvd_exit(void)