www.infradead.org Git - users/hch/misc.git/commitdiff
Merge branch 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm.git
author    Stephen Rothwell <sfr@canb.auug.org.au>  Tue, 13 May 2025 07:22:21 +0000 (17:22 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>  Tue, 13 May 2025 07:22:21 +0000 (17:22 +1000)
# Conflicts:
# arch/x86/kvm/vmx/vmx.c

arch/x86/include/asm/kvm_host.h   Simple merge
arch/x86/kvm/mmu.h                Simple merge
arch/x86/kvm/mmu/mmu.c            Simple merge
arch/x86/kvm/svm/svm.c            Simple merge
arch/x86/kvm/vmx/nested.c         Simple merge
arch/x86/kvm/vmx/pmu_intel.c      Simple merge
arch/x86/kvm/vmx/vmx.c            combined diff below
arch/x86/kvm/x86.c                combined diff below

diff --cc arch/x86/kvm/vmx/vmx.c
index 157c23db22bed3d66763b321f062bb136cb8db95,ef2d7208dd208a4ccad3ea5a0cf4b657f7a50f11..b12414108cbff06aa2e89dde0729fc579d433154
@@@ -1337,10 -1337,10 +1339,10 @@@ void vmx_prepare_switch_to_guest(struc
                savesegment(fs, fs_sel);
                savesegment(gs, gs_sel);
                fs_base = read_msr(MSR_FS_BASE);
-               vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+               vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
        }
  
 -      wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 +      wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
  #else
        savesegment(fs, fs_sel);
        savesegment(gs, gs_sel);
@@@ -1384,10 -1384,10 +1386,10 @@@ static void vmx_prepare_switch_to_host(
  #endif
        invalidate_tss_limit();
  #ifdef CONFIG_X86_64
-       wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 -      wrmsrl(MSR_KERNEL_GS_BASE, vmx->vt.msr_host_kernel_gs_base);
++      wrmsrq(MSR_KERNEL_GS_BASE, vmx->vt.msr_host_kernel_gs_base);
  #endif
        load_fixmap_gdt(raw_smp_processor_id());
-       vmx->guest_state_loaded = false;
+       vmx->vt.guest_state_loaded = false;
        vmx->guest_uret_msrs_loaded = false;
  }
  
  static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
  {
        preempt_disable();
-       if (vmx->guest_state_loaded)
+       if (vmx->vt.guest_state_loaded)
 -              rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 +              rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
        preempt_enable();
        return vmx->msr_guest_kernel_gs_base;
  }
  static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
  {
        preempt_disable();
-       if (vmx->guest_state_loaded)
+       if (vmx->vt.guest_state_loaded)
 -              wrmsrl(MSR_KERNEL_GS_BASE, data);
 +              wrmsrq(MSR_KERNEL_GS_BASE, data);
        preempt_enable();
        vmx->msr_guest_kernel_gs_base = data;
  }
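
For readability, here is a sketch of how the two accessors above read after the merge, assembled from the result column of the combined diff (only the lines visible in the hunk, not the surrounding file):

/*
 * Merged result: guest_state_loaded now lives in the shared vt state
 * (from the kvm.git side of the merge), while the MSR helpers use the
 * wrmsrq()/rdmsrq() spellings (from the other parent).
 */
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
        preempt_disable();
        if (vmx->vt.guest_state_loaded)
                rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
        preempt_enable();
        return vmx->msr_guest_kernel_gs_base;
}

static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
        preempt_disable();
        if (vmx->vt.guest_state_loaded)
                wrmsrq(MSR_KERNEL_GS_BASE, data);
        preempt_enable();
        vmx->msr_guest_kernel_gs_base = data;
}

The vt.* fields appear to be vcpu state the kvm.git side moved out of struct vcpu_vmx into common VMX/TDX state, while the wrmsrl()/rdmsrl() to wrmsrq()/rdmsrq() renames come from the other parent; the two sets of edits touching the same lines is presumably what produced the textual conflict in arch/x86/kvm/vmx/vmx.c noted above.
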
diff --cc arch/x86/kvm/x86.c
index 5bdb5b8549240f4c87c73530707e5f34b3e41d8d,f6ce044b090a17cb17e0dfe4caf46f2691a75aa9..570e7f8cbf646defcc9c29e40a26bec3d7f34c8c
@@@ -9771,11 -9792,13 +9794,13 @@@ int kvm_x86_vendor_init(struct kvm_x86_
                kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
                kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
        }
+       kvm_caps.supported_quirks = KVM_X86_VALID_QUIRKS;
+       kvm_caps.inapplicable_quirks = KVM_X86_CONDITIONAL_QUIRKS;
  
 -      rdmsrl_safe(MSR_EFER, &kvm_host.efer);
 +      rdmsrq_safe(MSR_EFER, &kvm_host.efer);
  
        if (boot_cpu_has(X86_FEATURE_XSAVES))
 -              rdmsrl(MSR_IA32_XSS, kvm_host.xss);
 +              rdmsrq(MSR_IA32_XSS, kvm_host.xss);
  
        kvm_init_pmu_capability(ops->pmu_ops);
  
@@@ -10976,9 -11004,10 +11006,10 @@@ static int vcpu_enter_guest(struct kvm_
                switch_fpu_return();
  
        if (vcpu->arch.guest_fpu.xfd_err)
 -              wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
 +              wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
  
-       if (unlikely(vcpu->arch.switch_db_regs)) {
+       if (unlikely(vcpu->arch.switch_db_regs &&
+                    !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
                set_debugreg(0, 7);
                set_debugreg(vcpu->arch.eff_db[0], 0);
                set_debugreg(vcpu->arch.eff_db[1], 1);
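
In the vcpu_enter_guest() hunk just above, the merge result (result column) only reloads the debug registers by hand when manual switching is requested and the KVM_DEBUGREG_AUTO_SWITCH bit is clear. A minimal sketch of that merged guard, read off the '+' lines above (the remainder of the if body lies outside the hunk's trailing context and is left elided):

        /* Skip the manual set_debugreg() reloads when auto-switching is in effect. */
        if (unlikely(vcpu->arch.switch_db_regs &&
                     !(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
                set_debugreg(0, 7);
                set_debugreg(vcpu->arch.eff_db[0], 0);
                set_debugreg(vcpu->arch.eff_db[1], 1);
                /* ... remainder outside the hunk context ... */
        }

The name KVM_DEBUGREG_AUTO_SWITCH suggests the debug registers are switched for the vCPU without KVM writing them itself, which is why the manual reload path is skipped in that case.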