www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
KVM: VMX: require virtual NMI support
authorPaolo Bonzini <pbonzini@redhat.com>
Mon, 27 Mar 2017 12:37:28 +0000 (14:37 +0200)
committerKrish Sadhukhan <krish.sadhukhan@oracle.com>
Thu, 2 Nov 2017 17:30:25 +0000 (13:30 -0400)
Virtual NMIs are only missing in Prescott and Yonah chips.  Both are obsolete
for virtualization usage---Yonah is 32-bit only even---so drop vNMI emulation.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 2c82878b0cb38fd516fd612c67852a6bbf282003. It is a
partial cherry-pick in that PIN_BASED_VMX_PREEMPTION_TIMER in
setup_vmcs_config in file arch/x86/kvm/vmx.c has been omitted due to two things:
 - it being not required in the fix for bug 27031246 (which is about NMIs)
 - no need as we do not have commit 64672c95ea4c2f7096e519e826076867e8ef0938
   (kvm: vmx: hook preemption timer support) backported in UEK4.)
OraBug: 27031246 nested virt: L2 windows guest reboot hangs with L1 KVM hypervisor
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Tested-by: Xuan Bai <xuan.bai@oracle.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
arch/x86/kvm/vmx.c

index d782150e64976d55a0a9f13284629a94c1c5295e..cf3323f4ac2dce70ac38d7a1553b4c4f45e4839a 100644 (file)
@@ -527,10 +527,6 @@ struct vcpu_vmx {
        int vpid;
        bool emulation_required;
 
-       /* Support for vnmi-less CPUs */
-       int soft_vnmi_blocked;
-       ktime_t entry_time;
-       s64 vnmi_blocked_time;
        u32 exit_reason;
 
        bool rdtscp_enabled;
@@ -1086,11 +1082,6 @@ static inline bool cpu_has_vmx_invpcid(void)
                SECONDARY_EXEC_ENABLE_INVPCID;
 }
 
-static inline bool cpu_has_virtual_nmis(void)
-{
-       return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
-}
-
 static inline bool cpu_has_vmx_wbinvd_exit(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -3080,8 +3071,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                                &_vmexit_control) < 0)
                return -EIO;
 
-       min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
-       opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
+       min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+               PIN_BASED_VIRTUAL_NMIS;
+       opt = PIN_BASED_POSTED_INTR;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
                                &_pin_based_exec_control) < 0)
                return -EIO;
@@ -4754,8 +4746,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
        vmx->rmode.vm86_active = 0;
 
-       vmx->soft_vnmi_blocked = 0;
-
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        kvm_set_cr8(&vmx->vcpu, 0);
        apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
@@ -4875,8 +4865,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
 
-       if (!cpu_has_virtual_nmis() ||
-           vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+       if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
                enable_irq_window(vcpu);
                return;
        }
@@ -4918,19 +4907,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
        if (!is_guest_mode(vcpu)) {
-               if (!cpu_has_virtual_nmis()) {
-                       /*
-                        * Tracking the NMI-blocked state in software is built upon
-                        * finding the next open IRQ window. This, in turn, depends on
-                        * well-behaving guests: They have to keep IRQs disabled at
-                        * least as long as the NMI handler runs. Otherwise we may
-                        * cause NMI nesting, maybe breaking the guest. But as this is
-                        * highly unlikely, we can live with the residual risk.
-                        */
-                       vmx->soft_vnmi_blocked = 1;
-                       vmx->vnmi_blocked_time = 0;
-               }
-
                ++vcpu->stat.nmi_injections;
                vmx->nmi_known_unmasked = false;
        }
@@ -4947,8 +4923,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
-       if (!cpu_has_virtual_nmis())
-               return to_vmx(vcpu)->soft_vnmi_blocked;
        if (to_vmx(vcpu)->nmi_known_unmasked)
                return false;
        return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@@ -4958,20 +4932,13 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (!cpu_has_virtual_nmis()) {
-               if (vmx->soft_vnmi_blocked != masked) {
-                       vmx->soft_vnmi_blocked = masked;
-                       vmx->vnmi_blocked_time = 0;
-               }
-       } else {
-               vmx->nmi_known_unmasked = !masked;
-               if (masked)
-                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                                     GUEST_INTR_STATE_NMI);
-               else
-                       vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-                                       GUEST_INTR_STATE_NMI);
-       }
+       vmx->nmi_known_unmasked = !masked;
+       if (masked)
+               vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                             GUEST_INTR_STATE_NMI);
+       else
+               vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+                               GUEST_INTR_STATE_NMI);
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -4979,9 +4946,6 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
        if (to_vmx(vcpu)->nested.nested_run_pending)
                return 0;
 
-       if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
-               return 0;
-
        return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
                   | GUEST_INTR_STATE_NMI));
@@ -5793,7 +5757,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * AAK134, BY25.
         */
        if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-                       cpu_has_virtual_nmis() &&
                        (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
@@ -7286,7 +7249,6 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
         * "blocked by NMI" bit has to be set before next VM entry.
         */
        if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-                       cpu_has_virtual_nmis() &&
                        (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                GUEST_INTR_STATE_NMI);
@@ -7804,26 +7766,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
-           !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
-                                       get_vmcs12(vcpu))))) {
-               if (vmx_interrupt_allowed(vcpu)) {
-                       vmx->soft_vnmi_blocked = 0;
-               } else if (vmx->vnmi_blocked_time > 1000000000LL &&
-                          vcpu->arch.nmi_pending) {
-                       /*
-                        * This CPU don't support us in finding the end of an
-                        * NMI-blocked window if the guest runs with IRQs
-                        * disabled. So we pull the trigger after 1 s of
-                        * futile waiting, but inform the user about this.
-                        */
-                       printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
-                              "state on VCPU %d after 1 s timeout\n",
-                              __func__, vcpu->vcpu_id);
-                       vmx->soft_vnmi_blocked = 0;
-               }
-       }
-
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu);
@@ -8065,37 +8007,33 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
        idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-       if (cpu_has_virtual_nmis()) {
-               if (vmx->nmi_known_unmasked)
-                       return;
-               /*
-                * Can't use vmx->exit_intr_info since we're not sure what
-                * the exit reason is.
-                */
-               exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-               unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
-               vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
-               /*
-                * SDM 3: 27.7.1.2 (September 2008)
-                * Re-set bit "block by NMI" before VM entry if vmexit caused by
-                * a guest IRET fault.
-                * SDM 3: 23.2.2 (September 2008)
-                * Bit 12 is undefined in any of the following cases:
-                *  If the VM exit sets the valid bit in the IDT-vectoring
-                *   information field.
-                *  If the VM exit is due to a double fault.
-                */
-               if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
-                   vector != DF_VECTOR && !idtv_info_valid)
-                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                                     GUEST_INTR_STATE_NMI);
-               else
-                       vmx->nmi_known_unmasked =
-                               !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
-                                 & GUEST_INTR_STATE_NMI);
-       } else if (unlikely(vmx->soft_vnmi_blocked))
-               vmx->vnmi_blocked_time +=
-                       ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
+       if (vmx->nmi_known_unmasked)
+               return;
+       /*
+        * Can't use vmx->exit_intr_info since we're not sure what
+        * the exit reason is.
+        */
+       exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+       unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+       vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+       /*
+        * SDM 3: 27.7.1.2 (September 2008)
+        * Re-set bit "block by NMI" before VM entry if vmexit caused by
+        * a guest IRET fault.
+        * SDM 3: 23.2.2 (September 2008)
+        * Bit 12 is undefined in any of the following cases:
+        *  If the VM exit sets the valid bit in the IDT-vectoring
+        *   information field.
+        *  If the VM exit is due to a double fault.
+        */
+       if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+           vector != DF_VECTOR && !idtv_info_valid)
+               vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                             GUEST_INTR_STATE_NMI);
+       else
+               vmx->nmi_known_unmasked =
+                       !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+                         & GUEST_INTR_STATE_NMI);
 }
 
 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -8192,10 +8130,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long debugctlmsr, cr4;
 
-       /* Record the guest's net vcpu time for enforced NMI injections. */
-       if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
-               vmx->entry_time = ktime_get();
-
        /* Don't enter VMX if guest state is invalid, let the exit handler
           start emulation until we arrive back to a valid state */
        if (vmx->emulation_required)