KVM: VMX: Introduce test mode related to EPT violation VE
author    Isaku Yamahata <isaku.yamahata@intel.com>
          Mon, 22 Jan 2024 23:53:18 +0000 (15:53 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 19 Apr 2024 16:15:21 +0000 (12:15 -0400)
To support TDX, KVM is enhanced to operate with #VE.  For TDX, KVM uses
the suppress #VE bit in EPT entries selectively, in order to be able to
trap non-present conditions.  However, #VE isn't used for VMX and it's
a bug if it happens.  To be defensive and to test that the VMX case
isn't broken, introduce the Kconfig option KVM_INTEL_PROVE_VE; when it
is set, intercept any unexpected #VE and BUG the VM.
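
For context, the mechanism under test: when the "EPT-violation #VE"
secondary execution control is enabled, bit 63 of an EPT entry (the
"suppress #VE" bit, VMX_EPT_SUPPRESS_VE_BIT upstream) selects whether an
EPT violation through that entry causes a VM exit (bit set) or delivers
a #VE to the guest (bit clear).  A conceptual sketch, not part of this
patch; the helper name is made up:

	/*
	 * Simplified model: with the EPT-violation #VE control enabled,
	 * an EPT violation through an entry whose suppress-#VE bit is
	 * clear is delivered to the guest as a #VE rather than causing
	 * a VM exit.  (Real convertibility has further conditions.)
	 */
	#include <stdbool.h>
	#include <stdint.h>

	#define VMX_EPT_SUPPRESS_VE_BIT (1ULL << 63)

	/* Hypothetical helper, not in KVM. */
	static bool ept_violation_becomes_ve(uint64_t ept_entry)
	{
		return !(ept_entry & VMX_EPT_SUPPRESS_VE_BIT);
	}

Plain VMX should keep the bit set in every entry, so a #VE that reaches
KVM indicates an MMU bug; the new option turns that assumption into a
runtime check.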

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Message-Id: <d6db6ba836605c0412e166359ba5c46a63c22f86.1705965635.git.isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/Kconfig
arch/x86/kvm/vmx/vmcs.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 0ebdd088f28b852261786cb5f90d285b2af42ebc..d64fb2b3eb69e364772b1d9e515cb232a325f4b8 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -95,6 +95,18 @@ config KVM_INTEL
          To compile this as a module, choose M here: the module
          will be called kvm-intel.
 
+config KVM_INTEL_PROVE_VE
+        bool "Check that guests do not receive #VE exceptions"
+        default KVM_PROVE_MMU || DEBUG_KERNEL
+        depends on KVM_INTEL
+        help
+          Checks that KVM's page table management code will not incorrectly
+          let guests receive a virtualization exception.  Virtualization
+          exceptions will be trapped by the hypervisor rather than injected
+          in the guest.
+
+          If unsure, say N.
+
 config X86_SGX_KVM
        bool "Software Guard eXtensions (SGX) Virtualization"
        depends on X86_SGX && KVM_INTEL
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index 7c1996b433e262fa0c67d06a7a71baa6304db476..b2562531465882ff132a8871bcdfb5317052d31b 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -140,6 +140,11 @@ static inline bool is_nm_fault(u32 intr_info)
        return is_exception_n(intr_info, NM_VECTOR);
 }
 
+static inline bool is_ve_fault(u32 intr_info)
+{
+       return is_exception_n(intr_info, VE_VECTOR);
+}
+
 /* Undocumented: icebp/int1 */
 static inline bool is_icebp(u32 intr_info)
 {
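
For reference, is_exception_n() (already present in vmcs.h, just above
this hunk) matches the VM-exit interruption information against a given
hardware-exception vector; VE_VECTOR is vector 20 (X86_TRAP_VE).  A
self-contained sketch of the same check, using the architectural field
layout (bits 7:0 vector, bits 10:8 type, bit 31 valid); the _sketch
name is made up:

	#include <stdbool.h>
	#include <stdint.h>

	#define INTR_INFO_VECTOR_MASK     0xffu       /* bits 7:0  */
	#define INTR_INFO_INTR_TYPE_MASK  0x700u      /* bits 10:8 */
	#define INTR_INFO_VALID_MASK      (1u << 31)
	#define INTR_TYPE_HARD_EXCEPTION  (3u << 8)
	#define VE_VECTOR                 20          /* X86_TRAP_VE */

	/* Stand-alone equivalent of is_ve_fault(): a valid hardware
	 * exception with vector 20. */
	static bool is_ve_fault_sketch(uint32_t intr_info)
	{
		uint32_t mask = INTR_INFO_VALID_MASK |
				INTR_INFO_INTR_TYPE_MASK |
				INTR_INFO_VECTOR_MASK;

		return (intr_info & mask) == (INTR_INFO_VALID_MASK |
					      INTR_TYPE_HARD_EXCEPTION |
					      VE_VECTOR);
	}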
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d780eee9b6975ec34de898c53ba52e6fc995dffd..f4644f61d770317a9368dae4e9d7a098827373cf 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -869,6 +869,12 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
 
        eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
             (1u << DB_VECTOR) | (1u << AC_VECTOR);
+       /*
+        * #VE isn't used for VMX.  To test against unexpected changes
+        * related to #VE for VMX, intercept unexpected #VE and warn on it.
+        */
+       if (IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
+               eb |= 1u << VE_VECTOR;
        /*
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of TSS I/O permission bitmap.
@@ -2602,6 +2608,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                                        &_cpu_based_2nd_exec_control))
                        return -EIO;
        }
+       if (!IS_ENABLED(CONFIG_KVM_INTEL_PROVE_VE))
+               _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
+
 #ifndef CONFIG_X86_64
        if (!(_cpu_based_2nd_exec_control &
                                SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
@@ -2626,6 +2635,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                        return -EIO;
 
                vmx_cap->ept = 0;
+               _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
        }
        if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
            vmx_cap->vpid) {
@@ -4588,6 +4598,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
                exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
        if (!enable_ept) {
                exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+               exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
                enable_unrestricted_guest = 0;
        }
        if (!enable_unrestricted_guest)
@@ -4711,8 +4722,12 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 
        exec_controls_set(vmx, vmx_exec_control(vmx));
 
-       if (cpu_has_secondary_exec_ctrls())
+       if (cpu_has_secondary_exec_ctrls()) {
                secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
+               if (vmx->ve_info)
+                       vmcs_write64(VE_INFORMATION_ADDRESS,
+                                    __pa(vmx->ve_info));
+       }
 
        if (cpu_has_tertiary_exec_ctrls())
                tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
@@ -5200,6 +5215,9 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
        if (is_invalid_opcode(intr_info))
                return handle_ud(vcpu);
 
+       if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
+               return -EIO;
+
        error_code = 0;
        if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
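
KVM_BUG_ON() is what makes an unexpected #VE fatal for the VM: it WARNs
once, marks the VM as bugged so subsequent ioctls fail, and yields the
condition so the handler above can return -EIO.  A rough stand-alone
model of those semantics (the real macro lives in
include/linux/kvm_host.h; struct kvm_model is a stand-in):

	#include <stdbool.h>

	struct kvm_model {
		bool vm_bugged;   /* stand-in for struct kvm's flag */
	};

	/* Rough model of KVM_BUG_ON(): on a hit, kill the VM and
	 * report the condition back to the caller. */
	static bool kvm_bug_on_model(bool cond, struct kvm_model *kvm)
	{
		if (cond)
			kvm->vm_bugged = true;  /* real code also WARNs
						   and kicks vCPUs out
						   of the guest */
		return cond;
	}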
@@ -6409,8 +6427,22 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
                pr_err("Virtual processor ID = 0x%04x\n",
                       vmcs_read16(VIRTUAL_PROCESSOR_ID));
        if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
-               pr_err("VE info address = 0x%016llx\n",
-                      vmcs_read64(VE_INFORMATION_ADDRESS));
+               struct vmx_ve_information *ve_info = vmx->ve_info;
+               u64 ve_info_pa = vmcs_read64(VE_INFORMATION_ADDRESS);
+
+               /*
+                * If KVM is dumping the VMCS, then something has gone wrong
+                * already.  Dereferencing an address from the VMCS, which could
+                * very well be corrupted, is a terrible idea.  The virtual
+                * address is known so use it.
+                */
+               pr_err("VE info address = 0x%016llx%s\n", ve_info_pa,
+                      ve_info_pa == __pa(ve_info) ? "" : " (corrupted!)");
+               pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
+                      ve_info->exit_reason, ve_info->delivery,
+                      ve_info->exit_qualification,
+                      ve_info->guest_linear_address,
+                      ve_info->guest_physical_address, ve_info->eptp_index);
        }
 }
 
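
The fields dumped above mirror the architectural #VE information area,
a 4 KiB page whose physical address the VMCS holds in
VE_INFORMATION_ADDRESS.  A sketch of the layout (upstream declares it
as struct vmx_ve_information; per the SDM, hardware writes 0xFFFFFFFF
into the second word while delivering a #VE so that another #VE cannot
be delivered until the handler clears it):

	#include <stdint.h>

	struct ve_info_sketch {
		uint32_t exit_reason;          /* VM-exit reason encoding */
		uint32_t delivery;             /* 0xFFFFFFFF while a #VE
						  is being handled */
		uint64_t exit_qualification;
		uint64_t guest_linear_address;
		uint64_t guest_physical_address;
		uint16_t eptp_index;
		/* rest of the 4 KiB page is reserved */
	};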
@@ -7466,6 +7498,7 @@ void vmx_vcpu_free(struct kvm_vcpu *vcpu)
        free_vpid(vmx->vpid);
        nested_vmx_free_vcpu(vcpu);
        free_loaded_vmcs(vmx->loaded_vmcs);
+       free_page((unsigned long)vmx->ve_info);
 }
 
 int vmx_vcpu_create(struct kvm_vcpu *vcpu)
@@ -7559,6 +7592,20 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
                        goto free_vmcs;
        }
 
+       err = -ENOMEM;
+       if (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_EPT_VIOLATION_VE) {
+               struct page *page;
+
+               BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
+
+               /* ve_info must be page aligned. */
+               page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+               if (!page)
+                       goto free_vmcs;
+
+               vmx->ve_info = page_to_virt(page);
+       }
+
        if (vmx_can_use_ipiv(vcpu))
                WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
                           __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 65786dbe7d60bdf753db779312bb70754ccc6f1e..0da79a3868259b00d8a44d4136747ea592b52510 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -362,6 +362,9 @@ struct vcpu_vmx {
                DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
                DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        } shadow_msr_intercept;
+
+       /* ve_info must be page aligned. */
+       struct vmx_ve_information *ve_info;
 };
 
 struct kvm_vmx {
@@ -574,7 +577,8 @@ static inline u8 vmx_get_rvi(void)
         SECONDARY_EXEC_ENABLE_VMFUNC |                                 \
         SECONDARY_EXEC_BUS_LOCK_DETECTION |                            \
         SECONDARY_EXEC_NOTIFY_VM_EXITING |                             \
-        SECONDARY_EXEC_ENCLS_EXITING)
+        SECONDARY_EXEC_ENCLS_EXITING |                                 \
+        SECONDARY_EXEC_EPT_VIOLATION_VE)
 
 #define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
 #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL                      \