KVM: x86: Move synthetic PFERR_* sanity checks to SVM's #NPF handler
author     Sean Christopherson <seanjc@google.com>
           Wed, 17 Apr 2024 11:30:29 +0000 (07:30 -0400)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 7 May 2024 15:59:18 +0000 (11:59 -0400)
Move the sanity check that hardware never sets bits that collide with KVM-
defined synthetic bits from kvm_mmu_page_fault() to npf_interception(),
i.e. make the sanity check #NPF specific.  The legacy #PF path already
WARNs if _any_ of bits 63:32 are set, and the error code that comes from
VMX's EPT Violation and Misconfig is 100% synthesized (KVM morphs VMX's
EXIT_QUALIFICATION into error code flags).
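
For context, a minimal sketch of that morphing, modeled on the VMX EPT
Violation handler and assuming the EPT_VIOLATION_* constant names from
arch/x86/include/asm/vmx.h (illustration only, not part of this patch):

        /* Every bit of the error code is synthesized from EXIT_QUALIFICATION. */
        error_code  = (exit_qualification & EPT_VIOLATION_ACC_READ)  ? PFERR_USER_MASK    : 0;
        error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) ? PFERR_WRITE_MASK   : 0;
        error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) ? PFERR_FETCH_MASK   : 0;
        error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)  ? PFERR_PRESENT_MASK : 0;

Because the VMX error code is built up entirely in software like this,
hardware has no way to set a bit that collides with KVM's synthetic PFERR_*
values on that path.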

Add a compile-time assert in the legacy #PF handler to make sure that KVM-
defined flags are covered by its existing sanity check on the upper bits.
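
A rough sketch of how the new assert sits next to the existing upper-bit
sanity check in kvm_handle_page_fault() (the WARN shown here is assumed from
an earlier patch in this series; illustration only):

        /* Legacy #PF error codes are 32 bits; drop any stray upper bits. */
        if (WARN_ON_ONCE(error_code >> 32))
                error_code = lower_32_bits(error_code);

        /* Ensure the above sanity check also covers KVM-defined flags. */
        BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));

Since PFERR_SYNTHETIC_MASK currently contains only PFERR_IMPLICIT_ACCESS at
bit 48, lower_32_bits() of it is zero; the BUILD_BUG_ON fires at compile time
if a synthetic flag is ever added below bit 32.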

Opportunistically add a description of PFERR_IMPLICIT_ACCESS, since we
are removing the comment that defined it.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Message-ID: <20240228024147.41573-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/svm.c

index 1be4608669ea9ee450bf9f4cd0e8fe17902ba428..0e63dcd8ac1ca62f6077b55b69a238609321666a 100644
@@ -267,7 +267,13 @@ enum x86_intercept_stage;
 #define PFERR_GUEST_ENC_MASK   BIT_ULL(34)
 #define PFERR_GUEST_SIZEM_MASK BIT_ULL(35)
 #define PFERR_GUEST_VMPL_MASK  BIT_ULL(36)
+
+/*
+ * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks
+ * when emulating instructions that trigger implicit accesses.
+ */
 #define PFERR_IMPLICIT_ACCESS  BIT_ULL(48)
+#define PFERR_SYNTHETIC_MASK   (PFERR_IMPLICIT_ACCESS)
 
 #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |       \
                                 PFERR_WRITE_MASK |             \
index 955de52e0c182907ac670b95d32ab3966f093b50..1b2ae00a7970e03e0482eaf86455cf58a89ab016 100644
@@ -4501,6 +4501,9 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                return -EFAULT;
 #endif
 
+       /* Ensure the above sanity check also covers KVM-defined flags. */
+       BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
+
        vcpu->arch.l1tf_flush_l1d = true;
        if (!flags) {
                trace_kvm_page_fault(vcpu, fault_address, error_code);
@@ -5785,17 +5788,6 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
        int r, emulation_type = EMULTYPE_PF;
        bool direct = vcpu->arch.mmu->root_role.direct;
 
-       /*
-        * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP
-        * checks when emulating instructions that triggers implicit access.
-        * WARN if hardware generates a fault with an error code that collides
-        * with the KVM-defined value.  Clear the flag and continue on, i.e.
-        * don't terminate the VM, as KVM can't possibly be relying on a flag
-        * that KVM doesn't know about.
-        */
-       if (WARN_ON_ONCE(error_code & PFERR_IMPLICIT_ACCESS))
-               error_code &= ~PFERR_IMPLICIT_ACCESS;
-
        if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
                return RET_PF_RETRY;
 
index 0f3b59da0d4a891431b9474c575552a133407026..535018f152a3a5260ebf1194eaaec6c27b5bb26d 100644
@@ -2047,6 +2047,15 @@ static int npf_interception(struct kvm_vcpu *vcpu)
        u64 fault_address = svm->vmcb->control.exit_info_2;
        u64 error_code = svm->vmcb->control.exit_info_1;
 
+       /*
+        * WARN if hardware generates a fault with an error code that collides
+        * with KVM-defined synthetic flags.  Clear the flags and continue on,
+        * i.e. don't terminate the VM, as KVM can't possibly be relying on a
+        * flag that KVM doesn't know about.
+        */
+       if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK))
+               error_code &= ~PFERR_SYNTHETIC_MASK;
+
        trace_kvm_page_fault(vcpu, fault_address, error_code);
        return kvm_mmu_page_fault(vcpu, fault_address, error_code,
                        static_cpu_has(X86_FEATURE_DECODEASSISTS) ?