KVM: x86/mmu: Pass full 64-bit error code when handling page faults
author Isaku Yamahata <isaku.yamahata@intel.com>
Wed, 28 Feb 2024 02:41:35 +0000 (18:41 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 7 May 2024 15:59:18 +0000 (11:59 -0400)
Plumb the full 64-bit error code throughout the page fault handling code
so that KVM can use the upper 32 bits, e.g. SNP's PFERR_GUEST_ENC_MASK
will be used to determine whether or not a fault is private vs. shared.
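For illustration only, a private-vs-shared check on the guest-encrypted bit
might look like the sketch below; the exact bit position (bit 34) and the
fault_is_private() helper are assumptions for this example, not part of this
patch:

    /* Illustrative sketch: the encrypted-access bit lives in the upper
     * 32 bits of the error code, so a u32 would silently drop it. */
    #define PFERR_GUEST_ENC_MASK    BIT_ULL(34)

    static inline bool fault_is_private(u64 error_code)
    {
            return !!(error_code & PFERR_GUEST_ENC_MASK);
    }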

Note, passing the 64-bit error code to FNAME(walk_addr)() does NOT change
the behavior of permission_fault() when invoked in the page fault path, as
KVM explicitly clears PFERR_IMPLICIT_ACCESS in kvm_mmu_page_fault().
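(Paraphrased for context, not part of this diff: the sanitization in
kvm_mmu_page_fault() amounts to roughly the following, so the synthetic bit
never reaches the guest walker.)

    /* PFERR_IMPLICIT_ACCESS is a KVM-internal, synthetic bit; it must
     * never be set in a hardware-provided error code, so drop it before
     * the fault is dispatched. */
    if (WARN_ON_ONCE(error_code & PFERR_IMPLICIT_ACCESS))
            error_code &= ~(u64)PFERR_IMPLICIT_ACCESS;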

Continue passing '0' from the async #PF worker, as guest_memfd and thus
private memory doesn't support async page faults.
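(For context, the async #PF completion path boils down to roughly the call
below; the caller and exact argument list are paraphrased from
kvm_arch_async_page_ready() rather than taken from this diff.)

    /* Async page faults are never used for guest_memfd-backed (private)
     * memory, so a zero error code remains sufficient here. */
    kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);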

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
[mdr: drop references/changes on rebase, update commit message]
Signed-off-by: Michael Roth <michael.roth@amd.com>
[sean: drop truncation in call to FNAME(walk_addr)(), rewrite changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Message-ID: <20240228024147.41573-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/mmutrace.h

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1b2ae00a7970e03e0482eaf86455cf58a89ab016..dde871a28c495012f7c903bd6832051e9770da77 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5799,8 +5799,7 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
        }
 
        if (r == RET_PF_INVALID) {
-               r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
-                                         lower_32_bits(error_code), false,
+               r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
                                          &emulation_type);
                if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
                        return -EIO;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 61f49967047ad1a3e2b0f605963f2546441bdbd4..797b80f996a773dd171bba5422160d30b845ec67 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -190,7 +190,7 @@ static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
 struct kvm_page_fault {
        /* arguments to kvm_mmu_do_page_fault.  */
        const gpa_t addr;
-       const u32 error_code;
+       const u64 error_code;
        const bool prefetch;
 
        /* Derived from error_code.  */
@@ -288,7 +288,7 @@ static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
 }
 
 static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                                       u32 err, bool prefetch, int *emulation_type)
+                                       u64 err, bool prefetch, int *emulation_type)
 {
        struct kvm_page_fault fault = {
                .addr = cr2_or_gpa,
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index ae86820cef697a8c3ba224ab5c5eb7b8eea0afe1..195d98bc8de85e4e377bc084ea3d14e1bab126ce 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -260,7 +260,7 @@ TRACE_EVENT(
        TP_STRUCT__entry(
                __field(int, vcpu_id)
                __field(gpa_t, cr2_or_gpa)
-               __field(u32, error_code)
+               __field(u64, error_code)
                __field(u64 *, sptep)
                __field(u64, old_spte)
                __field(u64, new_spte)