www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: x86/mmu: Don't install bogus MMIO SPTEs if MMIO caching is disabled
author: Sean Christopherson <seanjc@google.com>
Thu, 25 Feb 2021 20:47:31 +0000 (12:47 -0800)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 26 Feb 2021 13:36:12 +0000 (08:36 -0500)
If MMIO caching is disabled, e.g. when using shadow paging on CPUs with
52 bits of PA space, go straight to MMIO emulation and don't install an
MMIO SPTE.  The SPTE will just generate a !PRESENT #PF, i.e. can't
actually accelerate future MMIO.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210225204749.1512652-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/spte.c

index a6b89d74b7a2522ef7e8982928af04959c74a7da..7915670467d6d3514bf864a1f0a03b4d8f4a4f0d 100644 (file)
@@ -2939,9 +2939,19 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                return true;
        }
 
-       if (unlikely(is_noslot_pfn(pfn)))
+       if (unlikely(is_noslot_pfn(pfn))) {
                vcpu_cache_mmio_info(vcpu, gva, gfn,
                                     access & shadow_mmio_access_mask);
+               /*
+                * If MMIO caching is disabled, emulate immediately without
+                * touching the shadow page tables as attempting to install an
+                * MMIO SPTE will just be an expensive nop.
+                */
+               if (unlikely(!shadow_mmio_value)) {
+                       *ret_val = RET_PF_EMULATE;
+                       return true;
+               }
+       }
 
        return false;
 }
index 9ea097bcb491d17cf3a0e5d5a89afc2989585d78..dcba9c1cbe299eba596b8e10d428dd0c2bcb633f 100644 (file)
@@ -51,6 +51,8 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
        u64 mask = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;
 
+       WARN_ON_ONCE(!shadow_mmio_value);
+
        access &= shadow_mmio_access_mask;
        mask |= shadow_mmio_value | access;
        mask |= gpa | shadow_nonpresent_or_rsvd_mask;
@@ -258,7 +260,10 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;
 
-       shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
+       if (mmio_value)
+               shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
+       else
+               shadow_mmio_value = 0;
        shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);