KVM: SEV: Implement gmem hook for invalidating private pages
author	Michael Roth <michael.roth@amd.com>	Wed, 1 May 2024 08:52:04 +0000 (03:52 -0500)
committer	Paolo Bonzini <pbonzini@redhat.com>	Sun, 12 May 2024 08:09:32 +0000 (04:09 -0400)
Implement a platform hook to do the work of restoring the direct map
entries of gmem-managed pages and transitioning the corresponding RMP
table entries back to the default shared/hypervisor-owned state.
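For context, this hook is reached from KVM's common guest_memfd code when backing pages are freed; the generic plumbing was added earlier in this series. The following is a rough sketch of that call path, paraphrased from the series rather than quoted from this commit, so names such as kvm_gmem_free_folio(), kvm_arch_gmem_invalidate() and CONFIG_HAVE_KVM_GMEM_INVALIDATE may differ in later kernels:

#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
/* virt/kvm/guest_memfd.c: invoked when a gmem folio is freed back to the host */
static void kvm_gmem_free_folio(struct folio *folio)
{
	struct page *page = folio_page(folio, 0);
	kvm_pfn_t pfn = page_to_pfn(page);
	int order = folio_order(folio);

	/* Hand the freed PFN range to the arch layer... */
	kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
}

/* arch/x86/kvm/x86.c: ...which dispatches to the vendor callback wired up below */
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{
	static_call_cond(kvm_x86_gmem_invalidate)(start, end);
}
#endif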

Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-ID: <20240501085210.2213060-15-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/Kconfig
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 10768f13b240be5f12381d3cd50f310b4b2bea2e..2a7f69abcac3448910988c93cf9d06a614babe0b 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -138,6 +138,7 @@ config KVM_AMD_SEV
        select ARCH_HAS_CC_PLATFORM
        select KVM_GENERIC_PRIVATE_MEM
        select HAVE_KVM_GMEM_PREPARE
+       select HAVE_KVM_GMEM_INVALIDATE
        help
          Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
          with Encrypted State (SEV-ES) on AMD processors.
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0ed6b96c01c352d6be1aecb3fc7915c1fcae35fd..dc00b89404a2d8fce70fcb3e5d91b240c87b82ea 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4663,3 +4663,67 @@ int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
 
        return 0;
 }
+
+void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
+{
+       kvm_pfn_t pfn;
+
+       pr_debug("%s: PFN start 0x%llx PFN end 0x%llx\n", __func__, start, end);
+
+       for (pfn = start; pfn < end;) {
+               bool use_2m_update = false;
+               int rc, rmp_level;
+               bool assigned;
+
+               rc = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
+               if (WARN_ONCE(rc, "SEV: Failed to retrieve RMP entry for PFN 0x%llx error %d\n",
+                             pfn, rc))
+                       goto next_pfn;
+
+               if (!assigned)
+                       goto next_pfn;
+
+               use_2m_update = IS_ALIGNED(pfn, PTRS_PER_PMD) &&
+                               end >= (pfn + PTRS_PER_PMD) &&
+                               rmp_level > PG_LEVEL_4K;
+
+               /*
+                * If an unaligned PFN corresponds to a 2M region assigned as a
+                * large page in the RMP table, PSMASH the region into individual
+                * 4K RMP entries before attempting to convert a 4K sub-page.
+                */
+               if (!use_2m_update && rmp_level > PG_LEVEL_4K) {
+                       /*
+                        * This shouldn't fail, but if it does, report it, but
+                        * still try to update RMP entry to shared and pray this
+                        * was a spurious error that can be addressed later.
+                        */
+                       rc = snp_rmptable_psmash(pfn);
+                       WARN_ONCE(rc, "SEV: Failed to PSMASH RMP entry for PFN 0x%llx error %d\n",
+                                 pfn, rc);
+               }
+
+               rc = rmp_make_shared(pfn, use_2m_update ? PG_LEVEL_2M : PG_LEVEL_4K);
+               if (WARN_ONCE(rc, "SEV: Failed to update RMP entry for PFN 0x%llx error %d\n",
+                             pfn, rc))
+                       goto next_pfn;
+
+               /*
+                * SEV-ES avoids host/guest cache coherency issues through
+                * WBINVD hooks issued via MMU notifiers during run-time, and
+                * KVM's VM destroy path at shutdown. Those MMU notifier events
+                * don't cover gmem since there is no requirement to map pages
+                * to a HVA in order to use them for a running guest. While the
+                * shutdown path would still likely cover things for SNP guests,
+                * userspace may also free gmem pages during run-time via
+                * hole-punching operations on the guest_memfd, so flush the
+                * cache entries for these pages before free'ing them back to
+                * the host.
+                */
+               clflush_cache_range(__va(pfn_to_hpa(pfn)),
+                                   use_2m_update ? PMD_SIZE : PAGE_SIZE);
+next_pfn:
+               pfn += use_2m_update ? PTRS_PER_PMD : 1;
+               cond_resched();
+       }
+}
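The comment in sev_gmem_invalidate() above notes that userspace can free gmem pages at run-time by hole-punching the guest_memfd. A minimal, hypothetical userspace sketch of that trigger follows (not part of this patch): it assumes a kernel and headers with KVM_CREATE_GUEST_MEMFD support, and omits VM creation, memslot binding, and error handling. Only pages that were actually allocated and assigned to an SNP guest will reach the invalidate hook when the hole is punched.

#define _GNU_SOURCE
#include <fcntl.h>      /* fallocate(), FALLOC_FL_* */
#include <sys/ioctl.h>
#include <linux/kvm.h>  /* KVM_CREATE_GUEST_MEMFD, struct kvm_create_guest_memfd */

/*
 * Create a guest_memfd on an existing VM fd and punch a hole over
 * [offset, offset + len). Freeing the backing pages is what eventually
 * drives the gmem_invalidate path for any assigned private pages.
 */
static int punch_gmem_range(int vm_fd, __u64 gmem_size, off_t offset, off_t len)
{
	struct kvm_create_guest_memfd gmem = { .size = gmem_size };
	int gmem_fd;

	gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	if (gmem_fd < 0)
		return -1;

	return fallocate(gmem_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}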
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b9ecc06f893423130d4342cd3c7fb9f5837c48e1..653cdb23a7d1b6833855135859ccbe17f22176b1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5083,6 +5083,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .alloc_apic_backing_page = svm_alloc_apic_backing_page,
 
        .gmem_prepare = sev_gmem_prepare,
+       .gmem_invalidate = sev_gmem_invalidate,
 };
 
 /*
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 4203bd9012e9a4e55c0995ed92bb6297ee39fa2d..3cea024a7c185bc133b71b497a7459d09ca4836f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -737,6 +737,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
 int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
+void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
 #else
 static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
        return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
@@ -757,6 +758,7 @@ static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, in
 {
        return 0;
 }
+static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
 
 #endif