KVM: x86/mmu: Only allocate shadowed translation cache for sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL
author    Hou Wenlong <houwenlong.hwl@antgroup.com>
          Sat, 11 May 2024 03:46:37 +0000 (11:46 +0800)
committer Sean Christopherson <seanjc@google.com>
          Mon, 3 Jun 2024 21:06:39 +0000 (14:06 -0700)
Only indirect shadow pages with sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL
can have leaf gptes, so the shadowed translation cache needs to be
allocated only for those pages. KVM can then check
sp->shadowed_translation itself to determine whether the shadowed
translation cache holds valid information. Also, extend the WARN in
FNAME(sync_spte)() to ensure that this doesn't break
shadow_mmu_get_sp_for_split().
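As an illustration of the invariant this establishes, here is a minimal
standalone userspace sketch (not kernel code; struct and helper names
are made up): after this patch, the cache pointer itself encodes
whether an SP can have leaf gptes.

/*
 * Minimal sketch: sp->shadowed_translation doubles as the "this SP can
 * have leaf gptes" predicate. KVM_MAX_HUGEPAGE_LEVEL is PG_LEVEL_1G (3)
 * on x86; no single leaf gpte can map more than 1GiB.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

#define KVM_MAX_HUGEPAGE_LEVEL 3

struct sp {
	bool direct;
	int level;
	unsigned long long *shadowed_translation;
};

static void sp_alloc_caches(struct sp *sp)
{
	/* Mirrors the condition added in kvm_mmu_alloc_shadow_page(). */
	if (!sp->direct && sp->level <= KVM_MAX_HUGEPAGE_LEVEL)
		sp->shadowed_translation =
			calloc(512, sizeof(*sp->shadowed_translation));
}

int main(void)
{
	struct sp pml4 = { .direct = false, .level = 4 };
	struct sp pte  = { .direct = false, .level = 1 };

	sp_alloc_caches(&pml4);
	sp_alloc_caches(&pte);

	assert(!pml4.shadowed_translation);	/* no leaf gptes possible */
	assert(pte.shadowed_translation);	/* cache allocated */

	free(pte.shadowed_translation);
	return 0;
}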

Suggested-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Link: https://lore.kernel.org/r/5b0cda8a7456cda476b14fca36414a56f921dd52.1715398655.git.houwenlong.hwl@antgroup.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

index afc7489513bc45c5896fa8ecdf75b80fd11f5c22..fd5378b72896f4f01fa51f28b768b9732e55bdc3 100644 (file)
@@ -719,7 +719,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
        if (sp->role.passthrough)
                return sp->gfn;
 
-       if (!sp->role.direct)
+       if (sp->shadowed_translation)
                return sp->shadowed_translation[index] >> PAGE_SHIFT;
 
        return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
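For direct SPs the gfn is derived from the SP's base gfn rather than
cached. A standalone sketch of that arithmetic (SPTE_LEVEL_BITS is 9,
i.e. 512 entries per page table):

/* Standalone check of the direct-SP gfn computation above. */
#include <assert.h>

typedef unsigned long long gfn_t;

#define SPTE_LEVEL_BITS 9	/* 512 SPTEs per page table */

static gfn_t direct_gfn(gfn_t base, int level, int index)
{
	return base + ((gfn_t)index << ((level - 1) * SPTE_LEVEL_BITS));
}

int main(void)
{
	/* Level 1: each index maps one 4KiB page, so gfns are consecutive. */
	assert(direct_gfn(0x1000, 1, 3) == 0x1003);
	/* Level 2: each index covers 512 pages (one 2MiB region). */
	assert(direct_gfn(0, 2, 3) == 3 * 512);
	return 0;
}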
@@ -733,7 +733,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
  */
 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 {
-       if (sp_has_gptes(sp))
+       if (sp->shadowed_translation)
                return sp->shadowed_translation[index] & ACC_ALL;
 
        /*
@@ -754,7 +754,7 @@ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
                                         gfn_t gfn, unsigned int access)
 {
-       if (sp_has_gptes(sp)) {
+       if (sp->shadowed_translation) {
                sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
                return;
        }
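The cache packs each translation into a single 64-bit slot: gfn in the
high bits, access bits in the low bits (ACC_ALL is 0x7 in KVM's mmu
code, which fits below PAGE_SHIFT). A quick standalone check of that
encoding:

/* Standalone sketch of the shadowed_translation encoding used above. */
#include <assert.h>

typedef unsigned long long u64;
typedef u64 gfn_t;

#define PAGE_SHIFT 12
#define ACC_ALL 0x7ULL	/* exec | write | user, as in KVM's mmu.h */

static u64 pack(gfn_t gfn, unsigned int access)
{
	return (gfn << PAGE_SHIFT) | access;
}

int main(void)
{
	u64 entry = pack(0xabcd, ACC_ALL);

	assert((entry >> PAGE_SHIFT) == 0xabcd);	/* ..._get_gfn() */
	assert((entry & ACC_ALL) == ACC_ALL);		/* ..._get_access() */
	return 0;
}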
@@ -1697,8 +1697,7 @@ static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
        hlist_del(&sp->hash_link);
        list_del(&sp->link);
        free_page((unsigned long)sp->spt);
-       if (!sp->role.direct)
-               free_page((unsigned long)sp->shadowed_translation);
+       free_page((unsigned long)sp->shadowed_translation);
        kmem_cache_free(mmu_page_header_cache, sp);
 }
 
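Dropping the !sp->role.direct guard above relies on free_page() being a
no-op for a zero address, so SPs that never allocated the cache
(sp->shadowed_translation == NULL) free nothing. Userspace free() has
the same contract, as this sketch illustrates:

/*
 * Sketch of the "unconditional free" idiom: free(NULL), like
 * free_page(0), is defined to do nothing, so no NULL check is needed.
 */
#include <stdlib.h>

struct sp_stub {
	unsigned long long *shadowed_translation;	/* NULL for direct SPs */
};

static void free_shadow_page(struct sp_stub *sp)
{
	free(sp->shadowed_translation);	/* safe even when NULL */
}

int main(void)
{
	struct sp_stub direct_sp = { .shadowed_translation = NULL };

	free_shadow_page(&direct_sp);	/* no-op */
	return 0;
}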
@@ -2200,7 +2199,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 
        sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
        sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
-       if (!role.direct)
+       if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
                sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
 
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
index d3dbcf382ed2d43977e909f9760d2648bf2c745e..69941cebb3a87eae90b63b7cd49975d00b11315e 100644 (file)
@@ -911,7 +911,8 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
        gpa_t pte_gpa;
        gfn_t gfn;
 
-       if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE))
+       if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE ||
+                        !sp->shadowed_translation))
                return 0;
 
        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
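The extended WARN guards the code that follows: sync_spte() compares
the gpte against the cached gfn/access for slot i, and with this patch
an SP above KVM_MAX_HUGEPAGE_LEVEL has no cache to consult, so bailing
with 0 avoids dereferencing a NULL sp->shadowed_translation. For
reference, a standalone sketch of the gpte address math used just after
this point (the real code computes pte_gpa = first_pte_gpa +
i * sizeof(pt_element_t); 8-byte gptes assumed here):

/* Standalone sketch of how sync_spte() locates the gpte for slot i. */
#include <assert.h>

typedef unsigned long long gpa_t;

int main(void)
{
	gpa_t first_pte_gpa = 0x1000;	/* hypothetical level-1 table gpa */
	int i = 5;

	assert(first_pte_gpa + i * 8 == 0x1028);	/* 8-byte gptes */
	return 0;
}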