www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: Use vcpu-specific gva->hva translation when querying host page size
author: Sean Christopherson <sean.j.christopherson@intel.com>
Wed, 8 Jan 2020 20:24:37 +0000 (12:24 -0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2020 12:37:29 +0000 (04:37 -0800)
[ Upstream commit f9b84e19221efc5f493156ee0329df3142085f28 ]

Use kvm_vcpu_gfn_to_hva() when retrieving the host page size so that the
correct set of memslots is used when handling x86 page faults in SMM.

Fixes: 54bf36aac520 ("KVM: x86: use vcpu-specific functions to read/write/translate GFNs")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/powerpc/kvm/book3s_xive_native.c
arch/x86/kvm/mmu/mmu.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

index d83adb1e14902c241d645a749961778087eab365..6ef0151ff70a9617352d7f7803f808c12ed4b686 100644 (file)
@@ -631,7 +631,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
        srcu_idx = srcu_read_lock(&kvm->srcu);
        gfn = gpa_to_gfn(kvm_eq.qaddr);
 
-       page_size = kvm_host_page_size(kvm, gfn);
+       page_size = kvm_host_page_size(vcpu, gfn);
        if (1ull << kvm_eq.qshift > page_size) {
                srcu_read_unlock(&kvm->srcu, srcu_idx);
                pr_warn("Incompatible host page size %lx!\n", page_size);
index 5eb14442929c87c3133be05d580c71649349f4c5..d21b69bbd6f48c21034e49700166b8a19a5a0104 100644 (file)
@@ -1286,12 +1286,12 @@ static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
        return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
 }
 
-static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
+static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        unsigned long page_size;
        int i, ret = 0;
 
-       page_size = kvm_host_page_size(kvm, gfn);
+       page_size = kvm_host_page_size(vcpu, gfn);
 
        for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
                if (page_size >= KVM_HPAGE_SIZE(i))
@@ -1341,7 +1341,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
        if (unlikely(*force_pt_level))
                return PT_PAGE_TABLE_LEVEL;
 
-       host_level = host_mapping_level(vcpu->kvm, large_gfn);
+       host_level = host_mapping_level(vcpu, large_gfn);
 
        if (host_level == PT_PAGE_TABLE_LEVEL)
                return host_level;
index 7a4e346b0cb382ffdb2de86d2821ac3bfb6a901c..eacb8c48e768919b098661d04300418c41aa6753 100644 (file)
@@ -768,7 +768,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
index 68a3e0aa625f4df2c09aa0a814ee6381af143467..cb1a4bbe3b30f097c8839fc39f3d09501df9e565 100644 (file)
@@ -1406,14 +1406,14 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        struct vm_area_struct *vma;
        unsigned long addr, size;
 
        size = PAGE_SIZE;
 
-       addr = gfn_to_hva(kvm, gfn);
+       addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
        if (kvm_is_error_hva(addr))
                return PAGE_SIZE;