From 661fa987e4b5a020dd697689fabcd3701600ba6f Mon Sep 17 00:00:00 2001
From: Sean Christopherson <seanjc@google.com>
Date: Thu, 10 Oct 2024 11:23:12 -0700
Subject: [PATCH] KVM: x86/mmu: Use gfn_to_page_many_atomic() when prefetching
 indirect PTEs
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Use gfn_to_page_many_atomic() instead of gfn_to_pfn_memslot_atomic()
when prefetching indirect PTEs (direct_pte_prefetch_many() already uses
the "to page" APIs).  Functionally, the two are subtly equivalent, as
the "to pfn" API short-circuits hva_to_pfn() if hva_to_pfn_fast()
fails, i.e. is just a wrapper for
get_user_page_fast_only()/get_user_pages_fast_only().

Switching to the "to page" API will allow dropping the @atomic
parameter from the entire hva_to_pfn() callchain.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-11-seanjc@google.com>
---
 arch/x86/kvm/mmu/paging_tmpl.h | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index fbaae040218b..36b2607280f0 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -535,8 +535,8 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	struct kvm_memory_slot *slot;
 	unsigned pte_access;
+	struct page *page;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
 
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return false;
@@ -549,12 +549,11 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (!slot)
 		return false;
 
-	pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
-	if (is_error_pfn(pfn))
+	if (gfn_to_page_many_atomic(slot, gfn, &page, 1) != 1)
 		return false;
 
-	mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL);
-	kvm_release_pfn_clean(pfn);
+	mmu_set_spte(vcpu, slot, spte, pte_access, gfn, page_to_pfn(page), NULL);
+	kvm_release_page_clean(page);
 	return true;
 }
-- 
2.50.1
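
P.S. For readers unfamiliar with the equivalence the changelog relies on,
below is a minimal, illustrative sketch of why the atomic "to pfn" path
reduces to the same fast-only GUP that gfn_to_page_many_atomic() uses.
The function name hva_to_pfn_sketch() and the simplified control flow are
assumptions for illustration only, not the actual kernel implementation;
get_user_page_fast_only(), page_to_pfn(), and KVM_PFN_ERR_FAULT are the
real primitives.

/*
 * Illustrative sketch only: simplified from the pre-patch behavior of
 * hva_to_pfn() with @atomic=true; not verbatim kernel source.
 */
static kvm_pfn_t hva_to_pfn_sketch(unsigned long addr, bool atomic)
{
	struct page *page;

	/*
	 * Fast path: pin the page without sleeping.  This is the same
	 * primitive gfn_to_page_many_atomic() ultimately relies on via
	 * get_user_pages_fast_only().
	 */
	if (get_user_page_fast_only(addr, FOLL_WRITE, &page))
		return page_to_pfn(page);

	/*
	 * Atomic callers must not sleep, so they stop here -- which is
	 * why the atomic "to pfn" and "to page" APIs behave the same.
	 */
	if (atomic)
		return KVM_PFN_ERR_FAULT;

	/* Slow path (may fault in the page, and may sleep) elided. */
	return KVM_PFN_ERR_FAULT;
}

Since the prefetch path always passed atomic=true, it could only ever
take the fast-only branch, so switching it to the "to page" API loses
nothing and lets the @atomic plumbing be removed later in the series.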