/* arguments to kvm_mmu_do_page_fault. */
const gpa_t addr;
const u32 error_code;
- const bool prefault;
+ const bool prefetch;
/* Derived from error_code. */
const bool exec;
};
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- u32 err, bool prefault)
+ u32 err, bool prefetch)
{
struct kvm_page_fault fault = {
.addr = cr2_or_gpa,
.present = err & PFERR_PRESENT_MASK,
.rsvd = err & PFERR_RSVD_MASK,
.user = err & PFERR_USER_MASK,
- .prefault = prefault,
+ .prefetch = prefetch,
.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
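For context, the renamed argument encodes the caller's intent: a real guest fault passes false, while a replayed/prefetched fault passes true. A minimal sketch of the two existing call sites, assuming they change only in the argument's name:

    /* kvm_mmu_page_fault(): a real, guest-visible fault */
    r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
                              lower_32_bits(error_code), false);

    /* kvm_arch_async_page_ready(): replay a completed async #PF;
     * the guest must never observe this as a new fault.
     */
    kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);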
* be write-protected.
*/
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
- gfn_t gfn, bool can_unsync, bool speculative)
+ gfn_t gfn, bool can_unsync, bool prefetch)
{
struct kvm_mmu_page *sp;
bool locked = false;
if (sp->unsync)
continue;
- if (speculative)
+ if (prefetch)
return -EEXIST;
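Returning -EEXIST for a prefetch tells the caller not to unsync the shadow page: a prefetch is not a real guest write, so the SPTE is left read-only and the eventual genuine write fault performs the unsync. The caller-side contract is visible in the make_spte() hunk further down:

    /* prefetch => -EEXIST => write-protect instead of unsyncing */
    if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch))
        wrprot = true;    /* SPTE stays read-only for now */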
/* Prefetching always gets a writable pfn. */
bool host_writable = !fault || fault->map_writable;
- bool speculative = !fault || fault->prefault;
+ bool prefetch = !fault || fault->prefetch;
bool write_fault = fault && fault->write;
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
was_rmapped = 1;
}
- wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, speculative,
+ wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
true, host_writable, &spte);
if (*sptep == spte) {
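Note the `!fault ||` pattern at the top of this hunk: SPTE-prefetch paths install entries with no fault context at all, so the absence of a fault is itself treated as a prefetch. A sketch of such a caller (modeled on direct_pte_prefetch_many(), which this patch does not need to touch):

    /* No struct kvm_page_fault: fault == NULL implies prefetch. */
    mmu_set_spte(vcpu, slot, sptep, access, gfn,
                 page_to_pfn(pages[i]), NULL);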
if (!async)
return false; /* *pfn has correct page already */
- if (!fault->prefault && kvm_can_do_async_pf(vcpu)) {
+ if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(fault->addr, fault->gfn);
if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
}
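A prefetch also never arms the async-#PF machinery: there is no guest task to suspend and notify, so the path falls through to resolving the pfn synchronously, roughly (exact helper arguments hedged):

    /* No async #PF for prefetches: pin the pfn synchronously. */
    *pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
                                fault->write, &fault->map_writable,
                                &fault->hva);
    return false;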
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
- gfn_t gfn, bool can_unsync, bool speculative);
+ gfn_t gfn, bool can_unsync, bool prefetch);
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
*/
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
- if (!fault->prefault)
+ if (!fault->prefetch)
kvm_inject_emulated_page_fault(vcpu, &walker.fault);
return RET_PF_RETRY;
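The shadow-paging path states the guest-visibility rule directly: only a real fault reflects the walker's error code back into the guest. As a summary (not literal kernel code):

    /*
     * prefetch == false -> inject #PF from walker.fault; guest sees it.
     * prefetch == true  -> no injection; RET_PF_RETRY quietly drops
     *                      the prefetch attempt.
     */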
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
- u64 old_spte, bool speculative, bool can_unsync,
+ u64 old_spte, bool prefetch, bool can_unsync,
bool host_writable, u64 *new_spte)
{
int level = sp->role.level;
* read access. See FNAME(gpte_access) in paging_tmpl.h.
*/
spte |= shadow_present_mask;
- if (!speculative)
+ if (!prefetch)
spte |= spte_shadow_accessed_mask(spte);
if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
* e.g. it's write-tracked (upper-level SPs) or has one or more
* shadow pages and unsync'ing pages is not allowed.
*/
- if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, speculative)) {
+ if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
wrprot = true;
spte |= spte_shadow_dirty_mask(spte);
out:
- if (speculative)
+ if (prefetch)
spte = mark_spte_for_access_track(spte);
WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
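Prefetched SPTEs therefore start out "not accessed": the accessed bit is withheld and, when shadow A/D bits are disabled, the SPTE is marked for access tracking. The first genuine guest access restores it through the existing access-track helpers, conceptually:

    /* First real access (sketch): the fast path restores the SPTE. */
    if (is_access_track_spte(spte))
        spte = restore_acc_track_spte(spte);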
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
- u64 old_spte, bool speculative, bool can_unsync,
+ u64 old_spte, bool prefetch, bool can_unsync,
bool host_writable, u64 *new_spte);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
else
wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
- fault->pfn, iter->old_spte, fault->prefault, true,
+ fault->pfn, iter->old_spte, fault->prefetch, true,
fault->map_writable, &new_spte);
if (new_spte == iter->old_spte)
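The TDP MMU forwards fault->prefetch straight into make_spte(), so the accessed-bit and unsync behavior sketched above applies identically to both MMUs.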