 static struct kmem_cache *mmu_page_header_cache;
 static struct percpu_counter kvm_total_used_mmu_pages;
 
-static u64 __read_mostly shadow_trap_nonpresent_pte;
-static u64 __read_mostly shadow_notrap_nonpresent_pte;
 static u64 __read_mostly shadow_nx_mask;
 static u64 __read_mostly shadow_x_mask;        /* mutually exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
        return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
-{
-       shadow_trap_nonpresent_pte = trap_pte;
-       shadow_notrap_nonpresent_pte = notrap_pte;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
-
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
 
 static int is_shadow_present_pte(u64 pte)
 {
-       return pte != shadow_trap_nonpresent_pte
-               && pte != shadow_notrap_nonpresent_pte;
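+       /* Zero is now the only nonpresent spte encoding, so the present bit suffices. */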
+       return pte & PT_PRESENT_MASK;
 }
 
 static int is_large_pte(u64 pte)
        return 1;
 }
 
-static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
+static void drop_spte(struct kvm *kvm, u64 *sptep)
 {
-       if (set_spte_track_bits(sptep, new_spte))
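+       /* Clearing to 0 marks the spte nonpresent; drop the rmap if one was tracked. */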
+       if (set_spte_track_bits(sptep, 0ull))
                rmap_remove(kvm, sptep);
 }
 
                        BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                        if (is_writable_pte(*spte)) {
-                               drop_spte(kvm, spte,
-                                         shadow_trap_nonpresent_pte);
+                               drop_spte(kvm, spte);
                                --kvm->stat.lpages;
                                spte = NULL;
                                write_protected = 1;
        while ((spte = rmap_next(kvm, rmapp, NULL))) {
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
-               drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
+               drop_spte(kvm, spte);
                need_tlb_flush = 1;
        }
        return need_tlb_flush;
                rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
                need_flush = 1;
                if (pte_write(*ptep)) {
-                       drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
+                       drop_spte(kvm, spte);
                        spte = rmap_next(kvm, rmapp, NULL);
                } else {
                        new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
                            u64 *parent_pte)
 {
        mmu_page_remove_parent_pte(sp, parent_pte);
-       __set_spte(parent_pte, shadow_trap_nonpresent_pte);
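+       /* 0ull is the nonpresent value that replaces shadow_trap_nonpresent_pte. */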
+       __set_spte(parent_pte, 0ull);
 }
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        kvm_mmu_mark_parents_unsync(sp);
 }
 
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
-                                   struct kvm_mmu_page *sp)
-{
-       int i;
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-               sp->spt[i] = shadow_trap_nonpresent_pte;
-}
-
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
                               struct kvm_mmu_page *sp)
 {
        }
 }
 
+static void init_shadow_page_table(struct kvm_mmu_page *sp)
+{
+       int i;
+
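+       /* New shadow pages start out fully nonpresent, i.e. all-zero. */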
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+               sp->spt[i] = 0ull;
+}
+
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
 
                account_shadowed(vcpu->kvm, gfn);
        }
-       if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
-               vcpu->arch.mmu.prefetch_page(vcpu, sp);
-       else
-               nonpaging_prefetch_page(vcpu, sp);
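+       /* The per-mode prefetch_page hooks are gone; start from a zeroed table. */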
+       init_shadow_page_table(sp);
        trace_kvm_mmu_get_page(sp, true);
        return sp;
 }
 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 {
        if (is_large_pte(*sptep)) {
-               drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+               drop_spte(vcpu->kvm, sptep);
                kvm_flush_remote_tlbs(vcpu->kvm);
        }
 }
        pte = *spte;
        if (is_shadow_present_pte(pte)) {
                if (is_last_spte(pte, sp->role.level))
-                       drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
+                       drop_spte(kvm, spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        drop_parent_pte(child, spte);
                }
        }
-       __set_spte(spte, shadow_trap_nonpresent_pte);
+
        if (is_large_pte(pte))
                --kvm->stat.lpages;
 }
        __set_bit(slot, sp->slot_bitmap);
 }
 
-static void mmu_convert_notrap(struct kvm_mmu_page *sp)
-{
-       int i;
-       u64 *pt = sp->spt;
-
-       if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
-               return;
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-               if (pt[i] == shadow_notrap_nonpresent_pte)
-                       __set_spte(&pt[i], shadow_trap_nonpresent_pte);
-       }
-}
-
 /*
  * The function is based on mtrr_type_lookup() in
  * arch/x86/kernel/cpu/mtrr/generic.c
        sp->unsync = 1;
 
        kvm_mmu_mark_parents_unsync(sp);
-       mmu_convert_notrap(sp);
 }
 
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
                if (level > PT_PAGE_TABLE_LEVEL &&
                    has_wrprotected_page(vcpu->kvm, gfn, level)) {
                        ret = 1;
-                       drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+                       drop_spte(vcpu->kvm, sptep);
                        goto done;
                }
 
                } else if (pfn != spte_to_pfn(*sptep)) {
                        pgprintk("hfn old %llx new %llx\n",
                                 spte_to_pfn(*sptep), pfn);
-                       drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+                       drop_spte(vcpu->kvm, sptep);
                        kvm_flush_remote_tlbs(vcpu->kvm);
                } else
                        was_rmapped = 1;
        spte = sp->spt + i;
 
        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
-               if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
+               if (is_shadow_present_pte(*spte) || spte == sptep) {
                        if (!start)
                                continue;
                        if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
                        break;
                }
 
-               if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+               if (!is_shadow_present_pte(*iterator.sptep)) {
                        u64 base_addr = iterator.addr;
 
                        base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
-       context->prefetch_page = nonpaging_prefetch_page;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = nonpaging_invlpg;
        context->update_pte = nonpaging_update_pte;
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
-       context->prefetch_page = paging64_prefetch_page;
        context->sync_page = paging64_sync_page;
        context->invlpg = paging64_invlpg;
        context->update_pte = paging64_update_pte;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
-       context->prefetch_page = paging32_prefetch_page;
        context->sync_page = paging32_sync_page;
        context->invlpg = paging32_invlpg;
        context->update_pte = paging32_update_pte;
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = tdp_page_fault;
        context->free = nonpaging_free;
-       context->prefetch_page = nonpaging_prefetch_page;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = nonpaging_invlpg;
        context->update_pte = nonpaging_update_pte;
                                continue;
 
                        if (is_large_pte(pt[i])) {
-                               drop_spte(kvm, &pt[i],
-                                         shadow_trap_nonpresent_pte);
+                               drop_spte(kvm, &pt[i]);
                                --kvm->stat.lpages;
                                continue;
                        }
 
                                    struct kvm_mmu_page *sp, u64 *spte,
                                    pt_element_t gpte)
 {
-       u64 nonpresent = shadow_trap_nonpresent_pte;
-
        if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
                goto no_present;
 
-       if (!is_present_gpte(gpte)) {
-               if (!sp->unsync)
-                       nonpresent = shadow_notrap_nonpresent_pte;
+       if (!is_present_gpte(gpte))
                goto no_present;
-       }
 
        if (!(gpte & PT_ACCESSED_MASK))
                goto no_present;
        return false;
 
 no_present:
-       drop_spte(vcpu->kvm, spte, nonpresent);
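+       /* With notrap sptes removed, simply drop the spte to 0. */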
+       drop_spte(vcpu->kvm, spte);
        return true;
 }
 
                if (spte == sptep)
                        continue;
 
-               if (*spte != shadow_trap_nonpresent_pte)
+               if (is_shadow_present_pte(*spte))
                        continue;
 
                gpte = gptep[i];
                        if (is_shadow_present_pte(*sptep)) {
                                if (is_large_pte(*sptep))
                                        --vcpu->kvm->stat.lpages;
-                               drop_spte(vcpu->kvm, sptep,
-                                         shadow_trap_nonpresent_pte);
+                               drop_spte(vcpu->kvm, sptep);
                                need_flush = 1;
-                       } else
-                               __set_spte(sptep, shadow_trap_nonpresent_pte);
+                       }
+
                        break;
                }
 
        return gpa;
 }
 
-static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
-                                struct kvm_mmu_page *sp)
-{
-       int i, j, offset, r;
-       pt_element_t pt[256 / sizeof(pt_element_t)];
-       gpa_t pte_gpa;
-
-       if (sp->role.direct
-           || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
-               nonpaging_prefetch_page(vcpu, sp);
-               return;
-       }
-
-       pte_gpa = gfn_to_gpa(sp->gfn);
-       if (PTTYPE == 32) {
-               offset = sp->role.quadrant << PT64_LEVEL_BITS;
-               pte_gpa += offset * sizeof(pt_element_t);
-       }
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
-               r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
-               pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
-               for (j = 0; j < ARRAY_SIZE(pt); ++j)
-                       if (r || is_present_gpte(pt[j]))
-                               sp->spt[i+j] = shadow_trap_nonpresent_pte;
-                       else
-                               sp->spt[i+j] = shadow_notrap_nonpresent_pte;
-       }
-}
-
 /*
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
                }
 
                if (gfn != sp->gfns[i]) {
-                       drop_spte(vcpu->kvm, &sp->spt[i],
-                                     shadow_trap_nonpresent_pte);
+                       drop_spte(vcpu->kvm, &sp->spt[i]);
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }