was_rmapped = 1;
        }
 
-       wrprot = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
-                          true, host_writable, sp_ad_disabled(sp), &spte);
+       wrprot = make_spte(vcpu, sp, pte_access, gfn, pfn, *sptep, speculative,
+                          true, host_writable, &spte);
 
        if (*sptep == spte) {
                ret = RET_PF_SPURIOUS;
 
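The hunk above is mmu_set_spte() in arch/x86/kvm/mmu/mmu.c; the one below is the FNAME(sync_page) resync path in arch/x86/kvm/mmu/paging_tmpl.h. Both call sites already had the shadow page in hand, and both derived the dropped ad_disabled argument from it through this one-line accessor in arch/x86/kvm/mmu/spte.h, shown here for reference:

	static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
	{
		return sp->role.ad_disabled;
	}

With make_spte() taking sp directly, the accessor call disappears from the callers; the sync path additionally drops the explicit PG_LEVEL_4K, which is redundant with sp->role.level for a last-level shadow page.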
                sptep = &sp->spt[i];
                spte = *sptep;
                host_writable = spte & shadow_host_writable_mask;
-               make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn,
+               make_spte(vcpu, sp, pte_access, gfn,
                          spte_to_pfn(spte), spte, true, false,
-                         host_writable, sp_ad_disabled(sp), &spte);
+                         host_writable, &spte);
 
                flush |= mmu_spte_update(sptep, spte);
        }
 
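Next comes make_spte() itself in arch/x86/kvm/mmu/spte.c; the first two context lines below are the tail of the neighboring kvm_is_mmio_pfn(). Everything the removed parameters carried is recoverable from the page's role. An abridged sketch of that bitfield, union kvm_mmu_page_role from arch/x86/include/asm/kvm_host.h, with unrelated bits elided:

	union kvm_mmu_page_role {
		u32 word;
		struct {
			unsigned level:4;       /* paging level of this page */
			/* ... other role bits elided ... */
			unsigned ad_disabled:1; /* A/D bits disabled for this MMU */
			/* ... */
		};
	};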
                                     E820_TYPE_RAM);
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
-                    gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
-                    bool can_unsync, bool host_writable, bool ad_disabled,
-                    u64 *new_spte)
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+              unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+              u64 old_spte, bool speculative, bool can_unsync,
+              bool host_writable, u64 *new_spte)
 {
+       int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
        bool wrprot = false;
 
-       if (ad_disabled)
+       if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
 
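The declaration in arch/x86/kvm/mmu/spte.h changes to match; the stray-looking return gen below is the tail of get_mmio_spte_generation(), which sits just above these prototypes in the header. A hypothetical minimal call site, just to show the new shape (every name other than make_spte() and the types is a placeholder):

	u64 new_spte;
	bool wrprot;

	/* Level and A/D mode now travel inside sp instead of as arguments. */
	wrprot = make_spte(vcpu, sp, ACC_ALL, gfn, pfn, old_spte,
			   false /* speculative */, true /* can_unsync */,
			   true /* host_writable */, &new_spte);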
        return gen;
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
-                    gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
-                    bool can_unsync, bool host_writable, bool ad_disabled,
-                    u64 *new_spte);
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+              unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+              u64 old_spte, bool speculative, bool can_unsync,
+              bool host_writable, u64 *new_spte);
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
 u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
 u64 mark_spte_for_access_track(u64 spte);
 
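Finally the TDP MMU, the one caller that did not previously have a kvm_mmu_page at this point: tdp_mmu_map_handle_target_level() in arch/x86/kvm/mmu/tdp_mmu.c now fetches it from the iterator's SPTE pointer, and the new WARN_ON records the invariant this depends on, namely that the page holding the target SPTE sits at fault->goal_level, so sp->role.level equals the iter->level the old code passed. A condensed sketch of the lookup, after to_shadow_page()/sptep_to_sp() in arch/x86/kvm/mmu/mmu_internal.h: shadow page tables are single pages, and KVM stashes the owning kvm_mmu_page in the struct page's private field:

	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}

	static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
	{
		return to_shadow_page(__pa(sptep));
	}

The role's per-page ad_disabled bit also replaces the old !shadow_accessed_mask argument, which only said whether A/D bits are usable at all.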
                                          struct kvm_page_fault *fault,
                                          struct tdp_iter *iter)
 {
+       struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
        u64 new_spte;
        int ret = RET_PF_FIXED;
        bool wrprot = false;
 
+       WARN_ON(sp->role.level != fault->goal_level);
        if (unlikely(!fault->slot))
                new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
        else
-               wrprot = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
+               wrprot = make_spte(vcpu, sp, ACC_ALL, iter->gfn,
                                         fault->pfn, iter->old_spte, fault->prefault, true,
-                                        fault->map_writable, !shadow_accessed_mask,
-                                        &new_spte);
+                                        fault->map_writable, &new_spte);
 
        if (new_spte == iter->old_spte)
                ret = RET_PF_SPURIOUS;
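As before, if the freshly computed SPTE is identical to the one already installed, another task beat this vCPU to the fix-up: the fault is reported as RET_PF_SPURIOUS instead of RET_PF_FIXED and the redundant SPTE write is skipped.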