int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-       return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+       int r;
+
+       r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+
+       /*
+        * The TDP MMU maintains paging structures independent of the shadow
+        * MMU rmaps, so the changed-pte notification must be forwarded to it
+        * as well; OR the results so a flush requested by either path is
+        * reported to the caller.
+        */
+       if (kvm->arch.tdp_mmu_enabled)
+               r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
+
+       return r;
 }
 
 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
        return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
                                            test_age_gfn);
 }
+
+/*
+ * Handle the changed_pte MMU notifier for the TDP MMU.
+ * data is a pointer to the new pte_t mapping the HVA specified by the MMU
+ * notifier.
+ * Always returns 0: unlike the rmap handlers, this function performs any
+ * required TLB flush itself instead of asking the caller to flush.
+ */
+static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
+                       struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
+                       unsigned long data)
+{
+       struct tdp_iter iter;
+       pte_t *ptep = (pte_t *)data;
+       kvm_pfn_t new_pfn;
+       u64 new_spte;
+       int need_flush = 0;
+
+       /* The notifier covers a single 4K HVA; a huge pte is unexpected. */
+       WARN_ON(pte_huge(*ptep));
+
+       new_pfn = pte_pfn(*ptep);
+
+       /* Walk the single-gfn range [gfn, gfn + 1) in this root. */
+       tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
+               /* Only act on the leaf 4K SPTE; skip upper-level entries. */
+               if (iter.level != PG_LEVEL_4K)
+                       continue;
+
+               /* No present leaf SPTE means the gfn isn't mapped here. */
+               if (!is_shadow_present_pte(iter.old_spte))
+                       break;
+
+               /* Zap the stale mapping before optionally re-mapping below. */
+               tdp_mmu_set_spte(kvm, &iter, 0);
+
+               kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);
+
+               /*
+                * Re-install a mapping only when the new host pte is not
+                * writable; a writable pte is left unmapped here (presumably
+                * to be faulted back in on next access — matches the
+                * kvm_set_pte_rmapp behavior).
+                */
+               if (!pte_write(*ptep)) {
+                       new_spte = kvm_mmu_changed_pte_notifier_make_spte(
+                                       iter.old_spte, new_pfn);
+
+                       tdp_mmu_set_spte(kvm, &iter, new_spte);
+               }
+
+               need_flush = 1;
+       }
+
+       /*
+        * NOTE(review): every zapped SPTE was already flushed inside the loop
+        * above, so this trailing flush looks redundant — confirm whether one
+        * of the two flushes can be dropped.
+        */
+       if (need_flush)
+               kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
+
+       return 0;
+}
+
+/*
+ * Forward a changed_pte MMU notification for a single HVA to the TDP MMU.
+ * host_ptep points to the new host pte; it is smuggled through the generic
+ * hva-range handler as an unsigned long and unpacked by set_tdp_spte.
+ * The [address, address + 1) range selects just the page containing the HVA.
+ */
+int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
+                            pte_t *host_ptep)
+{
+       return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
+                                           (unsigned long)host_ptep,
+                                           set_tdp_spte);
+}
+
 
 int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
                              unsigned long end);
 int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);
+
+int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
+                            pte_t *host_ptep);
 #endif /* __KVM_X86_MMU_TDP_MMU_H */