static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
-static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
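+/*
+ * Split out of kvm_sync_page(): when @clear_unsync is true the page is
+ * write-protected and unlinked from the unsync list before ->sync_page()
+ * runs; pass false for a transient sync that leaves the unsync state
+ * intact.
+ */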
+static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                          bool clear_unsync)
 {
        if (sp->role.cr4_pae != !!is_pae(vcpu)) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
                return 1;
        }
 
-       if (rmap_write_protect(vcpu->kvm, sp->gfn))
-               kvm_flush_remote_tlbs(vcpu->kvm);
-       kvm_unlink_unsync_page(vcpu->kvm, sp);
+       if (clear_unsync) {
+               if (rmap_write_protect(vcpu->kvm, sp->gfn))
+                       kvm_flush_remote_tlbs(vcpu->kvm);
+               kvm_unlink_unsync_page(vcpu->kvm, sp);
+       }
+
        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
                return 1;
        }
 
        return 0;
 }
 
+static void mmu_convert_notrap(struct kvm_mmu_page *sp);
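+/*
+ * Sync @sp without clearing its unsync flag: on success the page stays
+ * on the unsync list and its nonpresent sptes are converted back to
+ * notrap by mmu_convert_notrap().
+ */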
+static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
+                                  struct kvm_mmu_page *sp)
+{
+       int ret;
+
+       ret = __kvm_sync_page(vcpu, sp, false);
+       if (!ret)
+               mmu_convert_notrap(sp);
+       return ret;
+}
+
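+/* Original behaviour: write-protect, clear the unsync state, then sync. */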
+static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       return __kvm_sync_page(vcpu, sp, true);
+}
+
 struct mmu_page_path {
        struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
        unsigned int idx[PT64_ROOT_LEVEL-1];