kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
                        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+                       kvm_x86_ops->tlb_flush(vcpu, true);
                        __clear_sp_write_flooding_count(
                                page_header(mmu->root_hpa));
 
        if (r)
                goto out;
        kvm_mmu_load_cr3(vcpu);
+       kvm_x86_ops->tlb_flush(vcpu, true);
 out:
        return r;
 }
 
 
 static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
 {
-       /* set_cr3() should ensure TLB has been flushed */
+       /*
+        * NOTE: set_cr3() no longer flushes the TLB itself (the flushes
+        * were removed from the svm set_cr3 implementations); callers of
+        * kvm_mmu_load_cr3() must issue kvm_x86_ops->tlb_flush() after
+        * the root is loaded.
+        */
        if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
                vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 }
 
 
        svm->vmcb->control.nested_cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_NPT);
-       svm_flush_tlb(vcpu, true);
 }
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 
        svm->vmcb->save.cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_CR);
-       svm_flush_tlb(vcpu, true);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
        /* Also sync guest cr3 here in case we live migrate */
        svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
        mark_dirty(svm->vmcb, VMCB_CR);
-
-       svm_flush_tlb(vcpu, true);
 }
 
 static int is_disabled(void)