vmx_deliver_interrupt(apic, delivery_mode, trig_mode, vector);
 }
 
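+/*
+ * TDX-protected guest state is managed by the TDX module and is not
+ * accessible to KVM.  For a TD vCPU, the accessors below return benign
+ * defaults and the setters silently do nothing; only non-TD vCPUs are
+ * forwarded to the VMX implementation.
+ */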
+static void vt_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_vcpu_after_set_cpuid(vcpu);
+}
+
+static void vt_update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_update_exception_bitmap(vcpu);
+}
+
+static u64 vt_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+       if (is_td_vcpu(vcpu))
+               return 0;
+
+       return vmx_get_segment_base(vcpu, seg);
+}
+
+static void vt_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+                             int seg)
+{
+       if (is_td_vcpu(vcpu)) {
+               memset(var, 0, sizeof(*var));
+               return;
+       }
+
+       vmx_get_segment(vcpu, var, seg);
+}
+
+static void vt_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+                             int seg)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_segment(vcpu, var, seg);
+}
+
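+/*
+ * The CPL of a TD vCPU is protected state that KVM cannot read; report
+ * CPL 0 rather than guessing.
+ */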
+static int vt_get_cpl(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return 0;
+
+       return vmx_get_cpl(vcpu);
+}
+
+static int vt_get_cpl_no_cache(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return 0;
+
+       return vmx_get_cpl_no_cache(vcpu);
+}
+
+static void vt_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+       if (is_td_vcpu(vcpu)) {
+               *db = 0;
+               *l = 0;
+               return;
+       }
+
+       vmx_get_cs_db_l_bits(vcpu, db, l);
+}
+
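+/*
+ * CR0, CR4 and EFER of a TD are controlled and validated by the TDX
+ * module, so KVM treats any value as valid and drops writes.
+ */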
+static bool vt_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+       if (is_td_vcpu(vcpu))
+               return true;
+
+       return vmx_is_valid_cr0(vcpu, cr0);
+}
+
+static void vt_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_cr0(vcpu, cr0);
+}
+
+static bool vt_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+       if (is_td_vcpu(vcpu))
+               return true;
+
+       return vmx_is_valid_cr4(vcpu, cr4);
+}
+
+static void vt_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_cr4(vcpu, cr4);
+}
+
+static int vt_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+       if (is_td_vcpu(vcpu))
+               return 0;
+
+       return vmx_set_efer(vcpu, efer);
+}
+
+static void vt_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+       if (is_td_vcpu(vcpu)) {
+               memset(dt, 0, sizeof(*dt));
+               return;
+       }
+
+       vmx_get_idt(vcpu, dt);
+}
+
+static void vt_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_idt(vcpu, dt);
+}
+
+static void vt_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+       if (is_td_vcpu(vcpu)) {
+               memset(dt, 0, sizeof(*dt));
+               return;
+       }
+
+       vmx_get_gdt(vcpu, dt);
+}
+
+static void vt_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_gdt(vcpu, dt);
+}
+
+static void vt_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_dr6(vcpu, val);
+}
+
+static void vt_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_dr7(vcpu, val);
+}
+
+static void vt_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+       /*
+        * MOV-DR exiting is always cleared for TD guests, even in debug mode.
+        * Thus KVM_DEBUGREG_WONT_EXIT can never be set, and this path should
+        * never be reached for a TD vCPU.
+        */
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_sync_dirty_debug_regs(vcpu);
+}
+
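+/*
+ * Register state of a TD cannot be read, so there is nothing to cache;
+ * landing here with a TD vCPU would be a KVM bug, hence the WARN.
+ */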
+static void vt_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+       if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
+               return;
+
+       vmx_cache_reg(vcpu, reg);
+}
+
+static unsigned long vt_get_rflags(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return 0;
+
+       return vmx_get_rflags(vcpu);
+}
+
+static void vt_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_rflags(vcpu, rflags);
+}
+
+static bool vt_get_if_flag(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return false;
+
+       return vmx_get_if_flag(vcpu);
+}
+
 static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
 {
        if (is_td_vcpu(vcpu)) {
        vmx_inject_irq(vcpu, reinjected);
 }
 
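+/*
+ * KVM has no mechanism to inject exceptions into a TD; event injection
+ * is owned by the TDX module, so the request is silently dropped.
+ */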
+static void vt_inject_exception(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_inject_exception(vcpu);
+}
+
 static void vt_cancel_injection(struct kvm_vcpu *vcpu)
 {
        if (is_td_vcpu(vcpu))
        vmx_get_exit_info(vcpu, reason, info1, info2, intr_info, error_code);
 }
 
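+/*
+ * TDs always run with APICv enabled, so the CR8/TPR intercept is never
+ * used for a TD vCPU and the update can be ignored.
+ */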
+static void vt_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_update_cr8_intercept(vcpu, tpr, irr);
+}
+
 static void vt_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
 {
        if (is_td_vcpu(vcpu))
        vmx_refresh_apicv_exec_ctrl(vcpu);
 }
 
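+/*
+ * The VMCS of a TD is managed by the TDX module; KVM cannot write the
+ * EOI-exit bitmap, so the update is dropped for TD vCPUs.
+ */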
+static void vt_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
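+/*
+ * The real-mode TSS and identity-mapped page table only exist to fake
+ * real mode for VMX guests without unrestricted guest support; a TD
+ * never runs in emulated real mode, so both ioctls are nops that report
+ * success.
+ */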
+static int vt_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+       if (is_td(kvm))
+               return 0;
+
+       return vmx_set_tss_addr(kvm, addr);
+}
+
+static int vt_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+{
+       if (is_td(kvm))
+               return 0;
+
+       return vmx_set_identity_map_addr(kvm, ident_addr);
+}
+
 static int vt_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 {
        if (!is_td(kvm))
        .vcpu_load = vt_vcpu_load,
        .vcpu_put = vt_vcpu_put,
 
-       .update_exception_bitmap = vmx_update_exception_bitmap,
+       .update_exception_bitmap = vt_update_exception_bitmap,
        .get_feature_msr = vmx_get_feature_msr,
        .get_msr = vt_get_msr,
        .set_msr = vt_set_msr,
-       .get_segment_base = vmx_get_segment_base,
-       .get_segment = vmx_get_segment,
-       .set_segment = vmx_set_segment,
-       .get_cpl = vmx_get_cpl,
-       .get_cpl_no_cache = vmx_get_cpl_no_cache,
-       .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
-       .is_valid_cr0 = vmx_is_valid_cr0,
-       .set_cr0 = vmx_set_cr0,
-       .is_valid_cr4 = vmx_is_valid_cr4,
-       .set_cr4 = vmx_set_cr4,
-       .set_efer = vmx_set_efer,
-       .get_idt = vmx_get_idt,
-       .set_idt = vmx_set_idt,
-       .get_gdt = vmx_get_gdt,
-       .set_gdt = vmx_set_gdt,
-       .set_dr6 = vmx_set_dr6,
-       .set_dr7 = vmx_set_dr7,
-       .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
-       .cache_reg = vmx_cache_reg,
-       .get_rflags = vmx_get_rflags,
-       .set_rflags = vmx_set_rflags,
-       .get_if_flag = vmx_get_if_flag,
+
+       .get_segment_base = vt_get_segment_base,
+       .get_segment = vt_get_segment,
+       .set_segment = vt_set_segment,
+       .get_cpl = vt_get_cpl,
+       .get_cpl_no_cache = vt_get_cpl_no_cache,
+       .get_cs_db_l_bits = vt_get_cs_db_l_bits,
+       .is_valid_cr0 = vt_is_valid_cr0,
+       .set_cr0 = vt_set_cr0,
+       .is_valid_cr4 = vt_is_valid_cr4,
+       .set_cr4 = vt_set_cr4,
+       .set_efer = vt_set_efer,
+       .get_idt = vt_get_idt,
+       .set_idt = vt_set_idt,
+       .get_gdt = vt_get_gdt,
+       .set_gdt = vt_set_gdt,
+       .set_dr6 = vt_set_dr6,
+       .set_dr7 = vt_set_dr7,
+       .sync_dirty_debug_regs = vt_sync_dirty_debug_regs,
+       .cache_reg = vt_cache_reg,
+       .get_rflags = vt_get_rflags,
+       .set_rflags = vt_set_rflags,
+       .get_if_flag = vt_get_if_flag,
 
        .flush_tlb_all = vt_flush_tlb_all,
        .flush_tlb_current = vt_flush_tlb_current,
        .patch_hypercall = vmx_patch_hypercall,
        .inject_irq = vt_inject_irq,
        .inject_nmi = vt_inject_nmi,
-       .inject_exception = vmx_inject_exception,
+       .inject_exception = vt_inject_exception,
        .cancel_injection = vt_cancel_injection,
        .interrupt_allowed = vt_interrupt_allowed,
        .nmi_allowed = vt_nmi_allowed,
        .set_nmi_mask = vt_set_nmi_mask,
        .enable_nmi_window = vt_enable_nmi_window,
        .enable_irq_window = vt_enable_irq_window,
-       .update_cr8_intercept = vmx_update_cr8_intercept,
+       .update_cr8_intercept = vt_update_cr8_intercept,
 
        .x2apic_icr_is_split = false,
        .set_virtual_apic_mode = vt_set_virtual_apic_mode,
        .set_apic_access_page_addr = vt_set_apic_access_page_addr,
        .refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl,
-       .load_eoi_exitmap = vmx_load_eoi_exitmap,
+       .load_eoi_exitmap = vt_load_eoi_exitmap,
        .apicv_pre_state_restore = vt_apicv_pre_state_restore,
        .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
        .hwapic_isr_update = vt_hwapic_isr_update,
        .deliver_interrupt = vt_deliver_interrupt,
        .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
 
-       .set_tss_addr = vmx_set_tss_addr,
-       .set_identity_map_addr = vmx_set_identity_map_addr,
+       .set_tss_addr = vt_set_tss_addr,
+       .set_identity_map_addr = vt_set_identity_map_addr,
        .get_mt_mask = vmx_get_mt_mask,
 
        .get_exit_info = vt_get_exit_info,
        .get_entry_info = vt_get_entry_info,
 
-       .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
+       .vcpu_after_set_cpuid = vt_vcpu_after_set_cpuid,
 
        .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,