return vmx_handle_exit(vcpu, fastpath);
 }
 
+static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+{
+       struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
+
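+       /* Drop any stale posted interrupts before APIC state is restored. */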
+       pi_clear_on(pi);
+       memset(pi->pir, 0, sizeof(pi->pir));
+}
+
+static int vt_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
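+       /* A TD's vAPIC is inaccessible to KVM; there is no IRR to sync. */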
+       if (is_td_vcpu(vcpu))
+               return -1;
+
+       return vmx_sync_pir_to_irr(vcpu);
+}
+
+static void vt_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+                          int trig_mode, int vector)
+{
+       if (is_td_vcpu(apic->vcpu)) {
+               tdx_deliver_interrupt(apic, delivery_mode, trig_mode, vector);
+               return;
+       }
+
+       vmx_deliver_interrupt(apic, delivery_mode, trig_mode, vector);
+}
+
 static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
 {
        if (is_td_vcpu(vcpu)) {
        vmx_load_mmu_pgd(vcpu, root_hpa, pgd_level);
 }
 
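+/*
+ * KVM can't read or write the guest state of a TD vCPU (the TDX module owns
+ * it), and interrupts can be delivered to a TD only as posted interrupts.
+ * The wrappers below therefore do nothing, or report fixed values, for TD
+ * vCPUs.
+ */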
+static void vt_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_set_interrupt_shadow(vcpu, mask);
+}
+
+static u32 vt_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return 0;
+
+       return vmx_get_interrupt_shadow(vcpu);
+}
+
+static void vt_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_inject_irq(vcpu, reinjected);
+}
+
+static void vt_cancel_injection(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_cancel_injection(vcpu);
+}
+
+static int vt_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+{
+       if (is_td_vcpu(vcpu))
+               return true;
+
+       return vmx_interrupt_allowed(vcpu, for_injection);
+}
+
+static void vt_enable_irq_window(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu))
+               return;
+
+       vmx_enable_irq_window(vcpu);
+}
+
 static void vt_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
 {
        *intr_info = 0;
        .handle_exit = vt_handle_exit,
        .skip_emulated_instruction = vmx_skip_emulated_instruction,
        .update_emulated_instruction = vmx_update_emulated_instruction,
-       .set_interrupt_shadow = vmx_set_interrupt_shadow,
-       .get_interrupt_shadow = vmx_get_interrupt_shadow,
+       .set_interrupt_shadow = vt_set_interrupt_shadow,
+       .get_interrupt_shadow = vt_get_interrupt_shadow,
        .patch_hypercall = vmx_patch_hypercall,
-       .inject_irq = vmx_inject_irq,
+       .inject_irq = vt_inject_irq,
        .inject_nmi = vmx_inject_nmi,
        .inject_exception = vmx_inject_exception,
-       .cancel_injection = vmx_cancel_injection,
-       .interrupt_allowed = vmx_interrupt_allowed,
+       .cancel_injection = vt_cancel_injection,
+       .interrupt_allowed = vt_interrupt_allowed,
        .nmi_allowed = vmx_nmi_allowed,
        .get_nmi_mask = vmx_get_nmi_mask,
        .set_nmi_mask = vmx_set_nmi_mask,
        .enable_nmi_window = vmx_enable_nmi_window,
-       .enable_irq_window = vmx_enable_irq_window,
+       .enable_irq_window = vt_enable_irq_window,
        .update_cr8_intercept = vmx_update_cr8_intercept,
 
        .x2apic_icr_is_split = false,
        .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
        .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
        .load_eoi_exitmap = vmx_load_eoi_exitmap,
-       .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+       .apicv_pre_state_restore = vt_apicv_pre_state_restore,
        .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
        .hwapic_isr_update = vmx_hwapic_isr_update,
-       .sync_pir_to_irr = vmx_sync_pir_to_irr,
-       .deliver_interrupt = vmx_deliver_interrupt,
+       .sync_pir_to_irr = vt_sync_pir_to_irr,
+       .deliver_interrupt = vt_deliver_interrupt,
        .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
 
        .set_tss_addr = vmx_set_tss_addr,
 
        if ((kvm_tdx->xfam & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE)
                vcpu->arch.xfd_no_write_intercept = true;
 
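+       /*
+        * Use the posted interrupt notification vector and keep notifications
+        * suppressed until the vCPU is ready to run.
+        */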
+       tdx->vt.pi_desc.nv = POSTED_INTR_VECTOR;
+       __pi_set_sn(&tdx->vt.pi_desc);
+
        tdx->state = VCPU_TD_STATE_UNINITIALIZED;
 
        return 0;
 {
        struct vcpu_tdx *tdx = to_tdx(vcpu);
 
+       vmx_vcpu_pi_load(vcpu, cpu);
        if (vcpu->cpu == cpu || !is_hkid_assigned(to_kvm_tdx(vcpu->kvm)))
                return;
 
 
        trace_kvm_entry(vcpu, force_immediate_exit);
 
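+       /*
+        * KVM can't directly inject a pending posted interrupt into a TD.
+        * Send a self-IPI with the notification vector so the pending
+        * interrupt is delivered on the upcoming TD entry.
+        */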
+       if (pi_test_on(&vt->pi_desc))
+               apic->send_IPI_self(POSTED_INTR_VECTOR);
+
        tdx_vcpu_enter_exit(vcpu);
 
        if (vt->host_debugctlmsr & ~TDX_DEBUGCTL_PRESERVED)
        return tdx_sept_drop_private_spte(kvm, gfn, level, page);
 }
 
+void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+                          int trig_mode, int vector)
+{
+       struct kvm_vcpu *vcpu = apic->vcpu;
+       struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+       /* TDX supports only posted interrupts.  No lapic emulation. */
+       __vmx_deliver_posted_interrupt(vcpu, &tdx->vt.pi_desc, vector);
+
+       trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+}
+
 int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
 {
        struct vcpu_tdx *tdx = to_tdx(vcpu);
        if (ret)
                return ret;
 
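+       /* Configure posted interrupts in the TD VMCS. */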
+       td_vmcs_write16(tdx, POSTED_INTR_NV, POSTED_INTR_VECTOR);
+       td_vmcs_write64(tdx, POSTED_INTR_DESC_ADDR, __pa(&tdx->vt.pi_desc));
+       td_vmcs_setbit32(tdx, PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_POSTED_INTR);
+
        tdx->state = VCPU_TD_STATE_INITIALIZED;
 
        return 0;
 
 bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
 void vmx_migrate_timers(struct kvm_vcpu *vcpu);
 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
-void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
 void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
 int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
 void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
 bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int tdx_handle_exit(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion fastpath);
+
+void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+                          int trig_mode, int vector);
 void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
                u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
 
 static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
 static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion fastpath) { return 0; }
+
+static inline void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+                                        int trig_mode, int vector) {}
 static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
                                     u64 *info2, u32 *intr_info, u32 *error_code) {}