}
 }
 
-static void apic_timer_expired(struct kvm_lapic *apic)
+static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
 {
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_timer *ktimer = &apic->lapic_timer;
        if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
                ktimer->expired_tscdeadline = ktimer->tscdeadline;
 
+       if (!from_timer_fn && vcpu->arch.apicv_active) {
+               WARN_ON(kvm_get_running_vcpu() != vcpu);
+               kvm_apic_inject_pending_timer_irqs(apic);
+               return;
+       }
+
        if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
                if (apic->lapic_timer.timer_advance_ns)
                        __kvm_wait_lapic_expire(vcpu);
                expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
                hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
        } else
-               apic_timer_expired(apic);
+               apic_timer_expired(apic, false);
 
        local_irq_restore(flags);
 }
 
        if (ktime_after(ktime_get(),
                        apic->lapic_timer.target_expiration)) {
-               apic_timer_expired(apic);
+               apic_timer_expired(apic, false);
 
                if (apic_lvtt_oneshot(apic))
                        return;
                if (atomic_read(&ktimer->pending)) {
                        cancel_hv_timer(apic);
                } else if (expired) {
-                       apic_timer_expired(apic);
+                       apic_timer_expired(apic, false);
                        cancel_hv_timer(apic);
                }
        }
                goto out;
        WARN_ON(rcuwait_active(&vcpu->wait));
        cancel_hv_timer(apic);
-       apic_timer_expired(apic);
+       apic_timer_expired(apic, false);
 
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
        struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
        struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
 
-       apic_timer_expired(apic);
+       apic_timer_expired(apic, true);
 
        if (lapic_is_periodic(apic)) {
                advance_periodic_target_expiration(apic);
 
        return 1;
 }
 
+/*
+ * Fast path for a guest WRMSR to MSR_IA32_TSCDEADLINE: if the hypervisor
+ * timer can be used for this vCPU (per kvm_can_use_hv_timer()), program the
+ * new deadline directly and stay on the fast path; otherwise defer to the
+ * normal (slow) MSR emulation.
+ *
+ * Returns 0 when the write was handled here, 1 to fall back to the slow path
+ * (matching the caller's "!handle_fastpath_set_tscdeadline()" check).
+ */
+static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
+{
+       if (!kvm_can_use_hv_timer(vcpu))
+               return 1;
+
+       kvm_set_lapic_tscdeadline_msr(vcpu, data);
+       return 0;
+}
+
 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 {
        u32 msr = kvm_rcx_read(vcpu);
                        ret = EXIT_FASTPATH_EXIT_HANDLED;
                }
                break;
+       case MSR_IA32_TSCDEADLINE:
+               data = kvm_read_edx_eax(vcpu);
+               if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
+                       kvm_skip_emulated_instruction(vcpu);
+                       ret = EXIT_FASTPATH_REENTER_GUEST;
+               }
+               break;
        default:
                break;
        }