1 : count_vectors(apic->regs + APIC_ISR);
        apic->highest_isr_cache = -1;
        if (vcpu->arch.apicv_active) {
-               if (kvm_x86_ops->apicv_post_state_restore)
-                       kvm_x86_ops->apicv_post_state_restore(vcpu);
+               kvm_x86_ops->apicv_post_state_restore(vcpu);
                kvm_x86_ops->hwapic_irr_update(vcpu,
                                apic_find_highest_irr(apic));
                kvm_x86_ops->hwapic_isr_update(vcpu,
 
        vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
 }
 
+/*
+ * Reset the VMX posted-interrupt descriptor after full APIC state has been
+ * restored by userspace (wired up as the ->apicv_post_state_restore hook).
+ * The restored IRR is now authoritative, so any posted-interrupt state that
+ * predates the restore is stale and must be discarded.
+ * NOTE(review): presumably invoked on paths such as live migration /
+ * KVM_SET_LAPIC — confirm against the hook's callers.
+ */
+static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       /* Clear the outstanding-notification (ON) bit in the PI descriptor. */
+       pi_clear_on(&vmx->pi_desc);
+       /* Drop all stale posted-interrupt requests recorded in the PIR. */
+       memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
+}
+
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
        u32 exit_intr_info;
        .get_enable_apicv = vmx_get_enable_apicv,
        .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
        .load_eoi_exitmap = vmx_load_eoi_exitmap,
+       .apicv_post_state_restore = vmx_apicv_post_state_restore,
        .hwapic_irr_update = vmx_hwapic_irr_update,
        .hwapic_isr_update = vmx_hwapic_isr_update,
        .sync_pir_to_irr = vmx_sync_pir_to_irr,