static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
 {
+       int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
        int ret = 0;
        unsigned long flags;
        struct vcpu_svm *svm = to_svm(vcpu);
 
        list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
                if (activate)
-                       ret = amd_iommu_activate_guest_mode(irqfd->irq_bypass_data);
+                       ret = amd_iommu_activate_guest_mode(irqfd->irq_bypass_data, apic_id);
                else
                        ret = amd_iommu_deactivate_guest_mode(irqfd->irq_bypass_data);
                if (ret)
                        break;
        }
 
        return ret;
 }
 
                guard(spinlock_irqsave)(&svm->ir_list_lock);
 
+               /*
+                * Update the target pCPU for IOMMU doorbells if the vCPU is
+                * running.  If the vCPU is NOT running, i.e. is blocking or
+                * scheduled out, KVM will update the pCPU info when the vCPU
+                * is awakened and/or scheduled in.  See also avic_vcpu_load().
+                */
+               entry = svm->avic_physical_id_entry;
+               if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
+                       pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+               else
+                       pi_data.cpu = -1;
+
                ret = irq_set_vcpu_affinity(host_irq, &pi_data);
                if (ret)
                        return ret;
 
                if (WARN_ON_ONCE(!pi_data.ir_data)) {
                        irq_set_vcpu_affinity(host_irq, NULL);
                        return -EIO;
                }
 
-               /*
-                * Update the target pCPU for IOMMU doorbells if the vCPU is
-                * running.  If the vCPU is NOT running, i.e. is blocking or
-                * scheduled out, KVM will update the pCPU info when the vCPU
-                * is awakened and/or scheduled in.  See also avic_vcpu_load().
-                */
-               entry = svm->avic_physical_id_entry;
-               if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
-                       amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
-                                           pi_data.ir_data);
-
                irqfd->irq_bypass_data = pi_data.ir_data;
                list_add(&irqfd->vcpu_list, &svm->ir_list);
                return 0;
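
The KVM hunks above and the AMD IOMMU driver hunks below communicate the target pCPU through a cpu field in struct amd_iommu_pi_data; the header change adding that field is not part of this excerpt. A rough sketch of the structure, limited to the fields this diff actually touches (types and ordering are assumptions):

        struct amd_iommu_pi_data {
                u64 vapic_addr;         /* vAPIC backing page, becomes ga_root_ptr */
                u32 ga_tag;             /* identifies the target VM/vCPU */
                u32 vector;             /* guest vector to post */
                int cpu;                /* new: target pCPU, or -1 if the vCPU is not running */
                void *ir_data;          /* IRTE metadata handed back by the IOMMU driver */
        };

KVM fills in cpu before calling irq_set_vcpu_affinity(), and the IOMMU driver consumes it in amd_iommu_activate_guest_mode() below.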
 
 }
 EXPORT_SYMBOL(amd_iommu_update_ga);
 
-int amd_iommu_activate_guest_mode(void *data)
+int amd_iommu_activate_guest_mode(void *data, int cpu)
 {
        struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
        struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
        entry->hi.fields.vector            = ir_data->ga_vector;
        entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
 
+       __amd_iommu_update_ga(entry, cpu);
+
        return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
                              ir_data->irq_2_irte.index, entry);
 }
                ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12);
                ir_data->ga_vector = pi_data->vector;
                ir_data->ga_tag = pi_data->ga_tag;
-               ret = amd_iommu_activate_guest_mode(ir_data);
+               ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu);
        } else {
                ret = amd_iommu_deactivate_guest_mode(ir_data);
        }
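
Calling __amd_iommu_update_ga() before modify_irte_ga() lets the destination pCPU and the IsRun bit be written as part of the same IRTE update as the other guest-mode fields, so KVM no longer needs a follow-up amd_iommu_update_ga() call after irq_set_vcpu_affinity(). The helper itself is outside this excerpt; a plausible sketch, assuming the driver's existing APICID_TO_IRTE_DEST_LO/HI macros and treating the exact bitfield names as assumptions:

        static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu)
        {
                /*
                 * A negative cpu means the vCPU is not running: keep the old
                 * destination and clear IsRun so the IOMMU falls back to GA
                 * log wakeups instead of ringing the doorbell.
                 */
                if (cpu >= 0) {
                        entry->lo.fields_vapic.destination =
                                                APICID_TO_IRTE_DEST_LO(cpu);
                        entry->hi.fields.destination =
                                                APICID_TO_IRTE_DEST_HI(cpu);
                }
                entry->lo.fields_vapic.is_run = (cpu >= 0);
        }

This is also why the KVM side passes -1 rather than a stale pCPU when the vCPU is not running.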
 
 
 extern int amd_iommu_update_ga(int cpu, void *data);
 
-extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu);
 extern int amd_iommu_deactivate_guest_mode(void *data);
 
 #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
 static inline int amd_iommu_update_ga(int cpu, void *data)
 {
        return 0;
 }
 
-static inline int amd_iommu_activate_guest_mode(void *data)
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu)
 {
        return 0;
 }
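
For the not-running case (pi_data.cpu == -1), the comment in the first hunk defers to avic_vcpu_load(): when the vCPU is scheduled in, KVM walks the same ir_list and pushes the new pCPU into every posted-interrupt IRTE via the exported amd_iommu_update_ga(). A minimal sketch of that path, not taken from this patch (the helper name is made up for illustration):

        /* Caller holds svm->ir_list_lock; apic_id is the new pCPU's APIC ID. */
        static void avic_refresh_pi_irtes(struct vcpu_svm *svm, int apic_id)
        {
                struct kvm_kernel_irqfd *irqfd;

                list_for_each_entry(irqfd, &svm->ir_list, vcpu_list)
                        amd_iommu_update_ga(apic_id, irqfd->irq_bypass_data);
        }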