Use a dedicated counter to track the number of IRQs that can utilize IRQ
bypass instead of piggybacking on the assigned device count.  As evidenced
by commit 2edd9cb79fb3 ("kvm: detect assigned device via irqbypass
manager"), a device may be able to post IRQs to a vCPU without being
assigned to the VM.
Leave the calls to kvm_arch_{start,end}_assignment() alone for the moment
to avoid regressing the MMIO stale data mitigation.  KVM is abusing the
assigned device count when applying mmio_stale_data_clear, and it's not at
all clear if vDPA devices rely on this behavior.  This will hopefully be
cleaned up in the future, as the number of assigned devices is a terrible
heuristic for detecting if a VM has access to host MMIO.
Link: https://lore.kernel.org/r/20250611224604.313496-55-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
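
In plain C, the pattern the diff below introduces looks roughly like the
following.  This is a minimal userspace sketch, not kernel code: the names
vm, bypass_lock, start_bypass_hook, add/del_bypass_producer, and
can_use_bypass are illustrative stand-ins for kvm, kvm->irqfds.lock,
kvm_x86_call(pi_start_bypass)(), the irqfd producer hooks, and
vmx_can_use_vtd_pi(); a pthread mutex and a GCC atomic load stand in for
the spinlock and READ_ONCE().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vm {
	pthread_mutex_t bypass_lock;            /* stands in for kvm->irqfds.lock */
	unsigned long nr_possible_bypass_irqs;
};

static void start_bypass_hook(struct vm *vm)
{
	/* Stand-in for kvm_x86_call(pi_start_bypass)(kvm): kick vCPUs once. */
	printf("first possible bypass IRQ attached\n");
}

static int add_bypass_producer(struct vm *vm, bool irte_update_fails)
{
	int ret = 0;

	pthread_mutex_lock(&vm->bypass_lock);

	/* Fire the one-time hook only on the 0 -> 1 transition. */
	if (!vm->nr_possible_bypass_irqs++)
		start_bypass_hook(vm);

	/* On a failed IRTE update, roll the count back, as the patch does. */
	if (irte_update_fails) {
		vm->nr_possible_bypass_irqs--;
		ret = -1;
	}

	pthread_mutex_unlock(&vm->bypass_lock);
	return ret;
}

static void del_bypass_producer(struct vm *vm)
{
	pthread_mutex_lock(&vm->bypass_lock);
	vm->nr_possible_bypass_irqs--;
	pthread_mutex_unlock(&vm->bypass_lock);
}

static bool can_use_bypass(struct vm *vm)
{
	/*
	 * Lockless read, analogous to the READ_ONCE() in vmx_can_use_vtd_pi().
	 * This can race with attach, which is why the 0 -> 1 hook must kick
	 * blocking vCPUs.
	 */
	return !!__atomic_load_n(&vm->nr_possible_bypass_irqs, __ATOMIC_RELAXED);
}

int main(void)
{
	struct vm vm = { .bypass_lock = PTHREAD_MUTEX_INITIALIZER };

	add_bypass_producer(&vm, false);   /* fires the hook */
	add_bypass_producer(&vm, false);   /* no hook, count is now 2 */
	del_bypass_producer(&vm);
	printf("bypass possible: %d\n", can_use_bypass(&vm));
	return 0;
}
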
 KVM_X86_OP_OPTIONAL(vcpu_blocking)
 KVM_X86_OP_OPTIONAL(vcpu_unblocking)
 KVM_X86_OP_OPTIONAL(pi_update_irte)
-KVM_X86_OP_OPTIONAL(pi_start_assignment)
+KVM_X86_OP_OPTIONAL(pi_start_bypass)
 KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
 KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
 KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
 
        atomic_t noncoherent_dma_count;
 #define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
        atomic_t assigned_device_count;
+       unsigned long nr_possible_bypass_irqs;
+
 #ifdef CONFIG_KVM_IOAPIC
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        int (*pi_update_irte)(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
                              unsigned int host_irq, uint32_t guest_irq,
                              struct kvm_vcpu *vcpu, u32 vector);
-       void (*pi_start_assignment)(struct kvm *kvm);
+       void (*pi_start_bypass)(struct kvm *kvm);
        void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
        bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
        spin_lock_irq(&kvm->irqfds.lock);
        irqfd->producer = prod;
 
+       if (!kvm->arch.nr_possible_bypass_irqs++)
+               kvm_x86_call(pi_start_bypass)(kvm);
+
        if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
                ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry);
-               if (ret)
+               if (ret) {
+                       kvm->arch.nr_possible_bypass_irqs--;
                        kvm_arch_end_assignment(irqfd->kvm);
+               }
        }
        spin_unlock_irq(&kvm->irqfds.lock);
 
        }
        irqfd->producer = NULL;
 
+       kvm->arch.nr_possible_bypass_irqs--;
+
        spin_unlock_irq(&kvm->irqfds.lock);
 
 
 
        .nested_ops = &vmx_nested_ops,
 
        .pi_update_irte = vmx_pi_update_irte,
-       .pi_start_assignment = vmx_pi_start_assignment,
+       .pi_start_bypass = vmx_pi_start_bypass,
 
 #ifdef CONFIG_X86_64
        .set_hv_timer = vt_op(set_hv_timer),
 
 
 static bool vmx_can_use_vtd_pi(struct kvm *kvm)
 {
+       /*
+        * Note, reading the number of possible bypass IRQs can race with a
+        * bypass IRQ being attached to the VM.  vmx_pi_start_bypass() ensures
+        * blocking vCPUs will see an elevated count or get KVM_REQ_UNBLOCK.
+        */
        return irqchip_in_kernel(kvm) && kvm_arch_has_irq_bypass() &&
-              kvm_arch_has_assigned_device(kvm);
+              READ_ONCE(kvm->arch.nr_possible_bypass_irqs);
 }
 
 /*
 
 
 /*
- * Bail out of the block loop if the VM has an assigned
- * device, but the blocking vCPU didn't reconfigure the
- * PI.NV to the wakeup vector, i.e. the assigned device
- * came along after the initial check in vmx_vcpu_pi_put().
+ * Kick all vCPUs when the first possible bypass IRQ is attached to a VM, as
+ * blocking vCPUs may be scheduled out without reconfiguring PID.NV to the
+ * wakeup vector, i.e. if the bypass IRQ came along after vmx_vcpu_pi_put().
  */
-void vmx_pi_start_assignment(struct kvm *kvm)
+void vmx_pi_start_bypass(struct kvm *kvm)
 {
        if (!kvm_arch_has_irq_bypass())
                return;
 
 int vmx_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
                       unsigned int host_irq, uint32_t guest_irq,
                       struct kvm_vcpu *vcpu, u32 vector);
-void vmx_pi_start_assignment(struct kvm *kvm);
+void vmx_pi_start_bypass(struct kvm *kvm);
 
 static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
 {
 
 
 void kvm_arch_start_assignment(struct kvm *kvm)
 {
-       if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
-               kvm_x86_call(pi_start_assignment)(kvm);
+       atomic_inc(&kvm->arch.assigned_device_count);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);