return 0;
}
-static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
+/*
+ * Wake all vCPUs which are blocked in SCHEDOP_poll waiting on @port
+ * (poll_evtchn == -1 means the vCPU is polling on *all* ports).
+ *
+ * Returns true if @deliver_vcpu — the target of the event being
+ * delivered — was among the woken pollers, so the caller can skip
+ * issuing a second, redundant kick to it.
+ */
+static bool kvm_xen_check_pollers(struct kvm *kvm, struct kvm_vcpu *deliver_vcpu, int port)
 {
- int poll_evtchn = vcpu->arch.xen.poll_evtchn;
-
- if ((poll_evtchn == port || poll_evtchn == -1) &&
- test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
- kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
- kvm_vcpu_kick(vcpu);
+ int vcpu_idx, nr_vcpus = atomic_read(&kvm->online_vcpus);
+ bool ret = false;
+
+ for_each_set_bit(vcpu_idx, kvm->arch.xen.poll_mask, nr_vcpus) {
+ struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ int poll_evtchn = vcpu->arch.xen.poll_evtchn;
+
+ /* Use the loop index and @kvm directly rather than re-deriving
+ * them through vcpu; test_and_clear_bit() arbitrates against a
+ * concurrent waker clearing the same poll_mask bit. */
+ if ((poll_evtchn == port || poll_evtchn == -1) &&
+ test_and_clear_bit(vcpu_idx, kvm->arch.xen.poll_mask)) {
+ kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
+ kvm_vcpu_kick(vcpu);
+ if (vcpu == deliver_vcpu)
+ ret = true;
+ }
 }
+
+ return ret;
 }
/*
unsigned long *pending_bits, *mask_bits;
unsigned long flags;
int port_word_bit;
- bool kick_vcpu = false;
+ bool kick_vcpu = false, check_pollers = false;
int vcpu_idx, idx, rc;
vcpu_idx = READ_ONCE(xe->vcpu_idx);
if (test_and_set_bit(xe->port, pending_bits)) {
rc = 0; /* It was already raised */
} else if (test_bit(xe->port, mask_bits)) {
+ check_pollers = true;
rc = -ENOTCONN; /* Masked */
- kvm_xen_check_poller(vcpu, xe->port);
} else {
+ check_pollers = true;
rc = 1; /* Delivered to the bitmap in shared_info. */
/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
read_unlock_irqrestore(&gpc->lock, flags);
read_unlock_irqrestore(&gpc->lock, flags);
srcu_read_unlock(&kvm->srcu, idx);
+ /* No need to kick the target vCPU if it was polling */
+ if (check_pollers && kvm_xen_check_pollers(kvm, vcpu, xe->port))
+ kick_vcpu = false;
+
if (kick_vcpu) {
kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
kvm_vcpu_kick(vcpu);