return ret;
}
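+
+/*
+ * Deliver the guest's event channel upcall to the given vCPU, using the
+ * delivery type encoded in the configured callback parameter.
+ */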
+static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
+{
+ int type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;
+
+ kvm_xen_inject_vcpu_callback_vector(vcpu, type);
+}
+
static bool valid_port(evtchn_port_t port)
{
if (!port) {
return 0;
}
+/*
+ * Never thought I'd hear myself say this, but C++ templates would be
+ * kind of nice here.
+ *
+ * template<class T> static int do_unmask_port(T *shinfo, ...);
+ */
+static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
+ bool do_unmask, struct shared_info *shinfo,
+ struct vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ if (do_unmask) {
+ /*
+ * If this is a true unmask operation, clear the mask bit. If
+ * it was already unmasked, we have nothing further to do.
+ */
+ if (!(qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask)) {
+ return 0;
+ }
+ } else {
+ /*
+ * This is a pseudo-unmask for affinity changes. We don't
+ * change the mask bit, and if it's *masked* we have nothing
+ * else to do.
+ */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+ }
+
+ /* If the event was not pending, we're done. */
+ if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
+static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
+ bool do_unmask,
+ struct compat_shared_info *shinfo,
+ struct compat_vcpu_info *vcpu_info)
+{
+ const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
+ typeof(shinfo->evtchn_pending[0]) mask;
+ int idx = port / bits_per_word;
+ int offset = port % bits_per_word;
+
+ mask = 1UL << offset;
+
+ if (idx >= bits_per_word) {
+ return -EINVAL;
+ }
+
+ if (do_unmask) {
+ /*
+ * If this is a true unmask operation, clear the mask bit. If
+ * it was already unmasked, we have nothing further to do.
+ */
+ if (!(qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask)) {
+ return 0;
+ }
+ } else {
+ /*
+ * This is a pseudo-unmask for affinity changes. We don't
+ * change the mask bit, and if it's *masked* we have nothing
+ * else to do.
+ */
+ if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
+ return 0;
+ }
+ }
+
+ /* If the event was not pending, we're done. */
+ if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
+ return 0;
+ }
+
+ /* Now on to the vcpu_info evtchn_pending_sel index... */
+ mask = 1UL << idx;
+
+ /* If a port in this word was already pending for this vCPU, all done. */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
+ return 0;
+ }
+
+ /* Set evtchn_upcall_pending for this vCPU */
+ if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
+ return 0;
+ }
+
+ inject_callback(s, s->port_table[port].vcpu);
+
+ return 0;
+}
+
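+/*
+ * Unmask a port: reject closed ports, look up the guest's shared_info page
+ * and the vcpu_info of the vCPU the port is bound to, then hand off to the
+ * 64-bit or compat variant according to the guest's current mode.
+ */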
+static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
+{
+ void *vcpu_info, *shinfo;
+
+ if (s->port_table[port].type == EVTCHNSTAT_closed) {
+ return -EINVAL;
+ }
+
+ shinfo = xen_overlay_get_shinfo_ptr();
+ if (!shinfo) {
+ return -ENOTSUP;
+ }
+
+ vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
+ if (!vcpu_info) {
+ return -EINVAL;
+ }
+
+ if (xen_is_long_mode()) {
+ return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info);
+ } else {
+ return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info);
+ }
+}
+
static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port)
{
void *p = xen_overlay_get_shinfo_ptr();
return ret;
}
+
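+/*
+ * Handle the guest's EVTCHNOP_unmask hypercall: perform a true unmask of
+ * the given port (do_unmask == true) under s->port_lock.
+ */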
+int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int ret;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (!valid_port(unmask->port)) {
+ return -EINVAL;
+ }
+
+ qemu_mutex_lock(&s->port_lock);
+
+ ret = unmask_port(s, unmask->port, true);
+
+ qemu_mutex_unlock(&s->port_lock);
+
+ return ret;
+}