int ret = 0;
int idx = srcu_read_lock(&kvm->srcu);
- read_lock_irq(&gpc->lock);
+ read_lock(&gpc->lock);
while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
- read_unlock_irq(&gpc->lock);
+ read_unlock(&gpc->lock);
ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
if (ret)
goto out;
- read_lock_irq(&gpc->lock);
+ read_lock(&gpc->lock);
}
/* Ensure the wallclock contents are visible before publishing the new version. */
smp_wmb();
wc->version = wc_version + 1;
- read_unlock_irq(&gpc->lock);
+ read_unlock(&gpc->lock);
kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
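Every conversion in this patch keeps the same shape: validate the pfncache under its lock held for read, and if the check fails, drop the lock, refresh (which may sleep and takes the lock for write internally), then re-take the lock and re-check, because the cache can be invalidated again in that window. As a rough userspace-only sketch of the retry loop, using a pthread rwlock; cache_valid() and cache_refresh() are invented stand-ins for kvm_gpc_check()/kvm_gpc_refresh(), not KVM APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache {
	pthread_rwlock_t lock;
	bool valid;
	int data;
};

/* Invented stand-ins for kvm_gpc_check() / kvm_gpc_refresh(). */
static bool cache_valid(struct cache *c)
{
	return c->valid;
}

static int cache_refresh(struct cache *c)
{
	/* Like kvm_gpc_refresh(): may sleep, takes the lock for write. */
	pthread_rwlock_wrlock(&c->lock);
	c->valid = true;
	c->data = 42;
	pthread_rwlock_unlock(&c->lock);
	return 0;
}

static int read_data(struct cache *c, int *out)
{
	pthread_rwlock_rdlock(&c->lock);
	while (!cache_valid(c)) {
		/* Refresh must happen with the read lock dropped... */
		pthread_rwlock_unlock(&c->lock);
		if (cache_refresh(c))
			return -1;
		/* ...and the check repeated once the lock is re-taken. */
		pthread_rwlock_rdlock(&c->lock);
	}
	*out = c->data;
	pthread_rwlock_unlock(&c->lock);
	return 0;
}

int main(void)
{
	struct cache c = { .valid = false };
	int v;

	pthread_rwlock_init(&c.lock, NULL);
	if (!read_data(&c, &v))
		printf("data = %d\n", v);
	return 0;
}

What the kernel additionally has to decide is whether the caller is allowed to sleep at the refresh point, which is what the hunks below deal with.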
struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
size_t user_len, user_len1, user_len2;
struct vcpu_runstate_info rs;
- unsigned long flags;
size_t times_ofs;
uint8_t *update_bit = NULL;
uint64_t entry_time;
* gfn_to_pfn caches that cover the region.
*/
if (atomic) {
- local_irq_save(flags);
- if (!read_trylock(&gpc1->lock)) {
- local_irq_restore(flags);
+ if (!read_trylock(&gpc1->lock))
return;
- }
} else {
- read_lock_irqsave(&gpc1->lock, flags);
+ read_lock(&gpc1->lock);
}
while (!kvm_gpc_check(gpc1, user_len1)) {
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
/* When invoked from kvm_sched_out() we cannot sleep */
if (atomic)
return;
if (kvm_gpc_refresh(gpc1, user_len1))
return;
- read_lock_irqsave(&gpc1->lock, flags);
+ read_lock(&gpc1->lock);
}
if (likely(!user_len2)) {
/* Runstate area fits in one page: only gpc1 is needed. */
} else {
/* It crosses a page boundary: lock and check gpc2 too, nested under gpc1. */
lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
if (atomic) {
if (!read_trylock(&gpc2->lock)) {
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
return;
}
} else {
read_lock(&gpc2->lock);
}
if (!kvm_gpc_check(gpc2, user_len2)) {
read_unlock(&gpc2->lock);
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
/* When invoked from kvm_sched_out() we cannot sleep */
if (atomic)
return;
}
kvm_gpc_mark_dirty_in_slot(gpc1);
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
}
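In kvm_xen_update_runstate_guest() the runstate area may straddle a page boundary, so two caches (gpc1 and gpc2) have to be locked and validated together before the copy, with the second nested under the first (hence the lock_set_subclass() annotation), and callers arriving via kvm_sched_out() are atomic and must not block: a failed trylock or a stale cache simply abandons this update. A userspace sketch of that double-lock, best-effort shape, again with pthread rwlocks and invented helpers rather than KVM code:

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct cache {
	pthread_rwlock_t lock;
	bool valid;
	unsigned char *map;	/* mapped window onto part of the guest area */
};

static bool cache_valid(struct cache *c)
{
	return c->valid;
}

/*
 * Copy a record that may span two mapped windows.  Both locks are held for
 * read across the copy so neither mapping can be invalidated mid-update.
 * Atomic callers (the kvm_sched_out() analogue) must not block: contention
 * or a stale cache just means skipping this update.
 */
static bool write_split(struct cache *c1, struct cache *c2,
			const unsigned char *rec, size_t len1, size_t len2,
			bool atomic)
{
	if (atomic) {
		if (pthread_rwlock_tryrdlock(&c1->lock))
			return false;
	} else {
		pthread_rwlock_rdlock(&c1->lock);
	}

	if (!cache_valid(c1))
		goto fail1;	/* a non-atomic caller would refresh and retry */

	if (len2) {
		/* Second window, nested inside the first (cf. lock_set_subclass()). */
		if (atomic) {
			if (pthread_rwlock_tryrdlock(&c2->lock))
				goto fail1;
		} else {
			pthread_rwlock_rdlock(&c2->lock);
		}
		if (!cache_valid(c2))
			goto fail2;
	}

	memcpy(c1->map, rec, len1);
	if (len2) {
		memcpy(c2->map, rec + len1, len2);
		pthread_rwlock_unlock(&c2->lock);
	}
	pthread_rwlock_unlock(&c1->lock);
	return true;

fail2:
	pthread_rwlock_unlock(&c2->lock);
fail1:
	pthread_rwlock_unlock(&c1->lock);
	return false;
}

int main(void)
{
	unsigned char buf1[4], buf2[4], rec[6] = "hello";
	struct cache c1 = { .valid = true, .map = buf1 };
	struct cache c2 = { .valid = true, .map = buf2 };

	pthread_rwlock_init(&c1.lock, NULL);
	pthread_rwlock_init(&c2.lock, NULL);

	/* A 6-byte record split 4/2 across the two windows. */
	return write_split(&c1, &c2, rec, 4, 2, true) ? 0 : 1;
}

The remaining hunks move on to the event-channel delivery paths, where the extra wrinkle is that some callers run in interrupt context.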
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
- unsigned long flags;
if (!evtchn_pending_sel)
return;
* does anyway. Page it in and retry the instruction. We're just a
* little more honest about it.
*/
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
return;
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
}
/* Now gpc->khva is a valid kernel address for the vcpu_info */
}
kvm_gpc_mark_dirty_in_slot(gpc);
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
/* For the per-vCPU lapic vector, deliver it as MSI. */
if (v->arch.xen.upcall_vector)
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
- unsigned long flags;
u8 rc = 0;
/* rc is read directly from evtchn_upcall_pending, so the sizes must match. */
BUILD_BUG_ON(sizeof(rc) !=
sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
- read_lock_irqsave(&gpc->lock, flags);
+ WARN_ON_ONCE(in_interrupt());
+ read_lock(&gpc->lock);
while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
/*
* This function gets called from kvm_vcpu_block() after setting the
*/
return 0;
}
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
}
rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
return rc;
}
struct kvm *kvm = vcpu->kvm;
struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
unsigned long *pending_bits;
- unsigned long flags;
bool ret = true;
int idx, i;
idx = srcu_read_lock(&kvm->srcu);
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
if (!kvm_gpc_check(gpc, PAGE_SIZE))
goto out_rcu;
}
out_rcu:
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
srcu_read_unlock(&kvm->srcu, idx);
return ret;
struct kvm *kvm = vcpu->kvm;
struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
unsigned long *pending_bits, *mask_bits;
- unsigned long flags;
int rc = -EWOULDBLOCK;
- read_lock_irqsave(&gpc->lock, flags);
+ if (in_interrupt()) {
+ if (!read_trylock(&gpc->lock))
+ goto out;
+ } else {
+ read_lock(&gpc->lock);
+ }
+
if (!kvm_gpc_check(gpc, PAGE_SIZE))
- goto out;
+ goto out_unlock;
if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
struct shared_info *shinfo = gpc->khva;
rc = 1; /* It is newly raised */
}
+ out_unlock:
+ read_unlock(&gpc->lock);
out:
- read_unlock_irqrestore(&gpc->lock, flags);
return rc;
}
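This path can be reached in interrupt context, so instead of disabling interrupts it now refuses to block there at all: a failed read_trylock() leaves rc at -EWOULDBLOCK, and the caller is expected to retry from a context that may sleep and refresh the cache (that caller is not part of this excerpt). A minimal userspace analogue of the fast-path/slow-path split; deliver_fast(), deliver_slow() and the cache helpers are made-up names:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache {
	pthread_rwlock_t lock;
	bool valid;
	unsigned long pending;	/* stands in for the shared_info pending bits */
};

/* Invented helpers standing in for kvm_gpc_check()/kvm_gpc_refresh(). */
static bool cache_valid(struct cache *c)
{
	return c->valid;
}

static void cache_refresh(struct cache *c)
{
	pthread_rwlock_wrlock(&c->lock);
	c->valid = true;
	pthread_rwlock_unlock(&c->lock);
}

/*
 * Fast path: may run where blocking on the lock is not allowed, so in that
 * case it only try-locks and otherwise punts with -EWOULDBLOCK.
 */
static int deliver_fast(struct cache *c, unsigned int port, bool hardirq)
{
	int rc = -EWOULDBLOCK;

	if (hardirq) {
		if (pthread_rwlock_tryrdlock(&c->lock))
			return rc;
	} else {
		pthread_rwlock_rdlock(&c->lock);
	}

	if (cache_valid(c)) {
		c->pending |= 1UL << port;	/* KVM uses atomic set_bit here */
		rc = 0;
	}

	pthread_rwlock_unlock(&c->lock);
	return rc;
}

/* Slow path: runs where sleeping is fine, so it can refresh and retry. */
static int deliver_slow(struct cache *c, unsigned int port)
{
	cache_refresh(c);
	return deliver_fast(c, port, false);
}

int main(void)
{
	struct cache c = { .valid = false };
	int rc;

	pthread_rwlock_init(&c.lock, NULL);

	rc = deliver_fast(&c, 3, true);
	if (rc == -EWOULDBLOCK)
		rc = deliver_slow(&c, 3);

	printf("rc = %d pending = %#lx\n", rc, c.pending);
	return 0;
}

The fast path never sleeps and never waits on the lock, so it is safe wherever the event happens to arrive; everything that can block is confined to the slow path.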
{
struct kvm *kvm = vcpu->kvm;
struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
- unsigned long flags;
bool kick_vcpu = false;
+ bool locked;
- read_lock_irqsave(&gpc->lock, flags);
+ locked = read_trylock(&gpc->lock);
/*
* Try to deliver the event directly to the vcpu_info. If successful and
* the guest is using upcall_vector delivery, send the MSI.
- * If the pfncache is invalid, set the shadow. In this case, or if the
- * guest is using another form of event delivery, the vCPU must be
- * kicked to complete the delivery.
+ * If the pfncache lock is contended or the cache is invalid, set the
+ * shadow. In this case, or if the guest is using another form of event
+ * delivery, the vCPU must be kicked to complete the delivery.
*/
if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
struct vcpu_info *vcpu_info = gpc->khva;
int port_word_bit = port / 64;
- if (!kvm_gpc_check(gpc, sizeof(*vcpu_info))) {
+ if (!locked || !kvm_gpc_check(gpc, sizeof(*vcpu_info))) {
if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
kick_vcpu = true;
goto out;
struct compat_vcpu_info *vcpu_info = gpc->khva;
int port_word_bit = port / 32;
- if (!kvm_gpc_check(gpc, sizeof(*vcpu_info))) {
+ if (!locked || !kvm_gpc_check(gpc, sizeof(*vcpu_info))) {
if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
kick_vcpu = true;
goto out;
}
out:
- read_unlock_irqrestore(&gpc->lock, flags);
+ if (locked)
+ read_unlock(&gpc->lock);
+
return kick_vcpu;
}
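The rewritten comment above spells out the new best-effort contract for delivery into the vcpu_info: take the lock with read_trylock() only (this runs on the same never-block fast path), and if it is contended or the cache is invalid, set the corresponding bit in the per-vCPU evtchn_pending_sel shadow and kick the vCPU, which completes the delivery later from a context where it can lock and refresh. A userspace sketch of that fast-path-plus-shadow pattern; try_deliver(), complete_delivery() and struct target are illustrative names, not KVM code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct target {
	pthread_rwlock_t lock;
	bool valid;		/* is 'mapped' usable right now? */
	unsigned long mapped;	/* stands in for the mapped vcpu_info bits */
	atomic_ulong shadow;	/* stands in for evtchn_pending_sel */
};

/*
 * Best-effort fast path: never blocks.  Returns true if the target must be
 * "kicked" to finish delivery itself, i.e. whenever we fell back to the
 * shadow word and its bit was not already set.
 */
static bool try_deliver(struct target *t, unsigned int bit)
{
	bool locked = !pthread_rwlock_tryrdlock(&t->lock);
	bool kick = false;

	if (!locked || !t->valid) {
		/* Contended or stale: park the event in the shadow word. */
		if (!(atomic_fetch_or(&t->shadow, 1UL << bit) & (1UL << bit)))
			kick = true;
	} else {
		t->mapped |= 1UL << bit;	/* KVM uses atomic set_bit here */
	}

	if (locked)
		pthread_rwlock_unlock(&t->lock);
	return kick;
}

/* What the kicked target runs later, from a context that may block. */
static void complete_delivery(struct target *t)
{
	unsigned long pending = atomic_exchange(&t->shadow, 0);

	pthread_rwlock_rdlock(&t->lock);
	if (t->valid)
		t->mapped |= pending;	/* a real caller would refresh if stale */
	pthread_rwlock_unlock(&t->lock);
}

int main(void)
{
	struct target t = { .valid = false };
	bool kick;

	pthread_rwlock_init(&t.lock, NULL);

	kick = try_deliver(&t, 3);	/* cache is stale: falls back to the shadow */
	(void)kick;			/* a real caller would kick the vCPU here */

	t.valid = true;			/* pretend the cache was refreshed */
	complete_delivery(&t);

	printf("mapped = %#lx shadow = %#lx\n", t.mapped, atomic_load(&t.shadow));
	return 0;
}

test_and_set_bit() in the kernel plays the role of atomic_fetch_or() here: a kick is only needed the first time a bit goes from clear to set.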