int ret = 0;
int idx = srcu_read_lock(&kvm->srcu);
- read_lock_irq(&gpc->lock);
+ read_lock(&gpc->lock);
while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
- read_unlock_irq(&gpc->lock);
+ read_unlock(&gpc->lock);
ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
if (ret)
goto out;
- read_lock_irq(&gpc->lock);
+ read_lock(&gpc->lock);
}
/*
smp_wmb();
wc->version = wc_version + 1;
- read_unlock_irq(&gpc->lock);
+ read_unlock(&gpc->lock);
kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
vcpu->arch.xen.timer.function = xen_timer_callback;
}
-/*
- * With CONFIG_PREEMPT_RT, 'irqsave' locking doesn't really disable IRQs as
- * "all IRQs are threaded" (except the hrtimer IRQs). So, open-coding a
- * local_irq_save() before a read_trylock() is wrong for PREEMPT_RT.
- */
-static inline bool read_trylock_irqsave(rwlock_t *lck, unsigned long *flags)
-{
-#ifndef CONFIG_PREEMPT_RT
- local_irq_save(*flags);
-#endif
- if (!read_trylock(lck)) {
-#ifndef CONFIG_PREEMPT_RT
- local_irq_restore(*flags);
-#endif
- return false;
- }
-
- return true;
-}
-
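
For context, the lock/check/refresh pattern the converted callers in this file end up with looks roughly like the sketch here. This is illustrative only and not part of the patch: the gpc_read_and_update() name and the PAGE_SIZE length are placeholders, and each real caller does its own work where the final comment indicates.

#include <linux/kvm_host.h>

/*
 * Illustrative sketch of the gfn_to_pfn_cache read-lock pattern once the
 * irqsave variants are gone (not part of this patch; names are placeholders).
 */
static void gpc_read_and_update(struct gfn_to_pfn_cache *gpc, bool atomic)
{
	if (atomic) {
		/* Cannot sleep or block here, so give up if the lock is contended. */
		if (!read_trylock(&gpc->lock))
			return;
	} else {
		read_lock(&gpc->lock);
	}

	while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
		read_unlock(&gpc->lock);

		/* Refreshing the cache may sleep, so it is not an option in atomic context. */
		if (atomic || kvm_gpc_refresh(gpc, PAGE_SIZE))
			return;

		read_lock(&gpc->lock);
	}

	/* gpc->khva is now a valid kernel mapping of the guest page; update it here. */

	read_unlock(&gpc->lock);
}

The kvm_xen_update_runstate_guest() and kvm_xen_set_evtchn_fast() hunks below are instances of exactly this shape, with the atomic case driven by the scheduler path and by in_interrupt() respectively.
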
static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
{
struct kvm_vcpu_xen *vx = &v->arch.xen;
struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
size_t user_len, user_len1, user_len2;
struct vcpu_runstate_info rs;
- unsigned long flags;
size_t times_ofs;
uint8_t *update_bit = NULL;
uint64_t entry_time;
* gfn_to_pfn caches that cover the region.
*/
if (atomic) {
- if (!read_trylock_irqsave(&gpc1->lock, &flags))
+ if (!read_trylock(&gpc1->lock))
return;
} else {
- read_lock_irqsave(&gpc1->lock, flags);
+ read_lock(&gpc1->lock);
}
while (!kvm_gpc_check(gpc1, user_len1)) {
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
/* When invoked from kvm_sched_out() we cannot sleep */
if (atomic)
	return;
if (kvm_gpc_refresh(gpc1, user_len1))
return;
- read_lock_irqsave(&gpc1->lock, flags);
+ read_lock(&gpc1->lock);
}
if (likely(!user_len2)) {
lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
if (atomic) {
if (!read_trylock(&gpc2->lock)) {
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
return;
}
} else {
if (!kvm_gpc_check(gpc2, user_len2)) {
read_unlock(&gpc2->lock);
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
/* When invoked from kvm_sched_out() we cannot sleep */
if (atomic)
	return;
}
kvm_gpc_mark_dirty_in_slot(gpc1);
- read_unlock_irqrestore(&gpc1->lock, flags);
+ read_unlock(&gpc1->lock);
}
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
- unsigned long flags;
if (!evtchn_pending_sel)
return;
* does anyway. Page it in and retry the instruction. We're just a
* little more honest about it.
*/
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
return;
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
}
/* Now gpc->khva is a valid kernel address for the vcpu_info */
}
kvm_gpc_mark_dirty_in_slot(gpc);
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
/* For the per-vCPU lapic vector, deliver it as MSI. */
if (v->arch.xen.upcall_vector)
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
- unsigned long flags;
u8 rc = 0;
/*
BUILD_BUG_ON(sizeof(rc) !=
sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
/*
* This function gets called from kvm_vcpu_block() after setting the
*/
return 0;
}
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
}
rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
return rc;
}
struct kvm *kvm = vcpu->kvm;
struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
unsigned long *pending_bits;
- unsigned long flags;
bool ret = true;
int idx, i;
idx = srcu_read_lock(&kvm->srcu);
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
if (!kvm_gpc_check(gpc, PAGE_SIZE))
goto out_rcu;
}
out_rcu:
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
srcu_read_unlock(&kvm->srcu, idx);
return ret;
struct kvm *kvm = vcpu->kvm;
struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
unsigned long *pending_bits, *mask_bits;
- unsigned long flags;
int rc = -EWOULDBLOCK;
if (in_interrupt()) {
- if (!read_trylock_irqsave(&gpc->lock, &flags))
+ if (!read_trylock(&gpc->lock))
goto out;
} else {
- read_lock_irqsave(&gpc->lock, flags);
+ read_lock(&gpc->lock);
}
if (!kvm_gpc_check(gpc, PAGE_SIZE))
	goto out_unlock;
}
out_unlock:
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
out:
return rc;
}
{
struct kvm *kvm = vcpu->kvm;
struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
- unsigned long flags;
bool kick_vcpu = false;
bool locked;
- locked = read_trylock_irqsave(&gpc->lock, &flags);
+ locked = read_trylock(&gpc->lock);
/*
* Try to deliver the event directly to the vcpu_info. If successful and
out:
if (locked)
- read_unlock_irqrestore(&gpc->lock, flags);
+ read_unlock(&gpc->lock);
return kick_vcpu;
}