KVM: x86/xen: Don't disable interrupts for shinfo or vcpu_info GPCs (xen-rwlock branch)
author    David Woodhouse <dwmw@amazon.co.uk>
          Fri, 8 Mar 2024 15:59:41 +0000 (15:59 +0000)
committer David Woodhouse <dwmw@amazon.co.uk>
          Fri, 8 Mar 2024 15:59:41 +0000 (15:59 +0000)
Now that the interrupt path uses a trylock anyway, there's no need to use
read_lock_irqsave() for these locks. And in particular we can ditch the
awful special cases for PREEMPT_RT.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
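
To make the reasoning above concrete, here is a minimal illustrative sketch (not taken from the patch; demo_lock, demo_reader() and demo_irq_delivery() are made-up names). The point is that an interrupt-context path which only ever uses read_trylock() can never spin on a lock held on the same CPU, so process-context readers gain nothing from disabling interrupts around their read_lock():

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);	/* stand-in for gpc->lock */

/* Process/vCPU context: a plain read_lock() is now sufficient. */
static void demo_reader(void)
{
	read_lock(&demo_lock);
	/* ... use the cached mapping ... */
	read_unlock(&demo_lock);
}

/*
 * Interrupt context: only ever trylock. If the lock is held for
 * writing (e.g. by a cache refresh), back off and let a sleepable
 * slow path retry later, instead of spinning in hardirq context
 * against a holder that may be running on this same CPU.
 */
static bool demo_irq_delivery(void)
{
	if (!read_trylock(&demo_lock))
		return false;
	/* ... deliver the event ... */
	read_unlock(&demo_lock);
	return true;
}

With no irqsave users left, the PREEMPT_RT workaround embodied in the now-deleted read_trylock_irqsave() helper (the "awful special cases" the message refers to) is no longer needed either.
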
arch/x86/kvm/xen.c

index 4028e824aae2ac794f82a3fd93a2cef962081e5a..d04d4508c4a8e56dc2be8250534842f28544985d 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -45,7 +45,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm)
        int ret = 0;
        int idx = srcu_read_lock(&kvm->srcu);
 
-       read_lock_irq(&gpc->lock);
+       read_lock(&gpc->lock);
        while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
-               read_unlock_irq(&gpc->lock);
+               read_unlock(&gpc->lock);
 
@@ -53,7 +53,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm)
                if (ret)
                        goto out;
 
-               read_lock_irq(&gpc->lock);
+               read_lock(&gpc->lock);
        }
 
        /*
@@ -96,7 +96,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm)
        smp_wmb();
 
        wc->version = wc_version + 1;
-       read_unlock_irq(&gpc->lock);
+       read_unlock(&gpc->lock);
 
        kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
 
@@ -270,26 +270,6 @@ static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
        vcpu->arch.xen.timer.function = xen_timer_callback;
 }
 
-/*
- * With CONFIG_PREEMPT_RT, 'irqsave' locking doesn't really disable IRQs as
- * "all IRQs are threaded" (except the hrtimer IRQs). So, open-coding a
- * local_irq_save() before a read_trylock() is wrong for PREEMPT_RT.
- */
-static inline bool read_trylock_irqsave(rwlock_t *lck, unsigned long *flags)
-{
-#ifndef CONFIG_PREEMPT_RT
-       local_irq_save(*flags);
-#endif
-       if (!read_trylock(lck)) {
-#ifndef CONFIG_PREEMPT_RT
-               local_irq_restore(*flags);
-#endif
-               return false;
-       }
-
-       return true;
-}
-
 static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 {
        struct kvm_vcpu_xen *vx = &v->arch.xen;
@@ -297,7 +277,6 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
        struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
        size_t user_len, user_len1, user_len2;
        struct vcpu_runstate_info rs;
-       unsigned long flags;
        size_t times_ofs;
        uint8_t *update_bit = NULL;
        uint64_t entry_time;
@@ -393,13 +372,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
         * gfn_to_pfn caches that cover the region.
         */
        if (atomic) {
-               if (!read_trylock_irqsave(&gpc1->lock, &flags))
+               if (!read_trylock(&gpc1->lock))
                        return;
        } else {
-               read_lock_irqsave(&gpc1->lock, flags);
+               read_lock(&gpc1->lock);
        }
        while (!kvm_gpc_check(gpc1, user_len1)) {
-               read_unlock_irqrestore(&gpc1->lock, flags);
+               read_unlock(&gpc1->lock);
 
                /* When invoked from kvm_sched_out() we cannot sleep */
                if (atomic)
@@ -408,7 +387,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
                if (kvm_gpc_refresh(gpc1, user_len1))
                        return;
 
-               read_lock_irqsave(&gpc1->lock, flags);
+               read_lock(&gpc1->lock);
        }
 
        if (likely(!user_len2)) {
@@ -436,7 +415,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
                lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
                if (atomic) {
                        if (!read_trylock(&gpc2->lock)) {
-                               read_unlock_irqrestore(&gpc1->lock, flags);
+                               read_unlock(&gpc1->lock);
                                return;
                        }
                } else {
@@ -445,7 +424,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 
                if (!kvm_gpc_check(gpc2, user_len2)) {
                        read_unlock(&gpc2->lock);
-                       read_unlock_irqrestore(&gpc1->lock, flags);
+                       read_unlock(&gpc1->lock);
 
                        /* When invoked from kvm_sched_out() we cannot sleep */
                        if (atomic)
@@ -550,7 +529,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
        }
 
        kvm_gpc_mark_dirty_in_slot(gpc1);
-       read_unlock_irqrestore(&gpc1->lock, flags);
+       read_unlock(&gpc1->lock);
 }
 
 void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -609,7 +588,6 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 {
        unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
        struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
-       unsigned long flags;
 
        if (!evtchn_pending_sel)
                return;
@@ -619,14 +597,14 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
         * does anyway. Page it in and retry the instruction. We're just a
         * little more honest about it.
         */
-       read_lock_irqsave(&gpc->lock, flags);
+       read_lock(&gpc->lock);
        while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
-               read_unlock_irqrestore(&gpc->lock, flags);
+               read_unlock(&gpc->lock);
 
                if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
                        return;
 
-               read_lock_irqsave(&gpc->lock, flags);
+               read_lock(&gpc->lock);
        }
 
        /* Now gpc->khva is a valid kernel address for the vcpu_info */
@@ -656,7 +634,7 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
        }
 
        kvm_gpc_mark_dirty_in_slot(gpc);
-       read_unlock_irqrestore(&gpc->lock, flags);
+       read_unlock(&gpc->lock);
 
        /* For the per-vCPU lapic vector, deliver it as MSI. */
        if (v->arch.xen.upcall_vector)
@@ -666,7 +644,6 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 {
        struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
-       unsigned long flags;
        u8 rc = 0;
 
        /*
@@ -682,9 +659,9 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
        BUILD_BUG_ON(sizeof(rc) !=
                     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
-       read_lock_irqsave(&gpc->lock, flags);
+       read_lock(&gpc->lock);
        while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
-               read_unlock_irqrestore(&gpc->lock, flags);
+               read_unlock(&gpc->lock);
 
                /*
                 * This function gets called from kvm_vcpu_block() after setting the
@@ -704,11 +681,11 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
                         */
                        return 0;
                }
-               read_lock_irqsave(&gpc->lock, flags);
+               read_lock(&gpc->lock);
        }
 
        rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
-       read_unlock_irqrestore(&gpc->lock, flags);
+       read_unlock(&gpc->lock);
        return rc;
 }
 
@@ -1399,12 +1376,11 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
        struct kvm *kvm = vcpu->kvm;
        struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
        unsigned long *pending_bits;
-       unsigned long flags;
        bool ret = true;
        int idx, i;
 
        idx = srcu_read_lock(&kvm->srcu);
-       read_lock_irqsave(&gpc->lock, flags);
+       read_lock(&gpc->lock);
        if (!kvm_gpc_check(gpc, PAGE_SIZE))
                goto out_rcu;
 
@@ -1425,7 +1401,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
        }
 
  out_rcu:
-       read_unlock_irqrestore(&gpc->lock, flags);
+       read_unlock(&gpc->lock);
        srcu_read_unlock(&kvm->srcu, idx);
 
        return ret;
@@ -1747,14 +1723,13 @@ static int set_shinfo_evtchn_pending(struct kvm_vcpu *vcpu, u32 port)
        struct kvm *kvm = vcpu->kvm;
        struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
        unsigned long *pending_bits, *mask_bits;
-       unsigned long flags;
        int rc = -EWOULDBLOCK;
 
        if (in_interrupt()) {
-               if (!read_trylock_irqsave(&gpc->lock, &flags))
+               if (!read_trylock(&gpc->lock))
                        goto out;
        } else {
-               read_lock_irqsave(&gpc->lock, flags);
+               read_lock(&gpc->lock);
        }
 
        if (!kvm_gpc_check(gpc, PAGE_SIZE))
@@ -1782,7 +1757,7 @@ static int set_shinfo_evtchn_pending(struct kvm_vcpu *vcpu, u32 port)
        }
 
  out_unlock:
-       read_unlock_irqrestore(&gpc->lock, flags);
+       read_unlock(&gpc->lock);
  out:
        return rc;
 }
@@ -1791,11 +1766,10 @@ static bool set_vcpu_info_evtchn_pending(struct kvm_vcpu *vcpu, u32 port)
 {
        struct kvm *kvm = vcpu->kvm;
        struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
-       unsigned long flags;
        bool kick_vcpu = false;
        bool locked;
 
-       locked = read_trylock_irqsave(&gpc->lock, &flags);
+       locked = read_trylock(&gpc->lock);
 
        /*
         * Try to deliver the event directly to the vcpu_info. If successful and
@@ -1842,7 +1816,7 @@ static bool set_vcpu_info_evtchn_pending(struct kvm_vcpu *vcpu, u32 port)
 
  out:
        if (locked)
-               read_unlock_irqrestore(&gpc->lock, flags);
+               read_unlock(&gpc->lock);
 
        return kick_vcpu;
 }
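
Condensed from the set_shinfo_evtchn_pending() hunks above, the post-patch shape of a delivery path that may be entered from either process or interrupt context is roughly the following (a sketch; deliver_event() and cache_lock are illustrative stand-ins, and the real function also checks and refreshes the pfn cache and manipulates the event-channel bitmaps):

#include <linux/errno.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(cache_lock);	/* stand-in for gpc->lock */

static int deliver_event(void)
{
	int rc = -EWOULDBLOCK;

	if (in_interrupt()) {
		/* Never spin on the lock from (soft)irq context. */
		if (!read_trylock(&cache_lock))
			goto out;
	} else {
		/* Process context may wait; IRQs stay enabled throughout. */
		read_lock(&cache_lock);
	}

	/* ... check the cache and set the event-channel bits here ... */
	rc = 0;

	read_unlock(&cache_lock);
out:
	return rc;
}
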