KVM: x86/xen: fix atomic runstate updates for PREEMPT_RT
author	David Woodhouse <dwmw@amazon.co.uk>
	Thu, 7 Mar 2024 16:43:51 +0000 (16:43 +0000)
committer	David Woodhouse <dwmw@amazon.co.uk>
	Fri, 8 Mar 2024 14:53:10 +0000 (14:53 +0000)
With PREEMPT_RT, read_lock_irqsave() is not equivalent to local_irq_save()
followed by read_lock(). Since the rwlock is turned into an rt_mutex based
sleeping lock, the IRQ disable is silently elided.
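
As a rough illustration (a paraphrase for this commit message, not the
literal implementation; 'lock' and 'flags' are placeholder names), the
same call reduces to quite different things on the two configurations:

        /* !PREEMPT_RT: rwlock_t is a spinning lock */
        read_lock_irqsave(&lock, flags);
        /* ~= local_irq_save(flags); read_lock(&lock); */

        /* PREEMPT_RT: rwlock_t is an rt_mutex based sleeping lock */
        read_lock_irqsave(&lock, flags);
        /* ~= read_lock(&lock); IRQs stay enabled and 'flags' is unused */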

However, kvm_xen_update_runstate_guest() open-codes the 'irqsave' trylock
by explicitly calling local_irq_save() and then read_trylock(), in the case
where it needs to perform an atomic update because a vCPU is being
descheduled. Fix this by introducing a rather not-pretty
read_trylock_irqsave() helper which checks for CONFIG_PREEMPT_RT itself to
decide whether interrupts need to be disabled. The helper then pairs
correctly with read_unlock_irqrestore() on both PREEMPT_RT and normal
kernel builds.
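
For illustration only (a sketch, not part of the patch; 'gpc' and 'flags'
are placeholder names), the atomic-context caller then ends up with the
following shape, with the unlock side left untouched:

        unsigned long flags;

        if (!read_trylock_irqsave(&gpc->lock, &flags))
                return;         /* atomic context: don't wait for the lock */

        /* ... update the runstate area through the cached mapping ... */

        read_unlock_irqrestore(&gpc->lock, flags);

On !PREEMPT_RT both sides save and restore the IRQ flags; on PREEMPT_RT
neither side touches them, so the pairing stays balanced either way.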

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
arch/x86/kvm/xen.c

index f8113eb8d0408af425b515341d74e0c13c8e545e..c2eb5784019bbc4037fe54e18f90826f66e3fdab 100644
@@ -270,6 +270,26 @@ static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
        vcpu->arch.xen.timer.function = xen_timer_callback;
 }
 
+/*
+ * With CONFIG_PREEMPT_RT, 'irqsave' locking doesn't actually disable IRQs:
+ * the rwlock is a sleeping lock and IRQ handlers (except the hrtimer IRQ)
+ * are threaded. So open-coding local_irq_save() + read_trylock() is wrong.
+ */
+static inline bool read_trylock_irqsave(rwlock_t *lck, unsigned long *flags)
+{
+#ifndef CONFIG_PREEMPT_RT
+       local_irq_save(*flags);
+#endif
+       if (!read_trylock(lck)) {
+#ifndef CONFIG_PREEMPT_RT
+               local_irq_restore(*flags);
+#endif
+               return false;
+       }
+
+       return true;
+}
+
 static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 {
        struct kvm_vcpu_xen *vx = &v->arch.xen;
@@ -373,11 +393,8 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
         * gfn_to_pfn caches that cover the region.
         */
        if (atomic) {
-               local_irq_save(flags);
-               if (!read_trylock(&gpc1->lock)) {
-                       local_irq_restore(flags);
+               if (!read_trylock_irqsave(&gpc1->lock, &flags))
                        return;
-               }
        } else {
                read_lock_irqsave(&gpc1->lock, flags);
        }