www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: xen: mark guest pages dirty with the pfncache lock held
author: Paul Durrant <pdurrant@amazon.com>
Thu, 9 Nov 2023 14:17:02 +0000 (14:17 +0000)
committer: Paul Durrant <pdurrant@amazon.com>
Thu, 11 Jan 2024 10:23:01 +0000 (10:23 +0000)
Sampling gpa and memslot from an unlocked pfncache may yield inconsistent
values so, since there is no problem with calling mark_page_dirty_in_slot()
with the pfncache lock held, relocate the calls in
kvm_xen_update_runstate_guest() and kvm_xen_inject_pending_events()
accordingly.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: x86@kernel.org
v8:
 - New in this version.

arch/x86/kvm/xen.c

index e43948b87f94cff5ef5a0da89d7908a28b3a8fa3..b63bf54bb376c58c1045826ddef6464ce75c2ac5 100644 (file)
@@ -452,14 +452,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
                smp_wmb();
        }
 
-       if (user_len2)
+       if (user_len2) {
+               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
                read_unlock(&gpc2->lock);
-
-       read_unlock_irqrestore(&gpc1->lock, flags);
+       }
 
        mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
-       if (user_len2)
-               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
+       read_unlock_irqrestore(&gpc1->lock, flags);
 }
 
 void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -565,13 +564,13 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
                             : "0" (evtchn_pending_sel32));
                WRITE_ONCE(vi->evtchn_upcall_pending, 1);
        }
+
+       mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
        read_unlock_irqrestore(&gpc->lock, flags);
 
        /* For the per-vCPU lapic vector, deliver it as MSI. */
        if (v->arch.xen.upcall_vector)
                kvm_xen_inject_vcpu_vector(v);
-
-       mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)