KVM: x86: Remove periodic global clock updates
author     David Woodhouse <dwmw@amazon.co.uk>
           Thu, 18 Apr 2024 18:36:57 +0000 (19:36 +0100)
committer  David Woodhouse <dwmw@amazon.co.uk>
           Fri, 26 Apr 2024 14:16:18 +0000 (15:16 +0100)
This effectively reverts commit 332967a3eac0 ("x86: kvm: introduce
periodic global clock updates"). The periodic update was introduced to
propagate NTP corrections to the guest KVM clock, when the KVM clock was
based on CLOCK_MONOTONIC.
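
(For reference, and not part of this patch: the NTP frequency correction
that skews CLOCK_MONOTONIC, but not CLOCK_MONOTONIC_RAW, can be observed
from userspace via adjtimex(2). A minimal illustrative sketch; the output
format is arbitrary.)

/* Illustrative only: query the NTP frequency correction applied to
 * CLOCK_MONOTONIC. CLOCK_MONOTONIC_RAW is not subject to it. */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* read-only query */

	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}

	/* tx.freq is the frequency offset in units of 2^-16 ppm */
	printf("NTP frequency correction: %.3f ppm\n", tx.freq / 65536.0);
	return 0;
}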

However, commit 53fafdbb8b21 ("KVM: x86: switch KVMCLOCK base to
monotonic raw clock") switched to using CLOCK_MONOTONIC_RAW as the basis
for the KVM clock, avoiding the NTP frequency skew altogether.

So the periodic update serves no purpose. Remove it.
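
(Side note, also not part of the patch: the skew that the raw clock avoids
can be seen from userspace by sampling both clocks; the offset between
CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW drifts while NTP slews or re-rates
the clock. A rough sketch, with an arbitrary 10-second sampling interval.)

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	/* The two clocks have different zero points, so track the change
	 * in their offset rather than the offset itself. */
	long long base = ns(CLOCK_MONOTONIC) - ns(CLOCK_MONOTONIC_RAW);

	for (;;) {
		long long off = ns(CLOCK_MONOTONIC) - ns(CLOCK_MONOTONIC_RAW);

		printf("drift since start: %lld ns\n", off - base);
		sleep(10);
	}
	return 0;
}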

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b01c1d000ffff5bdbfbd63a43817f4f95f17157e..a44273faf2f1ac550c128695cab643a6c7793c54 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1371,7 +1371,6 @@ struct kvm_arch {
        u64 master_kernel_ns;
        u64 master_cycle_now;
        struct delayed_work kvmclock_update_work;
-       struct delayed_work kvmclock_sync_work;
 
        struct kvm_xen_hvm_config xen_hvm_config;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ef3cd611303754eebb55ab3bb9871a06f95fda6c..5250fcdf4fc53b98c960c7b6395fb712e965fd57 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -155,9 +155,6 @@ EXPORT_SYMBOL_GPL(report_ignored_msrs);
 unsigned int min_timer_period_us = 200;
 module_param(min_timer_period_us, uint, 0644);
 
-static bool __read_mostly kvmclock_periodic_sync = true;
-module_param(kvmclock_periodic_sync, bool, 0444);
-
 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
 static u32 __read_mostly tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, 0644);
@@ -3506,20 +3503,6 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
                                        KVMCLOCK_UPDATE_DELAY);
 }
 
-#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
-
-static void kvmclock_sync_fn(struct work_struct *work)
-{
-       struct delayed_work *dwork = to_delayed_work(work);
-       struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
-                                          kvmclock_sync_work);
-       struct kvm *kvm = container_of(ka, struct kvm, arch);
-
-       schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
-       schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
-                                       KVMCLOCK_SYNC_PERIOD);
-}
-
 /* These helpers are safe iff @msr is known to be an MCx bank MSR. */
 static bool is_mci_control_msr(u32 msr)
 {
@@ -12488,8 +12471,6 @@ fail_mmu_destroy:
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = vcpu->kvm;
-
        if (mutex_lock_killable(&vcpu->mutex))
                return;
        vcpu_load(vcpu);
@@ -12500,10 +12481,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
        vcpu->arch.msr_kvm_poll_control = 1;
 
        mutex_unlock(&vcpu->mutex);
-
-       if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
-               schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
-                                               KVMCLOCK_SYNC_PERIOD);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -12876,7 +12853,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 #endif
 
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
-       INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
        kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
@@ -12916,7 +12892,6 @@ static void kvm_unload_vcpu_mmus(struct kvm *kvm)
 
 void kvm_arch_sync_events(struct kvm *kvm)
 {
-       cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
        cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
        kvm_free_pit(kvm);
 }