KVM: x86: Remove implicit rdtsc() from kvm_compute_l1_tsc_offset()
author     David Woodhouse <dwmw@amazon.co.uk>
Fri, 26 Apr 2024 14:57:20 +0000 (15:57 +0100)
committer  David Woodhouse <dwmw@amazon.co.uk>
Tue, 4 Jun 2024 10:31:46 +0000 (11:31 +0100)
Let the callers pass the host TSC value in as an explicit parameter.

This leaves some fairly obviously stupid code, which uses this function
to compare the guest TSC at some *other* time with the newly-minted TSC
value from rdtsc(). Unless it's being used to measure *elapsed* time,
that isn't very sensible.

In this case, "obviously stupid" is an improvement over being non-obviously
so.

No functional change intended.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
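For illustration only, a minimal userspace sketch (not kernel code) of the
arithmetic the helper performs after this change. scale_tsc(), ratio and
frac_bits are stand-ins for kvm_scale_tsc() and
vcpu->arch.l1_tsc_scaling_ratio; the 48-bit fixed-point format is an
assumption modelled on hardware TSC-scaling ratios, not taken from this
patch.

	/*
	 * Sketch of kvm_compute_l1_tsc_offset() with an explicit host_tsc.
	 * Build with: gcc -O2 sketch.c (uses the GCC/Clang __int128 extension).
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* Fixed-point scaling stand-in: (tsc * ratio) >> frac_bits. */
	static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio,
				  unsigned int frac_bits)
	{
		return (uint64_t)(((unsigned __int128)tsc * ratio) >> frac_bits);
	}

	/*
	 * The L1 TSC offset is the TSC the guest should observe (target_tsc)
	 * minus the scaled host TSC. With host_tsc passed in explicitly, it
	 * is visible at the call site whether host_tsc was read at the same
	 * instant target_tsc refers to -- the only case in which the
	 * subtraction is meaningful.
	 */
	static uint64_t compute_l1_tsc_offset(uint64_t host_tsc,
					      uint64_t target_tsc,
					      uint64_t ratio,
					      unsigned int frac_bits)
	{
		return target_tsc - scale_tsc(host_tsc, ratio, frac_bits);
	}

	int main(void)
	{
		const unsigned int frac_bits = 48;	   /* assumed format */
		const uint64_t ratio = 1ULL << frac_bits;  /* 1:1 scaling */

		/* host_tsc and target_tsc sampled at the same instant. */
		printf("offset = %llu\n",
		       (unsigned long long)compute_l1_tsc_offset(1000000,
								  1500000,
								  ratio,
								  frac_bits));
		return 0;
	}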
arch/x86/kvm/x86.c

index 377e09da6c6d73d8ec7ae5b17c821deaa39436c5..f9359becf23dda3f3e5975fa59c2eef7d80e16a0 100644
@@ -2591,11 +2591,12 @@ u64 kvm_scale_tsc(u64 tsc, u64 ratio)
        return _tsc;
 }
 
-static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 host_tsc,
+                                    u64 target_tsc)
 {
        u64 tsc;
 
-       tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
+       tsc = kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
 
        return target_tsc - tsc;
 }
@@ -2748,7 +2749,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
        bool synchronizing = false;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-       offset = kvm_compute_l1_tsc_offset(vcpu, data);
+       offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
        ns = get_kvmclock_base_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -2799,7 +2800,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                } else {
                        u64 delta = nsec_to_cycles(vcpu, elapsed);
                        data += delta;
-                       offset = kvm_compute_l1_tsc_offset(vcpu, data);
+                       offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
                }
                matched = true;
        }
@@ -4014,7 +4015,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (msr_info->host_initiated) {
                        kvm_synchronize_tsc(vcpu, &data);
                } else {
-                       u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
+                       u64 adj = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data) -
+                               vcpu->arch.l1_tsc_offset;
                        adjust_tsc_offset_guest(vcpu, adj);
                        vcpu->arch.ia32_tsc_adjust_msr += adj;
                }
@@ -5088,7 +5090,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                        mark_tsc_unstable("KVM discovered backwards TSC");
 
                if (kvm_check_tsc_unstable()) {
-                       u64 offset = kvm_compute_l1_tsc_offset(vcpu,
+                       u64 offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(),
                                                vcpu->arch.last_guest_tsc);
                        kvm_vcpu_write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;