From e75c7ffdd987fb5bff6285f131374878976c9b70 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Fri, 26 Apr 2024 15:57:20 +0100
Subject: [PATCH] KVM: x86: Remove implicit rdtsc() from
 kvm_compute_l1_tsc_offset()

Let the callers pass the host TSC value in as an explicit parameter.

This leaves some fairly obviously stupid code, which uses this function
to compare the guest TSC at some *other* time with the newly-minted TSC
value from rdtsc(). Unless it's being used to measure *elapsed* time,
that isn't very sensible.

In this case, "obviously stupid" is an improvement over being
non-obviously so.

No functional change intended.

Signed-off-by: David Woodhouse
Reviewed-by: Paul Durrant
---
 arch/x86/kvm/x86.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1e4f3f637ed8..38dba964419a4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2578,11 +2578,12 @@ u64 kvm_scale_tsc(u64 tsc, u64 ratio)
 	return _tsc;
 }
 
-static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 host_tsc,
+				     u64 target_tsc)
 {
 	u64 tsc;
 
-	tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
+	tsc = kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
 
 	return target_tsc - tsc;
 }
@@ -2735,7 +2736,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 	bool synchronizing = false;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = kvm_compute_l1_tsc_offset(vcpu, data);
+	offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
 	ns = get_kvmclock_base_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -2786,7 +2787,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 		} else {
 			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			data += delta;
-			offset = kvm_compute_l1_tsc_offset(vcpu, data);
+			offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
 		}
 		matched = true;
 	}
@@ -4001,7 +4002,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (msr_info->host_initiated) {
 			kvm_synchronize_tsc(vcpu, &data);
 		} else {
-			u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
+			u64 adj = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data) -
+				vcpu->arch.l1_tsc_offset;
 			adjust_tsc_offset_guest(vcpu, adj);
 			vcpu->arch.ia32_tsc_adjust_msr += adj;
 		}
@@ -5091,7 +5093,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 
 		if (kvm_check_tsc_unstable()) {
-			u64 offset = kvm_compute_l1_tsc_offset(vcpu,
+			u64 offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(),
 					vcpu->arch.last_guest_tsc);
 			kvm_vcpu_write_tsc_offset(vcpu, offset);
 			vcpu->arch.tsc_catchup = 1;
-- 
2.50.1
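
Note (illustrative only, not part of the applied diff): the kvm_arch_vcpu_load()
hunk above is the "obviously stupid" call site the commit message refers to. A
minimal C sketch of that pattern with the new explicit-host-TSC signature is
below; the helper name example_restore_guest_tsc() is hypothetical, while the
other identifiers are the ones touched by the patch.

	static void example_restore_guest_tsc(struct kvm_vcpu *vcpu)
	{
		/* Host TSC sampled *now*... */
		u64 host_tsc = rdtsc();

		/*
		 * ...handed to kvm_compute_l1_tsc_offset() together with a
		 * guest TSC recorded at some *other*, earlier time (when the
		 * vCPU last ran).  With host_tsc now an explicit parameter,
		 * that mismatch is visible at the call site rather than
		 * hidden inside the helper.
		 */
		u64 offset = kvm_compute_l1_tsc_offset(vcpu, host_tsc,
						       vcpu->arch.last_guest_tsc);

		kvm_vcpu_write_tsc_offset(vcpu, offset);
		vcpu->arch.tsc_catchup = 1;
	}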