From: David Woodhouse
Date: Fri, 26 Apr 2024 17:24:10 +0000 (+0100)
Subject: KVM: x86: Kill cur_tsc_{nsec,offset,write} fields
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=767d5e71e259cd515eae697a97b1af248e332d8d;p=users%2Fdwmw2%2Flinux.git

KVM: x86: Kill cur_tsc_{nsec,offset,write} fields

These pointlessly duplicate the last_tsc_{nsec,offset,write} values.
The only place they were used was where the TSC is stable and a new
vCPU is being synchronized to the previous setting, in which case the
'last_' value is definitely identical.

Signed-off-by: David Woodhouse
Reviewed-by: Paul Durrant
---

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dbe5a4c506dac..ccc3c8f808db0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1358,9 +1358,6 @@ struct kvm_arch {
 	u32 last_tsc_khz;
 	u64 last_tsc_offset;
 	u64 last_tsc_scaling_ratio;
-	u64 cur_tsc_nsec;
-	u64 cur_tsc_write;
-	u64 cur_tsc_offset;
 	u64 cur_tsc_generation;
 	int nr_vcpus_matched_tsc;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39b20c60332d8..0584a41b461df 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2703,11 +2703,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 	lockdep_assert_held(&kvm->arch.tsc_write_lock);
 
 	/*
-	 * We also track th most recent recorded KHZ, write and time to
-	 * allow the matching interval to be extended at each write.
+	 * Track the last recorded kHz (and associated scaling ratio for
+	 * calculating the guest TSC), and offset.
 	 */
-	kvm->arch.last_tsc_nsec = ns;
-	kvm->arch.last_tsc_write = tsc;
 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
 	kvm->arch.last_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
 	kvm->arch.last_tsc_offset = offset;
@@ -2726,10 +2724,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 		 *
 		 * These values are tracked in kvm->arch.cur_xxx variables.
 		 */
+		kvm->arch.last_tsc_nsec = ns;
+		kvm->arch.last_tsc_write = tsc;
 		kvm->arch.cur_tsc_generation++;
-		kvm->arch.cur_tsc_nsec = ns;
-		kvm->arch.cur_tsc_write = tsc;
-		kvm->arch.cur_tsc_offset = offset;
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
 		kvm->arch.nr_vcpus_matched_tsc++;
@@ -2737,8 +2734,8 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 
 	/* Keep track of which generation this VCPU has synchronized to */
 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
-	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
-	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+	vcpu->arch.this_tsc_nsec = kvm->arch.last_tsc_nsec;
+	vcpu->arch.this_tsc_write = kvm->arch.last_tsc_write;
 
 	kvm_track_tsc_matching(vcpu);
 }
@@ -2815,8 +2812,8 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 		data = kvm->arch.last_tsc_write;
 
 		if (!kvm_check_tsc_unstable()) {
-			offset = kvm->arch.cur_tsc_offset;
-			ns = kvm->arch.cur_tsc_nsec;
+			offset = kvm->arch.last_tsc_offset;
+			ns = kvm->arch.last_tsc_nsec;
 		} else {
 			/*
 			 * ... unless the TSC is unstable and has to be
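
For context, here is a condensed stand-alone C sketch of the post-patch
flow in __kvm_synchronize_tsc(). It is illustrative only: the stand-in
types and the sketch_* names are hypothetical, and the locking, the kHz
and scaling-ratio tracking, and the elapsed-time check that determines
'matched' are all elided. It shows why a vCPU synchronizing to the
current generation reads the same values from last_tsc_{nsec,write}
that the removed cur_tsc_{nsec,write} fields would have held.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

/* Hypothetical stand-in types; the real fields live in struct kvm_arch
 * and struct kvm_vcpu_arch. */
struct kvm_arch_sketch {
	u64 last_tsc_nsec;	/* host time of the generation's first write */
	u64 last_tsc_write;	/* guest TSC value of that write */
	u64 last_tsc_offset;	/* refreshed on every write */
	u64 cur_tsc_generation;	/* bumped when matching fails */
	int nr_vcpus_matched_tsc;
};

struct kvm_vcpu_sketch {
	u64 this_tsc_generation;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
};

static void sketch_synchronize_tsc(struct kvm_arch_sketch *arch,
				   struct kvm_vcpu_sketch *vcpu,
				   u64 offset, u64 tsc, u64 ns, bool matched)
{
	/* Recorded on every write (kHz and scaling ratio elided here). */
	arch->last_tsc_offset = offset;

	if (!matched) {
		/* An unmatched write opens a new generation... */
		arch->last_tsc_nsec = ns;
		arch->last_tsc_write = tsc;
		arch->cur_tsc_generation++;
		arch->nr_vcpus_matched_tsc = 0;
	} else if (vcpu->this_tsc_generation != arch->cur_tsc_generation) {
		arch->nr_vcpus_matched_tsc++;
	}

	/*
	 * ...and the vCPU syncs to that generation's values. A matched
	 * write leaves last_tsc_{nsec,write} untouched, so these reads
	 * return exactly what the removed cur_tsc_{nsec,write} copies
	 * would have held, which is why the duplication was pointless.
	 */
	vcpu->this_tsc_generation = arch->cur_tsc_generation;
	vcpu->this_tsc_nsec = arch->last_tsc_nsec;
	vcpu->this_tsc_write = arch->last_tsc_write;
}

The same reasoning covers the one consumer changed in the final hunk:
kvm_synchronize_tsc() only read the cur_ copies when the TSC was stable,
i.e. while still inside the generation whose first write recorded
last_tsc_{nsec,offset}, so substituting the last_ fields preserves the
values read.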