From: David Woodhouse
Date: Fri, 26 Apr 2024 17:24:10 +0000 (+0100)
Subject: KVM: x86: Kill cur_tsc_{nsec,offset,write} fields
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=ef18244ee970ecd793a50fed9bebc2b28dd2a61b;p=users%2Fdwmw2%2Flinux.git

KVM: x86: Kill cur_tsc_{nsec,offset,write} fields

These pointlessly duplicate the last_tsc_{nsec,offset,write} values.
The only place they were used was where the TSC is stable and a new
vCPU is being synchronized to the previous setting, in which case the
'last_' value is definitely identical.

Signed-off-by: David Woodhouse
Reviewed-by: Paul Durrant
---

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bff9a793ab757..4c4178419a89a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1374,9 +1374,6 @@ struct kvm_arch {
 	u32 last_tsc_khz;
 	u64 last_tsc_offset;
 	u64 last_tsc_scaling_ratio;
-	u64 cur_tsc_nsec;
-	u64 cur_tsc_write;
-	u64 cur_tsc_offset;
 	u64 cur_tsc_generation;
 	int nr_vcpus_matched_tsc;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ec0dac0511c10..066efb9cced0e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2690,11 +2690,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 	lockdep_assert_held(&kvm->arch.tsc_write_lock);
 
 	/*
-	 * We also track th most recent recorded KHZ, write and time to
-	 * allow the matching interval to be extended at each write.
+	 * Track the last recorded kHz (and associated scaling ratio for
+	 * calculating the guest TSC), and offset.
 	 */
-	kvm->arch.last_tsc_nsec = ns;
-	kvm->arch.last_tsc_write = tsc;
 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
 	kvm->arch.last_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
 	kvm->arch.last_tsc_offset = offset;
@@ -2713,10 +2711,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 		 *
 		 * These values are tracked in kvm->arch.cur_xxx variables.
 		 */
+		kvm->arch.last_tsc_nsec = ns;
+		kvm->arch.last_tsc_write = tsc;
 		kvm->arch.cur_tsc_generation++;
-		kvm->arch.cur_tsc_nsec = ns;
-		kvm->arch.cur_tsc_write = tsc;
-		kvm->arch.cur_tsc_offset = offset;
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
 		kvm->arch.nr_vcpus_matched_tsc++;
@@ -2724,8 +2721,8 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 
 	/* Keep track of which generation this VCPU has synchronized to */
 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
-	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
-	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+	vcpu->arch.this_tsc_nsec = kvm->arch.last_tsc_nsec;
+	vcpu->arch.this_tsc_write = kvm->arch.last_tsc_write;
 
 	kvm_track_tsc_matching(vcpu);
 }
@@ -2802,8 +2799,8 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 		data = kvm->arch.last_tsc_write;
 
 		if (!kvm_check_tsc_unstable()) {
-			offset = kvm->arch.cur_tsc_offset;
-			ns = kvm->arch.cur_tsc_nsec;
+			offset = kvm->arch.last_tsc_offset;
+			ns = kvm->arch.last_tsc_nsec;
 		} else {
 			/*
 			 * ... unless the TSC is unstable and has to be