KVM: x86: Kill cur_tsc_{nsec,offset,write} fields
author    David Woodhouse <dwmw@amazon.co.uk>
          Fri, 26 Apr 2024 17:24:10 +0000 (18:24 +0100)
committer David Woodhouse <dwmw@amazon.co.uk>
          Fri, 2 Aug 2024 14:51:48 +0000 (15:51 +0100)
These pointlessly duplicate the last_tsc_{nsec,offset,write} values.

The only place they were used was where the TSC is stable and a new vCPU
is being synchronized to the previous setting, in which case the 'last_'
values are necessarily identical.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
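
For illustration, a minimal sketch of the sole read site described above,
simplified from the kvm_synchronize_tsc() hunk at the end of this diff;
this is a sketch, not the kernel source verbatim:

	/*
	 * Sketch only: the stable-TSC resync path for a newly created
	 * vCPU was the only reader of the cur_tsc_{nsec,offset} copies.
	 * With the duplicate fields removed it reads the last_tsc_*
	 * fields instead, which hold the same values in this path.
	 */
	if (!kvm_check_tsc_unstable()) {
		offset = kvm->arch.last_tsc_offset;	/* was cur_tsc_offset */
		ns = kvm->arch.last_tsc_nsec;		/* was cur_tsc_nsec */
	}
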
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bff9a793ab757192e20be1eed9b5bd22e89d1199..4c4178419a89a07591fce45d1e0e7d2a49d906c4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1374,9 +1374,6 @@ struct kvm_arch {
        u32 last_tsc_khz;
        u64 last_tsc_offset;
        u64 last_tsc_scaling_ratio;
-       u64 cur_tsc_nsec;
-       u64 cur_tsc_write;
-       u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ec0dac0511c1024051c9af17b1928840dfa8be67..066efb9cced0e8dcace197b5c823313809d38357 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2690,11 +2690,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
        lockdep_assert_held(&kvm->arch.tsc_write_lock);
 
        /*
-        * We also track th most recent recorded KHZ, write and time to
-        * allow the matching interval to be extended at each write.
+        * Track the last recorded kHz (and associated scaling ratio for
+        * calculating the guest TSC), and offset.
         */
-       kvm->arch.last_tsc_nsec = ns;
-       kvm->arch.last_tsc_write = tsc;
        kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
        kvm->arch.last_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
        kvm->arch.last_tsc_offset = offset;
@@ -2713,10 +2711,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
                 *
                 * These values are tracked in kvm->arch.cur_xxx variables.
                 */
+               kvm->arch.last_tsc_nsec = ns;
+               kvm->arch.last_tsc_write = tsc;
                kvm->arch.cur_tsc_generation++;
-               kvm->arch.cur_tsc_nsec = ns;
-               kvm->arch.cur_tsc_write = tsc;
-               kvm->arch.cur_tsc_offset = offset;
                kvm->arch.nr_vcpus_matched_tsc = 0;
        } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
                kvm->arch.nr_vcpus_matched_tsc++;
@@ -2724,8 +2721,8 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 
        /* Keep track of which generation this VCPU has synchronized to */
        vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
-       vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
-       vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+       vcpu->arch.this_tsc_nsec = kvm->arch.last_tsc_nsec;
+       vcpu->arch.this_tsc_write = kvm->arch.last_tsc_write;
 
        kvm_track_tsc_matching(vcpu);
 }
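
Taken together, the three x86.c hunks above leave __kvm_synchronize_tsc()
looking roughly as follows. This is a reconstruction from the context lines
shown here; the signature tail (u64 ns, bool matched), the local 'kvm'
variable, the '!matched' condition and the code elided between hunks are
filled in from the upstream function and should be treated as assumptions:

	static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
					  u64 ns, bool matched)
	{
		struct kvm *kvm = vcpu->kvm;

		lockdep_assert_held(&kvm->arch.tsc_write_lock);

		/*
		 * Track the last recorded kHz (and associated scaling ratio for
		 * calculating the guest TSC), and offset.
		 */
		kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
		kvm->arch.last_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
		kvm->arch.last_tsc_offset = offset;

		/* ... code between the first and second hunks elided ... */

		if (!matched) {
			/*
			 * ... (start of comment not shown in the hunk) ...
			 * These values are tracked in kvm->arch.cur_xxx variables.
			 */
			kvm->arch.last_tsc_nsec = ns;
			kvm->arch.last_tsc_write = tsc;
			kvm->arch.cur_tsc_generation++;
			kvm->arch.nr_vcpus_matched_tsc = 0;
		} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
			kvm->arch.nr_vcpus_matched_tsc++;
		}

		/* Keep track of which generation this VCPU has synchronized to */
		vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
		vcpu->arch.this_tsc_nsec = kvm->arch.last_tsc_nsec;
		vcpu->arch.this_tsc_write = kvm->arch.last_tsc_write;

		kvm_track_tsc_matching(vcpu);
	}
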
@@ -2802,8 +2799,8 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                data = kvm->arch.last_tsc_write;
 
                if (!kvm_check_tsc_unstable()) {
-                       offset = kvm->arch.cur_tsc_offset;
-                       ns = kvm->arch.cur_tsc_nsec;
+                       offset = kvm->arch.last_tsc_offset;
+                       ns = kvm->arch.last_tsc_nsec;
                } else {
                        /*
                         * ... unless the TSC is unstable and has to be