KVM: x86: Kill cur_tsc_{nsec,offset,write} fields
author    David Woodhouse <dwmw@amazon.co.uk>
          Fri, 26 Apr 2024 17:24:10 +0000 (18:24 +0100)
committer David Woodhouse <dwmw@amazon.co.uk>
          Tue, 4 Jun 2024 10:31:46 +0000 (11:31 +0100)
These pointlessly duplicate the last_tsc_{nsec,offset,write} values.

The only place they were used was where the TSC is stable and a new vCPU
is being synchronized to the previous setting, in which case the 'last_'
values are definitely identical.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
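
To make the reasoning above concrete, here is a minimal, self-contained
sketch of the two write paths. This is not the kernel source: struct
tsc_state, sync_tsc() and the new_generation flag are pared-down stand-ins
for struct kvm_arch, __kvm_synchronize_tsc() and its generation-matching
logic. The point is that the 'last_' fields are rewritten on every sync,
while the 'cur_' fields are rewritten only when a new generation starts;
a stable-TSC sync of a new vCPU always reuses the current generation's
values, so the two sets of fields cannot differ at the moment they are read.

	#include <assert.h>

	typedef unsigned long long u64;

	/* Pared-down stand-in for the relevant fields of struct kvm_arch. */
	struct tsc_state {
		u64 last_tsc_nsec, last_tsc_write, last_tsc_offset;
		u64 cur_tsc_nsec, cur_tsc_write, cur_tsc_offset;
		u64 cur_tsc_generation;
	};

	/*
	 * Simplified model of __kvm_synchronize_tsc(): 'last_' is updated
	 * on every write, 'cur_' only when a new generation starts.
	 */
	static void sync_tsc(struct tsc_state *s, u64 ns, u64 tsc, u64 offset,
			     int new_generation)
	{
		s->last_tsc_nsec = ns;
		s->last_tsc_write = tsc;
		s->last_tsc_offset = offset;

		if (new_generation) {
			s->cur_tsc_generation++;
			s->cur_tsc_nsec = ns;
			s->cur_tsc_write = tsc;
			s->cur_tsc_offset = offset;
		}
	}

	int main(void)
	{
		struct tsc_state s = { 0 };

		/* The first write always opens a new generation. */
		sync_tsc(&s, 1000, 50000, 42, 1);

		/*
		 * A stable-TSC sync of a new vCPU reuses the current
		 * generation's (ns, tsc, offset), so 'last_' is rewritten
		 * with exactly the values 'cur_' already holds.
		 */
		sync_tsc(&s, s.cur_tsc_nsec, s.cur_tsc_write,
			 s.cur_tsc_offset, 0);

		assert(s.last_tsc_nsec == s.cur_tsc_nsec);
		assert(s.last_tsc_write == s.cur_tsc_write);
		assert(s.last_tsc_offset == s.cur_tsc_offset);
		return 0;
	}

Since the 'cur_' copies can never be observed to hold anything the 'last_'
fields don't, the patch below drops them and keeps only cur_tsc_generation.
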
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

index dbe5a4c506dac9e542e684f739f6311d45c4490e..ccc3c8f808db0d0b79d1bbcd4ba7ea17f0495907 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1358,9 +1358,6 @@ struct kvm_arch {
        u32 last_tsc_khz;
        u64 last_tsc_offset;
        u64 last_tsc_scaling_ratio;
-       u64 cur_tsc_nsec;
-       u64 cur_tsc_write;
-       u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;
 
index 39b20c60332d8b01c6ecef223f8295137fc8d33b..0584a41b461df6f565aa1ccdea8305f7f25416ac 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2703,11 +2703,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
        lockdep_assert_held(&kvm->arch.tsc_write_lock);
 
        /*
-        * We also track th most recent recorded KHZ, write and time to
-        * allow the matching interval to be extended at each write.
+        * Track the last recorded kHz (and associated scaling ratio for
+        * calculating the guest TSC), and offset.
         */
-       kvm->arch.last_tsc_nsec = ns;
-       kvm->arch.last_tsc_write = tsc;
        kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
        kvm->arch.last_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
        kvm->arch.last_tsc_offset = offset;
@@ -2726,10 +2724,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
                 *
                 * These values are tracked in kvm->arch.cur_xxx variables.
                 */
+               kvm->arch.last_tsc_nsec = ns;
+               kvm->arch.last_tsc_write = tsc;
                kvm->arch.cur_tsc_generation++;
-               kvm->arch.cur_tsc_nsec = ns;
-               kvm->arch.cur_tsc_write = tsc;
-               kvm->arch.cur_tsc_offset = offset;
                kvm->arch.nr_vcpus_matched_tsc = 0;
        } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
                kvm->arch.nr_vcpus_matched_tsc++;
@@ -2737,8 +2734,8 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 
        /* Keep track of which generation this VCPU has synchronized to */
        vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
-       vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
-       vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+       vcpu->arch.this_tsc_nsec = kvm->arch.last_tsc_nsec;
+       vcpu->arch.this_tsc_write = kvm->arch.last_tsc_write;
 
        kvm_track_tsc_matching(vcpu);
 }
@@ -2815,8 +2812,8 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                data = kvm->arch.last_tsc_write;
 
                if (!kvm_check_tsc_unstable()) {
-                       offset = kvm->arch.cur_tsc_offset;
-                       ns = kvm->arch.cur_tsc_nsec;
+                       offset = kvm->arch.last_tsc_offset;
+                       ns = kvm->arch.last_tsc_nsec;
                } else {
                        /*
                         * ... unless the TSC is unstable and has to be