www.infradead.org Git - users/dwmw2/linux.git/commitdiff
x86/tsc: Avoid synchronizing TSCs with multiple CPUs in parallel
authorDavid Woodhouse <dwmw@amazon.co.uk>
Tue, 16 Feb 2021 15:05:11 +0000 (15:05 +0000)
committerDavid Woodhouse <dwmw@amazon.co.uk>
Thu, 9 Feb 2023 13:01:55 +0000 (13:01 +0000)
The TSC sync algorithm is only designed to do a 1:1 sync between the
source and target CPUs.

In order to enable parallel CPU bringup, serialize it by using an
atomic_t containing the number of the target CPU whose turn it is.

In future this could be optimised by inventing a 1:many algorithm for
TSC synchronization, perhaps falling back to 1:1 if a warp is
observed but doing them all in parallel for the common case where no
adjustment is needed. Or just avoiding the sync completely for cases
like kexec where we trust that they were in sync already.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
arch/x86/kernel/tsc_sync.c

index 9452dc9664b51fddcfaeb6274935c91885814fda..ee9278edc96ff1d84b6fcc68def7217c88f91b75 100644 (file)
@@ -243,6 +243,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
  * Entry/exit counters that make sure that both CPUs
  * run the measurement code at once:
  */
+static atomic_t tsc_sync_cpu = ATOMIC_INIT(-1);
 static atomic_t start_count;
 static atomic_t stop_count;
 static atomic_t skip_test;
@@ -367,6 +368,8 @@ void check_tsc_sync_source(int cpu)
                atomic_set(&test_runs, 1);
        else
                atomic_set(&test_runs, 3);
+
+       atomic_set(&tsc_sync_cpu, cpu);
 retry:
        /*
         * Wait for the target to start or to skip the test:
@@ -448,6 +451,10 @@ void check_tsc_sync_target(void)
        if (unsynchronized_tsc())
                return;
 
+       /* Wait for this CPU's turn */
+       while (atomic_read(&tsc_sync_cpu) != cpu)
+               cpu_relax();
+
        /*
         * Store, verify and sanitize the TSC adjust register. If
         * successful skip the test.