www.infradead.org Git - users/dwmw2/linux.git/commitdiff
Avoid parallel TSC sync
authorDavid Woodhouse <dwmw@amazon.co.uk>
Tue, 16 Feb 2021 15:05:11 +0000 (15:05 +0000)
committerDavid Woodhouse <dwmw@amazon.co.uk>
Tue, 16 Feb 2021 15:05:11 +0000 (15:05 +0000)
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc_sync.c

index d8f51e4502301366f923882e3a9b2b1b6f11b6cc..e226c40bdd8721d62f2945129d2e9cfaefc5d95e 100644 (file)
@@ -1230,7 +1230,7 @@ unreg_nmi:
 }
 
 /* We aren't ready for this part yet */
-static int i_fixed_parallel_tsc_sync = false;
+static int i_fixed_parallel_tsc_sync = true;
 
 int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
index 3d3c761eb74a64a535f0d0c516d275e48a0a36b8..c4062c7c778c286f99829116a58269ed8600fc85 100644 (file)
@@ -202,6 +202,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
  * Entry/exit counters that make sure that both CPUs
  * run the measurement code at once:
  */
+static atomic_t tsc_sync_cpu = ATOMIC_INIT(-1);
 static atomic_t start_count;
 static atomic_t stop_count;
 static atomic_t skip_test;
@@ -326,6 +327,8 @@ void check_tsc_sync_source(int cpu)
                atomic_set(&test_runs, 1);
        else
                atomic_set(&test_runs, 3);
+
+       atomic_set(&tsc_sync_cpu, cpu);
 retry:
        /*
         * Wait for the target to start or to skip the test:
@@ -407,6 +410,10 @@ void check_tsc_sync_target(void)
        if (unsynchronized_tsc())
                return;
 
+       /* Wait for this CPU's turn */
+       while (atomic_read(&tsc_sync_cpu) != cpu)
+               cpu_relax();
+
        /*
         * Store, verify and sanitize the TSC adjust register. If
         * successful skip the test.