From fcb56e6119455c3f5da5b300025c1356866b666e Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Tue, 16 Feb 2021 15:05:11 +0000
Subject: [PATCH] Avoid parallel TSC sync

Signed-off-by: David Woodhouse
---
 arch/x86/kernel/smpboot.c  | 2 +-
 arch/x86/kernel/tsc_sync.c | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d8f51e4502301..e226c40bdd872 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1230,7 +1230,7 @@ unreg_nmi:
 }
 
 /* We aren't ready for this part yet */
-static int i_fixed_parallel_tsc_sync = false;
+static int i_fixed_parallel_tsc_sync = true;
 
 int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 3d3c761eb74a6..c4062c7c778c2 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -202,6 +202,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
  * Entry/exit counters that make sure that both CPUs
  * run the measurement code at once:
  */
+static atomic_t tsc_sync_cpu = ATOMIC_INIT(-1);
 static atomic_t start_count;
 static atomic_t stop_count;
 static atomic_t skip_test;
@@ -326,6 +327,8 @@ void check_tsc_sync_source(int cpu)
 		atomic_set(&test_runs, 1);
 	else
 		atomic_set(&test_runs, 3);
+
+	atomic_set(&tsc_sync_cpu, cpu);
 retry:
 	/*
 	 * Wait for the target to start or to skip the test:
@@ -407,6 +410,10 @@ void check_tsc_sync_target(void)
 	if (unsynchronized_tsc())
 		return;
 
+	/* Wait for this CPU's turn */
+	while (atomic_read(&tsc_sync_cpu) != cpu)
+		cpu_relax();
+
 	/*
 	 * Store, verify and sanitize the TSC adjust register. If
 	 * successful skip the test.
-- 
2.49.0
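
The pattern the patch adds can be shown in isolation: the control CPU publishes whose turn it is in a single shared atomic (tsc_sync_cpu), and each target spins with cpu_relax() until that token names it, so only one AP at a time enters the sync rendezvous with the source. Below is a minimal userspace sketch of that turn-token idea using C11 atomics and pthreads; it is not the kernel code, and the names (tsc_sync_cpu, run_sync_check, worker) are illustrative stand-ins only. In the real patch the source additionally rendezvouses with the target through the existing start_count/stop_count counters; here the controller simply joins each worker before handing out the next turn.

/* Build with: cc -pthread turn_token.c */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <sched.h>

#define NWORKERS 4

/* Which worker may currently run the paired check; -1 means "nobody yet". */
static atomic_int tsc_sync_cpu = -1;

static void run_sync_check(int cpu)
{
	/* Stand-in for the source/target measurement rendezvous. */
	printf("worker %d running sync check\n", cpu);
}

static void *worker(void *arg)
{
	int cpu = (int)(long)arg;

	/* Wait for this worker's turn, as the target does in the patch. */
	while (atomic_load(&tsc_sync_cpu) != cpu)
		sched_yield();	/* userspace stand-in for cpu_relax() */

	run_sync_check(cpu);
	return NULL;
}

int main(void)
{
	pthread_t tids[NWORKERS];

	for (long i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);

	/* Controller hands out turns one worker at a time. */
	for (int cpu = 0; cpu < NWORKERS; cpu++) {
		atomic_store(&tsc_sync_cpu, cpu);
		pthread_join(tids[cpu], NULL);
	}
	return 0;
}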