www.infradead.org Git - nvme.git/commitdiff
x86/aperfmperf: Fix deadlock on cpu_hotplug_lock
authorJonathan Cameron <Jonathan.Cameron@huawei.com>
Mon, 29 Jul 2024 10:55:04 +0000 (11:55 +0100)
committerBorislav Petkov (AMD) <bp@alien8.de>
Mon, 29 Jul 2024 13:32:37 +0000 (15:32 +0200)
The broken patch results in a call to init_freq_invariance_cppc() in a CPU
hotplug handler in both the path for initially present CPUs and those
hotplugged later.  That function includes a one time call to
amd_set_max_freq_ratio() which in turn calls freq_invariance_enable() that has
a static_branch_enable() which takes the cpu_hotplug_lock which is already
held.

Avoid the deadlock by using static_branch_enable_cpuslocked() as the lock will
always be already held.  The equivalent path on Intel does not already hold
this lock, so take it around the call to freq_invariance_enable(), which
results in it being held over the call to register_freq_invariance_syscore_ops(),
which looks to be safe to do.

Fixes: c1385c1f0ba3 ("ACPI: processor: Simplify initial onlining to use same path for cold and hotplug")
Closes: https://lore.kernel.org/all/CABXGCsPvqBfL5hQDOARwfqasLRJ_eNPBbCngZ257HOe=xbWDkA@mail.gmail.com/
Reported-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
Tested-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240729105504.2170-1-Jonathan.Cameron@huawei.com
arch/x86/kernel/cpu/aperfmperf.c

index b3fa61d45352e7120d7be7fb5cdb9d6f071c9729..0b69bfbf345d0ddf0e88c1ad6125d5f22c8883a4 100644 (file)
@@ -306,7 +306,7 @@ static void freq_invariance_enable(void)
                WARN_ON_ONCE(1);
                return;
        }
-       static_branch_enable(&arch_scale_freq_key);
+       static_branch_enable_cpuslocked(&arch_scale_freq_key);
        register_freq_invariance_syscore_ops();
        pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
 }
@@ -323,8 +323,10 @@ static void __init bp_init_freq_invariance(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;
 
-       if (intel_set_max_freq_ratio())
+       if (intel_set_max_freq_ratio()) {
+               guard(cpus_read_lock)();
                freq_invariance_enable();
+       }
 }
 
 static void disable_freq_invariance_workfn(struct work_struct *work)