This code is convoluted, and because it can be invoked post-init via the
ACPI/CPPC code, all of the initialization functionality is built in instead
of being part of init text and init data.
As a first step, create separate calls for the boot and the application
processors.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20220415161206.536733494@linutronix.de
 #define arch_scale_freq_tick arch_scale_freq_tick
 
 extern void arch_set_max_freq_ratio(bool turbo_disabled);
-void init_freq_invariance(bool secondary, bool cppc_ready);
+extern void bp_init_freq_invariance(bool cppc_ready);
+extern void ap_init_freq_invariance(void);
 #else
-static inline void arch_set_max_freq_ratio(bool turbo_disabled)
-{
-}
-static inline void init_freq_invariance(bool secondary, bool cppc_ready)
-{
-}
+static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
+static inline void bp_init_freq_invariance(bool cppc_ready) { }
+static inline void ap_init_freq_invariance(void) { }
 #endif
 
 #ifdef CONFIG_ACPI_CPPC_LIB
 
 
        mutex_lock(&freq_invariance_lock);
 
-       init_freq_invariance(secondary, true);
+       if (!secondary)
+               bp_init_freq_invariance(true);
        secondary = true;
 
        mutex_unlock(&freq_invariance_lock);
 
 static inline void register_freq_invariance_syscore_ops(void) {}
 #endif
 
-void init_freq_invariance(bool secondary, bool cppc_ready)
+void bp_init_freq_invariance(bool cppc_ready)
 {
-       bool ret = false;
+       bool ret;
 
-       if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
+       if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
                return;
 
-       if (secondary) {
-               if (static_branch_likely(&arch_scale_freq_key)) {
-                       init_counter_refs();
-               }
-               return;
-       }
+       init_counter_refs();
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                ret = intel_set_max_freq_ratio();
        else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-               if (!cppc_ready) {
+               if (!cppc_ready)
                        return;
-               }
                ret = amd_set_max_freq_ratio(&arch_turbo_freq_ratio);
        }
 
        if (ret) {
-               init_counter_refs();
                static_branch_enable(&arch_scale_freq_key);
                register_freq_invariance_syscore_ops();
                pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
        }
 }
 
+void ap_init_freq_invariance(void)
+{
+       if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
+               init_counter_refs();
+}
+
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
        static_branch_disable(&arch_scale_freq_key);
 
         */
        set_cpu_sibling_map(raw_smp_processor_id());
 
-       init_freq_invariance(true, false);
+       ap_init_freq_invariance();
 
        /*
         * Get our bogomips.
 {
        smp_prepare_cpus_common();
 
-       init_freq_invariance(false, false);
+       bp_init_freq_invariance(false);
        smp_sanity_check();
 
        switch (apic_intr_mode) {