cpufreq: intel_pstate: Clear hybrid_max_perf_cpu before driver registration
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 4 Nov 2024 18:51:28 +0000 (19:51 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 4 Nov 2024 21:51:10 +0000 (22:51 +0100)
Modify intel_pstate_register_driver() to clear hybrid_max_perf_cpu
before calling cpufreq_register_driver(), so that asymmetric CPU
capacity scaling is not updated until hybrid_init_cpu_capacity_scaling()
runs down the road.  This is done in preparation for a subsequent
change adding asymmetric CPU capacity computation to the CPU init path
to handle CPUs that are initially offline.

The information on whether or not hybrid_max_perf_cpu was NULL before
being cleared is passed to hybrid_init_cpu_capacity_scaling(), so that
the full initialization of CPU capacity scaling can be skipped if it
has already been carried out.

No intentional functional impact.
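
As an illustration, here is a condensed sketch of the ordering that
results in intel_pstate_register_driver() with this patch applied.  It
is assembled from the hunks below and is not meant to build on its own;
the initial mode checks and the error path are elided:

	static int intel_pstate_register_driver(struct cpufreq_driver *driver)
	{
		bool refresh_cpu_cap_scaling;
		int ret;

		/* ... driver setup and arch_set_max_freq_ratio() as before ... */

		/*
		 * Remember whether capacity scaling has been set up already and
		 * clear hybrid_max_perf_cpu so that it is not updated while the
		 * driver registers.
		 */
		refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();

		intel_pstate_driver = driver;
		ret = cpufreq_register_driver(intel_pstate_driver);
		if (ret) {
			/* error handling as in the original function (elided) */
			return ret;
		}

		global.min_perf_pct = min_perf_pct_min();

		/*
		 * Full initialization only if capacity scaling had not been set
		 * up before; otherwise just refresh it.
		 */
		hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);

		return 0;
	}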

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://patch.msgid.link/4616631.LvFx2qVVIh@rjwysocki.net
drivers/cpufreq/intel_pstate.c

index b0018f371ea3a5e996cc9e94dd98e4bc7ae5af02..4e816f0857f24fa6d4a8732739102655c0caaf8c 100644 (file)
@@ -1034,7 +1034,7 @@ static void __hybrid_init_cpu_capacity_scaling(void)
        hybrid_update_cpu_capacity_scaling();
 }
 
-static void hybrid_init_cpu_capacity_scaling(void)
+static void hybrid_init_cpu_capacity_scaling(bool refresh)
 {
        bool disable_itmt = false;
 
@@ -1045,7 +1045,7 @@ static void hybrid_init_cpu_capacity_scaling(void)
         * scaling has been enabled already and the driver is just changing the
         * operation mode.
         */
-       if (hybrid_max_perf_cpu) {
+       if (refresh) {
                __hybrid_init_cpu_capacity_scaling();
                goto unlock;
        }
@@ -1071,6 +1071,18 @@ unlock:
                sched_clear_itmt_support();
 }
 
+static bool hybrid_clear_max_perf_cpu(void)
+{
+       bool ret;
+
+       guard(mutex)(&hybrid_capacity_lock);
+
+       ret = !!hybrid_max_perf_cpu;
+       hybrid_max_perf_cpu = NULL;
+
+       return ret;
+}
+
 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
 {
        u64 cap;
@@ -3352,6 +3364,7 @@ static void intel_pstate_driver_cleanup(void)
 
 static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 {
+       bool refresh_cpu_cap_scaling;
        int ret;
 
        if (driver == &intel_pstate)
@@ -3364,6 +3377,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 
        arch_set_max_freq_ratio(global.turbo_disabled);
 
+       refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
+
        intel_pstate_driver = driver;
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
@@ -3373,7 +3388,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 
        global.min_perf_pct = min_perf_pct_min();
 
-       hybrid_init_cpu_capacity_scaling();
+       hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);
 
        return 0;
 }