 #undef pr_fmt
 #define pr_fmt(fmt) "AMU: " fmt
 
-static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
+/*
+ * Ensure that amu_scale_freq_tick() will return SCHED_CAPACITY_SCALE until
+ * the CPU capacity and its associated frequency have been correctly
+ * initialized.
+ */
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) =
+       1UL << (2 * SCHED_CAPACITY_SHIFT);
 static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
 static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;
        return true;
 }
 
-static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
+void freq_inv_set_max_ratio(int cpu, u64 max_rate)
 {
-       u64 ratio;
+       u64 ratio, ref_rate = arch_timer_get_rate();
 
        if (unlikely(!max_rate || !ref_rate)) {
-               pr_debug("CPU%d: invalid maximum or reference frequency.\n",
+               WARN_ONCE(1, "CPU%d: invalid maximum or reference frequency.\n",
                         cpu);
-               return -EINVAL;
+               return;
        }
 
        /*
        ratio = div64_u64(ratio, max_rate);
        if (!ratio) {
                WARN_ONCE(1, "Reference frequency too low.\n");
-               return -EINVAL;
+               return;
        }
 
-       per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;
-
-       return 0;
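+       /*
+        * The tick on the target CPU may already be reading this per-CPU
+        * value (returning the default until it is initialized), so
+        * publish the new ratio with WRITE_ONCE().
+        */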
+       WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio);
 }
 
 static void amu_scale_freq_tick(void)
                return;
 
        for_each_cpu(cpu, cpus) {
-               if (!freq_counters_valid(cpu) ||
-                   freq_inv_set_max_ratio(cpu,
-                                          cpufreq_get_hw_max_freq(cpu) * 1000ULL,
-                                          arch_timer_get_rate()))
+               if (!freq_counters_valid(cpu))
                        return;
        }
 
 
        return !ret;
 }
 
+/* Weak default; arm64 overrides this to set the AMU max frequency ratio. */
+void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
+{
+}
+
 #ifdef CONFIG_ACPI_CPPC_LIB
 #include <acpi/cppc_acpi.h>
 
        }
 
        for_each_possible_cpu(cpu) {
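+               /*
+                * capacity_freq_ref holds the frequency in kHz while the
+                * max frequency ratio expects it in Hz.
+                */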
+               freq_inv_set_max_ratio(cpu,
+                                      per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+
                capacity = raw_capacity[cpu];
                capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
                                     capacity_scale);
 
        cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
 
-       for_each_cpu(cpu, policy->related_cpus)
+       for_each_cpu(cpu, policy->related_cpus) {
                per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
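+               /*
+                * The reference frequency is known now; update the max
+                * frequency ratio as well.
+                */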
+               freq_inv_set_max_ratio(cpu,
+                                      per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+       }
 
        if (cpumask_empty(cpus_to_visit)) {
                topology_normalize_cpu_scale();