{
        struct cpufreq_policy *policy = sg_policy->policy;
 
+       if (sg_policy->next_freq == next_freq)
+               return;
+
+       sg_policy->next_freq = next_freq;
+       sg_policy->last_freq_update_time = time;
+
        if (policy->fast_switch_enabled) {
-               if (sg_policy->next_freq == next_freq) {
-                       trace_cpu_frequency(policy->cur, smp_processor_id());
-                       return;
-               }
-               sg_policy->next_freq = next_freq;
-               sg_policy->last_freq_update_time = time;
                next_freq = cpufreq_driver_fast_switch(policy, next_freq);
                if (next_freq == CPUFREQ_ENTRY_INVALID)
                        return;
 
                policy->cur = next_freq;
                trace_cpu_frequency(next_freq, smp_processor_id());
-       } else if (sg_policy->next_freq != next_freq) {
-               sg_policy->next_freq = next_freq;
-               sg_policy->last_freq_update_time = time;
+       } else {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
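
For readers following the control flow rather than the diff mechanics, below is a minimal, self-contained userspace sketch of how the function behaves once this hunk is applied. It only models the shape of the logic: the toy_* types and helpers, TOY_ENTRY_INVALID, the frequency values and the printf() calls are illustrative stand-ins, not the kernel's structures or APIs (cpufreq_driver_fast_switch(), irq_work_queue() and trace_cpu_frequency() are stubbed out).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for CPUFREQ_ENTRY_INVALID; value chosen only for this sketch. */
#define TOY_ENTRY_INVALID	(~0u)

struct toy_policy {
	bool fast_switch_enabled;
	unsigned int cur;
};

struct toy_sg_policy {
	struct toy_policy *policy;
	unsigned int next_freq;
	uint64_t last_freq_update_time;
	bool work_in_progress;
};

/* Stub for cpufreq_driver_fast_switch(): reject anything above a made-up cap. */
static unsigned int toy_fast_switch(struct toy_policy *policy, unsigned int freq)
{
	(void)policy;
	return freq > 2000000u ? TOY_ENTRY_INVALID : freq;
}

/* Stub for irq_work_queue(): the real code defers the update to kthread context. */
static void toy_queue_work(struct toy_sg_policy *sg_policy)
{
	printf("deferred update to %u queued\n", sg_policy->next_freq);
}

static void toy_update_commit(struct toy_sg_policy *sg_policy, uint64_t time,
			      unsigned int next_freq)
{
	struct toy_policy *policy = sg_policy->policy;

	/* Common early bail-out and bookkeeping, now shared by both paths. */
	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = toy_fast_switch(policy, next_freq);
		if (next_freq == TOY_ENTRY_INVALID)
			return;

		policy->cur = next_freq;
		printf("fast switched to %u\n", next_freq);
	} else {
		sg_policy->work_in_progress = true;
		toy_queue_work(sg_policy);
	}
}

int main(void)
{
	struct toy_policy policy = { .fast_switch_enabled = true, .cur = 1000000u };
	struct toy_sg_policy sg = { .policy = &policy, .next_freq = 1000000u };

	toy_update_commit(&sg, 1, 1000000u);	/* unchanged: returns early */
	toy_update_commit(&sg, 2, 1400000u);	/* changed: fast-switch path */

	policy.fast_switch_enabled = false;
	toy_update_commit(&sg, 3, 1800000u);	/* changed: deferred-work path */

	return 0;
}

The hunk itself hoists the unchanged-frequency check and the next_freq/last_freq_update_time bookkeeping, previously duplicated in both branches, above the fast_switch_enabled test. The one visible behavioral difference is that the fast-switch path no longer calls trace_cpu_frequency() when the requested frequency is unchanged; it simply returns early, as the slow path effectively did before.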