#include <linux/tick.h>
 #include <trace/events/power.h>
 
-/* Macros to iterate over lists */
-/* Iterate over online CPUs policies */
 static LIST_HEAD(cpufreq_policy_list);
-#define for_each_policy(__policy)                              \
+
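+/* A policy is inactive when its cpus mask is empty (no online CPUs left) */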
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+       return cpumask_empty(policy->cpus);
+}
+
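+/* Returns true when the policy's active state matches the requested one */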
+static bool suitable_policy(struct cpufreq_policy *policy, bool active)
+{
+       return active == !policy_is_inactive(policy);
+}
+
+/* Finds the next active/inactive policy in the list */
+static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
+                                         bool active)
+{
+       do {
+               policy = list_next_entry(policy, policy_list);
+
+               /* No more policies in the list */
+               if (&policy->policy_list == &cpufreq_policy_list)
+                       return NULL;
+       } while (!suitable_policy(policy, active));
+
+       return policy;
+}
+
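+/* Finds the first active/inactive policy, or returns NULL if none matches */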
+static struct cpufreq_policy *first_policy(bool active)
+{
+       struct cpufreq_policy *policy;
+
+       /* No policies in the list */
+       if (list_empty(&cpufreq_policy_list))
+               return NULL;
+
+       policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
+                                 policy_list);
+
+       if (!suitable_policy(policy, active))
+               policy = next_policy(policy, active);
+
+       return policy;
+}
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active)   \
+       for (__policy = first_policy(__active);         \
+            __policy;                                  \
+            __policy = next_policy(__policy, __active))
+
+#define for_each_active_policy(__policy)               \
+       for_each_suitable_policy(__policy, true)
+#define for_each_inactive_policy(__policy)             \
+       for_each_suitable_policy(__policy, false)
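+
+/*
+ * Example usage (illustrative only):
+ *
+ *	struct cpufreq_policy *policy;
+ *
+ *	for_each_active_policy(policy)
+ *		pr_debug("active policy for CPU%u\n", policy->cpu);
+ */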
+
+#define for_each_policy(__policy)                      \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
 
 /* Iterate over governors */
 
        /* Check if this CPU already has a policy to manage it */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_policy(policy) {
+       for_each_active_policy(policy) {
                if (cpumask_test_cpu(cpu, policy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        ret = cpufreq_add_policy_cpu(policy, cpu, dev);
 
        pr_debug("%s: Suspending Governors\n", __func__);
 
-       for_each_policy(policy) {
+       for_each_active_policy(policy) {
                if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);
 
        pr_debug("%s: Resuming Governors\n", __func__);
 
-       for_each_policy(policy) {
+       for_each_active_policy(policy) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
        struct cpufreq_policy *policy;
        int ret = -EINVAL;
 
-       for_each_policy(policy) {
+       for_each_active_policy(policy) {
                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (freq_table) {
                        ret = cpufreq_frequency_table_cpuinfo(policy,