return cpufreq_driver->target_index || cpufreq_driver->target;
 }
 
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- *   do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- *   the policy altogether (eg. CPU hotplug), will hold this lock in write
- *   mode before doing so.
- *
- * Additional rules:
- * - Governor routines that can be called in cpufreq hotplug path should not
- *   take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu)                                   \
-static void lock_policy_rwsem_##mode(int cpu)                          \
-{                                                                      \
-       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
-       BUG_ON(!policy);                                                \
-       down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));           \
-}
-
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
-
-#define unlock_policy_rwsem(mode, cpu)                                 \
-static void unlock_policy_rwsem_##mode(int cpu)                                \
-{                                                                      \
-       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
-       BUG_ON(!policy);                                                \
-       up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));             \
-}
-
-unlock_policy_rwsem(read, cpu);
-unlock_policy_rwsem(write, cpu);
-
 /*
  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
  * sections
        if (!down_read_trylock(&cpufreq_rwsem))
                return -EINVAL;
 
-       lock_policy_rwsem_read(policy->cpu);
+       down_read(&policy->rwsem);
 
        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;
 
-       unlock_policy_rwsem_read(policy->cpu);
+       up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);
 
        return ret;
        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;
 
-       lock_policy_rwsem_write(policy->cpu);
+       down_write(&policy->rwsem);
 
        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;
 
-       unlock_policy_rwsem_write(policy->cpu);
+       up_write(&policy->rwsem);
 
        up_read(&cpufreq_rwsem);
 unlock:
                }
        }
 
-       lock_policy_rwsem_write(policy->cpu);
+       down_write(&policy->rwsem);
 
        write_lock_irqsave(&cpufreq_driver_lock, flags);
 
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       unlock_policy_rwsem_write(policy->cpu);
+       up_write(&policy->rwsem);
 
        if (has_target()) {
                if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
                goto err_free_cpumask;
 
        INIT_LIST_HEAD(&policy->policy_list);
+       init_rwsem(&policy->rwsem);
+
        return policy;
 
 err_free_cpumask:
        if (WARN_ON(cpu == policy->cpu))
                return;
 
-       /*
-        * Take direct locks as lock_policy_rwsem_write wouldn't work here.
-        * Also lock for last cpu is enough here as contention will happen only
-        * after policy->cpu is changed and after it is changed, other threads
-        * will try to acquire lock for new cpu. And policy is already updated
-        * by then.
-        */
-       down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+       down_write(&policy->rwsem);
 
        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;
 
-       up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+       up_write(&policy->rwsem);
 
        cpufreq_frequency_table_update_policy_cpu(policy);
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
        if (ret) {
                pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-               lock_policy_rwsem_write(old_cpu);
+               down_write(&policy->rwsem);
                cpumask_set_cpu(old_cpu, policy->cpus);
-               unlock_policy_rwsem_write(old_cpu);
+               up_write(&policy->rwsem);
 
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                        policy->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-       lock_policy_rwsem_read(cpu);
+       down_read(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
-       unlock_policy_rwsem_read(cpu);
+       up_read(&policy->rwsem);
 
        if (cpu != policy->cpu) {
                if (!frozen)
                return -EINVAL;
        }
 
-       lock_policy_rwsem_write(cpu);
+       down_write(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
 
        if (cpus > 1)
                cpumask_clear_cpu(cpu, policy->cpus);
-       unlock_policy_rwsem_write(cpu);
+       up_write(&policy->rwsem);
 
        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                }
 
                if (!frozen) {
-                       lock_policy_rwsem_read(cpu);
+                       down_read(&policy->rwsem);
                        kobj = &policy->kobj;
                        cmp = &policy->kobj_unregister;
-                       unlock_policy_rwsem_read(cpu);
+                       up_read(&policy->rwsem);
                        kobject_put(kobj);
 
                        /*
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
+       struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;
 
        if (cpufreq_disabled() || !cpufreq_driver)
                return -ENOENT;
 
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
 
-       lock_policy_rwsem_read(cpu);
+       /*
+        * cpufreq_cpu_data is NULL if the CPU is offline or the driver is
+        * being unregistered; return 0 gracefully instead of crashing via
+        * BUG_ON().  Fetch it only after cpufreq_rwsem is held so the
+        * driver cannot go away underneath us.
+        */
+       policy = per_cpu(cpufreq_cpu_data, cpu);
+       if (!policy)
+               goto out;
+
+       down_read(&policy->rwsem);
 
        ret_freq = __cpufreq_get(cpu);
 
-       unlock_policy_rwsem_read(cpu);
+       up_read(&policy->rwsem);
+out:
        up_read(&cpufreq_rwsem);
 
        return ret_freq;
 {
        int ret = -EINVAL;
 
-       lock_policy_rwsem_write(policy->cpu);
+       down_write(&policy->rwsem);
 
        ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-       unlock_policy_rwsem_write(policy->cpu);
+       up_write(&policy->rwsem);
 
        return ret;
 }
                        /* end old governor */
                        if (policy->governor) {
                                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-                               unlock_policy_rwsem_write(new_policy->cpu);
+                               up_write(&policy->rwsem);
                                __cpufreq_governor(policy,
                                                CPUFREQ_GOV_POLICY_EXIT);
-                               lock_policy_rwsem_write(new_policy->cpu);
+                               down_write(&policy->rwsem);
                        }
 
                        /* start new governor */
                                if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
                                        failed = 0;
                                } else {
-                                       unlock_policy_rwsem_write(new_policy->cpu);
+                                       up_write(&policy->rwsem);
                                        __cpufreq_governor(policy,
                                                        CPUFREQ_GOV_POLICY_EXIT);
-                                       lock_policy_rwsem_write(new_policy->cpu);
+                                       down_write(&policy->rwsem);
                                }
                        }
 
                goto no_policy;
        }
 
-       lock_policy_rwsem_write(cpu);
+       down_write(&policy->rwsem);
 
        pr_debug("updating policy for CPU %u\n", cpu);
        memcpy(&new_policy, policy, sizeof(*policy));
 
        ret = cpufreq_set_policy(policy, &new_policy);
 
-       unlock_policy_rwsem_write(cpu);
+       up_write(&policy->rwsem);
 
        cpufreq_cpu_put(policy);
 no_policy:
 
 static int __init cpufreq_core_init(void)
 {
-       int cpu;
-
        if (cpufreq_disabled())
                return -ENODEV;
 
-       for_each_possible_cpu(cpu)
-               init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-
        cpufreq_global_kobject = kobject_create();
        BUG_ON(!cpufreq_global_kobject);
        register_syscore_ops(&cpufreq_syscore_ops);