*/
 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 {
-       if (cpumask_empty(policy->related_cpus))
-               return show_cpus(policy->cpus, buf);
        return show_cpus(policy->related_cpus, buf);
 }
 
        .release        = cpufreq_sysfs_release,
 };
 
-/*
- * Returns:
- *   Negative: Failure
- *   0:        Success
- *   Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
-                                 struct cpufreq_policy *policy,
-                                 struct device *dev)
-{
-       int ret = 0;
-#ifdef CONFIG_SMP
-       unsigned long flags;
-       unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
-       struct cpufreq_governor *gov;
-
-       gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-       if (gov) {
-               policy->governor = gov;
-               pr_debug("Restoring governor %s for cpu %d\n",
-                      policy->governor->name, cpu);
-       }
-#endif
-
-       for_each_cpu(j, policy->cpus) {
-               struct cpufreq_policy *managed_policy;
-
-               if (cpu == j)
-                       continue;
-
-               /* Check for existing affected CPUs.
-                * They may not be aware of it due to CPU Hotplug.
-                * cpufreq_cpu_put is called when the device is removed
-                * in __cpufreq_remove_dev()
-                */
-               managed_policy = cpufreq_cpu_get(j);
-               if (unlikely(managed_policy)) {
-
-                       /* Set proper policy_cpu */
-                       unlock_policy_rwsem_write(cpu);
-                       per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
-                       if (lock_policy_rwsem_write(cpu) < 0) {
-                               /* Should not go through policy unlock path */
-                               if (cpufreq_driver->exit)
-                                       cpufreq_driver->exit(policy);
-                               cpufreq_cpu_put(managed_policy);
-                               return -EBUSY;
-                       }
-
-                       __cpufreq_governor(managed_policy, CPUFREQ_GOV_STOP);
-
-                       spin_lock_irqsave(&cpufreq_driver_lock, flags);
-                       cpumask_copy(managed_policy->cpus, policy->cpus);
-                       per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
-                       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-                       __cpufreq_governor(managed_policy, CPUFREQ_GOV_START);
-                       __cpufreq_governor(managed_policy, CPUFREQ_GOV_LIMITS);
-
-                       pr_debug("CPU already managed, adding link\n");
-                       ret = sysfs_create_link(&dev->kobj,
-                                               &managed_policy->kobj,
-                                               "cpufreq");
-                       if (ret)
-                               cpufreq_cpu_put(managed_policy);
-                       /*
-                        * Success. We only needed to be added to the mask.
-                        * Call driver->exit() because only the cpu parent of
-                        * the kobj needed to call init().
-                        */
-                       if (cpufreq_driver->exit)
-                               cpufreq_driver->exit(policy);
-
-                       if (!ret)
-                               return 1;
-                       else
-                               return ret;
-               }
-       }
-#endif
-       return ret;
-}
-
-
 /* symlink affected CPUs */
 static int cpufreq_add_dev_symlink(unsigned int cpu,
                                   struct cpufreq_policy *policy)
        return ret;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
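+/* Link a newly added CPU into an existing policy already managed by a sibling */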
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+                                 struct device *dev)
+{
+       struct cpufreq_policy *policy;
+       int ret = 0;
+       unsigned long flags;
+
+       policy = cpufreq_cpu_get(sibling);
+       WARN_ON(!policy);
+
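+       /* Record which CPU owns the shared policy before taking its lock */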
+       per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+
+       lock_policy_rwsem_write(cpu);
+
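+       /* Stop the governor while the new CPU is added to the policy */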
+       __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
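+       /* Publish the CPU in policy->cpus and in the per-CPU policy pointer */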
+       spin_lock_irqsave(&cpufreq_driver_lock, flags);
+       cpumask_set_cpu(cpu, policy->cpus);
+       per_cpu(cpufreq_cpu_data, cpu) = policy;
+       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+       __cpufreq_governor(policy, CPUFREQ_GOV_START);
+       __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+       unlock_policy_rwsem_write(cpu);
+
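+       /* Create the "cpufreq" symlink for this CPU; drop the policy reference on failure */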
+       ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+       if (ret) {
+               cpufreq_cpu_put(policy);
+               return ret;
+       }
+
+       return 0;
+}
+#endif
 
 /**
  * cpufreq_add_dev - add a CPU device
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-       unsigned int cpu = dev->id;
-       int ret = 0, found = 0;
+       unsigned int j, cpu = dev->id;
+       int ret = -ENOMEM, found = 0;
        struct cpufreq_policy *policy;
        unsigned long flags;
-       unsigned int j;
 #ifdef CONFIG_HOTPLUG_CPU
+       struct cpufreq_governor *gov;
        int sibling;
 #endif
 
                cpufreq_cpu_put(policy);
                return 0;
        }
+
+#ifdef CONFIG_HOTPLUG_CPU
+       /* Check if this cpu was hot-unplugged earlier and has siblings */
+       for_each_online_cpu(sibling) {
+               struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+               if (cp && cpumask_test_cpu(cpu, cp->related_cpus))
+                       return cpufreq_add_policy_cpu(cpu, sibling, dev);
+       }
+#endif
 #endif
 
        if (!try_module_get(cpufreq_driver->owner)) {
                goto module_out;
        }
 
-       ret = -ENOMEM;
        policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
        if (!policy)
                goto nomem_out;
                goto err_unlock_policy;
        }
 
+       /* related cpus should at least have policy->cpus */
+       cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
        /*
         * affected cpus must always be the ones that are online. We aren't
         * managing offline cpus here.
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
 
-       ret = cpufreq_add_dev_policy(cpu, policy, dev);
-       if (ret) {
-               if (ret > 0)
-                       /* This is a managed cpu, symlink created,
-                          exit with 0 */
-                       ret = 0;
-               goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
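+       /* Restore the governor this CPU was using before it was hot-unplugged, if any */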
+       gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+       if (gov) {
+               policy->governor = gov;
+               pr_debug("Restoring governor %s for cpu %d\n",
+                      policy->governor->name, cpu);
        }
+#endif
 
        ret = cpufreq_add_dev_interface(cpu, policy, dev);
        if (ret)
 
        return 0;
 
-
 err_out_unregister:
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)