static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
 static cpumask_t cpu_associativity_changes_mask;
 static int vphn_enabled;
-static void set_topology_timer(void);
+static int prrn_enabled;
+/* Renamed: the timer is now re-armed via mod_timer() rather than add_timer(). */
+static void reset_topology_timer(void);
 
 /*
  * Store the current values of the associativity change counters in the
  */
 static int update_cpu_associativity_changes_mask(void)
 {
-       int cpu, nr_cpus = 0;
+       int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;
 
-       cpumask_clear(changes);
-
+       /*
+        * NOTE(review): the mask is intentionally no longer cleared here.
+        * Bits are now cleared per-CPU where they are consumed (see the
+        * cpumask_clear_cpu() added below), so PRRN-staged bits set by
+        * stage_topology_update() are not lost by a VPHN poll — confirm.
+        */
        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                }
                if (changed) {
                        cpumask_set_cpu(cpu, changes);
-                       nr_cpus++;
                }
        }
 
-       return nr_cpus;
+       return cpumask_weight(changes);
 }
 
 /*
        unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
        struct device *dev;
 
-       for_each_cpu(cpu,&cpu_associativity_changes_mask) {
+       for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                vphn_get_associativity(cpu, associativity);
                nid = associativity_to_nid(associativity);
 
                dev = get_cpu_device(cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+               cpumask_clear_cpu(cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }
 
 
 static void topology_timer_fn(unsigned long ignored)
 {
-       if (!vphn_enabled)
-               return;
-       if (update_cpu_associativity_changes_mask() > 0)
+       /* PRRN mode: bits are staged by the reconfig notifier, no polling. */
+       if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
-       set_topology_timer();
+       else if (vphn_enabled) {
+               /* VPHN mode: poll the change counters, then re-arm timer. */
+               if (update_cpu_associativity_changes_mask() > 0)
+                       topology_schedule_update();
+               reset_topology_timer();
+       }
 }
 static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);
 
-static void set_topology_timer(void)
+/* (Re)arm the topology timer for 60s out; safe whether or not it is pending. */
+static void reset_topology_timer(void)
 {
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
-       add_timer(&topology_timer);
+       /* mod_timer() handles an already-pending timer; add_timer() does not. */
+       mod_timer(&topology_timer, topology_timer.expires);
+}
+
+/*
+ * Mark all sibling threads of @core_id as needing a NUMA update and
+ * (re)arm the topology timer to process them.
+ * NOTE(review): assumes @core_id is a valid logical CPU id usable with
+ * cpu_sibling_mask(); the caller derives it from the device-tree "reg"
+ * property, which may be a hardware thread id — confirm.
+ */
+static void stage_topology_update(int core_id)
+{
+       cpumask_or(&cpu_associativity_changes_mask,
+               &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
+       reset_topology_timer();
+}
+
+/*
+ * Device-tree reconfiguration notifier: when firmware adds or updates a
+ * property on a CPU node (a PRRN event), stage a topology update for the
+ * affected core.  Returns NOTIFY_OK when an update was staged, NOTIFY_DONE
+ * otherwise.
+ */
+static int dt_update_callback(struct notifier_block *nb,
+                               unsigned long action, void *data)
+{
+       struct of_prop_reconfig *update;
+       int rc = NOTIFY_DONE;
+
+       switch (action) {
+       case OF_RECONFIG_ADD_PROPERTY:
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               update = (struct of_prop_reconfig *)data;
+               if (!of_prop_cmp(update->dn->type, "cpu")) {
+                       u32 core_id;
+
+                       /*
+                        * Bail out if the "reg" property is missing;
+                        * core_id would otherwise be used uninitialized.
+                        */
+                       if (of_property_read_u32(update->dn, "reg", &core_id))
+                               break;
+                       stage_topology_update(core_id);
+                       rc = NOTIFY_OK;
+               }
+               break;
+       }
+
+       return rc;
}
 
+/* Registered in start_topology_update(), unregistered in stop_topology_update(). */
+static struct notifier_block dt_update_nb = {
+       .notifier_call = dt_update_callback,
+};
+
 /*
- * Start polling for VPHN associativity changes.
+ * Start polling for associativity changes.
  */
 int start_topology_update(void)
 {
        int rc = 0;
 
-       /* Disabled until races with load balancing are fixed */
-       if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
-           get_lppaca()->shared_proc) {
-               vphn_enabled = 1;
-               setup_cpu_associativity_change_counters();
-               init_timer_deferrable(&topology_timer);
-               set_topology_timer();
-               rc = 1;
+       if (firmware_has_feature(FW_FEATURE_PRRN)) {
+               if (!prrn_enabled) {
+                       prrn_enabled = 1;
+                       vphn_enabled = 0;
+                       rc = of_reconfig_notifier_register(&dt_update_nb);
+               }
+       } else if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
+                  get_lppaca()->shared_proc) {
+               /* Disabled until races with load balancing are fixed */
+               if (!vphn_enabled) {
+                       prrn_enabled = 0;
+                       vphn_enabled = 1;
+                       setup_cpu_associativity_change_counters();
+                       init_timer_deferrable(&topology_timer);
+                       reset_topology_timer();
+               }
        }
 
        return rc;
  */
 int stop_topology_update(void)
 {
-       vphn_enabled = 0;
-       return del_timer_sync(&topology_timer);
+       int rc = 0;
+
+       if (prrn_enabled) {
+               prrn_enabled = 0;
+               /* Stop receiving device-tree reconfiguration notifications. */
+               rc = of_reconfig_notifier_unregister(&dt_update_nb);
+       } else if (vphn_enabled) {
+               vphn_enabled = 0;
+               /* del_timer_sync() waits for a running timer fn to finish. */
+               rc = del_timer_sync(&topology_timer);
+       }
+
+       return rc;
}
 #endif /* CONFIG_PPC_SPLPAR */