#include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/sched/topology.h>
+#include <linux/cpuset.h>
 
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
 
        return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
 }
 
+static void update_topology_flags_workfn(struct work_struct *work);
+static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
+
 static ssize_t cpu_capacity_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                topology_set_cpu_scale(i, new_capacity);
        mutex_unlock(&cpu_scale_mutex);
 
+       schedule_work(&update_topology_flags_work);
+
        return count;
 }
 
 }
 subsys_initcall(register_cpu_capacity_sysctl);
 
+/*
+ * Non-zero while update_topology_flags_workfn() is rebuilding the
+ * sched_domain hierarchy (set to 1 before rebuild_sched_domains() and
+ * cleared to 0 afterwards).  NOTE(review): plain int with no explicit
+ * barriers -- presumably ordering with readers is provided by the
+ * rebuild path itself; confirm against the scheduler-side caller.
+ */
+static int update_topology;
+
+/*
+ * Report whether a topology-flags update is currently in progress, so
+ * the sched_domain rebuild can pick up the new flags.
+ */
+int topology_update_cpu_topology(void)
+{
+       return update_topology;
+}
+
+/*
+ * Updating the sched_domains can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void update_topology_flags_workfn(struct work_struct *work)
+{
+       /* Signal in-progress update, rebuild, then clear the flag. */
+       update_topology = 1;
+       rebuild_sched_domains();
+       pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
+       update_topology = 0;
+}
+
+/*
+ * NOTE(review): presumably populated while parsing the DT
+ * capacity-dmips-mhz properties and released via free_raw_capacity()
+ * once normalization is done (see the parsing-done path below) --
+ * confirm against the full file.
+ */
 static u32 capacity_scale;
 static u32 *raw_capacity;
 
 
        if (cpumask_empty(cpus_to_visit)) {
                topology_normalize_cpu_scale();
+               schedule_work(&update_topology_flags_work);
                free_raw_capacity();
                pr_debug("cpu_capacity: parsing done\n");
                schedule_work(&parsing_done_work);