*/
 #define EM_PERF_STATE_INEFFICIENT BIT(0)
 
+/**
+ * struct em_perf_table - Performance states table
+ * @rcu:       RCU used for safe access and destruction
+ * @state:     List of performance states, in ascending order
+ */
+struct em_perf_table {
+       struct rcu_head rcu;
+       struct em_perf_state state[];
+};
+
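+/*
+ * Illustrative sketch (an assumption about intended use, not an API
+ * defined here): readers access the runtime table under RCU, e.g.:
+ *
+ *	rcu_read_lock();
+ *	table = rcu_dereference(pd->em_table);
+ *	ps = &table->state[i];
+ *	...
+ *	rcu_read_unlock();
+ */
+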
 /**
  * struct em_perf_domain - Performance domain
  * @table:             List of performance states, in ascending order
+ * @em_table:          Pointer to the runtime-modifiable em_perf_table
  * @nr_perf_states:    Number of performance states
  * @flags:             See "em_perf_domain flags"
  * @cpus:              Cpumask covering the CPUs of the domain. It's here
  */
 struct em_perf_domain {
        struct em_perf_state *table;
+       struct em_perf_table __rcu *em_table;
        int nr_perf_states;
        unsigned long flags;
        unsigned long cpus[];
 
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
+static void em_cpufreq_update_efficiencies(struct device *dev,
+                                          struct em_perf_state *table);
+
 static bool _is_cpu_device(struct device *dev)
 {
        return (dev->bus == &cpu_subsys);
 static void em_debug_remove_pd(struct device *dev) {}
 #endif
 
+/* RCU callback: the grace period has elapsed, the table can be freed */
+static void em_destroy_table_rcu(struct rcu_head *rp)
+{
+       struct em_perf_table __rcu *table;
+
+       table = container_of(rp, struct em_perf_table, rcu);
+       kfree(table);
+}
+
+/* Queue the table for freeing once all current RCU readers are done */
+static void em_free_table(struct em_perf_table __rcu *table)
+{
+       call_rcu(&table->rcu, em_destroy_table_rcu);
+}
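+
+/*
+ * Illustrative update cycle (a sketch of intended use, assuming the
+ * writer holds em_pd_mutex): publish a fresh table and defer freeing
+ * of the old one, e.g.:
+ *
+ *	old_table = pd->em_table;
+ *	new_table = em_allocate_table(pd);
+ *	memcpy(new_table->state, old_table->state, table_size);
+ *	rcu_assign_pointer(pd->em_table, new_table);
+ *	em_free_table(old_table);
+ */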
+
+/* Allocate a table with room for all of the domain's performance states */
+static struct em_perf_table __rcu *
+em_allocate_table(struct em_perf_domain *pd)
+{
+       struct em_perf_table __rcu *table;
+       int table_size;
+
+       table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
+
+       table = kzalloc(sizeof(*table) + table_size, GFP_KERNEL);
+       return table;
+}
+
 static int em_compute_costs(struct device *dev, struct em_perf_state *table,
                            struct em_data_callback *cb, int nr_states,
                            unsigned long flags)
        return 0;
 }
 
+/* Create the runtime table as a copy of the static table and publish it */
+static int em_create_runtime_table(struct em_perf_domain *pd)
+{
+       struct em_perf_table __rcu *table;
+       int table_size;
+
+       table = em_allocate_table(pd);
+       if (!table)
+               return -ENOMEM;
+
+       /* Initialize runtime table with existing data */
+       table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
+       memcpy(table->state, pd->table, table_size);
+
+       rcu_assign_pointer(pd->em_table, table);
+
+       return 0;
+}
+
 static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
                                struct em_perf_state *table,
                                struct em_data_callback *cb,
        if (ret)
                goto free_pd_table;
 
+       ret = em_create_runtime_table(pd);
+       if (ret)
+               goto free_pd_table;
+
        if (_is_cpu_device(dev))
                for_each_cpu(cpu, cpus) {
                        cpu_dev = get_cpu_device(cpu);
        em_debug_remove_pd(dev);
 
        kfree(dev->em_pd->table);
+
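+       /* Freeing is deferred via call_rcu() until readers are done */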
+       em_free_table(dev->em_pd->em_table);
+
        kfree(dev->em_pd);
        dev->em_pd = NULL;
        mutex_unlock(&em_pd_mutex);