*/
 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
 {
-       kfree(cpuc->shared_regs);
+       intel_cpuc_finish(cpuc);
        kfree(cpuc);
 }
 
        cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
        if (!cpuc)
                return ERR_PTR(-ENOMEM);
-
-       /* only needed, if we have extra_regs */
-       if (x86_pmu.extra_regs) {
-               cpuc->shared_regs = allocate_shared_regs(cpu);
-               if (!cpuc->shared_regs)
-                       goto error;
-       }
        cpuc->is_fake = 1;
+
+       if (intel_cpuc_prepare(cpuc, cpu))
+               goto error;
+
        return cpuc;
 error:
        free_fake_cpuc(cpuc);
 
        return x86_event_sysfs_show(page, config, event);
 }
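
For reference, the two hunks above route the fake cpuc used by event
validation through the same prepare/finish pair as real CPUs, so there
is exactly one allocation path and one error-unwind path. A minimal
userspace sketch of that shape, assuming only the C standard library;
fake_cpuc, prepare() and finish() are illustrative stand-ins for the
kernel's cpu_hw_events, intel_cpuc_prepare() and intel_cpuc_finish():

#include <stdio.h>
#include <stdlib.h>

struct fake_cpuc {
        int is_fake;
        void *shared_regs;      /* stands in for cpuc->shared_regs */
        void *constraint_list;  /* stands in for cpuc->constraint_list */
};

/* Mirrors the intel_cpuc_prepare() shape: allocate all or unwind fully. */
static int prepare(struct fake_cpuc *c)
{
        c->shared_regs = calloc(1, 64);
        if (!c->shared_regs)
                return -1;

        c->constraint_list = calloc(1, 64);
        if (!c->constraint_list) {
                free(c->shared_regs);
                c->shared_regs = NULL;
                return -1;
        }
        return 0;
}

/* Mirrors the intel_cpuc_finish() shape: safe on partially prepared state. */
static void finish(struct fake_cpuc *c)
{
        free(c->shared_regs);
        c->shared_regs = NULL;
        free(c->constraint_list);
        c->constraint_list = NULL;
}

int main(void)
{
        struct fake_cpuc *c = calloc(1, sizeof(*c));

        if (!c)
                return 1;
        c->is_fake = 1;
        if (prepare(c)) {       /* same shape as the goto error path above */
                finish(c);      /* free_fake_cpuc() equivalent */
                free(c);
                return 1;
        }
        finish(c);
        free(c);
        puts("prepare/finish pairing ok");
        return 0;
}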
 
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
        struct intel_shared_regs *regs;
        int i;
        return c;
 }
 
-static int intel_pmu_cpu_prepare(int cpu)
-{
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
                        goto err;
        }

        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
-               cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+               cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!cpuc->constraint_list)
                        goto err_shared_regs;
 
        return -ENOMEM;
 }
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+       return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
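
With the allocation logic hoisted out of the hotplug callback,
intel_pmu_cpu_prepare() is reduced to a one-line wrapper that resolves
the per-CPU state, and the constraint list switches to kzalloc_node()
so it is allocated on the CPU's local NUMA node. A sketch of the
wrapper shape, assuming a fixed NR_CPUS and a plain array in place of
the kernel's per_cpu machinery (NUMA placement is not modeled here;
all names are illustrative):

#include <stdio.h>

#define NR_CPUS 4

struct cpu_state { int prepared; };

/* Stand-in for per_cpu(cpu_hw_events, cpu). */
static struct cpu_state cpu_states[NR_CPUS];

/*
 * The core helper takes the state explicitly, so fake/test callers
 * can hand in their own instance instead of a real per-CPU one.
 */
static int state_prepare(struct cpu_state *st, int cpu)
{
        st->prepared = 1;
        printf("prepared cpu %d\n", cpu);
        return 0;
}

/* The hotplug callback stays a thin wrapper over the explicit-state helper. */
static int cpu_prepare(int cpu)
{
        return state_prepare(&cpu_states[cpu], cpu);
}

int main(void)
{
        for (int c = 0; c < NR_CPUS; c++)
                if (cpu_prepare(c))
                        return 1;
        return 0;
}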
 static void flip_smm_bit(void *data)
 {
        unsigned long set = *(unsigned long *)data;
        }
 }
 
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 {
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct intel_excl_cntrs *c;
 
        c = cpuc->excl_cntrs;
                disable_counter_freeze();
 }
 
-static void intel_pmu_cpu_dead(int cpu)
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct intel_shared_regs *pc;
 
        pc = cpuc->shared_regs;
                cpuc->shared_regs = NULL;
        }
 
-       free_excl_cntrs(cpu);
+       free_excl_cntrs(cpuc);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
+       intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
 }
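
intel_cpuc_finish() relies on kfree(NULL) being a no-op and on every
pointer being cleared after it is freed, which is what makes it safe
to call on partially prepared state (and from both the fake-cpuc and
real hotplug teardown paths). A userspace sketch of that invariant,
using the C library's equally NULL-safe free(); the struct and field
names are illustrative:

#include <assert.h>
#include <stdlib.h>

struct state {
        void *shared_regs;
        void *constraint_list;
};

static void state_finish(struct state *st)
{
        free(st->shared_regs);          /* no-op when already NULL */
        st->shared_regs = NULL;
        free(st->constraint_list);
        st->constraint_list = NULL;
}

int main(void)
{
        struct state st = { malloc(32), NULL };  /* partially prepared */

        state_finish(&st);
        state_finish(&st);      /* a second finish is harmless */
        assert(!st.shared_regs && !st.constraint_list);
        return 0;
}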
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
        hardlockup_detector_perf_restart();
 
        for_each_online_cpu(c)
-               free_excl_cntrs(c);
+               free_excl_cntrs(&per_cpu(cpu_hw_events, c));
 
        cpus_read_unlock();
        pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
 
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event);
 
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
 
 int intel_pmu_init(void);
 
        return 0;
 }
 
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
+       return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
-       return NULL;
 }
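
The !CONFIG_CPU_SUP_INTEL stubs above return success and do nothing,
so the generic code can call prepare/finish unconditionally instead of
sprinkling #ifdefs around each call site. A self-contained sketch of
that config-stub pattern, with HAVE_FEATURE as an illustrative
stand-in for the Kconfig symbol:

#include <stdio.h>

struct state { int ready; };

#ifdef HAVE_FEATURE
int feature_prepare(struct state *st) { st->ready = 1; return 0; }
void feature_finish(struct state *st) { st->ready = 0; }
#else
/* No-op stubs: prepare "succeeds", finish does nothing. */
static inline int feature_prepare(struct state *st) { (void)st; return 0; }
static inline void feature_finish(struct state *st) { (void)st; }
#endif

int main(void)
{
        struct state s = { 0 };

        if (feature_prepare(&s))        /* unconditional call site */
                return 1;
        feature_finish(&s);
        puts("stub pattern ok");
        return 0;
}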
 
 static inline int is_ht_workaround_enabled(void)