 module_param(uncore_no_discover, bool, 0);
 MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism "
                                     "(default: enable the discovery mechanism).");
-static struct intel_uncore_type *empty_uncore[] = { NULL, };
+struct intel_uncore_type *empty_uncore[] = { NULL, };
 struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
 struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
 struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;
        .attrs = uncore_pmu_attrs,
 };
 
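+/*
+ * Pick the PMU name: use the block name when the type provides one,
+ * otherwise synthesize it from the discovery-table type and box ids.
+ */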
+static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
+{
+       struct intel_uncore_type *type = pmu->type;
+
+       /*
+        * No uncore block name in the discovery table.
+        * Use "uncore_type_<typeid>_<boxid>" as the name.
+        */
+       if (!type->name) {
+               if (type->num_boxes == 1)
+                       sprintf(pmu->name, "uncore_type_%u", type->type_id);
+               else {
+                       sprintf(pmu->name, "uncore_type_%u_%d",
+                               type->type_id, type->box_ids[pmu->pmu_idx]);
+               }
+               return;
+       }
+
+       if (type->num_boxes == 1) {
+               if (strlen(type->name) > 0)
+                       sprintf(pmu->name, "uncore_%s", type->name);
+               else
+                       sprintf(pmu->name, "uncore");
+       } else
+               sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx);
+}
+
 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
 {
        int ret;
                pmu->pmu.attr_update = pmu->type->attr_update;
        }
 
-       if (pmu->type->num_boxes == 1) {
-               if (strlen(pmu->type->name) > 0)
-                       sprintf(pmu->name, "uncore_%s", pmu->type->name);
-               else
-                       sprintf(pmu->name, "uncore");
-       } else {
-               sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
-                       pmu->pmu_idx);
-       }
+       uncore_get_pmu_name(pmu);
 
        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (!ret)
                kfree(type->pmus);
                type->pmus = NULL;
        }
+       if (type->box_ids) {
+               kfree(type->box_ids);
+               type->box_ids = NULL;
+       }
        kfree(type->events_group);
        type->events_group = NULL;
 }
 };
 
 static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
+       .cpu_init = intel_uncore_generic_uncore_cpu_init,
 };
 
 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
 
                kfree(type);
        }
 }
+
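+/* Format attributes matching the generic PMON event control layout. */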
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");
+
+static struct attribute *generic_uncore_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh.attr,
+       NULL,
+};
+
+static const struct attribute_group generic_uncore_format_group = {
+       .name = "format",
+       .attrs = generic_uncore_formats_attr,
+};
+
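+/*
+ * Generic MSR ops: init_box resets the box-level control and counters,
+ * disable_box sets the freeze bit, and enable_box clears it so the
+ * counters run again.
+ */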
+static void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+       wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
+}
+
+static void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
+}
+
+static void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(uncore_msr_box_ctl(box), 0);
+}
+
+static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                           struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       wrmsrl(hwc->config_base, hwc->config);
+}
+
+static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                            struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       wrmsrl(hwc->config_base, 0);
+}
+
+static struct intel_uncore_ops generic_uncore_msr_ops = {
+       .init_box               = intel_generic_uncore_msr_init_box,
+       .disable_box            = intel_generic_uncore_msr_disable_box,
+       .enable_box             = intel_generic_uncore_msr_enable_box,
+       .disable_event          = intel_generic_uncore_msr_disable_event,
+       .enable_event           = intel_generic_uncore_msr_enable_event,
+       .read_counter           = uncore_msr_read_counter,
+};
+
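+/*
+ * Fill a generic uncore_type from a discovery-table entry.  For MSR access
+ * the counter and control addresses are the box control MSR plus the
+ * discovered offsets.  Returns false for access types that are not
+ * supported yet.
+ */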
+static bool uncore_update_uncore_type(enum uncore_access_type type_id,
+                                     struct intel_uncore_type *uncore,
+                                     struct intel_uncore_discovery_type *type)
+{
+       uncore->type_id = type->type;
+       uncore->num_boxes = type->num_boxes;
+       uncore->num_counters = type->num_counters;
+       uncore->perf_ctr_bits = type->counter_width;
+       uncore->box_ids = type->ids;
+
+       switch (type_id) {
+       case UNCORE_ACCESS_MSR:
+               uncore->ops = &generic_uncore_msr_ops;
+               uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
+               uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
+               uncore->box_ctl = (unsigned int)type->box_ctrl;
+               uncore->msr_offsets = type->box_offset;
+               break;
+       default:
+               return false;
+       }
+
+       return true;
+}
+
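+/*
+ * Build a NULL-terminated array of generic uncore types for the given
+ * access type by walking the discovery table.  Falls back to empty_uncore
+ * if the array cannot be allocated.
+ */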
+static struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
+{
+       struct intel_uncore_discovery_type *type;
+       struct intel_uncore_type **uncores;
+       struct intel_uncore_type *uncore;
+       struct rb_node *node;
+       int i = 0;
+
+       uncores = kcalloc(num_discovered_types[type_id] + 1,
+                         sizeof(struct intel_uncore_type *), GFP_KERNEL);
+       if (!uncores)
+               return empty_uncore;
+
+       for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
+               type = rb_entry(node, struct intel_uncore_discovery_type, node);
+               if (type->access_type != type_id)
+                       continue;
+
+               uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
+               if (!uncore)
+                       break;
+
+               uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
+               uncore->format_group = &generic_uncore_format_group;
+
+               if (!uncore_update_uncore_type(type_id, uncore, type)) {
+                       kfree(uncore);
+                       continue;
+               }
+               uncores[i++] = uncore;
+       }
+
+       return uncores;
+}
+
+void intel_uncore_generic_uncore_cpu_init(void)
+{
+       uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR);
+}
 
         unit.table1 == -1ULL || unit.ctl == -1ULL ||   \
         unit.table3 == -1ULL)
 
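+/*
+ * Generic event control layout: event select in bits 0-7, umask in bits
+ * 8-15, edge detect in bit 18, invert in bit 23, threshold in bits 24-31.
+ */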
+#define GENERIC_PMON_CTL_EV_SEL_MASK   0x000000ff
+#define GENERIC_PMON_CTL_UMASK_MASK    0x0000ff00
+#define GENERIC_PMON_CTL_EDGE_DET      (1 << 18)
+#define GENERIC_PMON_CTL_INVERT                (1 << 23)
+#define GENERIC_PMON_CTL_TRESH_MASK    0xff000000
+#define GENERIC_PMON_RAW_EVENT_MASK    (GENERIC_PMON_CTL_EV_SEL_MASK | \
+                                        GENERIC_PMON_CTL_UMASK_MASK | \
+                                        GENERIC_PMON_CTL_EDGE_DET | \
+                                        GENERIC_PMON_CTL_INVERT | \
+                                        GENERIC_PMON_CTL_TRESH_MASK)
+
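+/* Box-level control: freeze bit plus the reset bits written at init time. */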
+#define GENERIC_PMON_BOX_CTL_FRZ       (1 << 0)
+#define GENERIC_PMON_BOX_CTL_RST_CTRL  (1 << 8)
+#define GENERIC_PMON_BOX_CTL_RST_CTRS  (1 << 9)
+#define GENERIC_PMON_BOX_CTL_INT       (GENERIC_PMON_BOX_CTL_RST_CTRL | \
+                                        GENERIC_PMON_BOX_CTL_RST_CTRS)
+
 enum uncore_access_type {
        UNCORE_ACCESS_MSR       = 0,
        UNCORE_ACCESS_MMIO,
 
 bool intel_uncore_has_discovery_tables(void);
 void intel_uncore_clear_discovery_tables(void);
+void intel_uncore_generic_uncore_cpu_init(void);