EVENT_ATTR_STR(mem-loads,      mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
 EVENT_ATTR_STR(mem-stores,     mem_st_snb,     "event=0xcd,umask=0x2");
 
-static struct attribute *nhm_events_attrs[] = {
+/*
+ * NHM memory-access events, split out of the generic event list so the
+ * init code can merge them in only when PEBS is supported; see
+ * get_events_attrs().
+ */
+static struct attribute *nhm_mem_events_attrs[] = {
        EVENT_PTR(mem_ld_nhm),
        NULL,
 };
        "4", "2");
 
 static struct attribute *snb_events_attrs[] = {
-       EVENT_PTR(mem_ld_snb),
-       EVENT_PTR(mem_st_snb),
+       /* mem-loads/mem-stores moved to snb_mem_events_attrs (PEBS-only) */
        EVENT_PTR(td_slots_issued),
        EVENT_PTR(td_slots_retired),
        EVENT_PTR(td_fetch_bubbles),
        NULL,
 };
 
+/*
+ * SNB memory-access events; merged into x86_pmu.cpu_events only when
+ * PEBS is available (see get_events_attrs()).
+ */
+static struct attribute *snb_mem_events_attrs[] = {
+       EVENT_PTR(mem_ld_snb),
+       EVENT_PTR(mem_st_snb),
+       NULL,
+};
+
 static struct event_constraint intel_hsw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 EVENT_ATTR_STR(cycles-ct,      cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");
 
 static struct attribute *hsw_events_attrs[] = {
-       EVENT_PTR(mem_ld_hsw),
-       EVENT_PTR(mem_st_hsw),
+       /* mem-loads/mem-stores moved to hsw_mem_events_attrs (PEBS-only) */
        EVENT_PTR(td_slots_issued),
        EVENT_PTR(td_slots_retired),
        EVENT_PTR(td_fetch_bubbles),
        NULL
 };
 
+/*
+ * HSW memory-access events; merged into x86_pmu.cpu_events only when
+ * PEBS is available (see get_events_attrs()).
+ */
+static struct attribute *hsw_mem_events_attrs[] = {
+       EVENT_PTR(mem_ld_hsw),
+       EVENT_PTR(mem_st_hsw),
+       NULL,
+};
+
+/* TSX events; merged in only on CPUs with RTM (see get_events_attrs()). */
 static struct attribute *hsw_tsx_events_attrs[] = {
        EVENT_PTR(tx_start),
        EVENT_PTR(tx_commit),
        NULL
 };
 
-static __init struct attribute **get_hsw_events_attrs(void)
-{
-       return boot_cpu_has(X86_FEATURE_RTM) ?
-               merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
-               hsw_events_attrs;
-}
-
 static ssize_t freeze_on_smi_show(struct device *cdev,
                                  struct device_attribute *attr,
                                  char *buf)
        NULL,
 };
 
+/*
+ * Build the final cpu_events attribute list for the current CPU: start
+ * from @base, merge in the optional @mem list when PEBS is supported,
+ * and merge in the optional @tsx list when the CPU has RTM.
+ *
+ * The intermediate array produced by the first merge is kfree()d once
+ * the second merge has copied it (merge_attr() presumably allocates its
+ * result, since it is kfree()d here — verify against its definition).
+ * @base itself is never freed, so static lists stay intact.
+ */
+static __init struct attribute **
+get_events_attrs(struct attribute **base,
+                struct attribute **mem,
+                struct attribute **tsx)
+{
+       struct attribute **attrs = base;
+       struct attribute **old;
+
+       if (mem && x86_pmu.pebs)
+               attrs = merge_attr(attrs, mem);
+
+       if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
+               old = attrs;
+               attrs = merge_attr(attrs, tsx);
+               /* only free the first merge's copy, never the static @base */
+               if (old != base)
+                       kfree(old);
+       }
+
+       return attrs;
+}
+
 __init int intel_pmu_init(void)
 {
        struct attribute **extra_attr = NULL;
+       struct attribute **mem_attr = NULL;
+       struct attribute **tsx_attr = NULL;
        struct attribute **to_free = NULL;
        union cpuid10_edx edx;
        union cpuid10_eax eax;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;
 
-               x86_pmu.cpu_events = nhm_events_attrs;
+               mem_attr = nhm_mem_events_attrs;
 
                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                x86_pmu.extra_regs = intel_westmere_extra_regs;
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 
-               x86_pmu.cpu_events = nhm_events_attrs;
+               mem_attr = nhm_mem_events_attrs;
 
                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
                x86_pmu.cpu_events = snb_events_attrs;
+               mem_attr = snb_mem_events_attrs;
 
                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
                x86_pmu.cpu_events = snb_events_attrs;
+               mem_attr = snb_mem_events_attrs;
 
                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = get_hsw_events_attrs();
+               x86_pmu.cpu_events = hsw_events_attrs;
                x86_pmu.lbr_double_abort = true;
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
+               mem_attr = hsw_mem_events_attrs;
+               tsx_attr = hsw_tsx_events_attrs;
                pr_cont("Haswell events, ");
                name = "haswell";
                break;
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = get_hsw_events_attrs();
+               x86_pmu.cpu_events = hsw_events_attrs;
                x86_pmu.limit_period = bdw_limit_period;
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
+               mem_attr = hsw_mem_events_attrs;
+               tsx_attr = hsw_tsx_events_attrs;
                pr_cont("Broadwell events, ");
                name = "broadwell";
                break;
                        hsw_format_attr : nhm_format_attr;
                extra_attr = merge_attr(extra_attr, skl_format_attr);
                to_free = extra_attr;
-               x86_pmu.cpu_events = get_hsw_events_attrs();
+               x86_pmu.cpu_events = hsw_events_attrs;
+               mem_attr = hsw_mem_events_attrs;
+               tsx_attr = hsw_tsx_events_attrs;
                intel_pmu_pebs_data_source_skl(
                        boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
                pr_cont("Skylake events, ");
                WARN_ON(!x86_pmu.format_attrs);
        }
 
+       x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
+                                             mem_attr, tsx_attr);
+
        if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
                     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);