}
 
 /*
- * Add default attributes, if there were no attributes specified or
+ * Add default events, if there were no events specified or
  * if -d/--detailed, -d -d or -d -d -d is used:
  */
-static int add_default_attributes(void)
+static int add_default_events(void)
 {
-       struct perf_event_attr default_attrs0[] = {
-
-  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK             },
-  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES       },
-  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS         },
-  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS            },
-
-  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES             },
-};
-       struct perf_event_attr frontend_attrs[] = {
-  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND        },
-};
-       struct perf_event_attr backend_attrs[] = {
-  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
-};
-       struct perf_event_attr default_attrs1[] = {
-  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS           },
-  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS    },
-  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES          },
-
-};
-
-/*
- * Detailed stats (-d), covering the L1 and last level data caches:
- */
-       struct perf_event_attr detailed_attrs[] = {
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_L1D                <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_L1D                <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_LL                 <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_LL                 <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
-};
-
-/*
- * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
- */
-       struct perf_event_attr very_detailed_attrs[] = {
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_L1I                <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_L1I                <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_DTLB               <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_DTLB               <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_ITLB               <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_ITLB               <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
-
-};
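+       /*
+        * Parse the default events into a temporary evlist; the entries are
+        * spliced onto the global evsel_list at the "out" label below.
+        */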
+       const char *pmu = parse_events_option_args.pmu_filter ?: "all";
+       struct parse_events_error err;
+       struct evlist *evlist = evlist__new();
+       struct evsel *evsel;
+       int ret = 0;
 
-/*
- * Very, very detailed stats (-d -d -d), adding prefetch events:
- */
-       struct perf_event_attr very_very_detailed_attrs[] = {
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_L1D                <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
-
-  { .type = PERF_TYPE_HW_CACHE,
-    .config =
-        PERF_COUNT_HW_CACHE_L1D                <<  0  |
-       (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
-       (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
-};
+       if (!evlist)
+               return -ENOMEM;
 
-       struct perf_event_attr default_null_attrs[] = {};
-       const char *pmu = parse_events_option_args.pmu_filter ?: "all";
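+       /* Shared diagnostic state for the parse_events() calls below. */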
+       parse_events_error__init(&err);
 
        /* Set attrs if no event is selected and !null_run: */
        if (stat_config.null_run)
-               return 0;
+               goto out;
 
        if (transaction_run) {
                /* Handle -T as -M transaction. Once platform specific metrics
                 * support has been added to the json files, all architectures
                 * will use this approach. To determine transaction support
                 * on an architecture, test for such a metric name.
                 */
                if (!metricgroup__has_metric(pmu, "transaction")) {
                        pr_err("Missing transaction metrics\n");
-                       return -1;
+                       ret = -1;
+                       goto out;
                }
-               return metricgroup__parse_groups(evsel_list, pmu, "transaction",
+               ret = metricgroup__parse_groups(evlist, pmu, "transaction",
                                                stat_config.metric_no_group,
                                                stat_config.metric_no_merge,
                                                stat_config.metric_no_threshold,
                                                stat_config.user_requested_cpu_list,
                                                stat_config.system_wide,
                                                stat_config.hardware_aware_grouping,
                                                &stat_config.metric_events);
+               goto out;
        }
 
        if (smi_cost) {
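+               /*
+                * Measuring SMI cost relies on the freeze_on_smi control:
+                * enable it if needed and note (smi_reset) that it must be
+                * restored afterwards.
+                */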
 
                if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
                        pr_err("freeze_on_smi is not supported.\n");
-                       return -1;
+                       ret = -1;
+                       goto out;
                }
 
                if (!smi) {
                        if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
-                               fprintf(stderr, "Failed to set freeze_on_smi.\n");
-                               return -1;
+                               pr_err("Failed to set freeze_on_smi.\n");
+                               ret = -1;
+                               goto out;
                        }
                        smi_reset = true;
                }
 
                if (!metricgroup__has_metric(pmu, "smi")) {
                        pr_err("Missing smi metrics\n");
-                       return -1;
+                       ret = -1;
+                       goto out;
                }
 
                if (!force_metric_only)
                        stat_config.metric_only = true;
 
-               return metricgroup__parse_groups(evsel_list, pmu, "smi",
+               ret = metricgroup__parse_groups(evlist, pmu, "smi",
                                                stat_config.metric_no_group,
                                                stat_config.metric_no_merge,
                                                stat_config.metric_no_threshold,
                                                stat_config.user_requested_cpu_list,
                                                stat_config.system_wide,
                                                stat_config.hardware_aware_grouping,
                                                &stat_config.metric_events);
+               goto out;
        }
 
        if (topdown_run) {
                if (!max_level) {
                        pr_err("Topdown requested but the topdown metric groups aren't present.\n"
                                "(See perf list the metric groups have names like TopdownL1)\n");
-                       return -1;
+                       ret = -1;
+                       goto out;
                }
                if (stat_config.topdown_level > max_level) {
                        pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
-                       return -1;
-               } else if (!stat_config.topdown_level)
+                       ret = -1;
+                       goto out;
+               } else if (!stat_config.topdown_level) {
                        stat_config.topdown_level = 1;
-
+               }
                if (!stat_config.interval && !stat_config.metric_only) {
                        fprintf(stat_config.output,
                                "Topdown accuracy may decrease when measuring long periods.\n"
                                "Please print the result regularly, e.g. -I1000\n");
                }
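+               /* Patch the requested level digit into the topdown metric group name. */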
                str[8] = stat_config.topdown_level + '0';
-               if (metricgroup__parse_groups(evsel_list,
+               if (metricgroup__parse_groups(evlist,
                                                pmu, str,
                                                /*metric_no_group=*/false,
                                                /*metric_no_merge=*/false,
                                                /*metric_no_threshold=*/true,
                                                stat_config.user_requested_cpu_list,
                                                stat_config.system_wide,
                                                stat_config.hardware_aware_grouping,
-                                               &stat_config.metric_events) < 0)
-                       return -1;
+                                               &stat_config.metric_events) < 0) {
+                       ret = -1;
+                       goto out;
+               }
        }
 
        if (!stat_config.topdown_level)
                stat_config.topdown_level = 1;
 
-       if (!evsel_list->core.nr_entries) {
+       if (!evlist->core.nr_entries && !evsel_list->core.nr_entries) {
                /* No events so add defaults. */
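+               /*
+                * A CPU target (-a/-C) counts per CPU, so use cpu-clock as the
+                * time base; otherwise count the workload's task-clock.
+                */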
                if (target__has_cpu(&target))
-                       default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+                       ret = parse_events(evlist, "cpu-clock", &err);
+               else
+                       ret = parse_events(evlist, "task-clock", &err);
+               if (ret)
+                       goto out;
+
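+               /*
+                * The remaining defaults correspond to the events from the old
+                * default_attrs0/frontend_attrs/backend_attrs/default_attrs1
+                * arrays; events a PMU lacks are tolerated via the skippable
+                * flag set at "out".
+                */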
+               ret = parse_events(evlist,
+                               "context-switches,"
+                               "cpu-migrations,"
+                               "page-faults,"
+                               "instructions,"
+                               "cycles,"
+                               "stalled-cycles-frontend,"
+                               "stalled-cycles-backend,"
+                               "branches,"
+                               "branch-misses",
+                               &err);
+               if (ret)
+                       goto out;
 
-               if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
-                       return -1;
-               if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) {
-                       if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
-                               return -1;
-               }
-               if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) {
-                       if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
-                               return -1;
-               }
-               if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
-                       return -1;
                /*
                 * Add TopdownL1 metrics if they exist. To minimize
                 * multiplexing, don't request threshold computation.
                 */
                if (metricgroup__has_metric(pmu, "Default")) {
                        struct evlist *metric_evlist = evlist__new();
-                       struct evsel *metric_evsel;
-
-                       if (!metric_evlist)
-                               return -1;
 
+                       if (!metric_evlist) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
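+                       /*
+                        * Parse the Default metric group into its own evlist so
+                        * the evsels can be tagged default_metricgroup before
+                        * being spliced onto the main list.
+                        */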
                        if (metricgroup__parse_groups(metric_evlist, pmu, "Default",
                                                        /*metric_no_group=*/false,
                                                        /*metric_no_merge=*/false,
                                                        /*metric_no_threshold=*/true,
                                                        stat_config.user_requested_cpu_list,
                                                        stat_config.system_wide,
                                                        stat_config.hardware_aware_grouping,
-                                                       &stat_config.metric_events) < 0)
-                               return -1;
-
-                       evlist__for_each_entry(metric_evlist, metric_evsel) {
-                               metric_evsel->skippable = true;
-                               metric_evsel->default_metricgroup = true;
+                                                       &stat_config.metric_events) < 0) {
+                               ret = -1;
+                               goto out;
                        }
-                       evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
+
+                       evlist__for_each_entry(metric_evlist, evsel)
+                               evsel->default_metricgroup = true;
+
+                       evlist__splice_list_tail(evlist, &metric_evlist->core.entries);
                        evlist__delete(metric_evlist);
                }
-
-               /* Platform specific attrs */
-               if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
-                       return -1;
        }
 
        /* Detailed events get appended to the event list: */
 
-       if (detailed_run <  1)
-               return 0;
-
-       /* Append detailed run extra attributes: */
-       if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
-               return -1;
-
-       if (detailed_run < 2)
-               return 0;
-
-       /* Append very detailed run extra attributes: */
-       if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
-               return -1;
-
-       if (detailed_run < 3)
-               return 0;
-
-       /* Append very, very detailed run extra attributes: */
-       return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
+       if (!ret && detailed_run >=  1) {
+               /*
+                * Detailed stats (-d), covering the L1 and last level data
+                * caches:
+                */
+               ret = parse_events(evlist,
+                               "L1-dcache-loads,"
+                               "L1-dcache-load-misses,"
+                               "LLC-loads,"
+                               "LLC-load-misses",
+                               &err);
+       }
+       if (!ret && detailed_run >=  2) {
+               /*
+                * Very detailed stats (-d -d), covering the instruction cache
+                * and the TLB caches:
+                */
+               ret = parse_events(evlist,
+                               "L1-icache-loads,"
+                               "L1-icache-load-misses,"
+                               "dTLB-loads,"
+                               "dTLB-load-misses,"
+                               "iTLB-loads,"
+                               "iTLB-load-misses",
+                               &err);
+       }
+       if (!ret && detailed_run >=  3) {
+               /*
+                * Very, very detailed stats (-d -d -d), adding prefetch events:
+                */
+               ret = parse_events(evlist,
+                               "L1-dcache-prefetches,"
+                               "L1-dcache-prefetch-misses",
+                               &err);
+       }
+out:
+       if (!ret) {
+               evlist__for_each_entry(evlist, evsel) {
+                       /*
+                        * Make at least one event non-skippable so fatal errors are visible.
+                        * 'cycles' always used to be default and non-skippable, so use that.
+                        */
+                       if (strcmp("cycles", evsel__name(evsel)))
+                               evsel->skippable = true;
+               }
+       }
+       parse_events_error__exit(&err);
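+       /* Hand the parsed events over to the global evsel_list used by perf stat. */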
+       evlist__splice_list_tail(evsel_list, &evlist->core.entries);
+       evlist__delete(evlist);
+       return ret;
 }
 
 static const char * const stat_record_usage[] = {
                }
        }
 
-       if (add_default_attributes())
+       if (add_default_events())
                goto out;
 
        if (stat_config.cgroup_list) {