* are valid by intersecting with those of the PMU.
                 */
                perf_cpu_map__put(evsel->cpus);
-               evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
+               evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->pmu_cpus);
 
                /*
                 * Empty cpu lists would eventually get opened as "any", so remove
                                list_for_each_entry_from(next, &evlist->entries, node)
                                        next->idx--;
                }
-       } else if (!evsel->own_cpus || evlist->has_user_cpus ||
+       } else if (!evsel->pmu_cpus || evlist->has_user_cpus ||
                (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
                /*
                 * The PMU didn't specify a default cpu map, this isn't a core
                 */
                perf_cpu_map__put(evsel->cpus);
                evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
-       } else if (evsel->cpus != evsel->own_cpus) {
+       } else if (evsel->cpus != evsel->pmu_cpus) {
                /*
                 * No user requested cpu map but the PMU cpu map doesn't match
                 * the evsel's. Reset it back to the PMU cpu map.
                 */
                perf_cpu_map__put(evsel->cpus);
-               evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
+               evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus);
        }
 
        if (evsel->system_wide) {
 
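The precedence above can be checked against the public libperf API. A minimal
standalone sketch of the first branch, assuming only <perf/cpumap.h> (the map
contents here are made up for illustration):

#include <perf/cpumap.h>
#include <assert.h>

int main(void)
{
	/* Hypothetical PMU cpumask and user request (e.g. -C 0-3). */
	struct perf_cpu_map *pmu_cpus = perf_cpu_map__new("0,2,4,6");
	struct perf_cpu_map *user = perf_cpu_map__new("0-3");
	/* User-requested CPUs are narrowed to those the PMU supports. */
	struct perf_cpu_map *cpus = perf_cpu_map__intersect(user, pmu_cpus);

	assert(perf_cpu_map__nr(cpus) == 2); /* only CPUs 0 and 2 remain */

	perf_cpu_map__put(cpus);
	perf_cpu_map__put(user);
	perf_cpu_map__put(pmu_cpus);
	return 0;
}
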
        assert(evsel->mmap == NULL); /* If not, munmap wasn't called. */
        assert(evsel->sample_id == NULL); /* If not, free_id wasn't called. */
        perf_cpu_map__put(evsel->cpus);
-       perf_cpu_map__put(evsel->own_cpus);
+       perf_cpu_map__put(evsel->pmu_cpus);
        perf_thread_map__put(evsel->threads);
        free(evsel);
 }
 
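The put calls above pair with the new/get that created each reference: libperf
cpu maps are reference counted. A short sketch of the ownership rule, again
assuming only <perf/cpumap.h>:

	struct perf_cpu_map *map = perf_cpu_map__new("0-1"); /* refcount 1 */
	struct perf_cpu_map *ref = perf_cpu_map__get(map);   /* refcount 2 */

	perf_cpu_map__put(ref); /* refcount 1 */
	perf_cpu_map__put(map); /* refcount 0: the map is freed */
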
         * cpu map for opening the event on, for example, the first CPU on a
         * socket for an uncore event.
         */
-       struct perf_cpu_map     *own_cpus;
+       struct perf_cpu_map     *pmu_cpus;
        struct perf_thread_map  *threads;
        struct xyarray          *fd;
        struct xyarray          *mmap;
 
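For an uncore PMU the default cpu map comes from the PMU's sysfs cpumask,
e.g. /sys/bus/event_source/devices/uncore_imc/cpumask might read "0,18" on a
two-socket machine. A hedged sketch of turning such a file into a map (the
helper and PMU name are hypothetical, not code this patch touches):

#include <perf/cpumap.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical: parse a PMU's sysfs cpumask into a perf_cpu_map. */
static struct perf_cpu_map *read_pmu_cpus(const char *pmu)
{
	char path[128], buf[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/cpumask", pmu);
	f = fopen(path, "r");
	if (!f)
		return NULL;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return NULL;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return perf_cpu_map__new(buf); /* e.g. "0,18": one CPU per socket */
}
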
        TEST_ASSERT_VAL("failed to synthesize attr update name",
                        !perf_event__synthesize_event_update_name(&tmp.tool, evsel, process_event_name));
 
-       perf_cpu_map__put(evsel->core.own_cpus);
-       evsel->core.own_cpus = perf_cpu_map__new("1,2,3");
+       perf_cpu_map__put(evsel->core.pmu_cpus);
+       evsel->core.pmu_cpus = perf_cpu_map__new("1,2,3");
 
        TEST_ASSERT_VAL("failed to synthesize attr update cpus",
                        !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
 
                return NULL;
 
        evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
-       evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
+       evsel->core.pmu_cpus = perf_cpu_map__get(orig->core.pmu_cpus);
        evsel->core.threads = perf_thread_map__get(orig->core.threads);
        evsel->core.nr_members = orig->core.nr_members;
        evsel->core.system_wide = orig->core.system_wide;
                attr->exclude_user   = 1;
        }
 
-       if (evsel->core.own_cpus || evsel->unit)
+       if (evsel->core.pmu_cpus || evsel->unit)
                evsel->core.attr.read_format |= PERF_FORMAT_ID;
 
        /*
        evsel__free_config_terms(evsel);
        cgroup__put(evsel->cgrp);
        perf_cpu_map__put(evsel->core.cpus);
-       perf_cpu_map__put(evsel->core.own_cpus);
+       perf_cpu_map__put(evsel->core.pmu_cpus);
        perf_thread_map__put(evsel->core.threads);
        zfree(&evsel->group_name);
        zfree(&evsel->name);
 
        case PERF_EVENT_UPDATE__CPUS:
                map = cpu_map__new_data(&ev->cpus.cpus);
                if (map) {
-                       perf_cpu_map__put(evsel->core.own_cpus);
-                       evsel->core.own_cpus = map;
+                       perf_cpu_map__put(evsel->core.pmu_cpus);
+                       evsel->core.pmu_cpus = map;
                } else
                        pr_err("failed to get event_update cpus\n");
        default:
 
 
        (*idx)++;
        evsel->core.cpus = cpus;
-       evsel->core.own_cpus = perf_cpu_map__get(cpus);
+       evsel->core.pmu_cpus = perf_cpu_map__get(cpus);
        evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
        evsel->core.is_pmu_core = is_pmu_core;
        evsel->pmu = pmu;
 
 int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel,
                                             perf_event__handler_t process)
 {
-       struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
+       struct synthesize_cpu_map_data syn_data = { .map = evsel->core.pmu_cpus };
        struct perf_record_event_update *ev;
        int err;
 
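Together with the header.c hunk above, this gives the full round trip:
perf_event__synthesize_event_update_cpus packs evsel->core.pmu_cpus into a
PERF_EVENT_UPDATE__CPUS record, and perf_event__process_event_update rebuilds
the map on the read side; the test hunk earlier drives exactly this with a
"1,2,3" map. A sketch of a matching handler, with the name and check ours
rather than the tree's:

static int process_event_cpus(const struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct perf_record_event_update *ev = &event->event_update;

	/* Expect the serialized form of the pmu_cpus map set by the test. */
	return ev->type == PERF_EVENT_UPDATE__CPUS ? 0 : -1;
}
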
                        }
                }
 
-               if (evsel->core.own_cpus) {
+               if (evsel->core.pmu_cpus) {
                        err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel cpus.\n");
 
                        /*
                         * "Any CPU" event that can be scheduled on any CPU in
                         * the PMU's cpumask. The PMU cpumask should be saved in
-                        * own_cpus. If not present fall back to max.
+                        * pmu_cpus. If not present, fall back to max.
                         */
-                       if (!perf_cpu_map__is_empty(evsel->core.own_cpus))
-                               *result = perf_cpu_map__nr(evsel->core.own_cpus);
+                       if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus))
+                               *result = perf_cpu_map__nr(evsel->core.pmu_cpus);
                        else
                                *result = cpu__max_present_cpu().cpu;
                }
                        /*
                         * "Any CPU" event that can be scheduled on any CPU in
                         * the PMU's cpumask. The PMU cpumask should be saved in
-                        * own_cpus, if not present then just the online cpu
+                        * pmu_cpus; if not present, fall back to the online cpu
                         * mask.
                         */
-                       if (!perf_cpu_map__is_empty(evsel->core.own_cpus)) {
+                       if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus)) {
                                struct perf_cpu_map *tmp =
-                                       perf_cpu_map__intersect(online, evsel->core.own_cpus);
+                                       perf_cpu_map__intersect(online, evsel->core.pmu_cpus);
 
                                *result = perf_cpu_map__nr(tmp);
                                perf_cpu_map__put(tmp);
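Both "Any CPU" branches reduce to the same question: how many CPUs can the
event be scheduled on. When pmu_cpus is present it bounds the answer;
otherwise the code falls back to the max present CPU or the plain online
mask. A minimal sketch of the intersect-and-count step, assuming a libperf
recent enough to have perf_cpu_map__new_online_cpus() (the PMU mask is made
up):

#include <perf/cpumap.h>

int nr_pmu_online_cpus(void)
{
	struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();
	/* Hypothetical PMU cpumask; an empty map would take the fallback. */
	struct perf_cpu_map *pmu_cpus = perf_cpu_map__new("0,18");
	struct perf_cpu_map *tmp = perf_cpu_map__intersect(online, pmu_cpus);
	int nr = perf_cpu_map__nr(tmp); /* online CPUs the PMU can use */

	perf_cpu_map__put(tmp);
	perf_cpu_map__put(pmu_cpus);
	perf_cpu_map__put(online);
	return nr;
}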