PERF_RECORD_AUXTRACE                    = 71,
        PERF_RECORD_AUXTRACE_ERROR              = 72,
        PERF_RECORD_THREAD_MAP                  = 73,
+       PERF_RECORD_CPU_MAP                     = 74,
        PERF_RECORD_HEADER_MAX
 };
 
        u32 nr_proc_map_timeout;
 };
 
+enum {
+       PERF_CPU_MAP__CPUS = 0,
+       PERF_CPU_MAP__MASK = 1,
+};
+
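+/*
+ * A cpu map is carried in one of two layouts: an explicit list of u16 cpu
+ * numbers (PERF_CPU_MAP__CPUS), or a bitmap of 'nr' longs of 'long_size'
+ * bytes each, where set bit N means cpu N is present (PERF_CPU_MAP__MASK).
+ */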
+struct cpu_map_entries {
+       u16     nr;
+       u16     cpu[];
+};
+
+struct cpu_map_mask {
+       u16     nr;
+       u16     long_size;
+       unsigned long mask[];
+};
+
+struct cpu_map_data {
+       u16     type;
+       char    data[];
+};
+
+struct cpu_map_event {
+       struct perf_event_header        header;
+       struct cpu_map_data             data;
+};
+
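For orientation, a consumer of the new event would walk cpu_map_data roughly as follows. This is an illustrative sketch, not part of the patch: print_cpu_map() is a hypothetical helper that relies on the struct definitions above, and its bitmap branch assumes the file was written with the host's long size.

#include <stdio.h>

static void print_cpu_map(struct cpu_map_data *data)
{
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned int nbits, bit, i;

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		/* explicit list: nr entries, one u16 cpu number each */
		cpus = (struct cpu_map_entries *)data->data;
		for (i = 0; i < cpus->nr; i++)
			printf("cpu %d\n", cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		/* bitmap: set bit N means cpu N is present */
		mask = (struct cpu_map_mask *)data->data;
		if (mask->long_size != sizeof(unsigned long))
			break;	/* sketch only handles a matching long size */
		nbits = mask->nr * mask->long_size * 8;
		for (bit = 0; bit < nbits; bit++)
			if (mask->mask[bit / (8 * sizeof(unsigned long))] &
			    (1UL << (bit % (8 * sizeof(unsigned long)))))
				printf("cpu %u\n", bit);
		break;
	default:
		break;
	}
}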
 struct attr_event {
        struct perf_event_header header;
        struct perf_event_attr attr;
        struct itrace_start_event       itrace_start;
        struct context_switch_event     context_switch;
        struct thread_map_event         thread_map;
+       struct cpu_map_event            cpu_map;
 };
 
 void perf_event__print_totals(void);
 
        return 0;
 }
 
+static
+int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
+                              union perf_event *event __maybe_unused,
+                              struct perf_session *session __maybe_unused)
+{
+       dump_printf(": unhandled!\n");
+       return 0;
+}
+
 void perf_tool__fill_defaults(struct perf_tool *tool)
 {
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_auxtrace_error_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
+       if (tool->cpu_map == NULL)
+               tool->cpu_map = process_event_cpu_map_stub;
 }
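A tool that wants these events just fills the new callback before calling perf_tool__fill_defaults(); anything left NULL keeps its stub. A minimal sketch under that assumption, where process_cpu_map() and setup_tool() are hypothetical helpers (they lean on perf's usual headers for struct perf_tool, pr_debug and __maybe_unused), not something this patch adds:

static int process_cpu_map(struct perf_tool *tool __maybe_unused,
			   union perf_event *event,
			   struct perf_session *session __maybe_unused)
{
	/* event->cpu_map.data is the cpu_map_data payload added above */
	pr_debug("cpu map event, type %d\n", event->cpu_map.data.type);
	return 0;
}

static void setup_tool(struct perf_tool *tool)
{
	memset(tool, 0, sizeof(*tool));
	tool->cpu_map = process_cpu_map;	/* non-NULL, so the stub is not installed */
	perf_tool__fill_defaults(tool);
}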
 
        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
 }
 
+static void perf_event__cpu_map_swap(union perf_event *event,
+                                    bool sample_id_all __maybe_unused)
+{
+       struct cpu_map_data *data = &event->cpu_map.data;
+       struct cpu_map_entries *cpus;
+       struct cpu_map_mask *mask;
+       unsigned i;
+
+       data->type = bswap_16(data->type);
+
+       switch (data->type) {
+       case PERF_CPU_MAP__CPUS:
+               cpus = (struct cpu_map_entries *)data->data;
+
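+               /* nr must be swapped before it is used as the loop bound */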
+               cpus->nr = bswap_16(cpus->nr);
+
+               for (i = 0; i < cpus->nr; i++)
+                       cpus->cpu[i] = bswap_16(cpus->cpu[i]);
+               break;
+       case PERF_CPU_MAP__MASK:
+               mask = (struct cpu_map_mask *)data->data;
+
+               mask->nr = bswap_16(mask->nr);
+               mask->long_size = bswap_16(mask->long_size);
+
+               /* mem_bswap_* takes a size in bytes; nr counts longs */
+               switch (mask->long_size) {
+               case 4:
+                       mem_bswap_32(&mask->mask, mask->nr * sizeof(u32));
+                       break;
+               case 8:
+                       mem_bswap_64(&mask->mask, mask->nr * sizeof(u64));
+                       break;
+               default:
+                       pr_err("cpu_map swap: unsupported long size\n");
+               }
+               break;
+       default:
+               break;
+       }
+}
+
 typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);
 
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
+       [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
 };
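The table is consulted only when the perf.data file was recorded on a machine with the opposite byte order. A sketch of that dispatch shape, assuming a needs_swap flag from the reading session; maybe_swap_event() is hypothetical, the real dispatch lives elsewhere in session.c:

static void maybe_swap_event(union perf_event *event, bool needs_swap,
			     bool sample_id_all)
{
	perf_event__swap_op swap;

	if (!needs_swap || event->header.type >= PERF_RECORD_HEADER_MAX)
		return;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}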
 
                return tool->auxtrace_error(tool, event, session);
        case PERF_RECORD_THREAD_MAP:
                return tool->thread_map(tool, event, session);
+       case PERF_RECORD_CPU_MAP:
+               return tool->cpu_map(tool, event, session);
        default:
                return -EINVAL;
        }