struct machine *machine);
 };
 
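+/*
+ * State for 'perf sched map' compact mode (--compact): track which CPUs
+ * have scheduled tasks so that only those get an output column.
+ */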
+struct perf_sched_map {
+       DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
+       int                     *comp_cpus;
+       bool                     comp;
+};
+
 struct perf_sched {
        struct perf_tool tool;
        const char       *sort_order;
        struct list_head sort_list, cmp_pid;
        bool force;
        bool skip_merge;
+       struct perf_sched_map map;
 };
 
 static u64 get_nsecs(void)
        int new_shortname;
        u64 timestamp0, timestamp = sample->time;
        s64 delta;
-       int cpu, this_cpu = sample->cpu;
+       int i, this_cpu = sample->cpu;
+       int cpus_nr;
+       bool new_cpu = false;
 
        BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
 
        if (this_cpu > sched->max_cpu)
                sched->max_cpu = this_cpu;
 
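+       /*
+        * In compact mode, columns are assigned to CPUs in the order they
+        * first appear; remember newly seen CPUs so they can be announced
+        * in the output below.
+        */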
+       if (sched->map.comp) {
+               cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
+               if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
+                       sched->map.comp_cpus[cpus_nr++] = this_cpu;
+                       new_cpu = true;
+               }
+       } else {
+               cpus_nr = sched->max_cpu;
+       }
+
        timestamp0 = sched->cpu_last_switched[this_cpu];
        sched->cpu_last_switched[this_cpu] = timestamp;
        if (timestamp0)
                new_shortname = 1;
        }
 
-       for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
+       for (i = 0; i < cpus_nr; i++) {
+               int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
+
                if (cpu != this_cpu)
                        printf(" ");
                else
 
        printf("  %12.6f secs ", (double)timestamp/1e9);
        if (new_shortname) {
-               printf("%s => %s:%d\n",
+               printf("%s => %s:%d",
                       sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
-       } else {
-               printf("\n");
        }
 
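+       /* In compact mode, say which CPU the newly added column stands for. */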
+       if (sched->map.comp && new_cpu)
+               printf(" (CPU %d)", this_cpu);
+
+       printf("\n");
+
        thread__put(sched_in);
 
        return 0;
        return 0;
 }
 
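+/*
+ * Compact mode needs an array to map output columns back to CPU numbers,
+ * sized to the number of configured CPUs.
+ */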
+static int setup_map_cpus(struct perf_sched *sched)
+{
+       sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+
+       if (sched->map.comp) {
+               sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
+               return sched->map.comp_cpus ? 0 : -1;
+       }
+
+       return 0;
+}
+
 static int perf_sched__map(struct perf_sched *sched)
 {
-       sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+       if (setup_map_cpus(sched))
+               return -1;
 
        setup_pager();
        if (perf_sched__read_events(sched))
                    "dump raw trace in ASCII"),
        OPT_END()
        };
+       const struct option map_options[] = {
+       OPT_BOOLEAN(0, "compact", &sched.map.comp,
+                   "map output in compact mode"),
+       OPT_END()
+       };
        const char * const latency_usage[] = {
                "perf sched latency [<options>]",
                NULL
                "perf sched replay [<options>]",
                NULL
        };
+       const char * const map_usage[] = {
+               "perf sched map [<options>]",
+               NULL
+       };
        const char *const sched_subcommands[] = { "record", "latency", "map",
                                                  "replay", "script", NULL };
        const char *sched_usage[] = {
                setup_sorting(&sched, latency_options, latency_usage);
                return perf_sched__lat(&sched);
        } else if (!strcmp(argv[0], "map")) {
+               if (argc) {
+                       argc = parse_options(argc, argv, map_options, map_usage, 0);
+                       if (argc)
+                               usage_with_options(map_usage, map_options);
+               }
                sched.tp_handler = &map_ops;
                setup_sorting(&sched, latency_options, latency_usage);
                return perf_sched__map(&sched);