struct option *record_options = __record_options;
 
-static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
+static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
 {
        struct perf_cpu cpu;
        int idx;
 
        if (cpu_map__is_dummy(cpus))
-               return;
+               return 0;
 
-       perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+       perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+               /* Return ENODEV if input cpu is greater than max cpu */
+               if ((unsigned long)cpu.cpu >= mask->nbits)
+                       return -ENODEV;
                set_bit(cpu.cpu, mask->bits);
+       }
+
+       return 0;
 }
 
 static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
                return -ENOMEM;
 
        bitmap_zero(mask->bits, mask->nbits);
-       record__mmap_cpu_mask_init(mask, cpus);
+       if (record__mmap_cpu_mask_init(mask, cpus))
+               return -ENODEV;
+
        perf_cpu_map__put(cpus);
 
        return 0;
                pr_err("Failed to allocate CPUs mask\n");
                return ret;
        }
-       record__mmap_cpu_mask_init(&cpus_mask, cpus);
+
+       ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
+       if (ret) {
+               pr_err("Failed to init cpu mask\n");
+               goto out_free_cpu_mask;
+       }
 
        ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
        if (ret) {
        if (ret)
                return ret;
 
-       record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
+       if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
+               return -ENODEV;
 
        rec->nr_threads = 1;