}
 
 /**
- * bitmap_alloc - Allocate bitmap
+ * bitmap_zalloc - Allocate zero-initialized bitmap
  * @nbits: Number of bits
  */
-static inline unsigned long *bitmap_alloc(int nbits)
+static inline unsigned long *bitmap_zalloc(int nbits)
 {
        return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
 }
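
Aside, not part of the patch: since the helper is backed by calloc(), every bit
of the returned bitmap starts cleared and the buffer pairs with plain free().
A minimal, illustrative sketch of the behaviour the new name advertises (the
function name and bit indices are invented; assumes <errno.h> plus the usual
tools/include set_bit()/test_bit() helpers):

static int bitmap_zalloc_sketch(unsigned int nbits)
{
	unsigned long *bm = bitmap_zalloc(nbits);	/* all bits start at 0 */
	int ret = 0;

	if (!bm)
		return -ENOMEM;

	set_bit(3, bm);					/* assumes nbits > 4 */
	if (!test_bit(3, bm) || test_bit(4, bm))	/* only bit 3 may be set */
		ret = -EINVAL;

	free(bm);					/* calloc()-backed, so free() releases it */
	return ret;
}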
 
 
 static int do_for_each_set_bit(unsigned int num_bits)
 {
-       unsigned long *to_test = bitmap_alloc(num_bits);
+       unsigned long *to_test = bitmap_zalloc(num_bits);
        struct timeval start, end, diff;
        u64 runtime_us;
        struct stats fb_time_stats, tb_time_stats;
 
        if (!c2c_he)
                return NULL;
 
-       c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt);
+       c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
        if (!c2c_he->cpuset)
                return NULL;
 
-       c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt);
+       c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
        if (!c2c_he->nodeset)
                return NULL;
 
                struct perf_cpu_map *map = n[node].map;
                unsigned long *set;
 
-               set = bitmap_alloc(c2c.cpus_cnt);
+               set = bitmap_zalloc(c2c.cpus_cnt);
                if (!set)
                        return -ENOMEM;
 
 
 
        if (rec->opts.affinity != PERF_AFFINITY_SYS) {
                rec->affinity_mask.nbits = cpu__max_cpu();
-               rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
+               rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits);
                if (!rec->affinity_mask.bits) {
                        pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
                        err = -ENOMEM;
 
        unsigned long *bm = NULL;
        int i;
 
-       bm = bitmap_alloc(nbits);
+       bm = bitmap_zalloc(nbits);
 
        if (map && bm) {
                for (i = 0; i < map->nr; i++)
 
        unsigned long *bm = NULL;
        int i;
 
-       bm = bitmap_alloc(nbits);
+       bm = bitmap_zalloc(nbits);
 
        if (map && bm) {
                for (i = 0; i < map->nr; i++) {
 
 {
        int cpu_set_size = get_cpu_set_size();
 
-       a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
+       a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
        if (!a->orig_cpus)
                return -1;
        sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
-       a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
+       a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
        if (!a->sched_cpus) {
                zfree(&a->orig_cpus);
                return -1;
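
Aside, not part of the patch: the "* 8" above converts the cpu_set_t size in
bytes into a bit count, and because cpu_set_t is laid out as an array of
unsigned long the zeroed bitmap can be handed straight to sched_getaffinity()
and then queried with test_bit(). A rough sketch of that pattern, reusing the
file's get_cpu_set_size() helper (the function name is invented; assumes
<sched.h> with _GNU_SOURCE):

static int cpu_in_current_affinity(int cpu)
{
	int cpu_set_size = get_cpu_set_size();
	unsigned long *bits = bitmap_zalloc(cpu_set_size * 8);
	int ret = -1;

	if (!bits)
		return -1;

	if (!sched_getaffinity(0, cpu_set_size, (cpu_set_t *)bits))
		ret = test_bit(cpu, bits) ? 1 : 0;	/* 1: cpu is in the current mask */

	free(bits);
	return ret;
}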
 
        if (ret)
                return ret;
 
-       set = bitmap_alloc(size);
+       set = bitmap_zalloc(size);
        if (!set)
                return -ENOMEM;
 
 
        size++;
 
-       n->set = bitmap_alloc(size);
+       n->set = bitmap_zalloc(size);
        if (!n->set) {
                closedir(dir);
                return -ENOMEM;
 
        struct evsel *evsel, *tmp;
        unsigned long *evlist_used;
 
-       evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
+       evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
        if (!evlist_used)
                return -ENOMEM;
 
 
                data = map->aio.data[idx];
                mmap_len = mmap__mmap_len(map);
                node_index = cpu__get_node(cpu);
-               node_mask = bitmap_alloc(node_index + 1);
+               node_mask = bitmap_zalloc(node_index + 1);
                if (!node_mask) {
                        pr_err("Failed to allocate node mask for mbind: error %m\n");
                        return -1;
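
Aside, not part of the patch: the zeroed allocation matters here because the
mask is later handed to mbind(), which must see only the intended node bit
set. A hedged sketch of that pattern (the helper name is invented; assumes
<numaif.h> and the tools set_bit()):

static int bind_buf_to_node(void *buf, unsigned long len, int node)
{
	unsigned long *node_mask = bitmap_zalloc(node + 1);
	long err;

	if (!node_mask)
		return -ENOMEM;

	set_bit(node, node_mask);	/* every other node bit stays 0 */
	/* node + 2: the kernel effectively consumes maxnode - 1 bits of the mask */
	err = mbind(buf, len, MPOL_BIND, node_mask, node + 2, 0);
	free(node_mask);
	return err ? -1 : 0;
}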
 static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
 {
        map->affinity_mask.nbits = cpu__max_cpu();
-       map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
+       map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
        if (!map->affinity_mask.bits)
                return -1;
 
 
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
-       bmap = bitmap_alloc(host_num_pages);
+       bmap = bitmap_zalloc(host_num_pages);
 
        if (dirty_log_manual_caps) {
                cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
 
 
        pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
 
-       bmap = bitmap_alloc(host_num_pages);
-       host_bmap_track = bitmap_alloc(host_num_pages);
+       bmap = bitmap_zalloc(host_num_pages);
+       host_bmap_track = bitmap_zalloc(host_num_pages);
 
        /* Add an extra memory slot for testing dirty logging */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 
        nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
        nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
 
-       bmap = bitmap_alloc(TEST_MEM_PAGES);
+       bmap = bitmap_zalloc(TEST_MEM_PAGES);
        host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
 
        while (!done) {