bitmap_zero(to_test, num_bits);
                skip = num_bits / set_bits;
                for (i = 0; i < num_bits; i += skip)
-                       set_bit(i, to_test);
+                       __set_bit(i, to_test);
 
                for (i = 0; i < outer_iterations; i++) {
                        old = accumulator;
 
                      "WARNING: no sample cpu value"))
                return;
 
-       set_bit(sample->cpu, c2c_he->cpuset);
+       __set_bit(sample->cpu, c2c_he->cpuset);
 }
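
The change applied throughout this patch follows one pattern: each of these bitmaps is written from a single context (a benchmark setup loop, a hist entry built while processing samples, and so on), so the atomic read-modify-write performed by set_bit()/clear_bit() buys nothing and the plain __set_bit()/__clear_bit() helpers suffice. A minimal sketch of the distinction, assuming the tools/include bitmap helpers; this snippet is illustrative only and is not part of the patch:

	#include <linux/bitmap.h>

	#define NR_ITEMS 64

	static DECLARE_BITMAP(seen, NR_ITEMS);

	static void mark_seen(int idx)
	{
		/*
		 * Only one thread touches this bitmap, so the non-atomic
		 * helper is enough; set_bit() would add a locked
		 * read-modify-write for no benefit.
		 */
		__set_bit(idx, seen);
	}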
 
 static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
        if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
                return;
 
-       set_bit(node, c2c_he->nodeset);
+       __set_bit(node, c2c_he->nodeset);
 
        if (c2c_he->paddr != sample->phys_addr) {
                c2c_he->paddr_cnt++;
                        continue;
 
                perf_cpu_map__for_each_cpu(cpu, idx, map) {
-                       set_bit(cpu.cpu, set);
+                       __set_bit(cpu.cpu, set);
 
                        if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
                                return -EINVAL;
 
        list_add_tail(&page->list, &kwork->atom_page_list);
 
 found_atom:
-       set_bit(i, page->bitmap);
+       __set_bit(i, page->bitmap);
        atom->time = sample->time;
        atom->prev = NULL;
        atom->page_addr = page;
        if (atom->prev != NULL)
                atom_free(atom->prev);
 
-       clear_bit(atom->bit_inpage,
-                 ((struct kwork_atom_page *)atom->page_addr)->bitmap);
+       __clear_bit(atom->bit_inpage,
+                   ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 }
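
For context, the kwork code uses the page bitmap as a simple slot allocator: a free slot is found, marked with __set_bit() on allocation, and released with __clear_bit() on free, all from the same reporting path. A stripped-down sketch of that pattern, assuming find_first_zero_bit() and DECLARE_BITMAP() from the tools bitmap headers; the names slot_alloc/slot_free and SLOTS are assumptions for illustration, not perf identifiers:

	#include <linux/bitmap.h>

	#define SLOTS 64

	static DECLARE_BITMAP(used, SLOTS);

	static int slot_alloc(void)
	{
		unsigned long i = find_first_zero_bit(used, SLOTS);

		if (i >= SLOTS)
			return -1;	/* full: caller must allocate a new page */
		__set_bit(i, used);	/* single context, no atomicity needed */
		return i;
	}

	static void slot_free(unsigned long i)
	{
		__clear_bit(i, used);
	}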
 
 static void atom_del(struct kwork_atom *atom)
 
                /* Return ENODEV if input cpu is greater than max cpu */
                if ((unsigned long)cpu.cpu > mask->nbits)
                        return -ENODEV;
-               set_bit(cpu.cpu, mask->bits);
+               __set_bit(cpu.cpu, mask->bits);
        }
 
        return 0;
        pr_debug("nr_threads: %d\n", rec->nr_threads);
 
        for (t = 0; t < rec->nr_threads; t++) {
-               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
-               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
+               __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
+               __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
                if (verbose) {
                        pr_debug("thread_masks[%d]: ", t);
                        mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
 
 
        if (sched->map.comp) {
                cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
-               if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
+               if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
                        sched->map.comp_cpus[cpus_nr++] = this_cpu;
                        new_cpu = true;
                }
 
 
        if (map && bm) {
                for (i = 0; i < perf_cpu_map__nr(map); i++)
-                       set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
+                       __set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
        }
 
        if (map)
 
                int i;
 
                perf_cpu_map__for_each_cpu(cpu, i, map)
-                       set_bit(cpu.cpu, bm);
+                       __set_bit(cpu.cpu, bm);
        }
 
        if (map)
 
                return;
 
        a->changed = true;
-       set_bit(cpu, a->sched_cpus);
+       __set_bit(cpu, a->sched_cpus);
        /*
         * We ignore errors because affinity is just an optimization.
         * This could happen for example with isolated CPUs or cpusets.
         * In this case the IPIs inside the kernel's perf API still work.
         */
        sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
-       clear_bit(cpu, a->sched_cpus);
+       __clear_bit(cpu, a->sched_cpus);
 }
 
 static void __affinity__cleanup(struct affinity *a)
 
 
 void perf_header__set_feat(struct perf_header *header, int feat)
 {
-       set_bit(feat, header->adds_features);
+       __set_bit(feat, header->adds_features);
 }
 
 void perf_header__clear_feat(struct perf_header *header, int feat)
 {
-       clear_bit(feat, header->adds_features);
+       __clear_bit(feat, header->adds_features);
 }
 
 bool perf_header__has_feat(const struct perf_header *header, int feat)
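
These wrappers flip single feature bits in header->adds_features while the header is being assembled or parsed, which is why the plain helpers are sufficient here. Typical usage, as an illustrative sketch (not code from this patch; 'session' is an assumed struct perf_session pointer):

	perf_header__set_feat(&session->header, HEADER_BUILD_ID);

	if (perf_header__has_feat(&session->header, HEADER_HOSTNAME)) {
		/* ... */
	}

	perf_header__clear_feat(&session->header, HEADER_HOSTNAME);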
        rewinddir(dir);
 
        for_each_memory(phys, dir) {
-               set_bit(phys, n->set);
+               __set_bit(phys, n->set);
        }
 
        closedir(dir);
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
                        bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
-                       set_bit(HEADER_BUILD_ID, header->adds_features);
+                       __set_bit(HEADER_BUILD_ID, header->adds_features);
                }
        }
 
 
                        pr_err("Failed to allocate node mask for mbind: error %m\n");
                        return -1;
                }
-               set_bit(node_index, node_mask);
+               __set_bit(node_index, node_mask);
                if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
                        pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
                                data, data + mmap_len, node_index);
        for (idx = 0; idx < nr_cpus; idx++) {
                cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
                if (cpu__get_node(cpu) == node)
-                       set_bit(cpu.cpu, mask->bits);
+                       __set_bit(cpu.cpu, mask->bits);
        }
 }
 
        if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
                build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
        else if (mp->affinity == PERF_AFFINITY_CPU)
-               set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
+               __set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
 
        return 0;
 }
 
 
        memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
        for (b = from; b <= to; b++)
-               set_bit(b, bits);
+               __set_bit(b, bits);
 }
 
 void perf_pmu__del_formats(struct list_head *formats)
 
 
        sprintf(handler, "%s::%s", event->system, event->name);
 
-       if (!test_and_set_bit(event->id, events_defined))
+       if (!__test_and_set_bit(event->id, events_defined))
                define_event_symbols(event, handler, event->print_fmt.args);
 
        s = nsecs / NSEC_PER_SEC;
 
 
        sprintf(handler_name, "%s__%s", event->system, event->name);
 
-       if (!test_and_set_bit(event->id, events_defined))
+       if (!__test_and_set_bit(event->id, events_defined))
                define_event_symbols(event, handler_name, event->print_fmt.args);
 
        handler = get_handler(handler_name);
 
                        goto out_delete_map;
                }
 
-               set_bit(cpu.cpu, cpu_bitmap);
+               __set_bit(cpu.cpu, cpu_bitmap);
        }
 
        err = 0;
 
                        break;
                }
 
-               set_bit(c.cpu, cpumask_bits(b));
+               __set_bit(c.cpu, cpumask_bits(b));
        }
 
        perf_cpu_map__put(m);