Add accessor functions so that there is a way to intercept usage of the reference counted struct map.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
        if (use_browser <= 0)
                sleep(5);
 
-       map->erange_warned = true;
+       map__set_erange_warned(map, true);
 }
 
 static void perf_top__record_precise_ip(struct perf_top *top,
                 */
                mutex_unlock(&he->hists->lock);
 
-               if (err == -ERANGE && !he->ms.map->erange_warned)
+               if (err == -ERANGE && !map__erange_warned(he->ms.map))
                        ui__warn_map_erange(he->ms.map, sym, ip);
                else if (err == -ENOMEM) {
                        pr_err("Not enough memory for annotating '%s' symbol!\n",
 
                map = dso__new_map(bpf_progs[i].name);
                TEST_ASSERT_VAL("failed to create map", map);
 
-               map->start = bpf_progs[i].start;
-               map->end   = bpf_progs[i].end;
+               map__set_start(map, bpf_progs[i].start);
+               map__set_end(map, bpf_progs[i].end);
                TEST_ASSERT_VAL("failed to insert map", maps__insert(maps, map) == 0);
                map__put(map);
        }
        TEST_ASSERT_VAL("failed to create map", map_kcore3);
 
        /* kcore1 map overlaps over all bpf maps */
-       map_kcore1->start = 100;
-       map_kcore1->end   = 1000;
+       map__set_start(map_kcore1, 100);
+       map__set_end(map_kcore1, 1000);
 
        /* kcore2 map hides behind bpf_prog_2 */
-       map_kcore2->start = 550;
-       map_kcore2->end   = 570;
+       map__set_start(map_kcore2, 550);
+       map__set_end(map_kcore2, 570);
 
        /* kcore3 map hides behind bpf_prog_3, kcore1 and adds new map */
-       map_kcore3->start = 880;
-       map_kcore3->end   = 1100;
+       map__set_start(map_kcore3, 880);
+       map__set_end(map_kcore3, 1100);
 
        ret = maps__merge_in(maps, map_kcore1);
        TEST_ASSERT_VAL("failed to merge map", !ret);
 
                                                                dso->short_name :
                                                                dso->name));
                if (pair) {
-                       pair->priv = 1;
+                       map__set_priv(pair, 1);
                } else {
                        if (!header_printed) {
                                pr_info("WARN: Maps only in vmlinux:\n");
                                pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
                                        map__start(pair), map__end(pair), map__pgoff(pair));
                        pr_info(" %s\n", dso->name);
-                       pair->priv = 1;
+                       map__set_priv(pair, 1);
                }
        }
 
 
                        dso__set_loaded(dso);
                }
 
-               map->start = event->ksymbol.addr;
-               map->end = map__start(map) + event->ksymbol.len;
+               map__set_start(map, event->ksymbol.addr);
+               map__set_end(map, map__start(map) + event->ksymbol.len);
                err = maps__insert(machine__kernel_maps(machine), map);
                if (err) {
                        err = -ENOMEM;
        if (!map)
                return -ENOMEM;
 
-       map->end   = xm->end;
-       map->pgoff = xm->pgoff;
+       map__set_end(map, xm->end);
+       map__set_pgoff(map, xm->pgoff);
 
        kmap = map__kmap(map);
 
 
                dest_map = maps__find(kmaps, map__pgoff(map));
                if (dest_map != map)
-                       map->pgoff = map__map_ip(dest_map, map__pgoff(map));
+                       map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
                found = true;
        }
        if (found || machine->trampolines_mapped)
        if (machine->vmlinux_map == NULL)
                return -ENOMEM;
 
-       machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
+       map__set_map_ip(machine->vmlinux_map, identity__map_ip);
+       map__set_unmap_ip(machine->vmlinux_map, identity__map_ip);
        return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
 }
 
        map = machine__addnew_module_map(machine, start, name);
        if (map == NULL)
                return -1;
-       map->end = start + size;
+       map__set_end(map, start + size);
 
        dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
        map__put(map);
+/*
+ * Set the vmlinux kernel map to cover [start, end).  A zero sized
+ * synthesized kernel MMAP (start == end == 0) is widened so the map ends at
+ * ~0ULL and kernel address lookups still resolve to it.
+ */
 static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
 {
-	machine->vmlinux_map->start = start;
-	machine->vmlinux_map->end   = end;
+	map__set_start(machine->vmlinux_map, start);
+	map__set_end(machine->vmlinux_map, end);
 	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
-		machine->vmlinux_map->end = ~0ULL;
+		map__set_end(machine->vmlinux_map, ~0ULL);
 }
 
 static int machine__update_kernel_mmap(struct machine *machine,
                if (map == NULL)
                        goto out_problem;
 
-               map->end = map__start(map) + xm->end - xm->start;
+               map__set_end(map, map__start(map) + xm->end - xm->start);
 
                if (build_id__is_defined(bid))
                        dso__set_build_id(map__dso(map), bid);
 
 
+/*
+ * map__init - initialize @map to cover [@start, @end) at file offset @pgoff,
+ * taking a new reference on @dso and installing the default dso relative ip
+ * translators.  The map reference count starts at 1.
+ */
 void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
 {
-	map->start    = start;
-	map->end      = end;
-	map->pgoff    = pgoff;
-	map->reloc    = 0;
-	map->dso      = dso__get(dso);
-	map->map_ip   = map__dso_map_ip;
-	map->unmap_ip = map__dso_unmap_ip;
-	map->erange_warned = false;
+	map__set_start(map, start);
+	map__set_end(map, end);
+	map__set_pgoff(map, pgoff);
+	map__set_reloc(map, 0);
+	map__set_dso(map, dso__get(dso));
+	map__set_map_ip(map, map__dso_map_ip);
+	map__set_unmap_ip(map, map__dso_unmap_ip);
+	map__set_erange_warned(map, false);
 	refcount_set(map__refcnt(map), 1);
 }
 
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
 
-               map->start = sym->start;
+               map__set_start(map, sym->start);
        }
 }
 
 
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
-               map->end = sym->end;
+               map__set_end(map, sym->end);
        }
 }
 
 
        return map->unmap_ip(map, ip);
 }
 
+/* Type of the map_ip()/unmap_ip() address translation callbacks. */
+typedef u64 (*map__addr_translator_t)(const struct map *map, u64 ip);
+
+/*
+ * Return the translation callbacks themselves so they can be copied onto
+ * another map with map__set_map_ip()/map__set_unmap_ip().  Returning the
+ * real function pointer type avoids the non-standard (ISO C) conversion
+ * between function pointers and void *.
+ */
+static inline map__addr_translator_t map__map_ip_ptr(struct map *map)
+{
+	return map->map_ip;
+}
+
+static inline map__addr_translator_t map__unmap_ip_ptr(struct map *map)
+{
+	return map->unmap_ip;
+}
+
 static inline u64 map__start(const struct map *map)
 {
        return map->start;
        return &map->refcnt;
 }
 
+/*
+ * Has the -ERANGE warning (ui__warn_map_erange()) already been emitted for
+ * this map?  const qualified for consistency with the other getters, e.g.
+ * map__start().
+ */
+static inline bool map__erange_warned(const struct map *map)
+{
+	return map->erange_warned;
+}
+
 static inline size_t map__size(const struct map *map)
 {
        return map__end(map) - map__start(map);
               !strncmp(filename, "/SYSV", 5)  ||
               !strcmp(filename, "[heap]");
 }
+
+/*
+ * Setters for struct map fields.  Plain assignments for now; they exist to
+ * provide a single place where accesses to the reference counted struct map
+ * can later be intercepted.
+ */
+static inline void map__set_start(struct map *map, u64 start)
+{
+	map->start = start;
+}
+
+static inline void map__set_end(struct map *map, u64 end)
+{
+	map->end = end;
+}
+
+static inline void map__set_pgoff(struct map *map, u64 pgoff)
+{
+	map->pgoff = pgoff;
+}
+
+/* Advance pgoff by @inc, used when the front of a map is trimmed or split. */
+static inline void map__add_pgoff(struct map *map, u64 inc)
+{
+	map->pgoff += inc;
+}
+
+static inline void map__set_reloc(struct map *map, u64 reloc)
+{
+	map->reloc = reloc;
+}
+
+static inline void map__set_priv(struct map *map, int priv)
+{
+	map->priv = priv;
+}
+
+static inline void map__set_erange_warned(struct map *map, bool erange_warned)
+{
+	map->erange_warned = erange_warned;
+}
+
+/*
+ * NOTE(review): plain pointer assignment, no dso__get()/dso__put() here; the
+ * caller owns the reference (see map__init() passing dso__get(dso)).
+ */
+static inline void map__set_dso(struct map *map, struct dso *dso)
+{
+	map->dso = dso;
+}
+
+static inline void map__set_map_ip(struct map *map, u64 (*map_ip)(const struct map *map, u64 ip))
+{
+	map->map_ip = map_ip;
+}
+
+static inline void map__set_unmap_ip(struct map *map, u64 (*unmap_ip)(const struct map *map, u64 rip))
+{
+	map->unmap_ip = unmap_ip;
+}
 #endif /* __PERF_MAP_H */
 
                                goto put_map;
                        }
 
-                       before->end = map__start(map);
+                       map__set_end(before, map__start(map));
                        err = __maps__insert(maps, before);
                        if (err) {
                                map__put(before);
                                goto put_map;
                        }
 
-                       after->start = map__end(map);
-                       after->pgoff += map__end(map) - map__start(pos->map);
+                       map__set_start(after, map__end(map));
+                       map__add_pgoff(after, map__end(map) - map__start(pos->map));
                        assert(map__map_ip(pos->map, map__end(map)) ==
                                map__map_ip(after, map__end(map)));
                        err = __maps__insert(maps, after);
 
                 */
                if (*remap_kernel && dso->kernel && !kmodule) {
                        *remap_kernel = false;
-                       map->start = shdr->sh_addr + ref_reloc(kmap);
-                       map->end = map__start(map) + shdr->sh_size;
-                       map->pgoff = shdr->sh_offset;
-                       map->map_ip = map__dso_map_ip;
-                       map->unmap_ip = map__dso_unmap_ip;
+                       map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
+                       map__set_end(map, map__start(map) + shdr->sh_size);
+                       map__set_pgoff(map, shdr->sh_offset);
+                       map__set_map_ip(map, map__dso_map_ip);
+                       map__set_unmap_ip(map, map__dso_unmap_ip);
                        /* Ensure maps are correctly ordered */
                        if (kmaps) {
                                int err;
                 */
                if (*remap_kernel && kmodule) {
                        *remap_kernel = false;
-                       map->pgoff = shdr->sh_offset;
+                       map__set_pgoff(map, shdr->sh_offset);
                }
 
                *curr_mapp = map;
                        map__kmap(curr_map)->kmaps = kmaps;
 
                if (adjust_kernel_syms) {
-                       curr_map->start  = shdr->sh_addr + ref_reloc(kmap);
-                       curr_map->end    = map__start(curr_map) + shdr->sh_size;
-                       curr_map->pgoff  = shdr->sh_offset;
+                       map__set_start(curr_map, shdr->sh_addr + ref_reloc(kmap));
+                       map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
+                       map__set_pgoff(curr_map, shdr->sh_offset);
                } else {
-                       curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
+                       map__set_map_ip(curr_map, identity__map_ip);
+                       map__set_unmap_ip(curr_map, identity__map_ip);
                }
                curr_dso->symtab_type = dso->symtab_type;
                if (maps__insert(kmaps, curr_map))
                        if (strcmp(elf_name, kmap->ref_reloc_sym->name))
                                continue;
                        kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
-                       map->reloc = kmap->ref_reloc_sym->addr -
-                                    kmap->ref_reloc_sym->unrelocated_addr;
+                       map__set_reloc(map, kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr);
                        break;
                }
        }
         * attempted to prelink vdso to its virtual address.
         */
        if (dso__is_vdso(dso))
-               map->reloc = map__start(map) - dso->text_offset;
+               map__set_reloc(map, map__start(map) - dso->text_offset);
 
        dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
        /*
 
 
        maps__for_each_entry(maps, curr) {
                if (prev != NULL && !map__end(prev->map))
-                       prev->map->end = map__start(curr->map);
+                       map__set_end(prev->map, map__start(curr->map));
 
                prev = curr;
        }
         * last map final address.
         */
        if (curr && !map__end(curr->map))
-               curr->map->end = ~0ULL;
+               map__set_end(curr->map, ~0ULL);
 
        up_write(maps__lock(maps));
 }
                                return -1;
                        }
 
-                       curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
+                       map__set_map_ip(curr_map, identity__map_ip);
+                       map__set_unmap_ip(curr_map, identity__map_ip);
                        if (maps__insert(kmaps, curr_map)) {
                                dso__put(ndso);
                                return -1;
                return -ENOMEM;
        }
 
-       list_node->map->end = map__start(list_node->map) + len;
-       list_node->map->pgoff = pgoff;
+       map__set_end(list_node->map, map__start(list_node->map) + len);
+       map__set_pgoff(list_node->map, pgoff);
 
        list_add(&list_node->node, &md->maps);
 
                                 * |new......|     -> |new..|
                                 *       |old....| ->       |old....|
                                 */
-                               new_map->end = map__start(old_map);
+                               map__set_end(new_map, map__start(old_map));
                        } else {
                                /*
                                 * |new.............| -> |new..|       |new..|
                                        goto out;
                                }
 
-                               m->map->end = map__start(old_map);
+                               map__set_end(m->map, map__start(old_map));
                                list_add_tail(&m->node, &merged);
-                               new_map->pgoff += map__end(old_map) - map__start(new_map);
-                               new_map->start = map__end(old_map);
+                               map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+                               map__set_start(new_map, map__end(old_map));
                        }
                } else {
                        /*
                                 *      |new......| ->         |new...|
                                 * |old....|        -> |old....|
                                 */
-                               new_map->pgoff += map__end(old_map) - map__start(new_map);
-                               new_map->start = map__end(old_map);
+                               map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+                               map__set_start(new_map, map__end(old_map));
                        }
                }
        }
                list_del_init(&new_node->node);
 
                if (new_map == replacement_map) {
-                       map->start      = map__start(new_map);
-                       map->end        = map__end(new_map);
-                       map->pgoff      = map__pgoff(new_map);
-                       map->map_ip     = new_map->map_ip;
-                       map->unmap_ip   = new_map->unmap_ip;
+                       map__set_start(map, map__start(new_map));
+                       map__set_end(map, map__end(new_map));
+                       map__set_pgoff(map, map__pgoff(new_map));
+                       map__set_map_ip(map, map__map_ip_ptr(new_map));
+                       map__set_unmap_ip(map, map__unmap_ip_ptr(new_map));
                        /* Ensure maps are correctly ordered */
                        map__get(map);
                        maps__remove(kmaps, map);