machine->vmlinux_maps[type]->unmap_ip =
                                identity__map_ip;
                kmap = map__kmap(machine->vmlinux_maps[type]);
+               if (!kmap)
+                       return -1;
+
                kmap->kmaps = &machine->kmaps;
                map_groups__insert(&machine->kmaps,
                                   machine->vmlinux_maps[type]);
                kmap = map__kmap(machine->vmlinux_maps[type]);
                map_groups__remove(&machine->kmaps,
                                   machine->vmlinux_maps[type]);
-               if (kmap->ref_reloc_sym) {
+               if (kmap && kmap->ref_reloc_sym) {
                        /*
                         * ref_reloc_sym is shared among all maps, so free just
                         * on one of them.
 
                return rb_entry(next, struct map, rb_node);
        return NULL;
 }
+
+/*
+ * map__kmap - get the struct kmap that trails a kernel map.
+ *
+ * The "(map + 1)" cast relies on kernel maps being allocated with a
+ * struct kmap placed immediately after the struct map (assumption: the
+ * map allocation path reserves that extra space — confirm at the alloc
+ * site).  Returns NULL and logs an internal error when @map is not
+ * backed by a kernel DSO, so every caller must NULL-check the result.
+ */
+struct kmap *map__kmap(struct map *map)
+{
+       if (!map->dso || !map->dso->kernel) {
+               pr_err("Internal error: map__kmap with a non-kernel map\n");
+               return NULL;
+       }
+       return (struct kmap *)(map + 1);
+}
+
+/*
+ * map__kmaps - get the map_groups a kernel map belongs to.
+ *
+ * Convenience wrapper around map__kmap(): returns kmap->kmaps, or NULL
+ * (with an error logged) when @map is not a kernel map or when its
+ * kmaps pointer has not been set up yet.  Callers must NULL-check the
+ * result.
+ */
+struct map_groups *map__kmaps(struct map *map)
+{
+       struct kmap *kmap = map__kmap(map);
+
+       if (!kmap || !kmap->kmaps) {
+               pr_err("Internal error: map__kmaps with a non-kernel map\n");
+               return NULL;
+       }
+       return kmap->kmaps;
+}
 
 
 void map_groups__put(struct map_groups *mg);
 
-static inline struct kmap *map__kmap(struct map *map)
-{
-       return (struct kmap *)(map + 1);
-}
+struct kmap *map__kmap(struct map *map);
+struct map_groups *map__kmaps(struct map *map);
 
 static inline u64 map__map_ip(struct map *map, u64 ip)
 {
 
                return NULL;
 
        kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
+       if (!kmap)
+               return NULL;
        return kmap->ref_reloc_sym;
 }
 
 
 
        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);
+
+               if (!kmap)
+                       continue;
                kmap->ref_reloc_sym = ref;
        }
 
 
                  symbol_filter_t filter, int kmodule)
 {
        struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
+       struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
        struct map *curr_map = map;
        struct dso *curr_dso = dso;
        Elf_Data *symstrs, *secstrs;
        int nr = 0;
        bool remap_kernel = false, adjust_kernel_syms = false;
 
+       if (kmap && !kmaps)
+               return -1;
+
        dso->symtab_type = syms_ss->type;
        dso->is_64_bit = syms_ss->is_64_bit;
        dso->rel = syms_ss->ehdr.e_type == ET_REL;
                                        map->map_ip = map__map_ip;
                                        map->unmap_ip = map__unmap_ip;
                                        /* Ensure maps are correctly ordered */
-                                       map_groups__remove(kmap->kmaps, map);
-                                       map_groups__insert(kmap->kmaps, map);
+                                       if (kmaps) {
+                                               map_groups__remove(kmaps, map);
+                                               map_groups__insert(kmaps, map);
+                                       }
                                }
 
                                /*
                        snprintf(dso_name, sizeof(dso_name),
                                 "%s%s", dso->short_name, section_name);
 
-                       curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
+                       curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
                        if (curr_map == NULL) {
                                u64 start = sym.st_value;
 
                                        curr_map->unmap_ip = identity__map_ip;
                                }
                                curr_dso->symtab_type = dso->symtab_type;
-                               map_groups__insert(kmap->kmaps, curr_map);
+                               map_groups__insert(kmaps, curr_map);
                                /*
                                 * The new DSO should go to the kernel DSOS
                                 */
                         * We need to fixup this here too because we create new
                         * maps here, for things like vsyscall sections.
                         */
-                       __map_groups__fixup_end(kmap->kmaps, map->type);
+                       __map_groups__fixup_end(kmaps, map->type);
                }
        }
        err = nr;
 
 static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
                                         symbol_filter_t filter)
 {
-       struct map_groups *kmaps = map__kmap(map)->kmaps;
+       struct map_groups *kmaps = map__kmaps(map);
        struct map *curr_map;
        struct symbol *pos;
        int count = 0, moved = 0;
        struct rb_root *root = &dso->symbols[map->type];
        struct rb_node *next = rb_first(root);
 
+       if (!kmaps)
+               return -1;
+
        while (next) {
                char *module;
 
 static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
                               symbol_filter_t filter)
 {
-       struct map_groups *kmaps = map__kmap(map)->kmaps;
-       struct machine *machine = kmaps->machine;
+       struct map_groups *kmaps = map__kmaps(map);
+       struct machine *machine;
        struct map *curr_map = map;
        struct symbol *pos;
        int count = 0, moved = 0;
        struct rb_node *next = rb_first(root);
        int kernel_range = 0;
 
+       if (!kmaps)
+               return -1;
+
+       machine = kmaps->machine;
+
        while (next) {
                char *module;
 
 static int validate_kcore_modules(const char *kallsyms_filename,
                                  struct map *map)
 {
-       struct map_groups *kmaps = map__kmap(map)->kmaps;
+       struct map_groups *kmaps = map__kmaps(map);
        char modules_filename[PATH_MAX];
 
+       if (!kmaps)
+               return -EINVAL;
+
        if (!filename_from_kallsyms_filename(modules_filename, "modules",
                                             kallsyms_filename))
                return -EINVAL;
 {
        struct kmap *kmap = map__kmap(map);
 
+       if (!kmap)
+               return -EINVAL;
+
        if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
                u64 start;
 
 static int dso__load_kcore(struct dso *dso, struct map *map,
                           const char *kallsyms_filename)
 {
-       struct map_groups *kmaps = map__kmap(map)->kmaps;
-       struct machine *machine = kmaps->machine;
+       struct map_groups *kmaps = map__kmaps(map);
+       struct machine *machine;
        struct kcore_mapfn_data md;
        struct map *old_map, *new_map, *replacement_map = NULL;
        bool is_64_bit;
        char kcore_filename[PATH_MAX];
        struct symbol *sym;
 
+       if (!kmaps)
+               return -EINVAL;
+
+       machine = kmaps->machine;
+
        /* This function requires that the map is the kernel map */
        if (map != machine->vmlinux_maps[map->type])
                return -EINVAL;
        struct kmap *kmap = map__kmap(map);
        u64 addr;
 
+       if (!kmap)
+               return -1;
+
        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
                return 0;