case BPF_FIELD_SIGNED: return "signed";
        case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
        case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
+       case BPF_TYPE_ID_LOCAL: return "local_type_id";
+       case BPF_TYPE_ID_TARGET: return "target_type_id";
+       case BPF_TYPE_EXISTS: return "type_exists";
+       case BPF_TYPE_SIZE: return "type_size";
        default: return "unknown";
        }
 }
        }
 }
 
+/* Returns true if given CO-RE relocation kind is a type-based relocation */
+static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
+{
+       return kind == BPF_TYPE_ID_LOCAL ||
+              kind == BPF_TYPE_ID_TARGET ||
+              kind == BPF_TYPE_EXISTS ||
+              kind == BPF_TYPE_SIZE;
+}
+
 /*
  * Turn bpf_core_relo into a low- and high-level spec representation,
  * validating correctness along the way, as well as calculating resulting
  *   - field 'a' access (corresponds to '2' in low-level spec);
  *   - array element #3 access (corresponds to '3' in low-level spec).
  *
+ * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
+ * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
+ * spec and raw_spec are kept empty.
  */
 static int bpf_core_parse_spec(const struct btf *btf,
                               __u32 type_id,
        spec->root_type_id = type_id;
        spec->relo_kind = relo_kind;
 
+       /* type-based relocations don't have a field access string */
+       if (core_relo_is_type_based(relo_kind)) {
+               if (strcmp(spec_str, "0"))
+                       return -EINVAL;
+               return 0;
+       }
+
        /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
        while (*spec_str) {
                if (*spec_str == ':')
                                           const struct btf *targ_btf)
 {
        size_t local_essent_len, targ_essent_len;
-       const char *local_name, *targ_name, *targ_kind;
+       const char *local_name, *targ_name;
        const struct btf_type *t, *local_t;
        struct ids_vec *cand_ids;
        __u32 *new_ids;
        n = btf__get_nr_types(targ_btf);
        for (i = 1; i <= n; i++) {
                t = btf__type_by_id(targ_btf, i);
-               targ_name = btf__name_by_offset(targ_btf, t->name_off);
-               if (str_is_empty(targ_name))
+               if (btf_kind(t) != btf_kind(local_t))
                        continue;
-               targ_kind = btf_kind_str(t);
 
-               t = skip_mods_and_typedefs(targ_btf, i, NULL);
-               if (!btf_is_composite(t) && !btf_is_array(t))
+               targ_name = btf__name_by_offset(targ_btf, t->name_off);
+               if (str_is_empty(targ_name))
                        continue;
 
                targ_essent_len = bpf_core_essential_name_len(targ_name);
                if (strncmp(local_name, targ_name, local_essent_len) == 0) {
                        pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n",
                                 local_type_id, btf_kind_str(local_t),
-                                local_name, i, targ_kind, targ_name);
+                                local_name, i, btf_kind_str(t), targ_name);
                        new_ids = libbpf_reallocarray(cand_ids->data,
                                                      cand_ids->len + 1,
                                                      sizeof(*cand_ids->data));
        return ERR_PTR(err);
 }
 
-/* Check two types for compatibility, skipping const/volatile/restrict and
- * typedefs, to ensure we are relocating compatible entities:
+/* Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
  *   - any two STRUCTs/UNIONs are compatible and can be mixed;
  *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
  *   - any two PTRs are always compatible;
        return 0;
 }
 
+/* Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follows slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ *     kind should match for local and target types (i.e., STRUCT is not
+ *     compatible with UNION);
+ *   - for ENUMs, the size is ignored;
+ *   - for INT, size and signedness are ignored;
+ *   - for ARRAY, dimensionality is ignored, element types are checked for
+ *     compatibility recursively;
+ *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
+ *   - FUNC_PROTOs are compatible if they have compatible signature: same
+ *     number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ */
+static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+                                    const struct btf *targ_btf, __u32 targ_id)
+{
+       const struct btf_type *local_type, *targ_type;
+       int depth = 32; /* max recursion depth */
+
+       /* caller made sure that names match (ignoring flavor suffix) */
+       local_type = btf__type_by_id(local_btf, local_id);
+       /* was looking up (local_btf, local_id) again, which compared the
+        * local type against itself and made this root-level kind check a
+        * no-op; fetch the actual target type instead
+        */
+       targ_type = btf__type_by_id(targ_btf, targ_id);
+       /* root-level kinds must match *before* modifiers/typedefs are
+        * stripped; they are re-checked after each skip_mods_and_typedefs()
+        * below
+        */
+       if (btf_kind(local_type) != btf_kind(targ_type))
+               return 0;
+
+recur:
+       depth--;
+       if (depth < 0)
+               return -EINVAL;
+
+       local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+       targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+       if (!local_type || !targ_type)
+               return -EINVAL;
+
+       if (btf_kind(local_type) != btf_kind(targ_type))
+               return 0;
+
+       switch (btf_kind(local_type)) {
+       case BTF_KIND_UNKN:
+       case BTF_KIND_STRUCT:
+       case BTF_KIND_UNION:
+       case BTF_KIND_ENUM:
+       case BTF_KIND_FWD:
+               return 1;
+       case BTF_KIND_INT:
+               /* just reject deprecated bitfield-like integers; all other
+                * integers are by default compatible between each other
+                */
+               return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
+       case BTF_KIND_PTR:
+               local_id = local_type->type;
+               targ_id = targ_type->type;
+               goto recur;
+       case BTF_KIND_ARRAY:
+               local_id = btf_array(local_type)->type;
+               targ_id = btf_array(targ_type)->type;
+               goto recur;
+       case BTF_KIND_FUNC_PROTO: {
+               struct btf_param *local_p = btf_params(local_type);
+               struct btf_param *targ_p = btf_params(targ_type);
+               __u16 local_vlen = btf_vlen(local_type);
+               __u16 targ_vlen = btf_vlen(targ_type);
+               int i, err;
+
+               if (local_vlen != targ_vlen)
+                       return 0;
+
+               for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
+                       skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
+                       skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
+                       err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
+                       if (err <= 0)
+                               return err;
+               }
+
+               /* tail recurse for return type check */
+               skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
+               skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
+               goto recur;
+       }
+       default:
+               pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
+                       btf_kind_str(local_type), local_id, targ_id);
+               return 0;
+       }
+}
+
 /*
  * Try to match local spec to a target type and, if successful, produce full
  * target spec (high-level, low-level + bit offset).
        targ_spec->root_type_id = targ_id;
        targ_spec->relo_kind = local_spec->relo_kind;
 
+       if (core_relo_is_type_based(local_spec->relo_kind)) {
+               return bpf_core_types_are_compat(local_spec->btf,
+                                                local_spec->root_type_id,
+                                                targ_btf, targ_id);
+       }
+
        local_acc = &local_spec->spec[0];
        targ_acc = &targ_spec->spec[0];
 
        return 0;
 }
 
+/* Calculate relocation value for a type-based CO-RE relocation (TYPE_EXISTS,
+ * TYPE_SIZE, TYPE_ID_TARGET). @spec is the resolved spec, or NULL when no
+ * matching target type was found; result is returned through @val.
+ * Returns 0 on success, -EINVAL if target type size can't be resolved,
+ * -EOPNOTSUPP for unsupported kinds (incl. BPF_TYPE_ID_LOCAL, which the
+ * caller handles specially without going through this function).
+ */
+static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
+                                  const struct bpf_core_spec *spec,
+                                  __u32 *val)
+{
+       __s64 sz;
+
+       /* type-based relos return zero when target type is not found */
+       if (!spec) {
+               *val = 0;
+               return 0;
+       }
+
+       switch (relo->kind) {
+       case BPF_TYPE_ID_TARGET:
+               *val = spec->root_type_id;
+               break;
+       case BPF_TYPE_EXISTS:
+               *val = 1;
+               break;
+       case BPF_TYPE_SIZE:
+               sz = btf__resolve_size(spec->btf, spec->root_type_id);
+               if (sz < 0)
+                       return -EINVAL;
+               *val = sz;
+               break;
+       case BPF_TYPE_ID_LOCAL:
+       /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
 struct bpf_core_relo_res
 {
        /* expected value in the instruction, unless validate == false */
        if (core_relo_is_field_based(relo->kind)) {
                err = bpf_core_calc_field_relo(prog, relo, local_spec, &res->orig_val, &res->validate);
                err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec, &res->new_val, NULL);
+       } else if (core_relo_is_type_based(relo->kind)) {
+               err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
+               err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
        }
 
        if (err == -EUCLEAN) {
 
        libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
 
+       if (core_relo_is_type_based(spec->relo_kind))
+               return;
+
        if (core_relo_is_field_based(spec->relo_kind)) {
                for (i = 0; i < spec->len; i++) {
                        if (spec->spec[i].name)
                                     spec->bit_offset / 8, spec->bit_offset % 8);
                else
                        libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
+               return;
        }
 }
 
  *    between multiple relocations for the same type ID and is updated as some
  *    of the candidates are pruned due to structural incompatibility.
  */
-static int bpf_core_reloc_field(struct bpf_program *prog,
-                                const struct bpf_core_relo *relo,
-                                int relo_idx,
-                                const struct btf *local_btf,
-                                const struct btf *targ_btf,
-                                struct hashmap *cand_cache)
+static int bpf_core_apply_relo(struct bpf_program *prog,
+                              const struct bpf_core_relo *relo,
+                              int relo_idx,
+                              const struct btf *local_btf,
+                              const struct btf *targ_btf,
+                              struct hashmap *cand_cache)
 {
        const char *prog_name = bpf_program__title(prog, false);
        struct bpf_core_spec local_spec, cand_spec, targ_spec;
                return -EINVAL;
 
        local_name = btf__name_by_offset(local_btf, local_type->name_off);
-       if (str_is_empty(local_name))
+       if (!local_name)
                return -EINVAL;
 
        spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
        if (err) {
                pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
                        prog_name, relo_idx, local_id, btf_kind_str(local_type),
-                       local_name, spec_str, err);
+                       str_is_empty(local_name) ? "<anon>" : local_name,
+                       spec_str, err);
                return -EINVAL;
        }
 
        bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
        libbpf_print(LIBBPF_DEBUG, "\n");
 
+       /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
+       if (relo->kind == BPF_TYPE_ID_LOCAL) {
+               targ_res.validate = true;
+               targ_res.poison = false;
+               targ_res.orig_val = local_spec.root_type_id;
+               targ_res.new_val = local_spec.root_type_id;
+               goto patch_insn;
+       }
+
+       /* libbpf doesn't support candidate search for anonymous types */
+       if (str_is_empty(spec_str)) {
+               pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
+                       prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
+               return -EOPNOTSUPP;
+       }
+
        if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
                cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
                if (IS_ERR(cand_ids)) {
                        pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
-                               prog_name, relo_idx, local_id, btf_kind_str(local_type), local_name,
-                               PTR_ERR(cand_ids));
+                               prog_name, relo_idx, local_id, btf_kind_str(local_type),
+                               local_name, PTR_ERR(cand_ids));
                        return PTR_ERR(cand_ids);
                }
                err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
                        return -EINVAL;
                }
 
-               cand_ids->data[j++] = cand_spec.spec[0].type_id;
+               cand_ids->data[j++] = cand_spec.root_type_id;
        }
 
        /*
         * as well as expected case, depending whether instruction w/
         * relocation is guarded in some way that makes it unreachable (dead
         * code) if relocation can't be resolved. This is handled in
-        * bpf_core_reloc_insn() uniformly by replacing that instruction with
+        * bpf_core_patch_insn() uniformly by replacing that instruction with
         * BPF helper call insn (using invalid helper ID). If that instruction
         * is indeed unreachable, then it will be ignored and eliminated by
         * verifier. If it was an error, then verifier will complain and point
                        return err;
        }
 
+patch_insn:
        /* bpf_core_patch_insn() should know how to handle missing targ_spec */
        err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
        if (err) {
                         sec_name, sec->num_info);
 
                for_each_btf_ext_rec(seg, sec, i, rec) {
-                       err = bpf_core_reloc_field(prog, rec, i, obj->btf,
-                                                  targ_btf, cand_cache);
+                       err = bpf_core_apply_relo(prog, rec, i, obj->btf,
+                                                 targ_btf, cand_cache);
                        if (err) {
                                pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
                                        sec_name, i, err);