{
        bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
        struct bpf_core_cand_list cands = {};
+       struct bpf_core_relo_res targ_res;
        struct bpf_core_spec *specs;
        int err;
 
                cands.len = cc->cnt;
                /* cand_cache_mutex needs to span the cache lookup and
                 * copy of btf pointer into bpf_core_cand_list,
-                * since module can be unloaded while bpf_core_apply_relo_insn
+                * since module can be unloaded while bpf_core_calc_relo_insn
                 * is working with module's btf.
                 */
        }
 
-       err = bpf_core_apply_relo_insn((void *)ctx->log, insn, relo->insn_off / 8,
-                                      relo, relo_idx, ctx->btf, &cands, specs);
+       err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
+                                     &targ_res);
+       if (err)
+               goto out;
+
+       err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
+                                 &targ_res);
+
 out:
        kfree(specs);
        if (need_cands) {
 
        return 0;
 }
 
-static int bpf_core_apply_relo(struct bpf_program *prog,
-                              const struct bpf_core_relo *relo,
-                              int relo_idx,
-                              const struct btf *local_btf,
-                              struct hashmap *cand_cache)
+static int bpf_core_resolve_relo(struct bpf_program *prog,
+                                const struct bpf_core_relo *relo,
+                                int relo_idx,
+                                const struct btf *local_btf,
+                                struct hashmap *cand_cache,
+                                struct bpf_core_relo_res *targ_res)
 {
        struct bpf_core_spec specs_scratch[3] = {};
        const void *type_key = u32_as_hash_key(relo->type_id);
        const struct btf_type *local_type;
        const char *local_name;
        __u32 local_id = relo->type_id;
-       struct bpf_insn *insn;
-       int insn_idx, err;
-
-       if (relo->insn_off % BPF_INSN_SZ)
-               return -EINVAL;
-       insn_idx = relo->insn_off / BPF_INSN_SZ;
-       /* adjust insn_idx from section frame of reference to the local
-        * program's frame of reference; (sub-)program code is not yet
-        * relocated, so it's enough to just subtract in-section offset
-        */
-       insn_idx = insn_idx - prog->sec_insn_off;
-       if (insn_idx >= prog->insns_cnt)
-               return -EINVAL;
-       insn = &prog->insns[insn_idx];
+       int err;
 
        local_type = btf__type_by_id(local_btf, local_id);
        if (!local_type)
        if (!local_name)
                return -EINVAL;
 
-       if (prog->obj->gen_loader) {
-               const char *spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
-
-               pr_debug("record_relo_core: prog %td insn[%d] %s %s %s final insn_idx %d\n",
-                       prog - prog->obj->programs, relo->insn_off / 8,
-                       btf_kind_str(local_type), local_name, spec_str, insn_idx);
-               return record_relo_core(prog, relo, insn_idx);
-       }
-
        if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
            !hashmap__find(cand_cache, type_key, (void **)&cands)) {
                cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
                }
        }
 
-       return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo,
-                                       relo_idx, local_btf, cands, specs_scratch);
+       return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
+                                      targ_res);
 }
 
 static int
 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
 {
        const struct btf_ext_info_sec *sec;
+       struct bpf_core_relo_res targ_res;
        const struct bpf_core_relo *rec;
        const struct btf_ext_info *seg;
        struct hashmap_entry *entry;
        struct hashmap *cand_cache = NULL;
        struct bpf_program *prog;
+       struct bpf_insn *insn;
        const char *sec_name;
        int i, err = 0, insn_idx, sec_idx;
 
                         sec_name, sec->num_info);
 
                for_each_btf_ext_rec(seg, sec, i, rec) {
+                       if (rec->insn_off % BPF_INSN_SZ)
+                               return -EINVAL;
                        insn_idx = rec->insn_off / BPF_INSN_SZ;
                        prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
                        if (!prog) {
                        if (!prog->load)
                                continue;
 
-                       err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
+                       /* adjust insn_idx from section frame of reference to the local
+                        * program's frame of reference; (sub-)program code is not yet
+                        * relocated, so it's enough to just subtract in-section offset
+                        */
+                       insn_idx = insn_idx - prog->sec_insn_off;
+                       if (insn_idx >= prog->insns_cnt)
+                               return -EINVAL;
+                       insn = &prog->insns[insn_idx];
+
+                       if (prog->obj->gen_loader) {
+                               err = record_relo_core(prog, rec, insn_idx);
+                               if (err) {
+                                       pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
+                                               prog->name, i, err);
+                                       goto out;
+                               }
+                               continue;
+                       }
+
+                       err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
                        if (err) {
                                pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
                                        prog->name, i, err);
                                goto out;
                        }
+
+                       err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
+                       if (err) {
+                               pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
+                                       prog->name, i, insn_idx, err);
+                               goto out;
+                       }
                }
        }
 
 
        return 0;
 }
 
-struct bpf_core_relo_res
-{
-       /* expected value in the instruction, unless validate == false */
-       __u32 orig_val;
-       /* new value that needs to be patched up to */
-       __u32 new_val;
-       /* relocation unsuccessful, poison instruction, but don't fail load */
-       bool poison;
-       /* some relocations can't be validated against orig_val */
-       bool validate;
-       /* for field byte offset relocations or the forms:
-        *     *(T *)(rX + <off>) = rY
-        *     rX = *(T *)(rY + <off>),
-        * we remember original and resolved field size to adjust direct
-        * memory loads of pointers and integers; this is necessary for 32-bit
-        * host kernel architectures, but also allows to automatically
-        * relocate fields that were resized from, e.g., u32 to u64, etc.
-        */
-       bool fail_memsz_adjust;
-       __u32 orig_sz;
-       __u32 orig_type_id;
-       __u32 new_sz;
-       __u32 new_type_id;
-};
-
 /* Calculate original and target relocation values, given local and target
  * specs and relocation kind. These values are calculated for each candidate.
  * If there are multiple candidates, resulting values should all be consistent
  * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
  * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
  */
-static int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
-                              int insn_idx, const struct bpf_core_relo *relo,
-                              int relo_idx, const struct bpf_core_relo_res *res)
+int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
+                       int insn_idx, const struct bpf_core_relo *relo,
+                       int relo_idx, const struct bpf_core_relo_res *res)
 {
        __u32 orig_val, new_val;
        __u8 class;
 }
 
 /*
- * CO-RE relocate single instruction.
+ * Calculate CO-RE relocation target result.
  *
  * The outline and important points of the algorithm:
  * 1. For given local type, find corresponding candidate target types.
  *    between multiple relocations for the same type ID and is updated as some
  *    of the candidates are pruned due to structural incompatibility.
  */
-int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
-                            int insn_idx,
-                            const struct bpf_core_relo *relo,
-                            int relo_idx,
-                            const struct btf *local_btf,
-                            struct bpf_core_cand_list *cands,
-                            struct bpf_core_spec *specs_scratch)
+int bpf_core_calc_relo_insn(const char *prog_name,
+                           const struct bpf_core_relo *relo,
+                           int relo_idx,
+                           const struct btf *local_btf,
+                           struct bpf_core_cand_list *cands,
+                           struct bpf_core_spec *specs_scratch,
+                           struct bpf_core_relo_res *targ_res)
 {
        struct bpf_core_spec *local_spec = &specs_scratch[0];
        struct bpf_core_spec *cand_spec = &specs_scratch[1];
        struct bpf_core_spec *targ_spec = &specs_scratch[2];
-       struct bpf_core_relo_res cand_res, targ_res;
+       struct bpf_core_relo_res cand_res;
        const struct btf_type *local_type;
        const char *local_name;
        __u32 local_id;
        /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
        if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
                /* bpf_insn's imm value could get out of sync during linking */
-               memset(&targ_res, 0, sizeof(targ_res));
-               targ_res.validate = false;
-               targ_res.poison = false;
-               targ_res.orig_val = local_spec->root_type_id;
-               targ_res.new_val = local_spec->root_type_id;
-               goto patch_insn;
+               memset(targ_res, 0, sizeof(*targ_res));
+               targ_res->validate = false;
+               targ_res->poison = false;
+               targ_res->orig_val = local_spec->root_type_id;
+               targ_res->new_val = local_spec->root_type_id;
+               return 0;
        }
 
        /* libbpf doesn't support candidate search for anonymous types */
                        return err;
 
                if (j == 0) {
-                       targ_res = cand_res;
+                       *targ_res = cand_res;
                        *targ_spec = *cand_spec;
                } else if (cand_spec->bit_offset != targ_spec->bit_offset) {
                        /* if there are many field relo candidates, they
                                prog_name, relo_idx, cand_spec->bit_offset,
                                targ_spec->bit_offset);
                        return -EINVAL;
-               } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
+               } else if (cand_res.poison != targ_res->poison ||
+                          cand_res.new_val != targ_res->new_val) {
                        /* all candidates should result in the same relocation
                         * decision and value, otherwise it's dangerous to
                         * proceed due to ambiguity
                        pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
                                prog_name, relo_idx,
                                cand_res.poison ? "failure" : "success", cand_res.new_val,
-                               targ_res.poison ? "failure" : "success", targ_res.new_val);
+                               targ_res->poison ? "failure" : "success", targ_res->new_val);
                        return -EINVAL;
                }
 
                         prog_name, relo_idx);
 
                /* calculate single target relo result explicitly */
-               err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, &targ_res);
+               err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
                if (err)
                        return err;
        }
 
-patch_insn:
-       /* bpf_core_patch_insn() should know how to handle missing targ_spec */
-       err = bpf_core_patch_insn(prog_name, insn, insn_idx, relo, relo_idx, &targ_res);
-       if (err) {
-               pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
-                       prog_name, relo_idx, relo->insn_off / 8, err);
-               return -EINVAL;
-       }
-
        return 0;
 }
 
        __u32 bit_offset;
 };
 
-int bpf_core_apply_relo_insn(const char *prog_name,
-                            struct bpf_insn *insn, int insn_idx,
-                            const struct bpf_core_relo *relo, int relo_idx,
-                            const struct btf *local_btf,
-                            struct bpf_core_cand_list *cands,
-                            struct bpf_core_spec *specs_scratch);
+/* Result of calculating a single CO-RE relocation: filled in by
+ * bpf_core_calc_relo_insn() and consumed by bpf_core_patch_insn(),
+ * which applies it to the target BPF instruction.
+ */
+struct bpf_core_relo_res {
+       /* expected value in the instruction, unless validate == false */
+       __u32 orig_val;
+       /* new value that needs to be patched up to */
+       __u32 new_val;
+       /* relocation unsuccessful, poison instruction, but don't fail load */
+       bool poison;
+       /* some relocations can't be validated against orig_val */
+       bool validate;
+       /* for field byte offset relocations or the forms:
+        *     *(T *)(rX + <off>) = rY
+        *     rX = *(T *)(rY + <off>),
+        * we remember original and resolved field size to adjust direct
+        * memory loads of pointers and integers; this is necessary for 32-bit
+        * host kernel architectures, but also allows to automatically
+        * relocate fields that were resized from, e.g., u32 to u64, etc.
+        */
+       bool fail_memsz_adjust;
+       __u32 orig_sz;
+       __u32 orig_type_id;
+       __u32 new_sz;
+       __u32 new_type_id;
+};
+
 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
                              const struct btf *targ_btf, __u32 targ_id);
 
 size_t bpf_core_essential_name_len(const char *name);
+
+/* Calculate the CO-RE relocation result for @relo against candidate target
+ * types in @cands, writing the outcome into @targ_res; does not modify any
+ * instruction. Returns 0 on success, negative error otherwise.
+ */
+int bpf_core_calc_relo_insn(const char *prog_name,
+                           const struct bpf_core_relo *relo, int relo_idx,
+                           const struct btf *local_btf,
+                           struct bpf_core_cand_list *cands,
+                           struct bpf_core_spec *specs_scratch,
+                           struct bpf_core_relo_res *targ_res);
+
+/* Patch instruction @insn (at index @insn_idx) according to a previously
+ * calculated relocation result @res. Returns 0 on success, negative error
+ * otherwise.
+ */
+int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
+                       int insn_idx, const struct bpf_core_relo *relo,
+                       int relo_idx, const struct bpf_core_relo_res *res);
+
 #endif