enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
 #define __bpfcall
 extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
 
 static inline int cfi_get_offset(void)
 {
        return BUG_TRAP_TYPE_NONE;
 }
 #define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
 #endif /* CONFIG_CFI_CLANG */
 
 #endif /* _ASM_X86_CFI_H */
 
 "      .size   cfi_bpf_hash, 4                                 \n"
 "      .popsection                                             \n"
 );
+
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+
+__ADDRESSABLE(__bpf_callback_fn);
+
+/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
+/*
+ * NOTE(review): defined in asm rather than as the C line quoted above,
+ * presumably because the compiler-generated __kcfi_typeid_* symbols are
+ * only visible at the assembly level — confirm against the matching
+ * cfi_bpf_hash definition just above this hunk.
+ */
+asm (
+"      .pushsection    .data..ro_after_init,\"aw\",@progbits   \n"
+"      .type   cfi_bpf_subprog_hash,@object                    \n"
+"      .globl  cfi_bpf_subprog_hash                            \n"
+"      .p2align        2, 0x0                                  \n"
+"cfi_bpf_subprog_hash:                                         \n"
+"      .long   __kcfi_typeid___bpf_callback_fn                 \n"
+"      .size   cfi_bpf_subprog_hash, 4                         \n"
+"      .popsection                                             \n"
+);
 #endif
 
 #ifdef CONFIG_FINEIBT
                if (builtin) {
                        cfi_seed = get_random_u32();
                        cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
+                       cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
                }
 
                ret = cfi_rand_preamble(start_cfi, end_cfi);
 
  * in arch/x86/kernel/alternative.c
  */
 
-static void emit_fineibt(u8 **pprog)
+static void emit_fineibt(u8 **pprog, bool is_subprog)
 {
+       /* Subprogs carry the kCFI type hash of bpf_callback_t (see the
+        * cfi_bpf_subprog_hash definition), main progs the regular one. */
+       u32 hash = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash;
        u8 *prog = *pprog;
 
        EMIT_ENDBR();
-       EMIT3_off32(0x41, 0x81, 0xea, cfi_bpf_hash);    /* subl $hash, %r10d    */
+       EMIT3_off32(0x41, 0x81, 0xea, hash);            /* subl $hash, %r10d    */
        EMIT2(0x74, 0x07);                              /* jz.d8 +7             */
        EMIT2(0x0f, 0x0b);                              /* ud2                  */
        EMIT1(0x90);                                    /* nop                  */
        *pprog = prog;
 }
 
-static void emit_kcfi(u8 **pprog)
+static void emit_kcfi(u8 **pprog, bool is_subprog)
 {
+       /* Same hash selection as emit_fineibt(): subprogs use the
+        * bpf_callback_t type hash. */
+       u32 hash = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash;
        u8 *prog = *pprog;
 
-       EMIT1_off32(0xb8, cfi_bpf_hash);                /* movl $hash, %eax     */
+       EMIT1_off32(0xb8, hash);                        /* movl $hash, %eax     */
 #ifdef CONFIG_CALL_PADDING
        EMIT1(0x90);
        EMIT1(0x90);
        *pprog = prog;
 }
 
-static void emit_cfi(u8 **pprog)
+static void emit_cfi(u8 **pprog, bool is_subprog)
 {
        u8 *prog = *pprog;
 
+       /* Dispatch to the emitter matching the active CFI flavour. */
        switch (cfi_mode) {
        case CFI_FINEIBT:
-               emit_fineibt(&prog);
+               emit_fineibt(&prog, is_subprog);
                break;
 
        case CFI_KCFI:
-               emit_kcfi(&prog);
+               emit_kcfi(&prog, is_subprog);
                break;
 
        default:
 {
        u8 *prog = *pprog;
 
-       emit_cfi(&prog);
+       emit_cfi(&prog, is_subprog);
+       /* NOTE(review): the enclosing function's signature is outside this
+        * hunk — it must now receive and forward is_subprog; verify the
+        * full patch updates it. */
        /* BPF trampoline can be made to work without these nops,
         * but let's waste 5 bytes for now and optimize later
         */