*             **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
  *             **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
  *             own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *     Description
+ *             Get address of the traced function (for tracing programs).
+ *     Return
+ *             Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(timer_set_callback),         \
        FN(timer_start),                \
        FN(timer_cancel),               \
+       FN(get_func_ip),                \
        /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
 
        return err;
 }
 
+/* Verify that the current program is allowed to call the
+ * bpf_get_func_ip() helper.
+ *
+ * Only BPF_PROG_TYPE_TRACING programs attached as fentry, fexit or
+ * fmod_ret may use the helper (only those attach modes provide the
+ * traced function's address in the context — see the ctx - 8 load in
+ * the verifier's inline fixup).
+ *
+ * Returns 0 if the call is permitted, -ENOTSUPP otherwise (with a
+ * verbose message explaining why).
+ */
+static int check_get_func_ip(struct bpf_verifier_env *env)
+{
+       enum bpf_attach_type eatype = env->prog->expected_attach_type;
+       enum bpf_prog_type type = resolve_prog_type(env->prog);
+       int func_id = BPF_FUNC_get_func_ip;
+
+       if (type == BPF_PROG_TYPE_TRACING) {
+               /* Reject tracing attach modes other than fentry/fexit/fmod_ret. */
+               if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
+                   eatype != BPF_MODIFY_RETURN) {
+                       verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
+                               func_id_name(func_id), func_id);
+                       return -ENOTSUPP;
+               }
+               return 0;
+       }
+
+       /* Any other program type cannot use the helper at all. */
+       verbose(env, "func %s#%d not supported for program type %d\n",
+               func_id_name(func_id), func_id, type);
+       return -ENOTSUPP;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                             int *insn_idx_p)
 {
        if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
                env->prog->call_get_stack = true;
 
+       if (func_id == BPF_FUNC_get_func_ip) {
+               if (check_get_func_ip(env))
+                       return -ENOTSUPP;
+               env->prog->call_get_func_ip = true;
+       }
+
        if (changes_data)
                clear_all_pkt_pointers(env);
        return 0;
 {
        struct bpf_prog *prog = env->prog;
        bool expect_blinding = bpf_jit_blinding_enabled(prog);
+       enum bpf_prog_type prog_type = resolve_prog_type(prog);
        struct bpf_insn *insn = prog->insnsi;
        const struct bpf_func_proto *fn;
        const int insn_cnt = prog->len;
                        continue;
                }
 
+               /* Implement bpf_get_func_ip inline. */
+               if (prog_type == BPF_PROG_TYPE_TRACING &&
+                   insn->imm == BPF_FUNC_get_func_ip) {
+                       /* Load IP address from ctx - 8 */
+                       insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
 patch_call_imm:
                fn = env->ops->get_func_proto(insn->imm, env->prog);
                /* all functions that have prototype and verifier allowed
 
        .arg5_type      = ARG_ANYTHING,
 };
 
+/* bpf_get_func_ip() helper body for tracing programs.
+ *
+ * Returns the 64-bit word stored immediately before ctx (i.e. ctx - 8),
+ * which holds the traced function's address — presumably placed there by
+ * the trampoline for fentry/fexit/fmod_ret attachments (the verifier's
+ * attach-type check restricts the helper to exactly those).  The verifier
+ * normally inlines this load, so this function is only a fallback.
+ */
+BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
+{
+       /* This helper call is inlined by verifier. */
+       return ((u64 *)ctx)[-1];
+}
+
+/* Prototype for bpf_get_func_ip() in tracing programs: takes the program
+ * context pointer, returns an integer (the traced function's address).
+ * GPL-only, matching the other tracing helpers.
+ */
+static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
+       .func           = bpf_get_func_ip_tracing,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+
 const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
                return &bpf_for_each_map_elem_proto;
        case BPF_FUNC_snprintf:
                return &bpf_snprintf_proto;
+       case BPF_FUNC_get_func_ip:
+               return &bpf_get_func_ip_proto_tracing;
        default:
                return bpf_base_func_proto(func_id);
        }
 
  *             **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
  *             **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
  *             own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *     Description
+ *             Get address of the traced function (for tracing programs).
+ *     Return
+ *             Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(timer_set_callback),         \
        FN(timer_start),                \
        FN(timer_cancel),               \
+       FN(get_func_ip),                \
        /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper