return arr ? arr : ZERO_SIZE_PTR;
 }
 
-static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
+static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src)
 {
        dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
                               sizeof(struct bpf_reference_state), GFP_KERNEL);
        if (!dst->refs)
                return -ENOMEM;
 
-       dst->active_locks = src->active_locks;
        dst->acquired_refs = src->acquired_refs;
+       dst->active_locks = src->active_locks;
+       dst->active_preempt_locks = src->active_preempt_locks;
+       dst->active_rcu_lock = src->active_rcu_lock;
        return 0;
 }
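
For context, a minimal sketch of the reference-tracking members these hunks assume now live directly in struct bpf_verifier_state (previously split across bpf_func_state). Member names are taken from the hunks above; the exact types and the rest of the struct are approximations, see include/linux/bpf_verifier.h for the real layout.

	/* Sketch only -- not the real definition.  Shows just the members
	 * that copy_reference_state()/resize_reference_state() above rely
	 * on being part of the verifier state after this change.
	 */
	struct bpf_verifier_state {
		/* ... frames, branch counts, etc. elided ... */
		struct bpf_reference_state *refs;  /* acquired references and locks */
		int acquired_refs;                 /* number of valid refs[] entries */
		int active_locks;                  /* bpf_spin_lock()s currently held */
		int active_preempt_locks;          /* preempt-disable nesting depth */
		bool active_rcu_lock;              /* inside an RCU read-side section */
		/* ... */
	};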
 
        return 0;
 }
 
-static int resize_reference_state(struct bpf_func_state *state, size_t n)
+static int resize_reference_state(struct bpf_verifier_state *state, size_t n)
 {
        state->refs = realloc_array(state->refs, state->acquired_refs, n,
                                    sizeof(struct bpf_reference_state));
  */
 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
 {
-       struct bpf_func_state *state = cur_func(env);
+       struct bpf_verifier_state *state = env->cur_state;
        int new_ofs = state->acquired_refs;
        int id, err;
 
 static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type,
                              int id, void *ptr)
 {
-       struct bpf_func_state *state = cur_func(env);
+       struct bpf_verifier_state *state = env->cur_state;
        int new_ofs = state->acquired_refs;
        int err;
 
 }
 
 /* release function corresponding to acquire_reference_state(). Idempotent. */
-static int release_reference_state(struct bpf_func_state *state, int ptr_id)
+static int release_reference_state(struct bpf_verifier_state *state, int ptr_id)
 {
        int i, last_idx;
 
        return -EINVAL;
 }
 
-static int release_lock_state(struct bpf_func_state *state, int type, int id, void *ptr)
+static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
 {
        int i, last_idx;
 
        return -EINVAL;
 }
 
-static struct bpf_reference_state *find_lock_state(struct bpf_verifier_env *env, enum ref_state_type type,
+static struct bpf_reference_state *find_lock_state(struct bpf_verifier_state *state, enum ref_state_type type,
                                                   int id, void *ptr)
 {
-       struct bpf_func_state *state = cur_func(env);
        int i;
 
        for (i = 0; i < state->acquired_refs; i++) {
 {
        if (!state)
                return;
-       kfree(state->refs);
        kfree(state->stack);
        kfree(state);
 }
                free_func_state(state->frame[i]);
                state->frame[i] = NULL;
        }
+       kfree(state->refs);
        if (free_self)
                kfree(state);
 }
 static int copy_func_state(struct bpf_func_state *dst,
                           const struct bpf_func_state *src)
 {
-       int err;
-
-       memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
-       err = copy_reference_state(dst, src);
-       if (err)
-               return err;
+       memcpy(dst, src, offsetof(struct bpf_func_state, stack));
        return copy_stack_state(dst, src);
 }
 
                free_func_state(dst_state->frame[i]);
                dst_state->frame[i] = NULL;
        }
+       err = copy_reference_state(dst_state, src);
+       if (err)
+               return err;
        dst_state->speculative = src->speculative;
-       dst_state->active_rcu_lock = src->active_rcu_lock;
-       dst_state->active_preempt_lock = src->active_preempt_lock;
        dst_state->in_sleepable = src->in_sleepable;
        dst_state->curframe = src->curframe;
        dst_state->branches = src->branches;
                                fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
                                               bt_frame_stack_mask(bt, fr));
                                verbose(env, "stack=%s: ", env->tmp_str_buf);
-                               print_verifier_state(env, func, true);
+                               print_verifier_state(env, st, fr, true);
                        }
                }
 
 static bool in_rcu_cs(struct bpf_verifier_env *env)
 {
        return env->cur_state->active_rcu_lock ||
-              cur_func(env)->active_locks ||
+              env->cur_state->active_locks ||
               !in_sleepable(env);
 }
 
  * Since only one bpf_spin_lock is allowed the checks are simpler than
  * reg_is_refcounted() logic. The verifier needs to remember only
  * one spin_lock instead of array of acquired_refs.
- * cur_func(env)->active_locks remembers which map value element or allocated
+ * env->cur_state->active_locks remembers which map value element or allocated
  * object got locked and clears it after bpf_spin_unlock.
  */
 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
                             bool is_lock)
 {
        struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+       struct bpf_verifier_state *cur = env->cur_state;
        bool is_const = tnum_is_const(reg->var_off);
-       struct bpf_func_state *cur = cur_func(env);
        u64 val = reg->var_off.value;
        struct bpf_map *map = NULL;
        struct btf *btf = NULL;
                        return -EINVAL;
                }
 
-               if (release_lock_state(cur_func(env), REF_TYPE_LOCK, reg->id, ptr)) {
+               if (release_lock_state(env->cur_state, REF_TYPE_LOCK, reg->id, ptr)) {
                        verbose(env, "bpf_spin_unlock of different lock\n");
                        return -EINVAL;
                }
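
For reference, the BPF-side pattern tracked by the active_locks/REF_TYPE_LOCK bookkeeping above is the usual paired lock/unlock on one map value. A minimal illustrative program follows; the map layout, section name and program body are made up for illustration and are not part of this patch.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* Illustrative only: one bpf_spin_lock embedded in a map value.  At
	 * bpf_spin_lock() the verifier records the locked object as a
	 * REF_TYPE_LOCK entry and bumps active_locks; the matching
	 * bpf_spin_unlock() must name the same object, otherwise the
	 * "bpf_spin_unlock of different lock" error above fires.
	 */
	struct val {
		struct bpf_spin_lock lock;
		int counter;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, struct val);
	} vals SEC(".maps");

	SEC("tc")
	int bump_counter(struct __sk_buff *skb)
	{
		__u32 key = 0;
		struct val *v;

		v = bpf_map_lookup_elem(&vals, &key);
		if (!v)
			return 0;

		bpf_spin_lock(&v->lock);
		v->counter++;
		bpf_spin_unlock(&v->lock);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";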
        struct bpf_reg_state *reg;
        int err;
 
-       err = release_reference_state(cur_func(env), ref_obj_id);
+       err = release_reference_state(env->cur_state, ref_obj_id);
        if (err)
                return err;
 
                        callsite,
                        state->curframe + 1 /* frameno within this callchain */,
                        subprog /* subprog number within this prog */);
-       /* Transfer references to the callee */
-       err = copy_reference_state(callee, caller);
-       err = err ?: set_callee_state_cb(env, caller, callee, callsite);
+       err = set_callee_state_cb(env, caller, callee, callsite);
        if (err)
                goto err_out;
 
                const char *sub_name = subprog_name(env, subprog);
 
                /* Only global subprogs cannot be called with a lock held. */
-               if (cur_func(env)->active_locks) {
+               if (env->cur_state->active_locks) {
                        verbose(env, "global function calls are not allowed while holding a lock,\n"
                                     "use static function instead\n");
                        return -EINVAL;
                }
 
                /* Only global subprogs cannot be called with preemption disabled. */
-               if (env->cur_state->active_preempt_lock) {
+               if (env->cur_state->active_preempt_locks) {
                        verbose(env, "global function calls are not allowed with preemption disabled,\n"
                                     "use static function instead\n");
                        return -EINVAL;
 
        if (env->log.level & BPF_LOG_LEVEL) {
                verbose(env, "caller:\n");
-               print_verifier_state(env, caller, true);
+               print_verifier_state(env, state, caller->frameno, true);
                verbose(env, "callee:\n");
-               print_verifier_state(env, state->frame[state->curframe], true);
+               print_verifier_state(env, state, state->curframe, true);
        }
 
        return 0;
                caller->regs[BPF_REG_0] = *r0;
        }
 
-       /* Transfer references to the caller */
-       err = copy_reference_state(caller, callee);
-       if (err)
-               return err;
-
        /* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite,
         * there function call logic would reschedule callback visit. If iteration
         * converges is_state_visited() would prune that visit eventually.
 
        if (env->log.level & BPF_LOG_LEVEL) {
                verbose(env, "returning from callee:\n");
-               print_verifier_state(env, callee, true);
+               print_verifier_state(env, state, callee->frameno, true);
                verbose(env, "to caller at %d:\n", *insn_idx);
-               print_verifier_state(env, caller, true);
+               print_verifier_state(env, state, caller->frameno, true);
        }
        /* clear everything in the callee. In case of exceptional exits using
         * bpf_throw, this will be done by copy_verifier_state for extra frames. */
 
 static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
 {
-       struct bpf_func_state *state = cur_func(env);
+       struct bpf_verifier_state *state = env->cur_state;
        bool refs_lingering = false;
        int i;
 
-       if (!exception_exit && state->frameno)
+       if (!exception_exit && cur_func(env)->frameno)
                return 0;
 
        for (i = 0; i < state->acquired_refs; i++) {
 {
        int err;
 
-       if (check_lock && cur_func(env)->active_locks) {
+       if (check_lock && env->cur_state->active_locks) {
                verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix);
                return -EINVAL;
        }
                return -EINVAL;
        }
 
-       if (check_lock && env->cur_state->active_preempt_lock) {
+       if (check_lock && env->cur_state->active_preempt_locks) {
                verbose(env, "%s cannot be used inside bpf_preempt_disable-ed region\n", prefix);
                return -EINVAL;
        }
                        env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
        }
 
-       if (env->cur_state->active_preempt_lock) {
+       if (env->cur_state->active_preempt_locks) {
                if (fn->might_sleep) {
                        verbose(env, "sleepable helper %s#%d in non-preemptible region\n",
                                func_id_name(func_id), func_id);
                        struct bpf_func_state *state;
                        struct bpf_reg_state *reg;
 
-                       err = release_reference_state(cur_func(env), ref_obj_id);
+                       err = release_reference_state(env->cur_state, ref_obj_id);
                        if (!err) {
                                bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
                                        if (reg->ref_obj_id == ref_obj_id) {
 {
        struct btf_record *rec = reg_btf_record(reg);
 
-       if (!cur_func(env)->active_locks) {
+       if (!env->cur_state->active_locks) {
                verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
                return -EFAULT;
        }
 
 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
 {
-       struct bpf_func_state *state, *unused;
+       struct bpf_verifier_state *state = env->cur_state;
+       struct bpf_func_state *unused;
        struct bpf_reg_state *reg;
        int i;
 
-       state = cur_func(env);
-
        if (!ref_obj_id) {
                verbose(env, "verifier internal error: ref_obj_id is zero for "
                             "owning -> non-owning conversion\n");
        }
        id = reg->id;
 
-       if (!cur_func(env)->active_locks)
+       if (!env->cur_state->active_locks)
                return -EINVAL;
-       s = find_lock_state(env, REF_TYPE_LOCK, id, ptr);
+       s = find_lock_state(env->cur_state, REF_TYPE_LOCK, id, ptr);
        if (!s) {
                verbose(env, "held lock and object are not in the same allocation\n");
                return -EINVAL;
                return -EINVAL;
        }
 
-       if (env->cur_state->active_preempt_lock) {
+       if (env->cur_state->active_preempt_locks) {
                if (preempt_disable) {
-                       env->cur_state->active_preempt_lock++;
+                       env->cur_state->active_preempt_locks++;
                } else if (preempt_enable) {
-                       env->cur_state->active_preempt_lock--;
+                       env->cur_state->active_preempt_locks--;
                } else if (sleepable) {
                        verbose(env, "kernel func %s is sleepable within non-preemptible region\n", func_name);
                        return -EACCES;
                }
        } else if (preempt_disable) {
-               env->cur_state->active_preempt_lock++;
+               env->cur_state->active_preempt_locks++;
        } else if (preempt_enable) {
                verbose(env, "unmatched attempt to enable preemption (kernel function %s)\n", func_name);
                return -EINVAL;
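
The counter semantics above (increment on each disable, decrement on each enable, reject an enable when the count is already zero) is what allows nesting. An illustrative pairing of the bpf_preempt_disable()/bpf_preempt_enable() kfuncs follows; the program skeleton is made up for illustration.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	void bpf_preempt_disable(void) __ksym;
	void bpf_preempt_enable(void) __ksym;

	SEC("tc")
	int nested_preempt(struct __sk_buff *skb)
	{
		bpf_preempt_disable();	/* active_preempt_locks: 0 -> 1 */
		bpf_preempt_disable();	/* 1 -> 2 */
		/* sleepable helpers/kfuncs are rejected while the count is non-zero */
		bpf_preempt_enable();	/* 2 -> 1 */
		bpf_preempt_enable();	/* 1 -> 0 */
		/* one more bpf_preempt_enable() here would trigger the
		 * "unmatched attempt to enable preemption" error above
		 */
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";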
 
        /* Got here implies adding two SCALAR_VALUEs */
        if (WARN_ON_ONCE(ptr_reg)) {
-               print_verifier_state(env, state, true);
+               print_verifier_state(env, vstate, vstate->curframe, true);
                verbose(env, "verifier internal error: unexpected ptr_reg\n");
                return -EINVAL;
        }
        if (WARN_ON(!src_reg)) {
-               print_verifier_state(env, state, true);
+               print_verifier_state(env, vstate, vstate->curframe, true);
                verbose(env, "verifier internal error: no src_reg\n");
                return -EINVAL;
        }
                 * No one could have freed the reference state before
                 * doing the NULL check.
                 */
-               WARN_ON_ONCE(release_reference_state(state, id));
+               WARN_ON_ONCE(release_reference_state(vstate, id));
 
        bpf_for_each_reg_in_vstate(vstate, state, reg, ({
                mark_ptr_or_null_reg(state, reg, id, is_null);
                                               *insn_idx))
                        return -EFAULT;
                if (env->log.level & BPF_LOG_LEVEL)
-                       print_insn_state(env, this_branch->frame[this_branch->curframe]);
+                       print_insn_state(env, this_branch, this_branch->curframe);
                *insn_idx += insn->off;
                return 0;
        } else if (pred == 0) {
                                               *insn_idx))
                        return -EFAULT;
                if (env->log.level & BPF_LOG_LEVEL)
-                       print_insn_state(env, this_branch->frame[this_branch->curframe]);
+                       print_insn_state(env, this_branch, this_branch->curframe);
                return 0;
        }
 
                return -EACCES;
        }
        if (env->log.level & BPF_LOG_LEVEL)
-               print_insn_state(env, this_branch->frame[this_branch->curframe]);
+               print_insn_state(env, this_branch, this_branch->curframe);
        return 0;
 }
 
        return true;
 }
 
-static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
+static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur,
                    struct bpf_idmap *idmap)
 {
        int i;
        if (old->acquired_refs != cur->acquired_refs)
                return false;
 
+       if (old->active_locks != cur->active_locks)
+               return false;
+
+       if (old->active_preempt_locks != cur->active_preempt_locks)
+               return false;
+
+       if (old->active_rcu_lock != cur->active_rcu_lock)
+               return false;
+
        for (i = 0; i < old->acquired_refs; i++) {
                if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
                    old->refs[i].type != cur->refs[i].type)
        if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
                return false;
 
-       if (!refsafe(old, cur, &env->idmap_scratch))
-               return false;
-
        return true;
 }
 
        if (old->speculative && !cur->speculative)
                return false;
 
-       if (old->active_rcu_lock != cur->active_rcu_lock)
-               return false;
-
-       if (old->active_preempt_lock != cur->active_preempt_lock)
+       if (old->in_sleepable != cur->in_sleepable)
                return false;
 
-       if (old->in_sleepable != cur->in_sleepable)
+       if (!refsafe(old, cur, &env->idmap_scratch))
                return false;
 
        /* for states to be equal callsites have to be the same
                                verbose_linfo(env, insn_idx, "; ");
                                verbose(env, "infinite loop detected at insn %d\n", insn_idx);
                                verbose(env, "cur state:");
-                               print_verifier_state(env, cur->frame[cur->curframe], true);
+                               print_verifier_state(env, cur, cur->curframe, true);
                                verbose(env, "old state:");
-                               print_verifier_state(env, sl->state.frame[cur->curframe], true);
+                               print_verifier_state(env, &sl->state, cur->curframe, true);
                                return -EINVAL;
                        }
                        /* if the verifier is processing a loop, avoid adding new state
                                env->prev_insn_idx, env->insn_idx,
                                env->cur_state->speculative ?
                                " (speculative execution)" : "");
-                       print_verifier_state(env, state->frame[state->curframe], true);
+                       print_verifier_state(env, state, state->curframe, true);
                        do_print_state = false;
                }
 
                        };
 
                        if (verifier_state_scratched(env))
-                               print_insn_state(env, state->frame[state->curframe]);
+                               print_insn_state(env, state, state->curframe);
 
                        verbose_linfo(env, env->insn_idx, "; ");
                        env->prev_log_pos = env->log.end_pos;
                                        return -EINVAL;
                                }
 
-                               if (cur_func(env)->active_locks) {
+                               if (env->cur_state->active_locks) {
                                        if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
                                            (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
                                             (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {