                cur->active_lock.ptr = btf;
                cur->active_lock.id = reg->id;
        } else {
+               struct bpf_func_state *fstate = cur_func(env);
                void *ptr;
+               int i;
 
                if (map)
                        ptr = map;
                }
                cur->active_lock.ptr = NULL;
                cur->active_lock.id = 0;
+
+               for (i = 0; i < fstate->acquired_refs; i++) {
+                       int err;
+
+                       /* Complain on error because this reference state cannot
+                        * be freed before this point, as the bpf_spin_lock critical
+                        * section does not allow functions that release the
+                        * allocated object immediately.
+                        */
+                       if (!fstate->refs[i].release_on_unlock)
+                               continue;
+                       err = release_reference(env, fstate->refs[i].id);
+                       if (err) {
+                               verbose(env, "failed to release release_on_unlock reference\n");
+                               return err;
+                       }
+               }
        }
        return 0;
 }
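
The loop in process_spin_lock() above releases, at bpf_spin_unlock time, every
acquired reference whose release_on_unlock flag is set. That flag is a new bool
field on struct bpf_reference_state (added in the bpf_verifier.h part of this
patch); a trimmed sketch, with paraphrased comments rather than the exact hunk:

  struct bpf_reference_state {
          /* Unique id of this acquired reference. */
          int id;
          /* Instruction index where the reference was acquired; kept
           * for error reporting.
           */
          int insn_idx;
          /* Release the registers sharing this id automatically when the
           * matching bpf_spin_unlock is processed, instead of requiring an
           * explicit release (set by ref_set_release_on_unlock() below).
           */
          bool release_on_unlock;
  };
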
 
+static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
+{
+       struct bpf_func_state *state = cur_func(env);
+       struct bpf_reg_state *reg;
+       int i;
+
+       /* bpf_spin_lock only allows calling list_push and list_pop, no BPF
+        * subprogs, no global functions. This means that references cannot be
+        * released inside the critical section, but they may still be added to
+        * the reference state. Since BPF to BPF calls don't work inside a
+        * bpf_spin_lock critical section, acquired_refs is also never copied
+        * out to a different frame.
+        */
+       if (!ref_obj_id) {
+               verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
+               return -EFAULT;
+       }
+       for (i = 0; i < state->acquired_refs; i++) {
+               if (state->refs[i].id == ref_obj_id) {
+                       if (state->refs[i].release_on_unlock) {
+                               verbose(env, "verifier internal error: expected false release_on_unlock\n");
+                               return -EFAULT;
+                       }
+                       state->refs[i].release_on_unlock = true;
+                       /* Now mark every register sharing the same ref_obj_id as untrusted */
+                       bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+                               if (reg->ref_obj_id == ref_obj_id)
+                                       reg->type |= PTR_UNTRUSTED;
+                       }));
+                       return 0;
+               }
+       }
+       verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
+       return -EFAULT;
+}
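
For illustration, here is a rough sketch of the BPF program shape this enables,
written against the kfuncs exposed by this series (bpf_obj_new,
bpf_list_push_front, bpf_spin_lock/bpf_spin_unlock). The type and symbol names
and the private()/__contains() annotations follow the selftests style and are
only illustrative, not part of this hunk:

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_experimental.h"   /* bpf_obj_new(), bpf_list_push_front(), ... */

  /* The list head and the lock protecting it must live in the same
   * allocation; here both go into the same named global data section.
   */
  #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

  struct elem {
          long data;
          struct bpf_list_node node;
  };

  private(A) struct bpf_spin_lock glock;
  private(A) struct bpf_list_head ghead __contains(elem, node);

  SEC("tc")
  int push_under_lock(void *ctx)
  {
          struct elem *e = bpf_obj_new(typeof(*e)); /* acquires a reference */

          if (!e)
                  return 0;
          bpf_spin_lock(&glock);
          bpf_list_push_front(&ghead, &e->node);
          /* ref_set_release_on_unlock() has flagged e's reference and marked
           * every register sharing its ref_obj_id as PTR_UNTRUSTED, so e can
           * no longer be used where a trusted pointer is expected.
           */
          bpf_spin_unlock(&glock);
          /* process_spin_lock() released the reference at the unlock above,
           * so no bpf_obj_drop(e) is needed past this point.
           */
          return 0;
  }

  char _license[] SEC("license") = "GPL";
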
+
 /* Implementation details:
  *
  * Each register points to some region of memory, which we define as an
                        btf_name_by_offset(field->list_head.btf, et->name_off));
                return -EINVAL;
        }
-       return 0;
+       /* Set arg#1 for expiration after unlock */
+       return ref_set_release_on_unlock(env, reg->ref_obj_id);
 }
 
 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)