bool bpf_token_capable(const struct bpf_token *token, int cap);
 
-static inline bool bpf_allow_ptr_leaks(void)
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
 {
-       return perfmon_capable();
+       return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_allow_uninit_stack(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
 {
-       return perfmon_capable();
+       return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
 {
-       return cpu_mitigations_off() || perfmon_capable();
+       return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
 {
-       return cpu_mitigations_off() || perfmon_capable();
+       return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
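
The helpers above rely on bpf_token_capable() degrading gracefully when no token is attached. As a rough sketch of the intended semantics (not the actual implementation, which lives in kernel/bpf/token.c and additionally consults an LSM hook): a NULL token falls back to a plain capable() check, while a non-NULL token delegates the decision to the token's user namespace, with CAP_SYS_ADMIN still implying the narrower capability. The token->userns field name below is an assumption.

bool bpf_token_capable(const struct bpf_token *token, int cap)
{
	/* No token: behave like the old capable()-based helpers. */
	if (!token)
		return capable(cap) ||
		       (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));

	/* Token present: check against the user namespace the token was
	 * created in rather than only the init user namespace.
	 */
	return ns_capable(token->userns, cap) ||
	       (cap != CAP_SYS_ADMIN && ns_capable(token->userns, CAP_SYS_ADMIN));
}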
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
 
                return false;
        if (!bpf_jit_harden)
                return false;
-       if (bpf_jit_harden == 1 && bpf_capable())
+       if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
                return false;
 
        return true;
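
For reference, the bpf_jit_harden sysctl (net.core.bpf_jit_harden) keeps its meaning: 0 disables constant blinding, 1 enables it only for programs loaded without the privilege checked here, and 2 enables it unconditionally. The change is that "privileged" for mode 1 now means CAP_BPF granted either to the loading task or through the program's token.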
 
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
-       bool bypass_spec_v1 = bpf_bypass_spec_v1();
+       bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
        u64 array_size, mask64;
        struct bpf_array *array;
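
At this point in the series no token is threaded through to the array map allocation path, so NULL is passed. Under the fallback sketch above that is equivalent to the old cpu_mitigations_off() || perfmon_capable() check, so behavior for map creation is unchanged here.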
 
 
 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 {
        if (!bpf_prog_kallsyms_candidate(fp) ||
-           !bpf_capable())
+           !bpf_token_capable(fp->aux->token, CAP_BPF))
                return;
 
        bpf_prog_ksym_set_addr(fp);
 
        env->prog = *prog;
        env->ops = bpf_verifier_ops[env->prog->type];
        env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
-       is_priv = bpf_capable();
+
+       env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
+       env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
+       env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
+       env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
+       env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);
 
        bpf_get_btf_vmlinux();
 
        if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
                env->strict_alignment = false;
 
-       env->allow_ptr_leaks = bpf_allow_ptr_leaks();
-       env->allow_uninit_stack = bpf_allow_uninit_stack();
-       env->bypass_spec_v1 = bpf_bypass_spec_v1();
-       env->bypass_spec_v4 = bpf_bypass_spec_v4();
-       env->bpf_capable = bpf_capable();
-
        if (is_priv)
                env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
        env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
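
The verifier hunk replaces the task-level bpf_capable()/perfmon_capable() checks with token-aware ones and consolidates the derivation of the env flags in one place, right after env->prog is set. For completeness, a hedged userspace sketch of how a program load would end up with a non-NULL prog->aux->token; the UAPI names used here (BPF_TOKEN_CREATE, token_create.bpffs_fd, prog_token_fd) are assumptions based on this series and may not match the final interface exactly.

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create a token from a bpffs mount that was set up with delegation
 * options, then reference it when loading a program.  Illustrative only.
 */
static int load_prog_with_token(int bpffs_fd, union bpf_attr *prog_attr)
{
	union bpf_attr tok_attr = {};
	int token_fd;

	tok_attr.token_create.bpffs_fd = bpffs_fd;	/* assumed field name */
	token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &tok_attr,
			   sizeof(tok_attr));
	if (token_fd < 0)
		return -1;

	prog_attr->prog_token_fd = token_fd;		/* assumed field name */
	return syscall(__NR_bpf, BPF_PROG_LOAD, prog_attr,
		       sizeof(*prog_attr));
}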
 
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_end):
-               if (!bpf_capable())
+               if (!bpf_token_capable(prog->aux->token, CAP_BPF))
                        return false;
                break;
        }
                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                        break;
                case bpf_ctx_range(struct __sk_buff, tstamp):
-                       if (!bpf_capable())
+                       if (!bpf_token_capable(prog->aux->token, CAP_BPF))
                                return false;
                        break;
                default: