return false;
 }
 
+static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+                                 const struct cpumask *allowed, u64 flags)
+{
+       struct rq *rq;
+       struct rq_flags rf;
+       s32 cpu;
+
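+       /*
+        * Reject invalid @prev_cpu values and bail out if built-in idle
+        * CPU tracking is not enabled for this scheduler.
+        */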
+       if (!kf_cpu_valid(prev_cpu, NULL))
+               return -EINVAL;
+
+       if (!check_builtin_idle_enabled())
+               return -EBUSY;
+
+       /*
+        * If called from an unlocked context, acquire the task's rq lock,
+        * so that we can safely access p->cpus_ptr and p->nr_cpus_allowed.
+        *
+        * Otherwise, allow this kfunc to be used only from ops.select_cpu()
+        * and ops.enqueue().
+        */
+       if (scx_kf_allowed_if_unlocked()) {
+               rq = task_rq_lock(p, &rf);
+       } else {
+               if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
+                       return -EPERM;
+               rq = scx_locked_rq();
+       }
+
+       /*
+        * Validate locking correctness to access p->cpus_ptr and
+        * p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
+        * otherwise, assert that p->pi_lock is held.
+        */
+       if (!rq)
+               lockdep_assert_held(&p->pi_lock);
+
+#ifdef CONFIG_SMP
+       /*
+        * This may also be called from ops.enqueue(), so we need to handle
+        * per-CPU tasks as well. For these tasks, we can skip all idle CPU
+        * selection optimizations and simply check whether the previously
+        * used CPU is idle and within the allowed cpumask.
+        */
+       if (p->nr_cpus_allowed == 1) {
+               if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
+                   scx_idle_test_and_clear_cpu(prev_cpu))
+                       cpu = prev_cpu;
+               else
+                       cpu = -EBUSY;
+       } else {
+               cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
+                                        allowed ?: p->cpus_ptr, flags);
+       }
+#else
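+       /*
+        * Idle CPU selection is not supported on UP kernels: report -EBUSY
+        * and let the caller decide how to fall back.
+        */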
+       cpu = -EBUSY;
+#endif
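+       /*
+        * Drop the rq lock only if it was acquired above for the unlocked
+        * calling context.
+        */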
+       if (scx_kf_allowed_if_unlocked())
+               task_rq_unlock(rq, p, &rf);
+
+       return cpu;
+}
+
 /**
  * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
  *                   trigger an error if @cpu is invalid
  * @wake_flags: %SCX_WAKE_* flags
  * @is_idle: out parameter indicating whether the returned CPU is idle
  *
- * Can only be called from ops.select_cpu() if the built-in CPU selection is
- * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
- * @p, @prev_cpu and @wake_flags match ops.select_cpu().
+ * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
+ * context such as a BPF test_run() call, as long as built-in CPU selection
+ * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
+ * is set.
  *
  * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
  * currently idle and thus a good candidate for direct dispatching.
  */
 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
                                       u64 wake_flags, bool *is_idle)
 {
-#ifdef CONFIG_SMP
        s32 cpu;
-#endif
-       if (!kf_cpu_valid(prev_cpu, NULL))
-               goto prev_cpu;
-
-       if (!check_builtin_idle_enabled())
-               goto prev_cpu;
-
-       if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
-               goto prev_cpu;
 
-#ifdef CONFIG_SMP
-       cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
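+       /*
+        * Delegate to the common helper: any negative return (no idle CPU
+        * found or invalid calling context) falls back to @prev_cpu below.
+        */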
+       cpu = select_cpu_from_kfunc(p, prev_cpu, wake_flags, NULL, 0);
        if (cpu >= 0) {
                *is_idle = true;
                return cpu;
        }
-#endif
-
-prev_cpu:
        *is_idle = false;
+
        return prev_cpu;
 }
 
 __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
                                       const struct cpumask *cpus_allowed, u64 flags)
 {
-       struct rq *rq;
-       struct rq_flags rf;
-       s32 cpu;
-
-       if (!kf_cpu_valid(prev_cpu, NULL))
-               return -EINVAL;
-
-       if (!check_builtin_idle_enabled())
-               return -EBUSY;
-
-       /*
-        * If called from an unlocked context, acquire the task's rq lock,
-        * so that we can safely access p->cpus_ptr and p->nr_cpus_allowed.
-        *
-        * Otherwise, allow to use this kfunc only from ops.select_cpu()
-        * and ops.select_enqueue().
-        */
-       if (scx_kf_allowed_if_unlocked()) {
-               rq = task_rq_lock(p, &rf);
-       } else {
-               if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
-                       return -EPERM;
-               rq = scx_locked_rq();
-       }
-
-       /*
-        * Validate locking correctness to access p->cpus_ptr and
-        * p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
-        * otherwise, assert that p->pi_lock is held.
-        */
-       if (!rq)
-               lockdep_assert_held(&p->pi_lock);
-
-#ifdef CONFIG_SMP
-       /*
-        * This may also be called from ops.enqueue(), so we need to handle
-        * per-CPU tasks as well. For these tasks, we can skip all idle CPU
-        * selection optimizations and simply check whether the previously
-        * used CPU is idle and within the allowed cpumask.
-        */
-       if (p->nr_cpus_allowed == 1) {
-               if (cpumask_test_cpu(prev_cpu, cpus_allowed) &&
-                   scx_idle_test_and_clear_cpu(prev_cpu))
-                       cpu = prev_cpu;
-               else
-                       cpu = -EBUSY;
-       } else {
-               cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, cpus_allowed, flags);
-       }
-#else
-       cpu = -EBUSY;
-#endif
-       if (scx_kf_allowed_if_unlocked())
-               task_rq_unlock(rq, p, &rf);
-
-       return cpu;
+       return select_cpu_from_kfunc(p, prev_cpu, wake_flags, cpus_allowed, flags);
 }
 
 /**
 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
 BTF_KFUNCS_END(scx_kfunc_ids_idle)
 
 static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
        .set                    = &scx_kfunc_ids_idle,
 };
 
-BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
-BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
-BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
-
-static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
-       .owner                  = THIS_MODULE,
-       .set                    = &scx_kfunc_ids_select_cpu,
-};
-
 int scx_idle_init(void)
 {
        int ret;
 
-       ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_select_cpu) ||
-             register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
+       ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
              register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
              register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle);