static struct sched_ext_ops scx_ops;
static bool scx_warned_zero_slice;
-static struct static_key_false scx_has_op[SCX_OPI_END] =
- { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
+static DECLARE_BITMAP(scx_has_op, SCX_OPI_END);
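The replacement relies on the kernel's generic bitmap helpers. Below is a minimal userspace sketch of the pattern; the macro and helper definitions are illustrative reimplementations of what <linux/bitmap.h> and <linux/bitops.h> provide (the kernel's set_bit() is additionally an atomic RMW, which the sketch ignores):

```c
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

static void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_bit(unsigned int nr, const unsigned long *map)
{
	return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

static void bitmap_zero(unsigned long *map, unsigned int bits)
{
	memset(map, 0, BITS_TO_LONGS(bits) * sizeof(unsigned long));
}

#define SCX_OPI_END	64	/* stand-in for the real op-index count */
static DECLARE_BITMAP(scx_has_op, SCX_OPI_END);

int main(void)
{
	set_bit(3, scx_has_op);
	printf("%d %d\n", test_bit(3, scx_has_op), test_bit(4, scx_has_op)); /* 1 0 */
	bitmap_zero(scx_has_op, SCX_OPI_END);
	printf("%d\n", test_bit(3, scx_has_op));	/* 0 */
	return 0;
}
```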
static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
static struct scx_exit_info *scx_exit_info;
#define scx_error(fmt, args...) \
__scx_error(SCX_EXIT_ERROR, fmt, ##args)
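For context, scx_error() depends on two GNU C extensions: naming the variadic parameter list ("args...") and the comma-swallowing "##", which lets the macro also be invoked with no extra arguments. A compilable sketch of the same pattern (log_err() is a made-up name, not a kernel API):

```c
#include <stdio.h>

/* "##args" drops the trailing comma when the variadic list is empty. */
#define log_err(fmt, args...)	fprintf(stderr, "error: " fmt "\n", ##args)

int main(void)
{
	log_err("plain message");	/* expands without a dangling comma */
	log_err("cpu %d stalled", 3);	/* normal variadic use */
	return 0;
}
```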
-#define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
+#define SCX_HAS_OP(op) test_bit(SCX_OP_IDX(op), scx_has_op)
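This is the behavioral core of the change: a static key compiles SCX_HAS_OP() into a nop/jump that is patched when ops are installed or removed, so the test itself carried the prediction; test_bit() is an ordinary load plus bit test. That is why the call sites below gain explicit likely()/unlikely() annotations. In their simple form the kernel's hint macros expand to __builtin_expect(), as in this sketch (handle_enqueue() is an invented example):

```c
/* The kernel's branch hints, in their basic <linux/compiler.h> form. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * The hint only steers code layout and static prediction: the compiler
 * keeps the expected path on the straight-line fall-through and moves
 * the other side out of line.
 */
static int handle_enqueue(int has_enqueue_op)
{
	if (unlikely(!has_enqueue_op))
		return -1;	/* cold path */
	return 0;		/* hot path, falls through */
}

int main(void)
{
	return handle_enqueue(1);	/* 0 */
}
```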
static long jiffies_delta_msecs(unsigned long at, unsigned long now)
{

static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);
#ifdef CONFIG_SCHED_CORE
- if (SCX_HAS_OP(core_sched_before))
+ if (unlikely(SCX_HAS_OP(core_sched_before)))
touch_core_sched(rq, p);
#endif
}
goto local;
}
- if (!SCX_HAS_OP(enqueue))
+ if (unlikely(!SCX_HAS_OP(enqueue)))
goto global;
/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
if (consume_global_dsq(rq))
goto has_tasks;
- if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
+ if (unlikely(!SCX_HAS_OP(dispatch)) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
goto no_tasks;
dspc->rq = rq;
return prev_cpu;
rq_bypass = scx_rq_bypassing(task_rq(p));
- if (SCX_HAS_OP(select_cpu) && !rq_bypass) {
+ if (likely(SCX_HAS_OP(select_cpu)) && !rq_bypass) {
s32 cpu;
struct task_struct **ddsp_taskp;
struct task_struct *p;
struct rhashtable_iter rht_iter;
struct scx_dispatch_q *dsq;
- int i, kind, cpu;
+ int kind, cpu;
kind = atomic_read(&scx_exit_kind);
while (true) {
/* no task is on scx, turn off all the switches and flush in-progress calls */
static_branch_disable(&__scx_enabled);
- for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
- static_branch_disable(&scx_has_op[i]);
+ bitmap_zero(scx_has_op, SCX_OPI_END);
scx_idle_disable();
synchronize_rcu();
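One bitmap_zero() now replaces the per-op static_branch_disable() loop, and the ordering against the pre-existing synchronize_rcu() is what makes the "flush in-progress calls" comment above hold: clear the bits first so new callers see every op as absent, then wait out anyone who tested a bit before it was cleared. A sketch of that sequence with the kernel primitives stubbed out (both stubs are illustrative, not real APIs):

```c
#include <stdio.h>
#include <string.h>

static unsigned long scx_has_op[1];	/* shrunk bitmap for the sketch */

static void bitmap_zero_stub(unsigned long *map, size_t bytes)
{
	memset(map, 0, bytes);
}

static void synchronize_rcu_stub(void)
{
	/*
	 * The real synchronize_rcu() blocks until every caller that may
	 * have observed a set bit and entered an op has finished.
	 */
	puts("draining in-flight op calls");
}

static void scx_disable_sketch(void)
{
	bitmap_zero_stub(scx_has_op, sizeof(scx_has_op));	/* 1: hide all ops */
	synchronize_rcu_stub();					/* 2: flush callers */
}

int main(void)
{
	scx_has_op[0] = 0xff;
	scx_disable_sketch();
	return (int)scx_has_op[0];	/* 0 */
}
```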
for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
if (((void (**)(void))ops)[i])
- static_branch_enable_cpuslocked(&scx_has_op[i]);
+ set_bit(i, scx_has_op);
check_hotplug_seq(ops);
scx_idle_update_selcpu_topology(ops);
for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
if (((void (**)(void))ops)[i])
- static_branch_enable(&scx_has_op[i]);
+ set_bit(i, scx_has_op);
if (scx_ops.cpu_acquire || scx_ops.cpu_release)
scx_ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
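The enable loops lean on the trick of viewing struct sched_ext_ops as a flat table of function pointers, i.e. the ((void (**)(void))ops)[i] cast, and set one bit per populated slot. A userspace sketch of that scan (struct demo_ops and its fields are invented stand-ins; the kernel walks the real ops struct the same way):

```c
#include <stdio.h>

struct demo_ops {
	void (*enqueue)(void);
	void (*dispatch)(void);
	void (*select_cpu)(void);
};

#define DEMO_NR_OPS	(sizeof(struct demo_ops) / sizeof(void (*)(void)))

static unsigned long has_op;	/* one bit per op slot */

static void demo_enqueue(void) { }

int main(void)
{
	struct demo_ops ops = { .enqueue = demo_enqueue };
	void (**tbl)(void) = (void (**)(void))&ops;

	/* Mirrors the enable loop: record each implemented callback. */
	for (unsigned int i = 0; i < DEMO_NR_OPS; i++)
		if (tbl[i])
			has_op |= 1UL << i;

	printf("enqueue=%lu dispatch=%lu\n",
	       has_op & 1UL, (has_op >> 1) & 1UL);	/* 1 0 */
	return 0;
}
```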