From: Tejun Heo
Date: Wed, 9 Apr 2025 19:06:00 +0000 (-1000)
Subject: sched_ext: Remove scx_ops_cpu_preempt static_key
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=54d2e717bc5f419b111915adfdec7ecc1ca8cf90;p=users%2Fwilly%2Fxarray.git

sched_ext: Remove scx_ops_cpu_preempt static_key

scx_ops_cpu_preempt is used to encode whether ops.cpu_acquire/release()
are implemented into a static_key. These tests aren't hot enough for
static_key usage to make any meaningful difference and use a static_key
mostly because there was no reason not to. However, global static_keys
can't work with the planned hierarchical multiple scheduler support.

Remove the static_key and instead use an internal ops flag
SCX_OPS_HAS_CPU_PREEMPT to record and test whether
ops.cpu_acquire/release() are implemented.

In repeated hackbench runs before and after the static_key removal on an
AMD Ryzen 3900X, I couldn't detect any measurable performance difference.

Signed-off-by: Tejun Heo
Acked-by: Changwoo Min
Acked-by: Andrea Righi
---

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 1e685e77b5e4..1adf5c299cce 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -173,6 +173,11 @@ enum scx_ops_flags {
                           SCX_OPS_SWITCH_PARTIAL |
                           SCX_OPS_BUILTIN_IDLE_PER_NODE |
                           SCX_OPS_HAS_CGROUP_WEIGHT,
+
+       /* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
+       __SCX_OPS_INTERNAL_MASK = 0xffLLU << 56,
+
+       SCX_OPS_HAS_CPU_PREEMPT = 1LLU << 56,
 };
 
 /* argument container for ops.init_task() */
@@ -924,7 +929,6 @@ static struct sched_ext_ops scx_ops;
 static bool scx_warned_zero_slice;
 
 DEFINE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
-static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
 
 static struct static_key_false scx_has_op[SCX_OPI_END] =
        { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
@@ -2931,7 +2935,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
        rq->scx.flags |= SCX_RQ_IN_BALANCE;
        rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
 
-       if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
+       if ((scx_ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
            unlikely(rq->scx.cpu_released)) {
                /*
                 * If the previous sched_class for the current CPU was not SCX,
@@ -3160,7 +3164,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
         */
        smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
 #endif
-       if (!static_branch_unlikely(&scx_ops_cpu_preempt))
+       if (!(scx_ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
                return;
 
        /*
@@ -4725,7 +4729,6 @@ static void scx_disable_workfn(struct kthread_work *work)
        for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
                static_branch_disable(&scx_has_op[i]);
        static_branch_disable(&scx_ops_allow_queued_wakeup);
-       static_branch_disable(&scx_ops_cpu_preempt);
        scx_idle_disable();
        synchronize_rcu();
 
@@ -5367,7 +5370,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
        if (ops->flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
                static_branch_enable(&scx_ops_allow_queued_wakeup);
        if (scx_ops.cpu_acquire || scx_ops.cpu_release)
-               static_branch_enable(&scx_ops_cpu_preempt);
+               scx_ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
 
        /*
         * Lock out forks, cgroup on/offlining and moves before opening the
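
The diff partitions a single 64-bit flags word: the low bits carry the
user-visible flags collected in SCX_OPS_ALL_FLAGS, while the top 8 bits
(__SCX_OPS_INTERNAL_MASK) are reserved for state the core records itself,
such as SCX_OPS_HAS_CPU_PREEMPT. The standalone sketch below is
illustrative userspace C, not kernel code; the OPS_* names, struct ops,
and flags_valid() are made up to mirror the enum above. It shows the
pattern: flag validation rejects internal bits from userspace, the core
sets the internal flag at enable time when the callbacks exist, and hot
paths test the plain flag rather than a static branch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* User-visible ops flags live in the low bits (one shown for brevity). */
#define OPS_SWITCH_PARTIAL	(1ULL << 3)
#define OPS_ALL_FLAGS		OPS_SWITCH_PARTIAL

/* High 8 bits are internal, never accepted from a loaded scheduler. */
#define OPS_INTERNAL_MASK	(0xffULL << 56)
#define OPS_HAS_CPU_PREEMPT	(1ULL << 56)

struct ops {
	uint64_t flags;
	void (*cpu_acquire)(void);
	void (*cpu_release)(void);
};

/* Reject any flag outside the public set, internal bits included. */
static bool flags_valid(uint64_t flags)
{
	return !(flags & ~OPS_ALL_FLAGS);
}

static void dummy_release(void) { }

int main(void)
{
	struct ops ops = { .flags = OPS_SWITCH_PARTIAL,
			   .cpu_release = dummy_release };

	/* Userspace can't smuggle an internal bit through validation. */
	printf("internal bit from user: %s\n",
	       flags_valid(ops.flags | OPS_HAS_CPU_PREEMPT) ?
	       "accepted" : "rejected");

	/* At enable time the core records whether the callbacks exist ... */
	if (ops.cpu_acquire || ops.cpu_release)
		ops.flags |= OPS_HAS_CPU_PREEMPT;

	/* ... and hot paths test a plain flag instead of a static branch. */
	if (ops.flags & OPS_HAS_CPU_PREEMPT)
		printf("cpu_acquire/release handling enabled\n");

	return 0;
}

The trade-off behind the patch: a static key patches the instruction
stream globally, so a single key cannot represent per-scheduler state,
whereas a flags test costs an ordinary load-and-compare but can live in
each scheduler's ops struct, which is what the planned hierarchical
multiple scheduler support requires.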