sched_ext: Make scx_has_op a bitmap
author    Tejun Heo <tj@kernel.org>
          Wed, 9 Apr 2025 19:06:00 +0000 (09:06 -1000)
committer Tejun Heo <tj@kernel.org>
          Wed, 9 Apr 2025 19:06:00 +0000 (09:06 -1000)
scx_has_op is used to encode which ops are implemented by the BPF scheduler
into an array of static_keys. While this saves a bit of branching overhead,
that is unlikely to be noticeable compared to the overall cost. As the
global static_keys can't work with the planned hierarchical multiple
scheduler support, replace the static_key array with a bitmap.
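
Condensed view of the swap (an illustrative sketch distilled from the hunks
below, not a standalone buildable snippet):

    /* before: one static_key per op, flipped at enable/disable time */
    static struct static_key_false scx_has_op[SCX_OPI_END] =
            { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
    #define SCX_HAS_OP(op)  static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])

    /* after: a plain bitmap, tested with test_bit() on every call */
    static DECLARE_BITMAP(scx_has_op, SCX_OPI_END);
    #define SCX_HAS_OP(op)  test_bit(SCX_OP_IDX(op), scx_has_op)

    /* enable path: set a bit per implemented op */
    set_bit(i, scx_has_op);
    /* disable path: clear everything in one go */
    bitmap_zero(scx_has_op, SCX_OPI_END);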

In repeated hackbench runs before and after the static_keys removal on an AMD
Ryzen 3900X, I could not detect any measurable performance difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c

index f0ed0cec4c98c3900928a9d00a57574399bb26e1..8ae85ec6d9a2b6aa3fea64150d5196686112a754 100644
@@ -928,8 +928,7 @@ DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
 static struct sched_ext_ops scx_ops;
 static bool scx_warned_zero_slice;
 
-static struct static_key_false scx_has_op[SCX_OPI_END] =
-       { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
+static DECLARE_BITMAP(scx_has_op, SCX_OPI_END);
 
 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
 static struct scx_exit_info *scx_exit_info;
@@ -1055,7 +1054,7 @@ static __printf(3, 4) void __scx_exit(enum scx_exit_kind kind, s64 exit_code,
 #define scx_error(fmt, args...)                                                        \
        __scx_error(SCX_EXIT_ERROR, fmt, ##args)
 
-#define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
+#define SCX_HAS_OP(op) test_bit(SCX_OP_IDX(op), scx_has_op)
 
 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
 {
@@ -1774,7 +1773,7 @@ static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
        lockdep_assert_rq_held(rq);
 
 #ifdef CONFIG_SCHED_CORE
-       if (SCX_HAS_OP(core_sched_before))
+       if (unlikely(SCX_HAS_OP(core_sched_before)))
                touch_core_sched(rq, p);
 #endif
 }
@@ -2156,7 +2155,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
                goto local;
        }
 
-       if (!SCX_HAS_OP(enqueue))
+       if (unlikely(!SCX_HAS_OP(enqueue)))
                goto global;
 
        /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
@@ -2972,7 +2971,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
        if (consume_global_dsq(rq))
                goto has_tasks;
 
-       if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
+       if (unlikely(!SCX_HAS_OP(dispatch)) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
                goto no_tasks;
 
        dspc->rq = rq;
@@ -3373,7 +3372,7 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
                return prev_cpu;
 
        rq_bypass = scx_rq_bypassing(task_rq(p));
-       if (SCX_HAS_OP(select_cpu) && !rq_bypass) {
+       if (likely(SCX_HAS_OP(select_cpu)) && !rq_bypass) {
                s32 cpu;
                struct task_struct **ddsp_taskp;
 
@@ -4638,7 +4637,7 @@ static void scx_disable_workfn(struct kthread_work *work)
        struct task_struct *p;
        struct rhashtable_iter rht_iter;
        struct scx_dispatch_q *dsq;
-       int i, kind, cpu;
+       int kind, cpu;
 
        kind = atomic_read(&scx_exit_kind);
        while (true) {
@@ -4731,8 +4730,7 @@ static void scx_disable_workfn(struct kthread_work *work)
 
        /* no task is on scx, turn off all the switches and flush in-progress calls */
        static_branch_disable(&__scx_enabled);
-       for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
-               static_branch_disable(&scx_has_op[i]);
+       bitmap_zero(scx_has_op, SCX_OPI_END);
        scx_idle_disable();
        synchronize_rcu();
 
@@ -5328,7 +5326,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 
        for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
                if (((void (**)(void))ops)[i])
-                       static_branch_enable_cpuslocked(&scx_has_op[i]);
+                       set_bit(i, scx_has_op);
 
        check_hotplug_seq(ops);
        scx_idle_update_selcpu_topology(ops);
@@ -5369,7 +5367,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 
        for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
                if (((void (**)(void))ops)[i])
-                       static_branch_enable(&scx_has_op[i]);
+                       set_bit(i, scx_has_op);
 
        if (scx_ops.cpu_acquire || scx_ops.cpu_release)
                scx_ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;