sched_ext: Drop "ops" from scx_ops_helper, scx_ops_enable_mutex and __scx_ops_enabled
author     Tejun Heo <tj@kernel.org>
           Fri, 4 Apr 2025 18:52:48 +0000 (08:52 -1000)
committer  Tejun Heo <tj@kernel.org>
           Fri, 4 Apr 2025 18:52:48 +0000 (08:52 -1000)
The tag "ops" is used for two different purposes. First, to indicate that
the entity is directly related to the operations such as flags carried in
sched_ext_ops. Second, to indicate that the entity applies to something
global such as enable or bypass states. The second usage is historical and
causes confusion rather than clarifying anything. For example,
scx_ops_enable_state enums are named SCX_OPS_* and thus conflict with
scx_ops_flags. Let's drop the second usage.
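
For illustration, a minimal sketch of the clash described above (the
identifier names are taken from the code, but the members shown are
abbreviated and the values illustrative):

    /* per-scheduler behavior flags, carried in sched_ext_ops */
    enum scx_ops_flags {
            SCX_OPS_SWITCH_PARTIAL  = 1LLU << 3,
    };

    /* global enable state; unrelated to the ops flags, yet it used to
     * occupy the same SCX_OPS_* namespace */
    enum scx_ops_enable_state {
            SCX_OPS_ENABLING,
            SCX_OPS_ENABLED,
            SCX_OPS_DISABLING,
            SCX_OPS_DISABLED,
    };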

Drop "ops" from scx_ops_helper, scx_ops_enable_mutex and __scx_ops_enabled.
Update scx_show_state.py accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c
kernel/sched/sched.h
tools/sched_ext/scx_show_state.py

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 07b07e89a57885de1c15eb606d591b84cdcdaaa5..51c875aee5ec53e3316ee8c8304c47a60071065b 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -907,9 +907,9 @@ static DEFINE_SPINLOCK(scx_tasks_lock);
 static LIST_HEAD(scx_tasks);
 
 /* ops enable/disable */
-static struct kthread_worker *scx_ops_helper;
-static DEFINE_MUTEX(scx_ops_enable_mutex);
-DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
+static struct kthread_worker *scx_helper;
+static DEFINE_MUTEX(scx_enable_mutex);
+DEFINE_STATIC_KEY_FALSE(__scx_enabled);
 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
 static unsigned long scx_in_softlockup;
@@ -4712,7 +4712,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
         * we can safely use blocking synchronization constructs. Actually
         * disable ops.
         */
-       mutex_lock(&scx_ops_enable_mutex);
+       mutex_lock(&scx_enable_mutex);
 
        static_branch_disable(&__scx_switched_all);
        WRITE_ONCE(scx_switching_all, false);
@@ -4766,7 +4766,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
        }
 
        /* no task is on scx, turn off all the switches and flush in-progress calls */
-       static_branch_disable(&__scx_ops_enabled);
+       static_branch_disable(&__scx_enabled);
        for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
                static_branch_disable(&scx_has_op[i]);
        static_branch_disable(&scx_ops_allow_queued_wakeup);
@@ -4826,7 +4826,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
        free_exit_info(scx_exit_info);
        scx_exit_info = NULL;
 
-       mutex_unlock(&scx_ops_enable_mutex);
+       mutex_unlock(&scx_enable_mutex);
 
        WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
 done:
@@ -4837,11 +4837,11 @@ static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
 
 static void schedule_scx_ops_disable_work(void)
 {
-       struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
+       struct kthread_worker *helper = READ_ONCE(scx_helper);
 
        /*
         * We may be called spuriously before the first bpf_sched_ext_reg(). If
-        * scx_ops_helper isn't set up yet, there's nothing to do.
+        * scx_helper isn't set up yet, there's nothing to do.
         */
        if (helper)
                kthread_queue_work(helper, &scx_ops_disable_work);
@@ -5262,7 +5262,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                return -EINVAL;
        }
 
-       mutex_lock(&scx_ops_enable_mutex);
+       mutex_lock(&scx_enable_mutex);
 
        /*
         * Clear event counters so a new scx scheduler gets
@@ -5273,10 +5273,9 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                memset(e, 0, sizeof(*e));
        }
 
-       if (!scx_ops_helper) {
-               WRITE_ONCE(scx_ops_helper,
-                          scx_create_rt_helper("sched_ext_ops_helper"));
-               if (!scx_ops_helper) {
+       if (!scx_helper) {
+               WRITE_ONCE(scx_helper, scx_create_rt_helper("sched_ext_helper"));
+               if (!scx_helper) {
                        ret = -ENOMEM;
                        goto err_unlock;
                }
@@ -5400,10 +5399,10 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                           scx_watchdog_timeout / 2);
 
        /*
-        * Once __scx_ops_enabled is set, %current can be switched to SCX
-        * anytime. This can lead to stalls as some BPF schedulers (e.g.
-        * userspace scheduling) may not function correctly before all tasks are
-        * switched. Init in bypass mode to guarantee forward progress.
+        * Once __scx_enabled is set, %current can be switched to SCX anytime.
+        * This can lead to stalls as some BPF schedulers (e.g. userspace
+        * scheduling) may not function correctly before all tasks are switched.
+        * Init in bypass mode to guarantee forward progress.
         */
        scx_ops_bypass(true);
 
@@ -5485,7 +5484,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
         * all eligible tasks.
         */
        WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
-       static_branch_enable(&__scx_ops_enabled);
+       static_branch_enable(&__scx_enabled);
 
        /*
         * We're fully committed and can't fail. The task READY -> ENABLED
@@ -5529,7 +5528,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
        pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
                scx_ops.name, scx_switched_all() ? "" : " (partial)");
        kobject_uevent(scx_root_kobj, KOBJ_ADD);
-       mutex_unlock(&scx_ops_enable_mutex);
+       mutex_unlock(&scx_enable_mutex);
 
        atomic_long_inc(&scx_enable_seq);
 
@@ -5545,7 +5544,7 @@ err:
                scx_exit_info = NULL;
        }
 err_unlock:
-       mutex_unlock(&scx_ops_enable_mutex);
+       mutex_unlock(&scx_enable_mutex);
        return ret;
 
 err_disable_unlock_all:
@@ -5553,7 +5552,7 @@ err_disable_unlock_all:
        percpu_up_write(&scx_fork_rwsem);
        scx_ops_bypass(false);
 err_disable:
-       mutex_unlock(&scx_ops_enable_mutex);
+       mutex_unlock(&scx_enable_mutex);
        /*
         * Returning an error code here would not pass all the error information
         * to userspace. Record errno using scx_ops_error() for cases
@@ -5836,7 +5835,7 @@ static struct bpf_struct_ops bpf_sched_ext_ops = {
 
 static void sysrq_handle_sched_ext_reset(u8 key)
 {
-       if (scx_ops_helper)
+       if (scx_helper)
                scx_ops_disable(SCX_EXIT_SYSRQ);
        else
                pr_info("sched_ext: BPF scheduler not yet used\n");
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47972f34ea70144d40446ee7bb6ca922608eed5c..ac07f64c8f39550c4f7c4f9499f7e5aeae479098 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1717,10 +1717,10 @@ extern struct balance_callback balance_push_callback;
 #ifdef CONFIG_SCHED_CLASS_EXT
 extern const struct sched_class ext_sched_class;
 
-DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled);   /* SCX BPF scheduler loaded */
+DECLARE_STATIC_KEY_FALSE(__scx_enabled);       /* SCX BPF scheduler loaded */
 DECLARE_STATIC_KEY_FALSE(__scx_switched_all);  /* all fair class tasks on SCX */
 
-#define scx_enabled()          static_branch_unlikely(&__scx_ops_enabled)
+#define scx_enabled()          static_branch_unlikely(&__scx_enabled)
 #define scx_switched_all()     static_branch_unlikely(&__scx_switched_all)
 
 static inline void scx_rq_clock_update(struct rq *rq, u64 clock)
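
For context, scx_enabled() is the static-branch gate the core scheduler uses
around sched_ext-specific paths; the check is patched out entirely while no
BPF scheduler is loaded. A sketch of a typical caller, modeled on (but not
verbatim from) ext.c:

    static bool task_on_scx(const struct task_struct *p)
    {
            /* costs nothing when disabled: scx_enabled() is a static branch */
            return scx_enabled() && p->sched_class == &ext_sched_class;
    }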
diff --git a/tools/sched_ext/scx_show_state.py b/tools/sched_ext/scx_show_state.py
index 9c658171c16bed74eb6da6eaea732db342b0aaa3..d3c81b92248a1319f089690cbf07618491493dfd 100644
--- a/tools/sched_ext/scx_show_state.py
+++ b/tools/sched_ext/scx_show_state.py
@@ -31,7 +31,7 @@ ops = prog['scx_ops']
 enable_state = read_atomic("scx_enable_state_var")
 
 print(f'ops           : {ops.name.string_().decode()}')
-print(f'enabled       : {read_static_key("__scx_ops_enabled")}')
+print(f'enabled       : {read_static_key("__scx_enabled")}')
 print(f'switching_all : {read_int("scx_switching_all")}')
 print(f'switched_all  : {read_static_key("__scx_switched_all")}')
 print(f'enable_state  : {state_str(enable_state)} ({enable_state})')
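
For reference, the read_* helpers the script calls are drgn accessors defined
earlier in scx_show_state.py, outside this hunk. A sketch of what they
presumably look like (the real definitions may differ in detail):

    def read_int(name):
        return int(prog[name].value_())

    def read_atomic(name):
        # atomic_t wraps a plain counter
        return prog[name].counter.value_()

    def read_static_key(name):
        # static keys expose their enable count via key.enabled
        return prog[name].key.enabled.counter.value_()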