www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sched_ext: Allow p->scx.disallow only while loading
author: Tejun Heo <tj@kernel.org>
Thu, 1 Aug 2024 23:32:59 +0000 (13:32 -1000)
committer: Tejun Heo <tj@kernel.org>
Fri, 2 Aug 2024 18:59:32 +0000 (08:59 -1000)
From 1232da7eced620537a78f19c8cf3d4a3508e2419 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Wed, 31 Jul 2024 09:14:52 -1000

p->scx.disallow provides a way for the BPF scheduler to reject certain tasks
from attaching. It's currently allowed for both the load and fork paths;
however, the latter doesn't actually work as p->sched_class is already set
by the time scx_ops_init_task() is called during fork.

This is a convenience feature which is mostly useful from the load path
anyway. Allow it only from the load path.

v2: Trigger scx_ops_error() iff @p->policy == SCHED_EXT to make it a bit
    easier for the BPF scheduler (David).

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: "Zhangqiao (2012 lab)" <zhangqiao22@huawei.com>
Link: http://lkml.kernel.org/r/20240711110720.1285-1-zhangqiao22@huawei.com
Fixes: 7bb6f0810ecf ("sched_ext: Allow BPF schedulers to disallow specific tasks from joining SCHED_EXT")
Acked-by: David Vernet <void@manifault.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
include/linux/sched/ext.h
kernel/sched/ext.c

index 26e1c33bc844ab41a63e70faa3fb11c2652d9c7d..69f68e2121a8f8586a8f8ac5d91be091a50f9f6c 100644 (file)
@@ -179,11 +179,12 @@ struct sched_ext_entity {
         * If set, reject future sched_setscheduler(2) calls updating the policy
         * to %SCHED_EXT with -%EACCES.
         *
-        * If set from ops.init_task() and the task's policy is already
-        * %SCHED_EXT, which can happen while the BPF scheduler is being loaded
-        * or by inhering the parent's policy during fork, the task's policy is
-        * rejected and forcefully reverted to %SCHED_NORMAL. The number of
-        * such events are reported through /sys/kernel/debug/sched_ext::nr_rejected.
+        * Can be set from ops.init_task() while the BPF scheduler is being
+        * loaded (!scx_init_task_args->fork). If set and the task's policy is
+        * already %SCHED_EXT, the task's policy is rejected and forcefully
+        * reverted to %SCHED_NORMAL. The number of such events are reported
+        * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag
+        * during fork is not allowed.
         */
        bool                    disallow;       /* reject switching into SCX */
 
index 6f7c7d8b56de340186a481c25ed4d5621c6ff84b..938830121a3258cec27233abf8fec49c99859fc5 100644 (file)
@@ -3396,24 +3396,29 @@ static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool
        scx_set_task_state(p, SCX_TASK_INIT);
 
        if (p->scx.disallow) {
-               struct rq *rq;
-               struct rq_flags rf;
+               if (!fork) {
+                       struct rq *rq;
+                       struct rq_flags rf;
 
-               rq = task_rq_lock(p, &rf);
+                       rq = task_rq_lock(p, &rf);
 
-               /*
-                * We're either in fork or load path and @p->policy will be
-                * applied right after. Reverting @p->policy here and rejecting
-                * %SCHED_EXT transitions from scx_check_setscheduler()
-                * guarantees that if ops.init_task() sets @p->disallow, @p can
-                * never be in SCX.
-                */
-               if (p->policy == SCHED_EXT) {
-                       p->policy = SCHED_NORMAL;
-                       atomic_long_inc(&scx_nr_rejected);
-               }
+                       /*
+                        * We're in the load path and @p->policy will be applied
+                        * right after. Reverting @p->policy here and rejecting
+                        * %SCHED_EXT transitions from scx_check_setscheduler()
+                        * guarantees that if ops.init_task() sets @p->disallow,
+                        * @p can never be in SCX.
+                        */
+                       if (p->policy == SCHED_EXT) {
+                               p->policy = SCHED_NORMAL;
+                               atomic_long_inc(&scx_nr_rejected);
+                       }
 
-               task_rq_unlock(rq, p, &rf);
+                       task_rq_unlock(rq, p, &rf);
+               } else if (p->policy == SCHED_EXT) {
+                       scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
+                                     p->comm, p->pid);
+               }
        }
 
        p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;