From e99129e5dbf7ca87233d31ad19348f6ce8627b38 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Thu, 1 Aug 2024 13:32:59 -1000
Subject: [PATCH] sched_ext: Allow p->scx.disallow only while loading

p->scx.disallow provides a way for the BPF scheduler to reject certain
tasks from attaching. It's currently allowed for both the load and fork
paths; however, the latter doesn't actually work as p->sched_class is
already set by the time scx_ops_init_task() is called during fork.

This is a convenience feature which is mostly useful from the load path
anyway. Allow it only from the load path.

v2: Trigger scx_ops_error() iff @p->policy == SCHED_EXT to make it a
    bit easier for the BPF scheduler (David).

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: "Zhangqiao (2012 lab)" <zhangqiao22@huawei.com>
Link: http://lkml.kernel.org/r/20240711110720.1285-1-zhangqiao22@huawei.com
Fixes: 7bb6f0810ecf ("sched_ext: Allow BPF schedulers to disallow specific tasks from joining SCHED_EXT")
Acked-by: David Vernet <void@manifault.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
---
 include/linux/sched/ext.h | 11 ++++++-----
 kernel/sched/ext.c        | 35 ++++++++++++++++++++---------------
 2 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 26e1c33bc844a..69f68e2121a8f 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -179,11 +179,12 @@ struct sched_ext_entity {
	 * If set, reject future sched_setscheduler(2) calls updating the policy
	 * to %SCHED_EXT with -%EACCES.
	 *
-	 * If set from ops.init_task() and the task's policy is already
-	 * %SCHED_EXT, which can happen while the BPF scheduler is being loaded
-	 * or by inhering the parent's policy during fork, the task's policy is
-	 * rejected and forcefully reverted to %SCHED_NORMAL. The number of
-	 * such events are reported through /sys/kernel/debug/sched_ext::nr_rejected.
+	 * Can be set from ops.init_task() while the BPF scheduler is being
+	 * loaded (!scx_init_task_args->fork). If set and the task's policy is
+	 * already %SCHED_EXT, the task's policy is rejected and forcefully
+	 * reverted to %SCHED_NORMAL. The number of such events are reported
+	 * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag
+	 * during fork is not allowed.
	 */
	bool disallow;		/* reject switching into SCX */
 
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 6f7c7d8b56de3..938830121a325 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3396,24 +3396,29 @@ static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool
 	scx_set_task_state(p, SCX_TASK_INIT);
 
 	if (p->scx.disallow) {
-		struct rq *rq;
-		struct rq_flags rf;
+		if (!fork) {
+			struct rq *rq;
+			struct rq_flags rf;
 
-		rq = task_rq_lock(p, &rf);
+			rq = task_rq_lock(p, &rf);
 
-		/*
-		 * We're either in fork or load path and @p->policy will be
-		 * applied right after. Reverting @p->policy here and rejecting
-		 * %SCHED_EXT transitions from scx_check_setscheduler()
-		 * guarantees that if ops.init_task() sets @p->disallow, @p can
-		 * never be in SCX.
-		 */
-		if (p->policy == SCHED_EXT) {
-			p->policy = SCHED_NORMAL;
-			atomic_long_inc(&scx_nr_rejected);
-		}
+			/*
+			 * We're in the load path and @p->policy will be applied
+			 * right after. Reverting @p->policy here and rejecting
+			 * %SCHED_EXT transitions from scx_check_setscheduler()
+			 * guarantees that if ops.init_task() sets @p->disallow,
+			 * @p can never be in SCX.
+			 */
+			if (p->policy == SCHED_EXT) {
+				p->policy = SCHED_NORMAL;
+				atomic_long_inc(&scx_nr_rejected);
+			}
 
-		task_rq_unlock(rq, p, &rf);
+			task_rq_unlock(rq, p, &rf);
+		} else if (p->policy == SCHED_EXT) {
+			scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
+				      p->comm, p->pid);
+		}
 	}
 
 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
-- 
2.50.1
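
For reference, below is a minimal sketch (not part of the patch) of how a
BPF scheduler's ops.init_task() might use p->scx.disallow correctly after
this change, i.e. only on the load path. The op name and the
disallowed_tgid knob are hypothetical; the ops.init_task() signature, the
args->fork flag (the scx_init_task_args->fork the patch references), and
the BPF_STRUCT_OPS macro come from the sched_ext BPF API and its common
headers.

	/* Hypothetical knob: a tgid this scheduler refuses to manage. */
	const volatile s32 disallowed_tgid;

	s32 BPF_STRUCT_OPS(sketch_init_task, struct task_struct *p,
			   struct scx_init_task_args *args)
	{
		/*
		 * Only reject tasks while the scheduler is being loaded.
		 * With this patch applied, setting disallow when args->fork
		 * is true triggers scx_ops_error() and aborts the scheduler,
		 * so the !args->fork check is mandatory.
		 */
		if (!args->fork && p->tgid == disallowed_tgid)
			p->scx.disallow = true;

		return 0;
	}

A rejected task keeps running under SCHED_NORMAL rather than SCX, and the
rejection is counted in /sys/kernel/debug/sched_ext::nr_rejected.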