sched_ext: idle: Explicitly pass allowed cpumask to scx_select_cpu_dfl()
author Andrea Righi <arighi@nvidia.com>
Sat, 5 Apr 2025 13:39:22 +0000 (15:39 +0200)
committer Tejun Heo <tj@kernel.org>
Mon, 7 Apr 2025 17:13:52 +0000 (07:13 -1000)
Modify scx_select_cpu_dfl() to take the allowed cpumask as an explicit
argument, instead of implicitly using @p->cpus_ptr.

This prepares for future changes where arbitrary cpumasks may be passed
to the built-in idle CPU selection policy.

This is a pure refactoring with no functional changes.
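In practice, existing call sites pass NULL to keep the current behavior,
since the function falls back to @p->cpus_ptr when no mask is given. A
minimal sketch of the two calling conventions (custom_mask is a
hypothetical caller-supplied cpumask, shown for illustration only):

    /* Existing behavior: NULL falls back to p->cpus_ptr internally. */
    cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);

    /* Future callers may restrict the idle search to an arbitrary mask. */
    cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, custom_mask, 0);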

Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c
kernel/sched/ext_idle.c
kernel/sched/ext_idle.h

index 6781e6da059b7cc2daf6286b7c1644c2b6d58018..ac3fd7a409e9aeee5f5948b0ad64314994bac8d3 100644 (file)
@@ -3392,7 +3392,7 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
        } else {
                s32 cpu;
 
-               cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+               cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
                if (cpu >= 0) {
                        p->scx.slice = SCX_SLICE_DFL;
                        p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
index ed37fb8e45180fd938ee0e97ccadd008753fc2c9..5d6253c6ed9084ae0638105ff1108ccc20b1ea19 100644 (file)
@@ -438,9 +438,11 @@ static inline bool task_affinity_all(const struct task_struct *p)
  * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
  * we never call ops.select_cpu() for them, see select_task_rq().
  */
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+                      const struct cpumask *cpus_allowed, u64 flags)
 {
        const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
+       const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
        int node = scx_cpu_node_if_enabled(prev_cpu);
        s32 cpu;
 
@@ -460,9 +462,9 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
                struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_numa_idle_cpumask);
                const struct cpumask *cpus = numa_span(prev_cpu);
 
-               if (task_affinity_all(p))
+               if (allowed == p->cpus_ptr && task_affinity_all(p))
                        numa_cpus = cpus;
-               else if (cpus && cpumask_and(local_cpus, p->cpus_ptr, cpus))
+               else if (cpus && cpumask_and(local_cpus, allowed, cpus))
                        numa_cpus = local_cpus;
        }
 
@@ -470,9 +472,9 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
                struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_llc_idle_cpumask);
                const struct cpumask *cpus = llc_span(prev_cpu);
 
-               if (task_affinity_all(p))
+               if (allowed == p->cpus_ptr && task_affinity_all(p))
                        llc_cpus = cpus;
-               else if (cpus && cpumask_and(local_cpus, p->cpus_ptr, cpus))
+               else if (cpus && cpumask_and(local_cpus, allowed, cpus))
                        llc_cpus = local_cpus;
        }
 
@@ -511,7 +513,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
                    cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
                    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
                    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
-                       if (cpumask_test_cpu(cpu, p->cpus_ptr))
+                       if (cpumask_test_cpu(cpu, allowed))
                                goto out_unlock;
                }
        }
@@ -556,7 +558,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
                 * begin in prev_cpu's node and proceed to other nodes in
                 * order of increasing distance.
                 */
-               cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
+               cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
                if (cpu >= 0)
                        goto out_unlock;
 
@@ -604,7 +606,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
         * in prev_cpu's node and proceed to other nodes in order of
         * increasing distance.
         */
-       cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
+       cpu = scx_pick_idle_cpu(allowed, node, flags);
 
 out_unlock:
        rcu_read_unlock();
@@ -858,7 +860,7 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
                goto prev_cpu;
 
 #ifdef CONFIG_SMP
-       cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+       cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
        if (cpu >= 0) {
                *is_idle = true;
                return cpu;
index 511cc2221f7a85321bed3abe1d1425f01c7e483b..37be78a7502b3257e4e4b77bfb3b4e3cdb1dd2ea 100644 (file)
@@ -27,7 +27,8 @@ static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node
 }
 #endif /* CONFIG_SMP */
 
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags);
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+                      const struct cpumask *cpus_allowed, u64 flags);
 void scx_idle_enable(struct sched_ext_ops *ops);
 void scx_idle_disable(void);
 int scx_idle_init(void);
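
With the cpumask argument exposed in the header, a follow-up change could
narrow the idle-CPU search to a scheduler-managed subset without modifying
the task's affinity. A hypothetical sketch, assuming a caller that already
holds a subset cpumask (function name and context invented for
illustration):

    /*
     * Hypothetical caller: pick an idle CPU only from @subset, leaving
     * p->cpus_ptr untouched. Passing NULL instead would restore the
     * default affinity-based behavior.
     */
    static s32 pick_idle_from_subset(struct task_struct *p, s32 prev_cpu,
                                     u64 wake_flags,
                                     const struct cpumask *subset)
    {
            return scx_select_cpu_dfl(p, prev_cpu, wake_flags, subset, 0);
    }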