selftests/sched_ext: Add test for scx_bpf_select_cpu_and()
author    Andrea Righi <arighi@nvidia.com>
          Sat, 5 Apr 2025 13:39:25 +0000 (15:39 +0200)
committer Tejun Heo <tj@kernel.org>
          Mon, 7 Apr 2025 17:13:52 +0000 (07:13 -1000)
Add a selftest to validate the behavior of the built-in idle CPU
selection policy applied to a subset of allowed CPUs, using
scx_bpf_select_cpu_and().

Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
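
For reference, scx_bpf_select_cpu_and() picks an idle CPU for a task from
the intersection of the task's affinity (p->cpus_ptr) and a caller-supplied
cpumask, claiming the CPU (clearing its idle bit) on success. A sketch of
the prototype, approximated from the sched_ext BPF headers rather than
quoted verbatim:

    /* Returns a claimed idle CPU within both @cpus_allowed and
     * @p->cpus_ptr, or a negative error if no idle CPU is available.
     */
    s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu,
                               u64 wake_flags,
                               const struct cpumask *cpus_allowed, u64 flags);
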
tools/testing/selftests/sched_ext/Makefile
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c [new file with mode: 0644]
tools/testing/selftests/sched_ext/allowed_cpus.c [new file with mode: 0644]

diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index f4531327b8e764246c5e4ff9a49f2c7133a4d96c..e9d5bc575f806c4d355c97b1097c44d401e22615 100644 (file)
@@ -173,6 +173,7 @@ auto-test-targets :=                        \
        maybe_null                      \
        minimal                         \
        numa                            \
+       allowed_cpus                    \
        prog_run                        \
        reload_loop                     \
        select_cpu_dfl                  \
diff --git a/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c b/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
new file mode 100644 (file)
index 0000000..39d57f7
--- /dev/null
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A scheduler that validates the behavior of scx_bpf_select_cpu_and() by
+ * selecting idle CPUs strictly within a subset of allowed CPUs.
+ *
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+private(PREF_CPUS) struct bpf_cpumask __kptr *allowed_cpumask;
+
+static void
+validate_idle_cpu(const struct task_struct *p, const struct cpumask *allowed, s32 cpu)
+{
+       if (scx_bpf_test_and_clear_cpu_idle(cpu))
+               scx_bpf_error("CPU %d should be marked as busy", cpu);
+
+       if (bpf_cpumask_subset(allowed, p->cpus_ptr) &&
+           !bpf_cpumask_test_cpu(cpu, allowed))
+               scx_bpf_error("CPU %d not in the allowed domain for %d (%s)",
+                             cpu, p->pid, p->comm);
+}
+
+s32 BPF_STRUCT_OPS(allowed_cpus_select_cpu,
+                  struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+       const struct cpumask *allowed;
+       s32 cpu;
+
+       allowed = cast_mask(allowed_cpumask);
+       if (!allowed) {
+               scx_bpf_error("allowed domain not initialized");
+               return -EINVAL;
+       }
+
+       /*
+        * Select an idle CPU strictly within the allowed domain.
+        */
+       cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, allowed, 0);
+       if (cpu >= 0) {
+               validate_idle_cpu(p, allowed, cpu);
+               scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+
+               return cpu;
+       }
+
+       return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(allowed_cpus_enqueue, struct task_struct *p, u64 enq_flags)
+{
+       const struct cpumask *allowed;
+       s32 prev_cpu = scx_bpf_task_cpu(p), cpu;
+
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+
+       allowed = cast_mask(allowed_cpumask);
+       if (!allowed) {
+               scx_bpf_error("allowed domain not initialized");
+               return;
+       }
+
+       /*
+        * Use scx_bpf_select_cpu_and() to proactively kick an idle CPU
+        * within @allowed_cpumask, usable by @p.
+        */
+       cpu = scx_bpf_select_cpu_and(p, prev_cpu, 0, allowed, 0);
+       if (cpu >= 0) {
+               validate_idle_cpu(p, allowed, cpu);
+               scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+       }
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(allowed_cpus_init)
+{
+       struct bpf_cpumask *mask;
+
+       mask = bpf_cpumask_create();
+       if (!mask)
+               return -ENOMEM;
+
+       mask = bpf_kptr_xchg(&allowed_cpumask, mask);
+       if (mask)
+               bpf_cpumask_release(mask);
+
+       bpf_rcu_read_lock();
+
+       /*
+        * Assign the first online CPU to the allowed domain.
+        */
+       mask = allowed_cpumask;
+       if (mask) {
+               const struct cpumask *online = scx_bpf_get_online_cpumask();
+
+               bpf_cpumask_set_cpu(bpf_cpumask_first(online), mask);
+               scx_bpf_put_cpumask(online);
+       }
+
+       bpf_rcu_read_unlock();
+
+       return 0;
+}
+
+void BPF_STRUCT_OPS(allowed_cpus_exit, struct scx_exit_info *ei)
+{
+       UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops allowed_cpus_ops = {
+       .select_cpu             = (void *)allowed_cpus_select_cpu,
+       .enqueue                = (void *)allowed_cpus_enqueue,
+       .init                   = (void *)allowed_cpus_init,
+       .exit                   = (void *)allowed_cpus_exit,
+       .name                   = "allowed_cpus",
+};
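
A note on allowed_cpus_enqueue() above: scx_bpf_select_cpu_and() can be
called from ops.enqueue() as well as ops.select_cpu(), so a scheduler can
pick and claim an idle CPU after queueing a task and then wake it. Reduced
to its essentials, the idiom looks roughly like this (a sketch; "allowed"
stands in for any cpumask the caller owns):

    /* Pick and claim an idle CPU usable by @p, then wake it. With
     * SCX_KICK_IDLE the kick is skipped if the target CPU is no
     * longer idle by the time the kick is processed.
     */
    s32 cpu = scx_bpf_select_cpu_and(p, scx_bpf_task_cpu(p), 0, allowed, 0);
    if (cpu >= 0)
        scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
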
diff --git a/tools/testing/selftests/sched_ext/allowed_cpus.c b/tools/testing/selftests/sched_ext/allowed_cpus.c
new file mode 100644 (file)
index 0000000..a001a3a
--- /dev/null
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "allowed_cpus.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+       struct allowed_cpus *skel;
+
+       skel = allowed_cpus__open();
+       SCX_FAIL_IF(!skel, "Failed to open");
+       SCX_ENUM_INIT(skel);
+       SCX_FAIL_IF(allowed_cpus__load(skel), "Failed to load skel");
+
+       *ctx = skel;
+
+       return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+       struct allowed_cpus *skel = ctx;
+       struct bpf_link *link;
+
+       link = bpf_map__attach_struct_ops(skel->maps.allowed_cpus_ops);
+       SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+       /* Just sleeping is fine, plenty of scheduling events happening */
+       sleep(1);
+
+       SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+       bpf_link__destroy(link);
+
+       return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+       struct allowed_cpus *skel = ctx;
+
+       allowed_cpus__destroy(skel);
+}
+
+struct scx_test allowed_cpus = {
+       .name = "allowed_cpus",
+       .description = "Verify scx_bpf_select_cpu_and()",
+       .setup = setup,
+       .run = run,
+       .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&allowed_cpus)
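
Once built, the new test should run through the suite's shared runner
binary like the other sched_ext selftests (assuming the runner's existing
-t option for selecting a single test by name):

    make -C tools/testing/selftests/sched_ext
    sudo ./tools/testing/selftests/sched_ext/runner -t allowed_cpus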