// SPDX-License-Identifier: GPL-2.0
/*
 * A scheduler that validates the behavior of scx_bpf_select_cpu_and() by
 * selecting idle CPUs strictly within a subset of allowed CPUs.
 *
 * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

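/* Exit info recorded on scheduler exit and consumed by the userspace test. */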
UEI_DEFINE(uei);

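/* kptr holding the allowed CPU domain; populated in allowed_cpus_init(). */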
private(PREF_CPUS) struct bpf_cpumask __kptr * allowed_cpumask;

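/*
 * Check that @cpu returned by scx_bpf_select_cpu_and() was actually claimed
 * (its idle bit is already cleared) and, when @allowed is a subset of the
 * task's affinity, that it falls within the allowed domain.
 */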
static void
validate_idle_cpu(const struct task_struct *p, const struct cpumask *allowed, s32 cpu)
{
	if (scx_bpf_test_and_clear_cpu_idle(cpu))
		scx_bpf_error("CPU %d should be marked as busy", cpu);

	if (bpf_cpumask_subset(allowed, p->cpus_ptr) &&
	    !bpf_cpumask_test_cpu(cpu, allowed))
		scx_bpf_error("CPU %d not in the allowed domain for %d (%s)",
			      cpu, p->pid, p->comm);
}

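/*
 * Pick an idle CPU from the allowed domain at wakeup time. If one is found,
 * validate it and dispatch @p directly to the local DSQ; otherwise fall back
 * to @prev_cpu and let the enqueue path handle the task.
 */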
s32 BPF_STRUCT_OPS(allowed_cpus_select_cpu,
		   struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	const struct cpumask *allowed;
	s32 cpu;

	allowed = cast_mask(allowed_cpumask);
	if (!allowed) {
		scx_bpf_error("allowed domain not initialized");
		return -EINVAL;
	}

	/*
	 * Select an idle CPU strictly within the allowed domain.
	 */
	cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, allowed, 0);
	if (cpu >= 0) {
		validate_idle_cpu(p, allowed, cpu);
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

		return cpu;
	}

	return prev_cpu;
}

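/*
 * Queue @p on the global DSQ and poke an idle CPU from the allowed domain
 * to consume it.
 */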
void BPF_STRUCT_OPS(allowed_cpus_enqueue, struct task_struct *p, u64 enq_flags)
{
	const struct cpumask *allowed;
	s32 prev_cpu = scx_bpf_task_cpu(p), cpu;

	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);

	allowed = cast_mask(allowed_cpumask);
	if (!allowed) {
		scx_bpf_error("allowed domain not initialized");
		return;
	}

	/*
	 * Use scx_bpf_select_cpu_and() to proactively kick an idle CPU
	 * within @allowed_cpumask, usable by @p.
	 */
	cpu = scx_bpf_select_cpu_and(p, prev_cpu, 0, allowed, 0);
	if (cpu >= 0) {
		validate_idle_cpu(p, allowed, cpu);
		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
	}
}

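/*
 * Create @allowed_cpumask and restrict the allowed domain to the first
 * online CPU.
 */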
s32 BPF_STRUCT_OPS_SLEEPABLE(allowed_cpus_init)
{
	struct bpf_cpumask *mask;

	mask = bpf_cpumask_create();
	if (!mask)
		return -ENOMEM;

	mask = bpf_kptr_xchg(&allowed_cpumask, mask);
	if (mask)
		bpf_cpumask_release(mask);

	bpf_rcu_read_lock();

	/*
	 * Assign the first online CPU to the allowed domain.
	 */
	mask = allowed_cpumask;
	if (mask) {
		const struct cpumask *online = scx_bpf_get_online_cpumask();

		bpf_cpumask_set_cpu(bpf_cpumask_first(online), mask);
		scx_bpf_put_cpumask(online);
	}

	bpf_rcu_read_unlock();

	return 0;
}

void BPF_STRUCT_OPS(allowed_cpus_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

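/* Context passed to the syscall program below. */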
struct task_cpu_arg {
	pid_t pid;
};

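/*
 * Syscall BPF program exercising scx_bpf_select_cpu_and() outside of the
 * scheduler callbacks. The userspace side is expected to run it via
 * BPF_PROG_RUN (e.g. bpf_prog_test_run_opts()), passing a struct
 * task_cpu_arg as context.
 */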
SEC("syscall")
int select_cpu_from_user(struct task_cpu_arg *input)
{
	struct task_struct *p;
	int cpu;

	p = bpf_task_from_pid(input->pid);
	if (!p)
		return -EINVAL;

	bpf_rcu_read_lock();
	cpu = scx_bpf_select_cpu_and(p, bpf_get_smp_processor_id(), 0, p->cpus_ptr, 0);
	bpf_rcu_read_unlock();

	bpf_task_release(p);

	return cpu;
}

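/*
 * Register the callbacks above as the "allowed_cpus" scheduler; the
 * ".struct_ops.link" section lets it be attached as a BPF link.
 */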
SEC(".struct_ops.link")
struct sched_ext_ops allowed_cpus_ops = {
	.select_cpu		= (void *)allowed_cpus_select_cpu,
	.enqueue		= (void *)allowed_cpus_enqueue,
	.init			= (void *)allowed_cpus_init,
	.exit			= (void *)allowed_cpus_exit,
	.name			= "allowed_cpus",
};