/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A central FIFO sched_ext scheduler which demonstrates the following:
 *
 * a. Making all scheduling decisions from one CPU:
 *
 *    The central CPU is the only one making scheduling decisions. All other
 *    CPUs kick the central CPU when they run out of tasks to run.
 *
 *    There is one global BPF queue and the central CPU schedules all CPUs by
 *    dispatching from the global queue to each CPU's local dsq from dispatch().
 *    This isn't the most straightforward design; e.g. it'd be easier to bounce
 *    tasks through per-CPU BPF queues. The current design is chosen to
 *    exercise and verify as many SCX mechanisms as possible, such as LOCAL_ON
 *    dispatching.
 *
 * b. Tickless operation
 *
 *    All tasks are dispatched with an infinite slice, which allows stopping
 *    the ticks on CONFIG_NO_HZ_FULL kernels booted with the proper nohz_full
 *    parameter. The tickless operation can be observed through
 *    /proc/interrupts.
 *
 *    Periodic switching is enforced by a periodic timer checking all CPUs and
 *    preempting them as necessary. The timer is pinned to the central CPU with
 *    BPF_F_TIMER_CPU_PIN on kernels which support it (>= 6.7) and falls back
 *    to an unpinned timer on older kernels.
 *
 * c. Preemption
 *
 *    Kthreads are unconditionally queued to the head of a matching local dsq
 *    and dispatched with SCX_ENQ_PREEMPT. This ensures that a kthread is
 *    always prioritized over user threads, which is required to guarantee
 *    forward progress as e.g. the periodic timer may run from a ksoftirqd and,
 *    if the ksoftirqd gets starved by a user thread, there may be nothing else
 *    that can preempt that user thread off the CPU.
 *
 *    SCX_KICK_PREEMPT is used to trigger scheduling and make CPUs move on to
 *    their next tasks.
 *
 * This scheduler is designed to maximize usage of various SCX mechanisms. A
 * more practical implementation would likely put the scheduling loop outside
 * the central CPU's dispatch() path and add some form of priority mechanism.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

enum {
	FALLBACK_DSQ_ID		= 0,
	MS_TO_NS		= 1000LLU * 1000,
	TIMER_INTERVAL_NS	= 1 * MS_TO_NS,
};

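/* parameters set by the userspace loader before the BPF program is loaded */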
const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
const volatile u64 slice_ns;

bool timer_pinned = true;
bool timer_started;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);

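/*
 * The single global queue. enqueue() pushes the PIDs of runnable tasks here
 * and the central CPU pops and dispatches them from dispatch().
 */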
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, s32);
} central_q SEC(".maps");

/* can't use a percpu map as in-BPF lookups only return the current CPU's element */
bool RESIZABLE_ARRAY(data, cpu_gimme_task);
u64 RESIZABLE_ARRAY(data, cpu_started_at);

struct central_timer {
	struct bpf_timer timer;
};

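/* one-entry array holding the central periodic timer */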
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct central_timer);
} central_timer SEC(".maps");

s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	/*
	 * Steer wakeups to the central CPU as much as possible to avoid
	 * disturbing other CPUs. It's safe to blindly return the central CPU
	 * as select_cpu() is a hint and if @p can't run on it, the kernel
	 * will automatically pick a fallback CPU.
	 */
	return central_cpu;
}

void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 pid = p->pid;

	__sync_fetch_and_add(&nr_total, 1);

	/*
	 * Push per-cpu kthreads to the head of their local DSQs and preempt
	 * the corresponding CPU. This ensures that e.g. ksoftirqd isn't
	 * blocked behind other threads, which is necessary to guarantee
	 * forward progress as we depend on the BPF timer which may run from
	 * a ksoftirqd.
	 */
	if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
		__sync_fetch_and_add(&nr_locals, 1);
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				   enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

	if (bpf_map_push_elem(&central_q, &pid, 0)) {
		__sync_fetch_and_add(&nr_overflows, 1);
		scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
		return;
	}

	__sync_fetch_and_add(&nr_queued, 1);

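	/*
	 * If @p is still running, its CPU will go through dispatch() on its
	 * own once the task lets go of it. Otherwise, kick the central CPU
	 * so that the newly queued task gets dispatched promptly.
	 */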
	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}

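/*
 * Pop tasks off the central queue and dispatch them. Returns true once a task
 * has been dispatched to @cpu's local DSQ. Tasks which can't run on @cpu are
 * bounced to the fallback DSQ instead.
 */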
static bool dispatch_to_cpu(s32 cpu)
{
	struct task_struct *p;
	s32 pid;

	bpf_repeat(BPF_MAX_LOOPS) {
		if (bpf_map_pop_elem(&central_q, &pid))
			break;

		__sync_fetch_and_sub(&nr_queued, 1);

		p = bpf_task_from_pid(pid);
		if (!p) {
			__sync_fetch_and_add(&nr_lost_pids, 1);
			continue;
		}

		/*
		 * If we can't run the task at the top, do the dumb thing and
		 * bounce it to the fallback dsq.
		 */
		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
			__sync_fetch_and_add(&nr_mismatches, 1);
			scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
			bpf_task_release(p);
			/*
			 * We might run out of dispatch buffer slots if we keep
			 * dispatching to the fallback DSQ without dispatching
			 * to the local DSQ of the target CPU. In that case,
			 * break out of the loop now as the next dispatch
			 * operation would fail.
			 */
			if (!scx_bpf_dispatch_nr_slots())
				break;
			continue;
		}

		/* dispatch to local and mark that @cpu doesn't need more */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);

		if (cpu != central_cpu)
			scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

		bpf_task_release(p);
		return true;
	}

	return false;
}

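/*
 * Start the periodic preemption timer lazily from the central CPU's first
 * dispatch() so that, when BPF_F_TIMER_CPU_PIN is supported, the timer is
 * armed on and thus pinned to the central CPU.
 */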
static void start_central_timer(void)
{
	struct bpf_timer *timer;
	u32 key = 0;
	int ret;

	if (likely(timer_started))
		return;

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer) {
		scx_bpf_error("failed to lookup central timer");
		return;
	}

	ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	/*
	 * BPF_F_TIMER_CPU_PIN is pretty new (>= 6.7). If we're running on a
	 * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
	 * Retry without the PIN. This would be the perfect use case for
	 * bpf_core_enum_value_exists() but the enum type is anonymous and thus
	 * can't be queried with it. Oh well...
	 */
	if (ret == -EINVAL) {
		timer_pinned = false;
		ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
	}

	if (ret) {
		scx_bpf_error("bpf_timer_start failed (%d)", ret);
		return;
	}

	timer_started = true;
}

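/*
 * The scheduling loop. On the central CPU, dispatch for every CPU which has
 * its gimme flag set and then for itself. On all other CPUs, consume from the
 * fallback DSQ if possible; otherwise, raise the gimme flag and kick the
 * central CPU to do the work for us.
 */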
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == central_cpu) {
		start_central_timer();

		/* dispatch for all other CPUs first */
		__sync_fetch_and_add(&nr_dispatches, 1);

		bpf_for(cpu, 0, nr_cpu_ids) {
			bool *gimme;

			if (!scx_bpf_dispatch_nr_slots())
				break;

			/* central's gimme is never set */
			gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
			if (!gimme || !*gimme)
				continue;

			if (dispatch_to_cpu(cpu))
				*gimme = false;
		}

		/*
		 * Retry if we ran out of dispatch buffer slots as we might have
		 * skipped some CPUs and also need to dispatch for self. The ext
		 * core automatically retries if the local dsq is empty but we
		 * can't rely on that as we're dispatching for other CPUs too.
		 * Kick self explicitly to retry.
		 */
		if (!scx_bpf_dispatch_nr_slots()) {
			__sync_fetch_and_add(&nr_retries, 1);
			scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
			return;
		}

		/* look for a task to run on the central CPU */
		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID, 0))
			return;
		dispatch_to_cpu(central_cpu);
	} else {
		bool *gimme;

		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID, 0))
			return;

		gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
		if (gimme)
			*gimme = true;

		/*
		 * Force dispatch on the scheduling CPU so that it finds a task
		 * to run for us.
		 */
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
	}
}

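/*
 * Track when the current task started running on each CPU. central_timerfn()
 * uses the timestamp to tell whether a CPU's current task has used up its
 * slice.
 */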
void BPF_STRUCT_OPS(central_running, struct task_struct *p)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = scx_bpf_now() ?: 1;	/* 0 indicates idle */
}

void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = 0;
}

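/*
 * Periodic timer callback. Preempt each CPU whose current task has run for at
 * least slice_ns, and kick enough CPUs to service whatever is sitting in the
 * central queue.
 */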
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
	u64 now = scx_bpf_now();
	u64 nr_to_kick = nr_queued;
	s32 i, curr_cpu;

	curr_cpu = bpf_get_smp_processor_id();
	if (timer_pinned && (curr_cpu != central_cpu)) {
		scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
			      curr_cpu, central_cpu);
		return 0;
	}

	bpf_for(i, 0, nr_cpu_ids) {
		s32 cpu = (nr_timers + i) % nr_cpu_ids;
		u64 *started_at;

		if (cpu == central_cpu)
			continue;

		/* kick iff the current one exhausted its slice */
		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
		if (started_at && *started_at &&
		    time_before(now, *started_at + slice_ns))
			continue;

		/* and there's something pending */
		if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
		    scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
			;
		else if (nr_to_kick)
			nr_to_kick--;
		else
			continue;

		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}

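	/*
	 * Re-arm the timer. Skip BPF_F_TIMER_CPU_PIN if the running kernel
	 * doesn't support it; see start_central_timer().
	 */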
	bpf_timer_start(timer, TIMER_INTERVAL_NS,
			timer_pinned ? BPF_F_TIMER_CPU_PIN : 0);
	__sync_fetch_and_add(&nr_timers, 1);
	return 0;
}

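/*
 * Create the fallback DSQ and set up (but don't start) the central timer.
 * The timer is started from the central CPU's first dispatch(); the kick
 * below gets that first dispatch going.
 */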
int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
	u32 key = 0;
	struct bpf_timer *timer;
	int ret;

	ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
	if (ret) {
		scx_bpf_error("scx_bpf_create_dsq failed (%d)", ret);
		return ret;
	}

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer)
		return -ESRCH;

	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
	bpf_timer_set_callback(timer, central_timerfn);

	scx_bpf_kick_cpu(central_cpu, 0);

	return 0;
}

void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(central_ops,
	       /*
		* We are offloading all scheduling decisions to the central CPU
		* and thus being the last task on a given CPU doesn't mean
		* anything special. Enqueue the last tasks like any other tasks.
		*/
	       .flags			= SCX_OPS_ENQ_LAST,

	       .select_cpu		= (void *)central_select_cpu,
	       .enqueue			= (void *)central_enqueue,
	       .dispatch		= (void *)central_dispatch,
	       .running			= (void *)central_running,
	       .stopping		= (void *)central_stopping,
	       .init			= (void *)central_init,
	       .exit			= (void *)central_exit,
	       .name			= "central");