xref: /linux/kernel/sched/ext_internal.h (revision a23cd25baed2316e50597f8b67192bdc904f955b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
7  */
8 #define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
9 
10 enum scx_consts {
11 	SCX_DSP_DFL_MAX_BATCH		= 32,
12 	SCX_DSP_MAX_LOOPS		= 32,
13 	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,
14 
15 	SCX_EXIT_BT_LEN			= 64,
16 	SCX_EXIT_MSG_LEN		= 1024,
17 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
18 
19 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
20 
21 	/*
22 	 * Iterating all tasks may take a while. Periodically drop
23 	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
24 	 */
25 	SCX_TASK_ITER_BATCH		= 32,
26 };
27 
28 enum scx_exit_kind {
29 	SCX_EXIT_NONE,
30 	SCX_EXIT_DONE,
31 
32 	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
33 	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
34 	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
35 	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
36 
37 	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
38 	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
39 	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
40 };
41 
42 /*
43  * An exit code can be specified when exiting with scx_bpf_exit() or scx_exit(),
44  * corresponding to exit_kind UNREG_BPF and UNREG_KERN respectively. The codes
45  * are 64-bit values of the format:
46  *
47  *   Bits: [63  ..  48 47   ..  32 31 .. 0]
48  *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
49  *
50  *   SYS ACT: System-defined exit actions
51  *   SYS RSN: System-defined exit reasons
52  *   USR    : User-defined exit codes and reasons
53  *
54  * Using the above, users may communicate intention and context by ORing system
55  * actions and/or system reasons with a user-defined exit code.
56  */
57 enum scx_exit_code {
58 	/* Reasons */
59 	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,
60 
61 	/* Actions */
62 	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
63 };
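
/*
 * A sketch of how a BPF scheduler might combine the system-defined bits with
 * its own code when exiting voluntarily, e.g. from ops.cpu_online(), assuming
 * the scx_bpf_exit() helper available to BPF schedulers; MY_ECODE_USR_RELOAD
 * is an illustrative user-defined code:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG |
 *		     MY_ECODE_USR_RELOAD, "restarting after CPU %d hotplug", cpu);
 */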
64 
65 enum scx_exit_flags {
66 	/*
67 	 * ops.exit() may be called even if the loading failed before ops.init()
68 	 * finishes successfully. This is because ops.exit() allows rich exit
69 	 * info communication. The following flag indicates whether ops.init()
70 	 * finished successfully.
71 	 */
72 	SCX_EFLAG_INITIALIZED	= 1LLU << 0,
73 };
74 
75 /*
76  * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
77  * being disabled.
78  */
79 struct scx_exit_info {
80 	/* %SCX_EXIT_* - broad category of the exit reason */
81 	enum scx_exit_kind	kind;
82 
83 	/* exit code if gracefully exiting */
84 	s64			exit_code;
85 
86 	/* %SCX_EFLAG_* */
87 	u64			flags;
88 
89 	/* textual representation of the above */
90 	const char		*reason;
91 
92 	/* backtrace if exiting due to an error */
93 	unsigned long		*bt;
94 	u32			bt_len;
95 
96 	/* informational message */
97 	char			*msg;
98 
99 	/* debug dump */
100 	char			*dump;
101 };
102 
103 /* sched_ext_ops.flags */
104 enum scx_ops_flags {
105 	/*
106 	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
107 	 */
108 	SCX_OPS_KEEP_BUILTIN_IDLE	= 1LLU << 0,
109 
110 	/*
111 	 * By default, if there are no other tasks to run on the CPU, ext core
112 	 * keeps running the current task even after its slice expires. If this
113 	 * flag is specified, such tasks are passed to ops.enqueue() with
114 	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
115 	 */
116 	SCX_OPS_ENQ_LAST		= 1LLU << 1,
117 
118 	/*
119 	 * An exiting task may schedule after PF_EXITING is set. In such cases,
120 	 * bpf_task_from_pid() may not be able to find the task and if the BPF
121 	 * scheduler depends on pid lookup for dispatching, the task will be
122 	 * lost leading to various issues including RCU grace period stalls.
123 	 *
124 	 * To mask this problem, by default, unhashed tasks are automatically
125 	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
126 	 * depend on pid lookups and wants to handle these tasks directly, the
127 	 * following flag can be used.
128 	 */
129 	SCX_OPS_ENQ_EXITING		= 1LLU << 2,
130 
131 	/*
132 	 * If set, only tasks with policy set to SCHED_EXT are attached to
133 	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
134 	 */
135 	SCX_OPS_SWITCH_PARTIAL		= 1LLU << 3,
136 
137 	/*
138 	 * A migration disabled task can only execute on its current CPU. By
139 	 * default, such tasks are automatically put on the CPU's local DSQ with
140 	 * the default slice on enqueue. If this ops flag is set, they also go
141 	 * through ops.enqueue().
142 	 *
143 	 * A migration disabled task never invokes ops.select_cpu() as it can
144 	 * only select the current CPU. Also, p->cpus_ptr will only contain its
145 	 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
146 	 * and thus may disagree with cpumask_weight(p->cpus_ptr).
147 	 */
148 	SCX_OPS_ENQ_MIGRATION_DISABLED	= 1LLU << 4,
149 
150 	/*
151 	 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
152 	 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
153 	 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
154 	 * transfers. When this optimization is enabled, ops.select_cpu() is
155 	 * skipped in some cases (when racing against the wakee switching out).
156 	 * As the BPF scheduler may depend on ops.select_cpu() being invoked
157 	 * during wakeups, queued wakeup is disabled by default.
158 	 *
159 	 * If this ops flag is set, queued wakeup optimization is enabled and
160 	 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
161 	 * wakee's CPU without preceding ops.select_cpu() even for tasks which
162 	 * may be executed on multiple CPUs.
163 	 */
164 	SCX_OPS_ALLOW_QUEUED_WAKEUP	= 1LLU << 5,
165 
166 	/*
167 	 * If set, enable per-node idle cpumasks. If clear, use a single global
168 	 * flat idle cpumask.
169 	 */
170 	SCX_OPS_BUILTIN_IDLE_PER_NODE	= 1LLU << 6,
171 
172 	/*
173 	 * CPU cgroup support flags
174 	 */
175 	SCX_OPS_HAS_CGROUP_WEIGHT	= 1LLU << 16,	/* DEPRECATED, will be removed in 6.18 */
176 
177 	SCX_OPS_ALL_FLAGS		= SCX_OPS_KEEP_BUILTIN_IDLE |
178 					  SCX_OPS_ENQ_LAST |
179 					  SCX_OPS_ENQ_EXITING |
180 					  SCX_OPS_ENQ_MIGRATION_DISABLED |
181 					  SCX_OPS_ALLOW_QUEUED_WAKEUP |
182 					  SCX_OPS_SWITCH_PARTIAL |
183 					  SCX_OPS_BUILTIN_IDLE_PER_NODE |
184 					  SCX_OPS_HAS_CGROUP_WEIGHT,
185 
186 	/* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
187 	__SCX_OPS_INTERNAL_MASK		= 0xffLLU << 56,
188 
189 	SCX_OPS_HAS_CPU_PREEMPT		= 1LLU << 56,
190 };
191 
192 /* argument container for ops.init_task() */
193 struct scx_init_task_args {
194 	/*
195 	 * Set if ops.init_task() is being invoked on the fork path, as opposed
196 	 * to the scheduler transition path.
197 	 */
198 	bool			fork;
199 #ifdef CONFIG_EXT_GROUP_SCHED
200 	/* the cgroup the task is joining */
201 	struct cgroup		*cgroup;
202 #endif
203 };
204 
205 /* argument container for ops.exit_task() */
206 struct scx_exit_task_args {
207 	/* Whether the task exited before running on sched_ext. */
208 	bool cancelled;
209 };
210 
211 /* argument container for ops->cgroup_init() */
212 struct scx_cgroup_init_args {
213 	/* the weight of the cgroup [1..10000] */
214 	u32			weight;
215 
216 	/* bandwidth control parameters from cpu.max and cpu.max.burst */
217 	u64			bw_period_us;
218 	u64			bw_quota_us;
219 	u64			bw_burst_us;
220 };
221 
222 enum scx_cpu_preempt_reason {
223 	/* next task is being scheduled by &rt_sched_class */
224 	SCX_CPU_PREEMPT_RT,
225 	/* next task is being scheduled by &dl_sched_class */
226 	SCX_CPU_PREEMPT_DL,
227 	/* next task is being scheduled by &stop_sched_class */
228 	SCX_CPU_PREEMPT_STOP,
229 	/* unknown reason for SCX being preempted */
230 	SCX_CPU_PREEMPT_UNKNOWN,
231 };
232 
233 /*
234  * Argument container for ops->cpu_acquire(). Currently empty, but may be
235  * expanded in the future.
236  */
237 struct scx_cpu_acquire_args {};
238 
239 /* argument container for ops->cpu_release() */
240 struct scx_cpu_release_args {
241 	/* the reason the CPU was preempted */
242 	enum scx_cpu_preempt_reason reason;
243 
244 	/* the task that's going to be scheduled on the CPU */
245 	struct task_struct	*task;
246 };
247 
248 /*
249  * Informational context provided to dump operations.
250  */
251 struct scx_dump_ctx {
252 	enum scx_exit_kind	kind;
253 	s64			exit_code;
254 	const char		*reason;
255 	u64			at_ns;
256 	u64			at_jiffies;
257 };
258 
259 /**
260  * struct sched_ext_ops - Operation table for BPF scheduler implementation
261  *
262  * A BPF scheduler can implement an arbitrary scheduling policy by
263  * implementing and loading operations in this table. Note that a userland
264  * scheduling policy can also be implemented using the BPF scheduler
265  * as a shim layer.
266  */
267 struct sched_ext_ops {
268 	/**
269 	 * @select_cpu: Pick the target CPU for a task which is being woken up
270 	 * @p: task being woken up
271 	 * @prev_cpu: the cpu @p was on before sleeping
272 	 * @wake_flags: SCX_WAKE_*
273 	 *
274 	 * Decision made here isn't final. @p may be moved to any CPU while it
275 	 * is getting dispatched for execution later. However, as @p is not on
276 	 * the rq at this point, getting the eventual execution CPU right here
277 	 * saves a small bit of overhead down the line.
278 	 *
279 	 * If an idle CPU is returned, the CPU is kicked and will try to
280 	 * dispatch. While an explicit custom mechanism can be added,
281 	 * select_cpu() serves as the default way to wake up idle CPUs.
282 	 *
283 	 * @p may be inserted into a DSQ directly by calling
284 	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
285 	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
286 	 * of the CPU returned by this operation.
287 	 *
288 	 * Note that select_cpu() is never called for tasks that can only run
289 	 * on a single CPU or tasks with migration disabled, as they don't have
290 	 * the option to select a different CPU. See select_task_rq() for
291 	 * details.
292 	 */
293 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
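
	/*
	 * A minimal ops.select_cpu() sketch for a BPF scheduler, assuming the
	 * BPF_STRUCT_OPS() convenience macro from the scx tooling headers; the
	 * "simple_" name is illustrative. An idle CPU found by
	 * scx_bpf_select_cpu_dfl() is used for direct dispatch, which skips
	 * ops.enqueue():
	 *
	 *	s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */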
294 
295 	/**
296 	 * @enqueue: Enqueue a task on the BPF scheduler
297 	 * @p: task being enqueued
298 	 * @enq_flags: %SCX_ENQ_*
299 	 *
300 	 * @p is ready to run. Insert directly into a DSQ by calling
301 	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
302 	 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
303 	 * the task will stall.
304 	 *
305 	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
306 	 * skipped.
307 	 */
308 	void (*enqueue)(struct task_struct *p, u64 enq_flags);
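
	/*
	 * A sketch of an ops.enqueue() that funnels everything into one shared
	 * DSQ, assuming SHARED_DSQ is a user-defined DSQ ID created in
	 * ops.init() (see the ops.init() sketch below):
	 *
	 *	void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
	 *	{
	 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
	 *	}
	 */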
309 
310 	/**
311 	 * @dequeue: Remove a task from the BPF scheduler
312 	 * @p: task being dequeued
313 	 * @deq_flags: %SCX_DEQ_*
314 	 *
315 	 * Remove @p from the BPF scheduler. This is usually called to isolate
316 	 * the task while updating its scheduling properties (e.g. priority).
317 	 *
318 	 * The ext core keeps track of whether the BPF side owns a given task or
319 	 * not and can gracefully ignore spurious dispatches from BPF side,
320 	 * which makes it safe to not implement this method. However, depending
321 	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
322 	 * scheduling position not being updated across a priority change.
323 	 */
324 	void (*dequeue)(struct task_struct *p, u64 deq_flags);
325 
326 	/**
327 	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
328 	 * @cpu: CPU to dispatch tasks for
329 	 * @prev: previous task being switched out
330 	 *
331 	 * Called when a CPU's local dsq is empty. The operation should dispatch
332 	 * one or more tasks from the BPF scheduler into the DSQs using
333 	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
334 	 * using scx_bpf_dsq_move_to_local().
335 	 *
336 	 * The maximum number of times scx_bpf_dsq_insert() can be called
337 	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
338 	 * ops.dispatch_max_batch. See the comments on top of the two functions
339 	 * for more details.
340 	 *
341 	 * When not %NULL, @prev is an SCX task with its slice depleted. If
342 	 * @prev is still runnable, as indicated by %SCX_TASK_QUEUED being set in
343 	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
344 	 * ops.dispatch() returns. To keep executing @prev, return without
345 	 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
346 	 */
347 	void (*dispatch)(s32 cpu, struct task_struct *prev);
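
	/*
	 * The matching ops.dispatch() sketch refills the local DSQ from the
	 * shared DSQ used in the enqueue sketch above (SHARED_DSQ is
	 * illustrative):
	 *
	 *	void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
	 *	}
	 */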
348 
349 	/**
350 	 * @tick: Periodic tick
351 	 * @p: task running currently
352 	 *
353 	 * This operation is called every 1/HZ seconds on CPUs which are
354 	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
355 	 * immediate dispatch cycle on the CPU.
356 	 */
357 	void (*tick)(struct task_struct *p);
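
	/*
	 * Example of using ops.tick() to force an immediate dispatch cycle by
	 * depleting the remaining slice; want_preempt() is a hypothetical
	 * predicate:
	 *
	 *	void BPF_STRUCT_OPS(simple_tick, struct task_struct *p)
	 *	{
	 *		if (want_preempt(p))
	 *			p->scx.slice = 0;
	 *	}
	 */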
358 
359 	/**
360 	 * @runnable: A task is becoming runnable on its associated CPU
361 	 * @p: task becoming runnable
362 	 * @enq_flags: %SCX_ENQ_*
363 	 *
364 	 * This and the following three functions can be used to track a task's
365 	 * execution state transitions. A task becomes ->runnable() on a CPU,
366 	 * and then goes through one or more ->running() and ->stopping() pairs
367 	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
368 	 * done running on the CPU.
369 	 *
370 	 * @p is becoming runnable on the CPU because it's
371 	 *
372 	 * - waking up (%SCX_ENQ_WAKEUP)
373 	 * - being moved from another CPU
374 	 * - being restored after temporarily taken off the queue for an
375 	 *   attribute change.
376 	 *
377 	 * This and ->enqueue() are related but not coupled. This operation
378 	 * notifies @p's state transition and may not be followed by ->enqueue()
379 	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
380 	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
381 	 * task may be ->enqueue()'d without being preceded by this operation
382 	 * e.g. after exhausting its slice.
383 	 */
384 	void (*runnable)(struct task_struct *p, u64 enq_flags);
385 
386 	/**
387 	 * @running: A task is starting to run on its associated CPU
388 	 * @p: task starting to run
389 	 *
390 	 * Note that this callback may be called from a CPU other than the
391 	 * one the task is going to run on. This can happen when a task
392 	 * property is changed (e.g., affinity), since set_next_task_scx(),
393 	 * which triggers this callback, may run on a CPU different from
394 	 * the task's assigned CPU.
395 	 *
396 	 * Therefore, always use scx_bpf_task_cpu(@p) to determine the
397 	 * target CPU the task is going to use.
398 	 *
399 	 * See ->runnable() for explanation on the task state notifiers.
400 	 */
401 	void (*running)(struct task_struct *p);
402 
403 	/**
404 	 * @stopping: A task is stopping execution
405 	 * @p: task stopping execution
406 	 * @runnable: is task @p still runnable?
407 	 *
408 	 * Note that this callback may be called from a CPU other than the
409 	 * one the task was running on. This can happen when a task
410 	 * property is changed (e.g., affinity), since dequeue_task_scx(),
411 	 * which triggers this callback, may run on a CPU different from
412 	 * the task's assigned CPU.
413 	 *
414 	 * Therefore, always use scx_bpf_task_cpu(@p) to retrieve the CPU
415 	 * the task was running on.
416 	 *
417 	 * See ->runnable() for explanation on the task state notifiers. If
418 	 * !@runnable, ->quiescent() will be invoked after this operation
419 	 * returns.
420 	 */
421 	void (*stopping)(struct task_struct *p, bool runnable);
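
	/*
	 * The ->running()/->stopping() pair is commonly used for CPU time
	 * accounting. A sketch assuming a per-task context kept in BPF task
	 * local storage; lookup_task_ctx() and struct task_ctx are
	 * illustrative:
	 *
	 *	void BPF_STRUCT_OPS(simple_running, struct task_struct *p)
	 *	{
	 *		struct task_ctx *tctx = lookup_task_ctx(p);
	 *
	 *		if (tctx)
	 *			tctx->started_at = bpf_ktime_get_ns();
	 *	}
	 *
	 *	void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)
	 *	{
	 *		struct task_ctx *tctx = lookup_task_ctx(p);
	 *
	 *		if (tctx)
	 *			tctx->runtime += bpf_ktime_get_ns() - tctx->started_at;
	 *	}
	 */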
422 
423 	/**
424 	 * @quiescent: A task is becoming not runnable on its associated CPU
425 	 * @p: task becoming not runnable
426 	 * @deq_flags: %SCX_DEQ_*
427 	 *
428 	 * See ->runnable() for explanation on the task state notifiers.
429 	 *
430 	 * @p is becoming quiescent on the CPU because it's
431 	 *
432 	 * - sleeping (%SCX_DEQ_SLEEP)
433 	 * - being moved to another CPU
434 	 * - being temporarily taken off the queue for an attribute change
435 	 *   (%SCX_DEQ_SAVE)
436 	 *
437 	 * This and ->dequeue() are related but not coupled. This operation
438 	 * notifies @p's state transition and may not be preceded by ->dequeue()
439 	 * e.g. when @p is being dispatched to a remote CPU.
440 	 */
441 	void (*quiescent)(struct task_struct *p, u64 deq_flags);
442 
443 	/**
444 	 * @yield: Yield CPU
445 	 * @from: yielding task
446 	 * @to: optional yield target task
447 	 *
448 	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
449 	 * The BPF scheduler should ensure that other available tasks are
450 	 * dispatched before the yielding task. Return value is ignored in this
451 	 * case.
452 	 *
453 	 * If @to is non-NULL, @from wants to yield the CPU to @to. If the BPF
454 	 * scheduler can implement the request, return %true; otherwise, %false.
455 	 */
456 	bool (*yield)(struct task_struct *from, struct task_struct *to);
457 
458 	/**
459 	 * @core_sched_before: Task ordering for core-sched
460 	 * @a: task A
461 	 * @b: task B
462 	 *
463 	 * Used by core-sched to determine the ordering between two tasks. See
464 	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
465 	 * core-sched.
466 	 *
467 	 * Both @a and @b are runnable and may or may not currently be queued on
468 	 * the BPF scheduler. Should return %true if @a should run before @b.
469 	 * %false if there's no required ordering or @b should run before @a.
470 	 *
471 	 * If not specified, the default is ordering them according to when they
472 	 * became runnable.
473 	 */
474 	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
475 
476 	/**
477 	 * @set_weight: Set task weight
478 	 * @p: task to set weight for
479 	 * @weight: new weight [1..10000]
480 	 *
481 	 * Update @p's weight to @weight.
482 	 */
483 	void (*set_weight)(struct task_struct *p, u32 weight);
484 
485 	/**
486 	 * @set_cpumask: Set CPU affinity
487 	 * @p: task to set CPU affinity for
488 	 * @cpumask: cpumask of cpus that @p can run on
489 	 *
490 	 * Update @p's CPU affinity to @cpumask.
491 	 */
492 	void (*set_cpumask)(struct task_struct *p,
493 			    const struct cpumask *cpumask);
494 
495 	/**
496 	 * @update_idle: Update the idle state of a CPU
497 	 * @cpu: CPU to update the idle state for
498 	 * @idle: whether entering or exiting the idle state
499 	 *
500 	 * This operation is called when @cpu enters or leaves the idle
501 	 * state. By default, implementing this operation disables the built-in
502 	 * idle CPU tracking and the following helpers become unavailable:
503 	 *
504 	 * - scx_bpf_select_cpu_dfl()
505 	 * - scx_bpf_select_cpu_and()
506 	 * - scx_bpf_test_and_clear_cpu_idle()
507 	 * - scx_bpf_pick_idle_cpu()
508 	 *
509 	 * The user also must implement ops.select_cpu() as the default
510 	 * implementation relies on scx_bpf_select_cpu_dfl().
511 	 *
512 	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
513 	 * tracking.
514 	 */
515 	void (*update_idle)(s32 cpu, bool idle);
516 
517 	/**
518 	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
519 	 * @cpu: The CPU being acquired by the BPF scheduler.
520 	 * @args: Acquire arguments, see the struct definition.
521 	 *
522 	 * A CPU that was previously released from the BPF scheduler is now once
523 	 * again under its control.
524 	 */
525 	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
526 
527 	/**
528 	 * @cpu_release: A CPU is taken away from the BPF scheduler
529 	 * @cpu: The CPU being released by the BPF scheduler.
530 	 * @args: Release arguments, see the struct definition.
531 	 *
532 	 * The specified CPU is no longer under the control of the BPF
533 	 * scheduler. This could be because it was preempted by a higher
534 	 * priority sched_class, though there may be other reasons as well. The
535 	 * caller should consult @args->reason to determine the cause.
536 	 */
537 	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
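
	/*
	 * A common ops.cpu_release() sketch: tasks left on the released CPU's
	 * local DSQ would otherwise wait for that CPU to come back, so they are
	 * punted back to ops.enqueue() with the scx_bpf_reenqueue_local()
	 * kfunc. Re-enqueued tasks carry %SCX_ENQ_REENQ in @enq_flags:
	 *
	 *	void BPF_STRUCT_OPS(simple_cpu_release, s32 cpu,
	 *			    struct scx_cpu_release_args *args)
	 *	{
	 *		scx_bpf_reenqueue_local();
	 *	}
	 */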
538 
539 	/**
540 	 * @init_task: Initialize a task to run in a BPF scheduler
541 	 * @p: task to initialize for BPF scheduling
542 	 * @args: init arguments, see the struct definition
543 	 *
544 	 * Either we're loading a BPF scheduler or a new task is being forked.
545 	 * Initialize @p for BPF scheduling. This operation may block and can
546 	 * be used for allocations, and is called exactly once for a task.
547 	 *
548 	 * Return 0 for success, -errno for failure. An error return while
549 	 * loading will abort loading of the BPF scheduler. During a fork, it
550 	 * will abort that specific fork.
551 	 */
552 	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
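
	/*
	 * As ops.init_task() may block, it is a convenient place to set up
	 * per-task state. A sketch using BPF task local storage; the
	 * task_ctx_stor map and struct task_ctx are illustrative:
	 *
	 *	s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init_task, struct task_struct *p,
	 *				     struct scx_init_task_args *args)
	 *	{
	 *		struct task_ctx *tctx;
	 *
	 *		tctx = bpf_task_storage_get(&task_ctx_stor, p, NULL,
	 *					    BPF_LOCAL_STORAGE_GET_F_CREATE);
	 *		return tctx ? 0 : -ENOMEM;
	 *	}
	 */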
553 
554 	/**
555 	 * @exit_task: Exit a previously-running task from the system
556 	 * @p: task to exit
557 	 * @args: exit arguments, see the struct definition
558 	 *
559 	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
560 	 * necessary cleanup for @p.
561 	 */
562 	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
563 
564 	/**
565 	 * @enable: Enable BPF scheduling for a task
566 	 * @p: task to enable BPF scheduling for
567 	 *
568 	 * Enable @p for BPF scheduling. enable() is called on @p any time it
569 	 * enters SCX, and is always paired with a matching disable().
570 	 */
571 	void (*enable)(struct task_struct *p);
572 
573 	/**
574 	 * @disable: Disable BPF scheduling for a task
575 	 * @p: task to disable BPF scheduling for
576 	 *
577 	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
578 	 * Disable BPF scheduling for @p. A disable() call is always matched
579 	 * with a prior enable() call.
580 	 */
581 	void (*disable)(struct task_struct *p);
582 
583 	/**
584 	 * @dump: Dump BPF scheduler state on error
585 	 * @ctx: debug dump context
586 	 *
587 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
588 	 */
589 	void (*dump)(struct scx_dump_ctx *ctx);
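
	/*
	 * A sketch of ops.dump() adding scheduler-global state to the debug
	 * dump through the scx_bpf_dump() helper; nr_queued is an illustrative
	 * global counter maintained by the BPF scheduler:
	 *
	 *	void BPF_STRUCT_OPS(simple_dump, struct scx_dump_ctx *dctx)
	 *	{
	 *		scx_bpf_dump("nr_queued=%llu", nr_queued);
	 *	}
	 */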
590 
591 	/**
592 	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
593 	 * @ctx: debug dump context
594 	 * @cpu: CPU to generate debug dump for
595 	 * @idle: @cpu is currently idle without any runnable tasks
596 	 *
597 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
598 	 * @cpu. If @idle is %true and this operation doesn't produce any
599 	 * output, @cpu is skipped for dump.
600 	 */
601 	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
602 
603 	/**
604 	 * @dump_task: Dump BPF scheduler state for a runnable task on error
605 	 * @ctx: debug dump context
606 	 * @p: runnable task to generate debug dump for
607 	 *
608 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
609 	 * @p.
610 	 */
611 	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
612 
613 #ifdef CONFIG_EXT_GROUP_SCHED
614 	/**
615 	 * @cgroup_init: Initialize a cgroup
616 	 * @cgrp: cgroup being initialized
617 	 * @args: init arguments, see the struct definition
618 	 *
619 	 * Either the BPF scheduler is being loaded or @cgrp created, initialize
620 	 * @cgrp for sched_ext. This operation may block.
621 	 *
622 	 * Return 0 for success, -errno for failure. An error return while
623 	 * loading will abort loading of the BPF scheduler. During cgroup
624 	 * creation, it will abort the specific cgroup creation.
625 	 */
626 	s32 (*cgroup_init)(struct cgroup *cgrp,
627 			   struct scx_cgroup_init_args *args);
628 
629 	/**
630 	 * @cgroup_exit: Exit a cgroup
631 	 * @cgrp: cgroup being exited
632 	 *
633 	 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
634 	 * @cgrp for sched_ext. This operation may block.
635 	 */
636 	void (*cgroup_exit)(struct cgroup *cgrp);
637 
638 	/**
639 	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
640 	 * @p: task being moved
641 	 * @from: cgroup @p is being moved from
642 	 * @to: cgroup @p is being moved to
643 	 *
644 	 * Prepare @p for move from cgroup @from to @to. This operation may
645 	 * block and can be used for allocations.
646 	 *
647 	 * Return 0 for success, -errno for failure. An error return aborts the
648 	 * migration.
649 	 */
650 	s32 (*cgroup_prep_move)(struct task_struct *p,
651 				struct cgroup *from, struct cgroup *to);
652 
653 	/**
654 	 * @cgroup_move: Commit cgroup move
655 	 * @p: task being moved
656 	 * @from: cgroup @p is being moved from
657 	 * @to: cgroup @p is being moved to
658 	 *
659 	 * Commit the move. @p is dequeued during this operation.
660 	 */
661 	void (*cgroup_move)(struct task_struct *p,
662 			    struct cgroup *from, struct cgroup *to);
663 
664 	/**
665 	 * @cgroup_cancel_move: Cancel cgroup move
666 	 * @p: task whose cgroup move is being canceled
667 	 * @from: cgroup @p was being moved from
668 	 * @to: cgroup @p was being moved to
669 	 *
670 	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
671 	 * Undo the preparation.
672 	 */
673 	void (*cgroup_cancel_move)(struct task_struct *p,
674 				   struct cgroup *from, struct cgroup *to);
675 
676 	/**
677 	 * @cgroup_set_weight: A cgroup's weight is being changed
678 	 * @cgrp: cgroup whose weight is being updated
679 	 * @weight: new weight [1..10000]
680 	 *
681 	 * Update @cgrp's weight to @weight.
682 	 */
683 	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
684 
685 	/**
686 	 * @cgroup_set_bandwidth: A cgroup's bandwidth is being changed
687 	 * @cgrp: cgroup whose bandwidth is being updated
688 	 * @period_us: bandwidth control period
689 	 * @quota_us: bandwidth control quota
690 	 * @burst_us: bandwidth control burst
691 	 *
692 	 * Update @cgrp's bandwidth control parameters. This is from the cpu.max
693 	 * cgroup interface.
694 	 *
695 	 * @quota_us / @period_us determines the CPU bandwidth @cgrp is entitled
696 	 * to. For example, if @period_us is 1_000_000 and @quota_us is
697 	 * 2_500_000, @cgrp is entitled to 2.5 CPUs. @burst_us can be
698 	 * interpreted in the same fashion and specifies how much @cgrp can
699 	 * burst temporarily. The specific control mechanism and thus the
700 	 * interpretation of @period_us and burstiness is up to the BPF
701 	 * scheduler.
702 	 */
703 	void (*cgroup_set_bandwidth)(struct cgroup *cgrp,
704 				     u64 period_us, u64 quota_us, u64 burst_us);
705 
706 #endif	/* CONFIG_EXT_GROUP_SCHED */
707 
708 	/*
709 	 * All online ops must come before ops.cpu_online().
710 	 */
711 
712 	/**
713 	 * @cpu_online: A CPU became online
714 	 * @cpu: CPU which just came up
715 	 *
716 	 * @cpu just came online. @cpu will not call ops.enqueue() or
717 	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
718 	 */
719 	void (*cpu_online)(s32 cpu);
720 
721 	/**
722 	 * @cpu_offline: A CPU is going offline
723 	 * @cpu: CPU which is going offline
724 	 *
725 	 * @cpu is going offline. @cpu will not call ops.enqueue() or
726 	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
727 	 */
728 	void (*cpu_offline)(s32 cpu);
729 
730 	/*
731 	 * All CPU hotplug ops must come before ops.init().
732 	 */
733 
734 	/**
735 	 * @init: Initialize the BPF scheduler
736 	 */
737 	s32 (*init)(void);
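
	/*
	 * ops.init() is sleepable and is typically where custom DSQs are
	 * created. A sketch matching the enqueue/dispatch sketches above;
	 * SHARED_DSQ is an illustrative DSQ ID and -1 means any NUMA node:
	 *
	 *	s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
	 *	{
	 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
	 *	}
	 */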
738 
739 	/**
740 	 * @exit: Clean up after the BPF scheduler
741 	 * @info: Exit info
742 	 *
743 	 * ops.exit() is also called on ops.init() failure, which is a bit
744 	 * unusual. This is to allow rich reporting through @info on how
745 	 * ops.init() failed.
746 	 */
747 	void (*exit)(struct scx_exit_info *info);
748 
749 	/**
750 	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
751 	 */
752 	u32 dispatch_max_batch;
753 
754 	/**
755 	 * @flags: %SCX_OPS_* flags
756 	 */
757 	u64 flags;
758 
759 	/**
760 	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
761 	 * runnable task should be able to wait before being scheduled. The
762 	 * maximum timeout may not exceed the default timeout of 30 seconds.
763 	 *
764 	 * Defaults to the maximum allowed timeout value of 30 seconds.
765 	 */
766 	u32 timeout_ms;
767 
768 	/**
769 	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
770 	 * value of 32768 is used.
771 	 */
772 	u32 exit_dump_len;
773 
774 	/**
775 	 * @hotplug_seq: A sequence number that may be set by the scheduler to
776 	 * detect when a hotplug event has occurred during the loading process.
777 	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
778 	 * load if the sequence number does not match @scx_hotplug_seq on the
779 	 * enable path.
780 	 */
781 	u64 hotplug_seq;
782 
783 	/**
784 	 * @name: BPF scheduler's name
785 	 *
786 	 * Must be a non-zero valid BPF object name including only isalnum(),
787 	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
788 	 * BPF scheduler is enabled.
789 	 */
790 	char name[SCX_OPS_NAME_LEN];
791 
792 	/* internal use only, must be NULL */
793 	void *priv;
794 };
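
/*
 * A BPF scheduler publishes its implementation through a struct_ops map of
 * this type. A minimal registration sketch, assuming the SCX_OPS_DEFINE()
 * convenience macro from the scx tooling headers and the example callbacks
 * sketched in the member comments above:
 *
 *	SCX_OPS_DEFINE(simple_ops,
 *		       .select_cpu	= (void *)simple_select_cpu,
 *		       .enqueue		= (void *)simple_enqueue,
 *		       .dispatch	= (void *)simple_dispatch,
 *		       .init		= (void *)simple_init,
 *		       .flags		= SCX_OPS_KEEP_BUILTIN_IDLE,
 *		       .timeout_ms	= 5000,
 *		       .name		= "simple");
 */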
795 
796 enum scx_opi {
797 	SCX_OPI_BEGIN			= 0,
798 	SCX_OPI_NORMAL_BEGIN		= 0,
799 	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
800 	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
801 	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
802 	SCX_OPI_END			= SCX_OP_IDX(init),
803 };
804 
805 /*
806  * Collection of event counters. Event types are placed in descending order.
807  */
808 struct scx_event_stats {
809 	/*
810 	 * If ops.select_cpu() returns a CPU which can't be used by the task,
811 	 * the core scheduler code silently picks a fallback CPU.
812 	 */
813 	s64		SCX_EV_SELECT_CPU_FALLBACK;
814 
815 	/*
816 	 * When dispatching to a local DSQ, the CPU may have gone offline in
817 	 * the meantime. In this case, the task is bounced to the global DSQ.
818 	 */
819 	s64		SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
820 
821 	/*
822 	 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
823 	 * continued to run because there were no other tasks on the CPU.
824 	 */
825 	s64		SCX_EV_DISPATCH_KEEP_LAST;
826 
827 	/*
828 	 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
829 	 * is dispatched to a local DSQ when exiting.
830 	 */
831 	s64		SCX_EV_ENQ_SKIP_EXITING;
832 
833 	/*
834 	 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
835 	 * migration disabled task skips ops.enqueue() and is dispatched to its
836 	 * local DSQ.
837 	 */
838 	s64		SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;
839 
840 	/*
841 	 * Total number of times a task's time slice was refilled with the
842 	 * default value (SCX_SLICE_DFL).
843 	 */
844 	s64		SCX_EV_REFILL_SLICE_DFL;
845 
846 	/*
847 	 * The total duration of bypass modes in nanoseconds.
848 	 */
849 	s64		SCX_EV_BYPASS_DURATION;
850 
851 	/*
852 	 * The number of tasks dispatched in the bypassing mode.
853 	 */
854 	s64		SCX_EV_BYPASS_DISPATCH;
855 
856 	/*
857 	 * The number of times the bypassing mode has been activated.
858 	 */
859 	s64		SCX_EV_BYPASS_ACTIVATE;
860 };
861 
862 struct scx_sched_pcpu {
863 	/*
864 	 * The event counters are in a per-CPU variable to minimize the
865 	 * accounting overhead. A system-wide view on the event counter is
866 	 * constructed when requested by scx_bpf_events().
867 	 */
868 	struct scx_event_stats	event_stats;
869 };
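
/*
 * A sketch of how a system-wide view can be folded together from the per-CPU
 * counters; @sch is a struct scx_sched pointer and only one field is shown:
 *
 *	struct scx_event_stats events = {};
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		struct scx_event_stats *e = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
 *
 *		events.SCX_EV_SELECT_CPU_FALLBACK += e->SCX_EV_SELECT_CPU_FALLBACK;
 *	}
 */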
870 
871 struct scx_sched {
872 	struct sched_ext_ops	ops;
873 	DECLARE_BITMAP(has_op, SCX_OPI_END);
874 
875 	/*
876 	 * Dispatch queues.
877 	 *
878 	 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability.
879 	 * This is to avoid live-locking in bypass mode where all tasks are
880 	 * dispatched to %SCX_DSQ_GLOBAL and all CPUs consume from it. If
881 	 * per-node split isn't sufficient, it can be further split.
882 	 */
883 	struct rhashtable	dsq_hash;
884 	struct scx_dispatch_q	**global_dsqs;
885 	struct scx_sched_pcpu __percpu *pcpu;
886 
887 	bool			warned_zero_slice:1;
888 	bool			warned_deprecated_rq:1;
889 
890 	atomic_t		exit_kind;
891 	struct scx_exit_info	*exit_info;
892 
893 	struct kobject		kobj;
894 
895 	struct kthread_worker	*helper;
896 	struct irq_work		error_irq_work;
897 	struct kthread_work	disable_work;
898 	struct rcu_work		rcu_work;
899 };
900 
901 enum scx_wake_flags {
902 	/* expose select WF_* flags as enums */
903 	SCX_WAKE_FORK		= WF_FORK,
904 	SCX_WAKE_TTWU		= WF_TTWU,
905 	SCX_WAKE_SYNC		= WF_SYNC,
906 };
907 
908 enum scx_enq_flags {
909 	/* expose select ENQUEUE_* flags as enums */
910 	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
911 	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
912 	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,
913 
914 	/* high 32bits are SCX specific */
915 
916 	/*
917 	 * Set the following to trigger preemption when calling
918 	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
919 	 * current task is cleared to zero and the CPU is kicked into the
920 	 * scheduling path. Implies %SCX_ENQ_HEAD.
921 	 */
922 	SCX_ENQ_PREEMPT		= 1LLU << 32,
923 
924 	/*
925 	 * The task being enqueued was previously enqueued on the current CPU's
926 	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
927 	 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
928 	 * invoked in a ->cpu_release() callback, and the task is again
929 	 * dispatched back to %SCX_DSQ_LOCAL by the current ->enqueue(), the
930 	 * task will not be scheduled on the CPU until at least the next invocation
931 	 * of the ->cpu_acquire() callback.
932 	 */
933 	SCX_ENQ_REENQ		= 1LLU << 40,
934 
935 	/*
936 	 * The task being enqueued is the only task available for the CPU. By
937 	 * default, ext core keeps executing such tasks but when
938 	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
939 	 * %SCX_ENQ_LAST flag set.
940 	 *
941 	 * The BPF scheduler is responsible for triggering a follow-up
942 	 * scheduling event. Otherwise, execution may stall.
943 	 */
944 	SCX_ENQ_LAST		= 1LLU << 41,
945 
946 	/* high 8 bits are internal */
947 	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,
948 
949 	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
950 	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
951 };
952 
953 enum scx_deq_flags {
954 	/* expose select DEQUEUE_* flags as enums */
955 	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,
956 
957 	/* high 32bits are SCX specific */
958 
959 	/*
960 	 * The generic core-sched layer decided to execute the task even though
961 	 * it hasn't been dispatched yet. Dequeue from the BPF side.
962 	 */
963 	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
964 };
965 
966 enum scx_pick_idle_cpu_flags {
967 	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
968 	SCX_PICK_IDLE_IN_NODE	= 1LLU << 1,	/* pick a CPU in the same target NUMA node */
969 };
970 
971 enum scx_kick_flags {
972 	/*
973 	 * Kick the target CPU if idle. Guarantees that the target CPU goes
974 	 * through at least one full scheduling cycle before going idle. If the
975 	 * target CPU can be determined to be currently not idle and about to go
976 	 * through a scheduling cycle before going idle, this is a noop.
977 	 */
978 	SCX_KICK_IDLE		= 1LLU << 0,
979 
980 	/*
981 	 * Preempt the current task and execute the dispatch path. If the
982 	 * current task of the target CPU is an SCX task, its ->scx.slice is
983 	 * cleared to zero before the scheduling path is invoked so that the
984 	 * task expires and the dispatch path is invoked.
985 	 */
986 	SCX_KICK_PREEMPT	= 1LLU << 1,
987 
988 	/*
989 	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
990 	 * return after the target CPU finishes picking the next task.
991 	 */
992 	SCX_KICK_WAIT		= 1LLU << 2,
993 };
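
/*
 * Example usage from BPF scheduler context via the scx_bpf_kick_cpu() kfunc;
 * target_cpu is illustrative:
 *
 *	Wake target_cpu only if it is currently idle:
 *		scx_bpf_kick_cpu(target_cpu, SCX_KICK_IDLE);
 *
 *	Force target_cpu to preempt its current task and re-enter dispatch:
 *		scx_bpf_kick_cpu(target_cpu, SCX_KICK_PREEMPT);
 */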
994 
995 enum scx_tg_flags {
996 	SCX_TG_ONLINE		= 1U << 0,
997 	SCX_TG_INITED		= 1U << 1,
998 };
999 
1000 enum scx_enable_state {
1001 	SCX_ENABLING,
1002 	SCX_ENABLED,
1003 	SCX_DISABLING,
1004 	SCX_DISABLED,
1005 };
1006 
1007 static const char *scx_enable_state_str[] = {
1008 	[SCX_ENABLING]		= "enabling",
1009 	[SCX_ENABLED]		= "enabled",
1010 	[SCX_DISABLING]		= "disabling",
1011 	[SCX_DISABLED]		= "disabled",
1012 };
1013 
1014 /*
1015  * sched_ext_entity->ops_state
1016  *
1017  * Used to track the task ownership between the SCX core and the BPF scheduler.
1018  * State transitions look as follows:
1019  *
1020  * NONE -> QUEUEING -> QUEUED -> DISPATCHING
1021  *   ^              |                 |
1022  *   |              v                 v
1023  *   \-------------------------------/
1024  *
1025  * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
1026  * sites for explanations on the conditions being waited upon and why they are
1027  * safe. Transitions out of them into NONE or QUEUED must store_release and the
1028  * waiters should load_acquire.
1029  *
1030  * Tracking scx_ops_state enables sched_ext core to reliably determine whether
1031  * any given task can be dispatched by the BPF scheduler at all times and thus
1032  * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
1033  * to try to dispatch any task anytime regardless of its state as the SCX core
1034  * can safely reject invalid dispatches.
1035  */
1036 enum scx_ops_state {
1037 	SCX_OPSS_NONE,		/* owned by the SCX core */
1038 	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
1039 	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
1040 	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */
1041 
1042 	/*
1043 	 * QSEQ brands each QUEUED instance so that, when dispatch races
1044 	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
1045 	 * on the task being dispatched.
1046 	 *
1047 	 * As some 32bit archs can't do 64bit store_release/load_acquire,
1048 	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
1049 	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
1050 	 * and runs with IRQ disabled. 30 bits should be sufficient.
1051 	 */
1052 	SCX_OPSS_QSEQ_SHIFT	= 2,
1053 };
1054 
1055 /* Use macros to ensure that the type is unsigned long for the masks */
1056 #define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
1057 #define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
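
/*
 * A sketch of how the state and QSEQ components are pulled apart from
 * p->scx.ops_state (see the wait_ops_state() call sites in ext.c for the
 * real usage):
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	enum scx_ops_state state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 */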
1058 
1059 DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);
1060 
1061 /*
1062  * Return the rq currently locked from an scx callback, or NULL if no rq is
1063  * locked.
1064  */
1065 static inline struct rq *scx_locked_rq(void)
1066 {
1067 	return __this_cpu_read(scx_locked_rq_state);
1068 }
1069 
1070 static inline bool scx_kf_allowed_if_unlocked(void)
1071 {
1072 	return !current->scx.kf_mask;
1073 }
1074 
1075 static inline bool scx_rq_bypassing(struct rq *rq)
1076 {
1077 	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1078 }
1079