xref: /linux/kernel/sched/ext.c (revision c924c5e9b8c65b3a479a90e5e37d74cc8cd9fe0a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11 
12 #define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
13 
14 enum scx_consts {
15 	SCX_DSP_DFL_MAX_BATCH		= 32,
16 	SCX_DSP_MAX_LOOPS		= 32,
17 	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,
18 
19 	SCX_EXIT_BT_LEN			= 64,
20 	SCX_EXIT_MSG_LEN		= 1024,
21 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
22 
23 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
24 
25 	/*
26 	 * Iterating all tasks may take a while. Periodically drop
27 	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
28 	 */
29 	SCX_OPS_TASK_ITER_BATCH		= 32,
30 };
31 
32 enum scx_exit_kind {
33 	SCX_EXIT_NONE,
34 	SCX_EXIT_DONE,
35 
36 	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
37 	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
38 	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
39 	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
40 
41 	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
42 	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
43 	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
44 };
45 
46 /*
47  * An exit code can be specified when exiting with scx_bpf_exit() or
48  * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
49  * respectively. The codes are 64bit values of the following format:
50  *
51  *   Bits: [63  ..  48 47   ..  32 31 .. 0]
52  *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
53  *
54  *   SYS ACT: System-defined exit actions
55  *   SYS RSN: System-defined exit reasons
56  *   USR    : User-defined exit codes and reasons
57  *
58  * Using the above, users may communicate intention and context by ORing system
59  * actions and/or system reasons with a user-defined exit code.
60  */
61 enum scx_exit_code {
62 	/* Reasons */
63 	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,
64 
65 	/* Actions */
66 	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
67 };
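/*
 * For illustration, a hypothetical BPF scheduler reacting to a hotplug event
 * could OR a system action, a system reason and an arbitrary user-defined
 * value into the code passed to scx_bpf_exit(), e.g.:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 1,
 *		     "restarting due to CPU hotplug");
 *
 * The user-defined value 1 is an arbitrary example.
 */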
68 
69 /*
70  * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
71  * being disabled.
72  */
73 struct scx_exit_info {
74 	/* %SCX_EXIT_* - broad category of the exit reason */
75 	enum scx_exit_kind	kind;
76 
77 	/* exit code if gracefully exiting */
78 	s64			exit_code;
79 
80 	/* textual representation of the above */
81 	const char		*reason;
82 
83 	/* backtrace if exiting due to an error */
84 	unsigned long		*bt;
85 	u32			bt_len;
86 
87 	/* informational message */
88 	char			*msg;
89 
90 	/* debug dump */
91 	char			*dump;
92 };
93 
94 /* sched_ext_ops.flags */
95 enum scx_ops_flags {
96 	/*
97 	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
98 	 */
99 	SCX_OPS_KEEP_BUILTIN_IDLE	= 1LLU << 0,
100 
101 	/*
102 	 * By default, if there are no other tasks to run on the CPU, the ext core
103 	 * keeps running the current task even after its slice expires. If this
104 	 * flag is specified, such tasks are passed to ops.enqueue() with
105 	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
106 	 */
107 	SCX_OPS_ENQ_LAST		= 1LLU << 1,
108 
109 	/*
110 	 * An exiting task may schedule after PF_EXITING is set. In such cases,
111 	 * bpf_task_from_pid() may not be able to find the task and if the BPF
112 	 * scheduler depends on pid lookup for dispatching, the task will be
113 	 * lost leading to various issues including RCU grace period stalls.
114 	 *
115 	 * To mask this problem, by default, unhashed tasks are automatically
116 	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
117 	 * depend on pid lookups and wants to handle these tasks directly, the
118 	 * following flag can be used.
119 	 */
120 	SCX_OPS_ENQ_EXITING		= 1LLU << 2,
121 
122 	/*
123 	 * If set, only tasks with policy set to SCHED_EXT are attached to
124 	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
125 	 */
126 	SCX_OPS_SWITCH_PARTIAL		= 1LLU << 3,
127 
128 	/*
129 	 * A migration disabled task can only execute on its current CPU. By
130 	 * default, such tasks are automatically put on the CPU's local DSQ with
131 	 * the default slice on enqueue. If this ops flag is set, they also go
132 	 * through ops.enqueue().
133 	 *
134 	 * A migration disabled task never invokes ops.select_cpu() as it can
135 	 * only select the current CPU. Also, p->cpus_ptr will only contain its
136 	 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
137 	 * and thus may disagree with cpumask_weight(p->cpus_ptr).
138 	 */
139 	SCX_OPS_ENQ_MIGRATION_DISABLED	= 1LLU << 4,
140 
141 	/*
142 	 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
143 	 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
144 	 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
145 	 * transfers. When this optimization is enabled, ops.select_cpu() is
146 	 * skipped in some cases (when racing against the wakee switching out).
147 	 * As the BPF scheduler may depend on ops.select_cpu() being invoked
148 	 * during wakeups, queued wakeup is disabled by default.
149 	 *
150 	 * If this ops flag is set, queued wakeup optimization is enabled and
151 	 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
152 	 * wakee's CPU without preceding ops.select_cpu() even for tasks which
153 	 * may be executed on multiple CPUs.
154 	 */
155 	SCX_OPS_ALLOW_QUEUED_WAKEUP	= 1LLU << 5,
156 
157 	/*
158 	 * If set, enable per-node idle cpumasks. If clear, use a single global
159 	 * flat idle cpumask.
160 	 */
161 	SCX_OPS_BUILTIN_IDLE_PER_NODE	= 1LLU << 6,
162 
163 	/*
164 	 * CPU cgroup support flags
165 	 */
166 	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* cpu.weight */
167 
168 	SCX_OPS_ALL_FLAGS	= SCX_OPS_KEEP_BUILTIN_IDLE |
169 				  SCX_OPS_ENQ_LAST |
170 				  SCX_OPS_ENQ_EXITING |
171 				  SCX_OPS_ENQ_MIGRATION_DISABLED |
172 				  SCX_OPS_ALLOW_QUEUED_WAKEUP |
173 				  SCX_OPS_SWITCH_PARTIAL |
174 				  SCX_OPS_BUILTIN_IDLE_PER_NODE |
175 				  SCX_OPS_HAS_CGROUP_WEIGHT,
176 };
177 
178 /* argument container for ops.init_task() */
179 struct scx_init_task_args {
180 	/*
181 	 * Set if ops.init_task() is being invoked on the fork path, as opposed
182 	 * to the scheduler transition path.
183 	 */
184 	bool			fork;
185 #ifdef CONFIG_EXT_GROUP_SCHED
186 	/* the cgroup the task is joining */
187 	struct cgroup		*cgroup;
188 #endif
189 };
190 
191 /* argument container for ops.exit_task() */
192 struct scx_exit_task_args {
193 	/* Whether the task exited before running on sched_ext. */
194 	bool cancelled;
195 };
196 
197 /* argument container for ops->cgroup_init() */
198 struct scx_cgroup_init_args {
199 	/* the weight of the cgroup [1..10000] */
200 	u32			weight;
201 };
202 
203 enum scx_cpu_preempt_reason {
204 	/* next task is being scheduled by &sched_class_rt */
205 	SCX_CPU_PREEMPT_RT,
206 	/* next task is being scheduled by &sched_class_dl */
207 	SCX_CPU_PREEMPT_DL,
208 	/* next task is being scheduled by &sched_class_stop */
209 	SCX_CPU_PREEMPT_STOP,
210 	/* unknown reason for SCX being preempted */
211 	SCX_CPU_PREEMPT_UNKNOWN,
212 };
213 
214 /*
215  * Argument container for ops->cpu_acquire(). Currently empty, but may be
216  * expanded in the future.
217  */
218 struct scx_cpu_acquire_args {};
219 
220 /* argument container for ops->cpu_release() */
221 struct scx_cpu_release_args {
222 	/* the reason the CPU was preempted */
223 	enum scx_cpu_preempt_reason reason;
224 
225 	/* the task that's going to be scheduled on the CPU */
226 	struct task_struct	*task;
227 };
228 
229 /*
230  * Informational context provided to dump operations.
231  */
232 struct scx_dump_ctx {
233 	enum scx_exit_kind	kind;
234 	s64			exit_code;
235 	const char		*reason;
236 	u64			at_ns;
237 	u64			at_jiffies;
238 };
239 
240 /**
241  * struct sched_ext_ops - Operation table for BPF scheduler implementation
242  *
243  * A BPF scheduler can implement an arbitrary scheduling policy by
244  * implementing and loading operations in this table. Note that a userland
245  * scheduling policy can also be implemented using the BPF scheduler
246  * as a shim layer.
247  */
248 struct sched_ext_ops {
249 	/**
250 	 * @select_cpu: Pick the target CPU for a task which is being woken up
251 	 * @p: task being woken up
252 	 * @prev_cpu: the cpu @p was on before sleeping
253 	 * @wake_flags: SCX_WAKE_*
254 	 *
255 	 * Decision made here isn't final. @p may be moved to any CPU while it
256 	 * is getting dispatched for execution later. However, as @p is not on
257 	 * the rq at this point, getting the eventual execution CPU right here
258 	 * saves a small bit of overhead down the line.
259 	 *
260 	 * If an idle CPU is returned, the CPU is kicked and will try to
261 	 * dispatch. While an explicit custom mechanism can be added,
262 	 * select_cpu() serves as the default way to wake up idle CPUs.
263 	 *
264 	 * @p may be inserted into a DSQ directly by calling
265 	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
266 	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
267 	 * of the CPU returned by this operation.
268 	 *
269 	 * Note that select_cpu() is never called for tasks that can only run
270 	 * on a single CPU or tasks with migration disabled, as they don't have
271 	 * the option to select a different CPU. See select_task_rq() for
272 	 * details.
273 	 */
274 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
275 
276 	/**
277 	 * @enqueue: Enqueue a task on the BPF scheduler
278 	 * @p: task being enqueued
279 	 * @enq_flags: %SCX_ENQ_*
280 	 *
281 	 * @p is ready to run. Insert directly into a DSQ by calling
282 	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
283 	 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
284 	 * the task will stall.
285 	 *
286 	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
287 	 * skipped.
288 	 */
289 	void (*enqueue)(struct task_struct *p, u64 enq_flags);
290 
291 	/**
292 	 * @dequeue: Remove a task from the BPF scheduler
293 	 * @p: task being dequeued
294 	 * @deq_flags: %SCX_DEQ_*
295 	 *
296 	 * Remove @p from the BPF scheduler. This is usually called to isolate
297 	 * the task while updating its scheduling properties (e.g. priority).
298 	 *
299 	 * The ext core keeps track of whether the BPF side owns a given task or
300 	 * not and can gracefully ignore spurious dispatches from BPF side,
301 	 * which makes it safe to not implement this method. However, depending
302 	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
303 	 * scheduling position not being updated across a priority change.
304 	 */
305 	void (*dequeue)(struct task_struct *p, u64 deq_flags);
306 
307 	/**
308 	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
309 	 * @cpu: CPU to dispatch tasks for
310 	 * @prev: previous task being switched out
311 	 *
312 	 * Called when a CPU's local dsq is empty. The operation should dispatch
313 	 * one or more tasks from the BPF scheduler into the DSQs using
314 	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
315 	 * using scx_bpf_dsq_move_to_local().
316 	 *
317 	 * The maximum number of times scx_bpf_dsq_insert() can be called
318 	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
319 	 * ops.dispatch_max_batch. See the comments on top of the two functions
320 	 * for more details.
321 	 *
322 	 * When not %NULL, @prev is an SCX task with its slice depleted. If
323 	 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
324 	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
325 	 * ops.dispatch() returns. To keep executing @prev, return without
326 	 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
327 	 */
328 	void (*dispatch)(s32 cpu, struct task_struct *prev);
329 
330 	/**
331 	 * @tick: Periodic tick
332 	 * @p: task running currently
333 	 *
334 	 * This operation is called every 1/HZ seconds on CPUs which are
335 	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
336 	 * immediate dispatch cycle on the CPU.
337 	 */
338 	void (*tick)(struct task_struct *p);
339 
340 	/**
341 	 * @runnable: A task is becoming runnable on its associated CPU
342 	 * @p: task becoming runnable
343 	 * @enq_flags: %SCX_ENQ_*
344 	 *
345 	 * This and the following three functions can be used to track a task's
346 	 * execution state transitions. A task becomes ->runnable() on a CPU,
347 	 * and then goes through one or more ->running() and ->stopping() pairs
348 	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
349 	 * done running on the CPU.
350 	 *
351 	 * @p is becoming runnable on the CPU because it's
352 	 *
353 	 * - waking up (%SCX_ENQ_WAKEUP)
354 	 * - being moved from another CPU
355 	 * - being restored after being temporarily taken off the queue for an
356 	 *   attribute change.
357 	 *
358 	 * This and ->enqueue() are related but not coupled. This operation
359 	 * notifies @p's state transition and may not be followed by ->enqueue()
360 	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
361 	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
362 	 * task may be ->enqueue()'d without being preceded by this operation
363 	 * e.g. after exhausting its slice.
364 	 */
365 	void (*runnable)(struct task_struct *p, u64 enq_flags);
366 
367 	/**
368 	 * @running: A task is starting to run on its associated CPU
369 	 * @p: task starting to run
370 	 *
371 	 * See ->runnable() for explanation on the task state notifiers.
372 	 */
373 	void (*running)(struct task_struct *p);
374 
375 	/**
376 	 * @stopping: A task is stopping execution
377 	 * @p: task stopping to run
378 	 * @runnable: is task @p still runnable?
379 	 *
380 	 * See ->runnable() for explanation on the task state notifiers. If
381 	 * !@runnable, ->quiescent() will be invoked after this operation
382 	 * returns.
383 	 */
384 	void (*stopping)(struct task_struct *p, bool runnable);
385 
386 	/**
387 	 * @quiescent: A task is becoming not runnable on its associated CPU
388 	 * @p: task becoming not runnable
389 	 * @deq_flags: %SCX_DEQ_*
390 	 *
391 	 * See ->runnable() for explanation on the task state notifiers.
392 	 *
393 	 * @p is becoming quiescent on the CPU because it's
394 	 *
395 	 * - sleeping (%SCX_DEQ_SLEEP)
396 	 * - being moved to another CPU
397 	 * - being temporarily taken off the queue for an attribute change
398 	 *   (%SCX_DEQ_SAVE)
399 	 *
400 	 * This and ->dequeue() are related but not coupled. This operation
401 	 * notifies @p's state transition and may not be preceded by ->dequeue()
402 	 * e.g. when @p is being dispatched to a remote CPU.
403 	 */
404 	void (*quiescent)(struct task_struct *p, u64 deq_flags);
405 
406 	/**
407 	 * @yield: Yield CPU
408 	 * @from: yielding task
409 	 * @to: optional yield target task
410 	 *
411 	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
412 	 * The BPF scheduler should ensure that other available tasks are
413 	 * dispatched before the yielding task. Return value is ignored in this
414 	 * case.
415 	 *
416 	 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
417 	 * scheduler can implement the request, return %true; otherwise, %false.
418 	 */
419 	bool (*yield)(struct task_struct *from, struct task_struct *to);
420 
421 	/**
422 	 * @core_sched_before: Task ordering for core-sched
423 	 * @a: task A
424 	 * @b: task B
425 	 *
426 	 * Used by core-sched to determine the ordering between two tasks. See
427 	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
428 	 * core-sched.
429 	 *
430 	 * Both @a and @b are runnable and may or may not currently be queued on
431 	 * the BPF scheduler. Should return %true if @a should run before @b.
432 	 * %false if there's no required ordering or @b should run before @a.
433 	 *
434 	 * If not specified, the default is ordering them according to when they
435 	 * became runnable.
436 	 */
437 	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
438 
439 	/**
440 	 * @set_weight: Set task weight
441 	 * @p: task to set weight for
442 	 * @weight: new weight [1..10000]
443 	 *
444 	 * Update @p's weight to @weight.
445 	 */
446 	void (*set_weight)(struct task_struct *p, u32 weight);
447 
448 	/**
449 	 * @set_cpumask: Set CPU affinity
450 	 * @p: task to set CPU affinity for
451 	 * @cpumask: cpumask of cpus that @p can run on
452 	 *
453 	 * Update @p's CPU affinity to @cpumask.
454 	 */
455 	void (*set_cpumask)(struct task_struct *p,
456 			    const struct cpumask *cpumask);
457 
458 	/**
459 	 * @update_idle: Update the idle state of a CPU
460 	 * @cpu: CPU to update the idle state for
461 	 * @idle: whether entering or exiting the idle state
462 	 *
463 	 * This operation is called when @cpu enters or leaves the idle
464 	 * state. By default, implementing this operation disables the built-in
465 	 * idle CPU tracking and the following helpers become unavailable:
466 	 *
467 	 * - scx_bpf_select_cpu_dfl()
468 	 * - scx_bpf_test_and_clear_cpu_idle()
469 	 * - scx_bpf_pick_idle_cpu()
470 	 *
471 	 * The user also must implement ops.select_cpu() as the default
472 	 * implementation relies on scx_bpf_select_cpu_dfl().
473 	 *
474 	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
475 	 * tracking.
476 	 */
477 	void (*update_idle)(s32 cpu, bool idle);
478 
479 	/**
480 	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
481 	 * @cpu: The CPU being acquired by the BPF scheduler.
482 	 * @args: Acquire arguments, see the struct definition.
483 	 *
484 	 * A CPU that was previously released from the BPF scheduler is now once
485 	 * again under its control.
486 	 */
487 	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
488 
489 	/**
490 	 * @cpu_release: A CPU is taken away from the BPF scheduler
491 	 * @cpu: The CPU being released by the BPF scheduler.
492 	 * @args: Release arguments, see the struct definition.
493 	 *
494 	 * The specified CPU is no longer under the control of the BPF
495 	 * scheduler. This could be because it was preempted by a higher
496 	 * priority sched_class, though there may be other reasons as well. The
497 	 * caller should consult @args->reason to determine the cause.
498 	 */
499 	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
500 
501 	/**
502 	 * @init_task: Initialize a task to run in a BPF scheduler
503 	 * @p: task to initialize for BPF scheduling
504 	 * @args: init arguments, see the struct definition
505 	 *
506 	 * Either we're loading a BPF scheduler or a new task is being forked.
507 	 * Initialize @p for BPF scheduling. This operation may block and can
508 	 * be used for allocations, and is called exactly once for a task.
509 	 *
510 	 * Return 0 for success, -errno for failure. An error return while
511 	 * loading will abort loading of the BPF scheduler. During a fork, it
512 	 * will abort that specific fork.
513 	 */
514 	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
515 
516 	/**
517 	 * @exit_task: Exit a previously-running task from the system
518 	 * @p: task to exit
519 	 * @args: exit arguments, see the struct definition
520 	 *
521 	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
522 	 * necessary cleanup for @p.
523 	 */
524 	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
525 
526 	/**
527 	 * @enable: Enable BPF scheduling for a task
528 	 * @p: task to enable BPF scheduling for
529 	 *
530 	 * Enable @p for BPF scheduling. enable() is called on @p any time it
531 	 * enters SCX, and is always paired with a matching disable().
532 	 */
533 	void (*enable)(struct task_struct *p);
534 
535 	/**
536 	 * @disable: Disable BPF scheduling for a task
537 	 * @p: task to disable BPF scheduling for
538 	 *
539 	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
540 	 * Disable BPF scheduling for @p. A disable() call is always matched
541 	 * with a prior enable() call.
542 	 */
543 	void (*disable)(struct task_struct *p);
544 
545 	/**
546 	 * @dump: Dump BPF scheduler state on error
547 	 * @ctx: debug dump context
548 	 *
549 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
550 	 */
551 	void (*dump)(struct scx_dump_ctx *ctx);
552 
553 	/**
554 	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
555 	 * @ctx: debug dump context
556 	 * @cpu: CPU to generate debug dump for
557 	 * @idle: @cpu is currently idle without any runnable tasks
558 	 *
559 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
560 	 * @cpu. If @idle is %true and this operation doesn't produce any
561 	 * output, @cpu is skipped for dump.
562 	 */
563 	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
564 
565 	/**
566 	 * @dump_task: Dump BPF scheduler state for a runnable task on error
567 	 * @ctx: debug dump context
568 	 * @p: runnable task to generate debug dump for
569 	 *
570 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
571 	 * @p.
572 	 */
573 	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
574 
575 #ifdef CONFIG_EXT_GROUP_SCHED
576 	/**
577 	 * @cgroup_init: Initialize a cgroup
578 	 * @cgrp: cgroup being initialized
579 	 * @args: init arguments, see the struct definition
580 	 *
581 	 * Either the BPF scheduler is being loaded or @cgrp is being created.
582 	 * Initialize @cgrp for sched_ext. This operation may block.
583 	 *
584 	 * Return 0 for success, -errno for failure. An error return while
585 	 * loading will abort loading of the BPF scheduler. During cgroup
586 	 * creation, it will abort the specific cgroup creation.
587 	 */
588 	s32 (*cgroup_init)(struct cgroup *cgrp,
589 			   struct scx_cgroup_init_args *args);
590 
591 	/**
592 	 * @cgroup_exit: Exit a cgroup
593 	 * @cgrp: cgroup being exited
594 	 *
595 	 * Either the BPF scheduler is being unloaded or @cgrp is being destroyed.
596 	 * Exit @cgrp for sched_ext. This operation may block.
597 	 */
598 	void (*cgroup_exit)(struct cgroup *cgrp);
599 
600 	/**
601 	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
602 	 * @p: task being moved
603 	 * @from: cgroup @p is being moved from
604 	 * @to: cgroup @p is being moved to
605 	 *
606 	 * Prepare @p for move from cgroup @from to @to. This operation may
607 	 * block and can be used for allocations.
608 	 *
609 	 * Return 0 for success, -errno for failure. An error return aborts the
610 	 * migration.
611 	 */
612 	s32 (*cgroup_prep_move)(struct task_struct *p,
613 				struct cgroup *from, struct cgroup *to);
614 
615 	/**
616 	 * @cgroup_move: Commit cgroup move
617 	 * @p: task being moved
618 	 * @from: cgroup @p is being moved from
619 	 * @to: cgroup @p is being moved to
620 	 *
621 	 * Commit the move. @p is dequeued during this operation.
622 	 */
623 	void (*cgroup_move)(struct task_struct *p,
624 			    struct cgroup *from, struct cgroup *to);
625 
626 	/**
627 	 * @cgroup_cancel_move: Cancel cgroup move
628 	 * @p: task whose cgroup move is being canceled
629 	 * @from: cgroup @p was being moved from
630 	 * @to: cgroup @p was being moved to
631 	 *
632 	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
633 	 * Undo the preparation.
634 	 */
635 	void (*cgroup_cancel_move)(struct task_struct *p,
636 				   struct cgroup *from, struct cgroup *to);
637 
638 	/**
639 	 * @cgroup_set_weight: A cgroup's weight is being changed
640 	 * @cgrp: cgroup whose weight is being updated
641 	 * @weight: new weight [1..10000]
642 	 *
643 	 * Update @cgrp's weight to @weight.
644 	 */
645 	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
646 #endif	/* CONFIG_EXT_GROUP_SCHED */
647 
648 	/*
649 	 * All online ops must come before ops.cpu_online().
650 	 */
651 
652 	/**
653 	 * @cpu_online: A CPU became online
654 	 * @cpu: CPU which just came up
655 	 *
656 	 * @cpu just came online. @cpu will not call ops.enqueue() or
657 	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
658 	 */
659 	void (*cpu_online)(s32 cpu);
660 
661 	/**
662 	 * @cpu_offline: A CPU is going offline
663 	 * @cpu: CPU which is going offline
664 	 *
665 	 * @cpu is going offline. @cpu will not call ops.enqueue() or
666 	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
667 	 */
668 	void (*cpu_offline)(s32 cpu);
669 
670 	/*
671 	 * All CPU hotplug ops must come before ops.init().
672 	 */
673 
674 	/**
675 	 * @init: Initialize the BPF scheduler
676 	 */
677 	s32 (*init)(void);
678 
679 	/**
680 	 * @exit: Clean up after the BPF scheduler
681 	 * @info: Exit info
682 	 *
683 	 * ops.exit() is also called on ops.init() failure, which is a bit
684 	 * unusual. This is to allow rich reporting through @info on how
685 	 * ops.init() failed.
686 	 */
687 	void (*exit)(struct scx_exit_info *info);
688 
689 	/**
690 	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
691 	 */
692 	u32 dispatch_max_batch;
693 
694 	/**
695 	 * @flags: %SCX_OPS_* flags
696 	 */
697 	u64 flags;
698 
699 	/**
700 	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
701 	 * runnable task should be able to wait before being scheduled. The
702 	 * maximum timeout may not exceed the default timeout of 30 seconds.
703 	 *
704 	 * Defaults to the maximum allowed timeout value of 30 seconds.
705 	 */
706 	u32 timeout_ms;
707 
708 	/**
709 	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
710 	 * value of 32768 is used.
711 	 */
712 	u32 exit_dump_len;
713 
714 	/**
715 	 * @hotplug_seq: A sequence number that may be set by the scheduler to
716 	 * detect when a hotplug event has occurred during the loading process.
717 	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
718 	 * load if the sequence number does not match @scx_hotplug_seq on the
719 	 * enable path.
720 	 */
721 	u64 hotplug_seq;
722 
723 	/**
724 	 * @name: BPF scheduler's name
725 	 *
726 	 * Must be a non-zero valid BPF object name including only isalnum(),
727 	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
728 	 * BPF scheduler is enabled.
729 	 */
730 	char name[SCX_OPS_NAME_LEN];
731 };
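/*
 * For illustration, a minimal BPF-side scheduler sketch fills in a subset of
 * the above callbacks in a struct_ops map. The callback and scheduler names
 * below are hypothetical:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops minimal_ops = {
 *		.enqueue	= (void *)minimal_enqueue,
 *		.dispatch	= (void *)minimal_dispatch,
 *		.flags		= SCX_OPS_ENQ_LAST,
 *		.timeout_ms	= 5000,
 *		.name		= "minimal",
 *	};
 *
 * Unimplemented operations fall back to the default behaviors described in
 * their respective comments.
 */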
732 
733 enum scx_opi {
734 	SCX_OPI_BEGIN			= 0,
735 	SCX_OPI_NORMAL_BEGIN		= 0,
736 	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
737 	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
738 	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
739 	SCX_OPI_END			= SCX_OP_IDX(init),
740 };
741 
742 enum scx_wake_flags {
743 	/* expose select WF_* flags as enums */
744 	SCX_WAKE_FORK		= WF_FORK,
745 	SCX_WAKE_TTWU		= WF_TTWU,
746 	SCX_WAKE_SYNC		= WF_SYNC,
747 };
748 
749 enum scx_enq_flags {
750 	/* expose select ENQUEUE_* flags as enums */
751 	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
752 	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
753 	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,
754 
755 	/* high 32bits are SCX specific */
756 
757 	/*
758 	 * Set the following to trigger preemption when calling
759 	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
760 	 * current task is cleared to zero and the CPU is kicked into the
761 	 * scheduling path. Implies %SCX_ENQ_HEAD.
762 	 */
763 	SCX_ENQ_PREEMPT		= 1LLU << 32,
764 
765 	/*
766 	 * The task being enqueued was previously enqueued on the current CPU's
767 	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
768 	 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
769 	 * invoked in a ->cpu_release() callback, and the task is again
770 	 * dispatched back to %SCX_DSQ_LOCAL by the current ->enqueue(), the
771 	 * task will not be scheduled on the CPU until at least the next invocation
772 	 * of the ->cpu_acquire() callback.
773 	 */
774 	SCX_ENQ_REENQ		= 1LLU << 40,
775 
776 	/*
777 	 * The task being enqueued is the only task available for the cpu. By
778 	 * default, ext core keeps executing such tasks but when
779 	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
780 	 * %SCX_ENQ_LAST flag set.
781 	 *
782 	 * The BPF scheduler is responsible for triggering a follow-up
783 	 * scheduling event. Otherwise, execution may stall.
784 	 */
785 	SCX_ENQ_LAST		= 1LLU << 41,
786 
787 	/* high 8 bits are internal */
788 	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,
789 
790 	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
791 	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
792 };
793 
794 enum scx_deq_flags {
795 	/* expose select DEQUEUE_* flags as enums */
796 	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,
797 
798 	/* high 32bits are SCX specific */
799 
800 	/*
801 	 * The generic core-sched layer decided to execute the task even though
802 	 * it hasn't been dispatched yet. Dequeue from the BPF side.
803 	 */
804 	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
805 };
806 
807 enum scx_pick_idle_cpu_flags {
808 	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
809 	SCX_PICK_IDLE_IN_NODE	= 1LLU << 1,	/* pick a CPU in the same target NUMA node */
810 };
811 
812 enum scx_kick_flags {
813 	/*
814 	 * Kick the target CPU if idle. Guarantees that the target CPU goes
815 	 * through at least one full scheduling cycle before going idle. If the
816 	 * target CPU can be determined to be currently not idle and going to go
817 	 * through a scheduling cycle before going idle, noop.
818 	 */
819 	SCX_KICK_IDLE		= 1LLU << 0,
820 
821 	/*
822 	 * Preempt the current task and execute the dispatch path. If the
823 	 * current task of the target CPU is an SCX task, its ->scx.slice is
824 	 * cleared to zero before the scheduling path is invoked so that the
825 	 * task expires and the dispatch path is invoked.
826 	 */
827 	SCX_KICK_PREEMPT	= 1LLU << 1,
828 
829 	/*
830 	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
831 	 * return after the target CPU finishes picking the next task.
832 	 */
833 	SCX_KICK_WAIT		= 1LLU << 2,
834 };
835 
836 enum scx_tg_flags {
837 	SCX_TG_ONLINE		= 1U << 0,
838 	SCX_TG_INITED		= 1U << 1,
839 };
840 
841 enum scx_ops_enable_state {
842 	SCX_OPS_ENABLING,
843 	SCX_OPS_ENABLED,
844 	SCX_OPS_DISABLING,
845 	SCX_OPS_DISABLED,
846 };
847 
848 static const char *scx_ops_enable_state_str[] = {
849 	[SCX_OPS_ENABLING]	= "enabling",
850 	[SCX_OPS_ENABLED]	= "enabled",
851 	[SCX_OPS_DISABLING]	= "disabling",
852 	[SCX_OPS_DISABLED]	= "disabled",
853 };
854 
855 /*
856  * sched_ext_entity->ops_state
857  *
858  * Used to track the task ownership between the SCX core and the BPF scheduler.
859  * State transitions look as follows:
860  *
861  * NONE -> QUEUEING -> QUEUED -> DISPATCHING
862  *   ^              |                 |
863  *   |              v                 v
864  *   \-------------------------------/
865  *
866  * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
867  * sites for explanations on the conditions being waited upon and why they are
868  * safe. Transitions out of them into NONE or QUEUED must store_release and the
869  * waiters should load_acquire.
870  *
871  * Tracking scx_ops_state enables sched_ext core to reliably determine whether
872  * any given task can be dispatched by the BPF scheduler at all times and thus
873  * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
874  * to try to dispatch any task anytime regardless of its state as the SCX core
875  * can safely reject invalid dispatches.
876  */
877 enum scx_ops_state {
878 	SCX_OPSS_NONE,		/* owned by the SCX core */
879 	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
880 	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
881 	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */
882 
883 	/*
884 	 * QSEQ brands each QUEUED instance so that, when dispatch races
885 	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
886 	 * on the task being dispatched.
887 	 *
888 	 * As some 32bit archs can't do 64bit store_release/load_acquire,
889 	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
890 	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
891 	 * and runs with IRQ disabled. 30 bits should be sufficient.
892 	 */
893 	SCX_OPSS_QSEQ_SHIFT	= 2,
894 };
895 
896 /* Use macros to ensure that the type is unsigned long for the masks */
897 #define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
898 #define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
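/*
 * For illustration, a sketch of how an ops_state value decomposes given the
 * definitions above:
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	enum scx_ops_state state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss >> SCX_OPSS_QSEQ_SHIFT;
 */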
899 
900 /*
901  * During exit, a task may schedule after losing its PIDs. When disabling the
902  * BPF scheduler, we need to be able to iterate tasks in every state to
903  * guarantee system safety. Maintain a dedicated task list which contains every
904  * task between its fork and eventual free.
905  */
906 static DEFINE_SPINLOCK(scx_tasks_lock);
907 static LIST_HEAD(scx_tasks);
908 
909 /* ops enable/disable */
910 static struct kthread_worker *scx_ops_helper;
911 static DEFINE_MUTEX(scx_ops_enable_mutex);
912 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
913 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
914 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
915 static unsigned long scx_in_softlockup;
916 static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
917 static int scx_ops_bypass_depth;
918 static bool scx_ops_init_task_enabled;
919 static bool scx_switching_all;
920 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
921 
922 static struct sched_ext_ops scx_ops;
923 static bool scx_warned_zero_slice;
924 
925 DEFINE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
926 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
927 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
928 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_migration_disabled);
929 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
930 
931 static struct static_key_false scx_has_op[SCX_OPI_END] =
932 	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
933 
934 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
935 static struct scx_exit_info *scx_exit_info;
936 
937 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
938 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
939 
940 /*
941  * A monotonically increasing sequence number that is incremented every time a
942  * scheduler is enabled. This can be used to check whether any custom sched_ext
943  * scheduler has ever been used in the system.
944  */
945 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
946 
947 /*
948  * The maximum amount of time in jiffies that a task may be runnable without
949  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
950  * scx_ops_error().
951  */
952 static unsigned long scx_watchdog_timeout;
953 
954 /*
955  * The last time the delayed work was run. This delayed work relies on
956  * ksoftirqd being able to run to service timer interrupts, so it's possible
957  * that this work itself could get wedged. To account for this, we check that
958  * it's not stalled in the timer tick, and trigger an error if it is.
959  */
960 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
961 
962 static struct delayed_work scx_watchdog_work;
963 
964 /* for %SCX_KICK_WAIT */
965 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
966 
967 /*
968  * Direct dispatch marker.
969  *
970  * Non-NULL values are used for direct dispatch from enqueue path. A valid
971  * pointer points to the task currently being enqueued. An ERR_PTR value is used
972  * to indicate that direct dispatch has already happened.
973  */
974 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
975 
976 /*
977  * Dispatch queues.
978  *
979  * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
980  * to avoid live-locking in bypass mode where all tasks are dispatched to
981  * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
982  * sufficient, it can be further split.
983  */
984 static struct scx_dispatch_q **global_dsqs;
985 
986 static const struct rhashtable_params dsq_hash_params = {
987 	.key_len		= sizeof_field(struct scx_dispatch_q, id),
988 	.key_offset		= offsetof(struct scx_dispatch_q, id),
989 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
990 };
991 
992 static struct rhashtable dsq_hash;
993 static LLIST_HEAD(dsqs_to_free);
994 
995 /* dispatch buf */
996 struct scx_dsp_buf_ent {
997 	struct task_struct	*task;
998 	unsigned long		qseq;
999 	u64			dsq_id;
1000 	u64			enq_flags;
1001 };
1002 
1003 static u32 scx_dsp_max_batch;
1004 
1005 struct scx_dsp_ctx {
1006 	struct rq		*rq;
1007 	u32			cursor;
1008 	u32			nr_tasks;
1009 	struct scx_dsp_buf_ent	buf[];
1010 };
1011 
1012 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
1013 
1014 /* string formatting from BPF */
1015 struct scx_bstr_buf {
1016 	u64			data[MAX_BPRINTF_VARARGS];
1017 	char			line[SCX_EXIT_MSG_LEN];
1018 };
1019 
1020 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
1021 static struct scx_bstr_buf scx_exit_bstr_buf;
1022 
1023 /* ops debug dump */
1024 struct scx_dump_data {
1025 	s32			cpu;
1026 	bool			first;
1027 	s32			cursor;
1028 	struct seq_buf		*s;
1029 	const char		*prefix;
1030 	struct scx_bstr_buf	buf;
1031 };
1032 
1033 static struct scx_dump_data scx_dump_data = {
1034 	.cpu			= -1,
1035 };
1036 
1037 /* /sys/kernel/sched_ext interface */
1038 static struct kset *scx_kset;
1039 static struct kobject *scx_root_kobj;
1040 
1041 #define CREATE_TRACE_POINTS
1042 #include <trace/events/sched_ext.h>
1043 
1044 static void process_ddsp_deferred_locals(struct rq *rq);
1045 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1046 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1047 					     s64 exit_code,
1048 					     const char *fmt, ...);
1049 
1050 #define scx_ops_error_kind(err, fmt, args...)					\
1051 	scx_ops_exit_kind((err), 0, fmt, ##args)
1052 
1053 #define scx_ops_exit(code, fmt, args...)					\
1054 	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1055 
1056 #define scx_ops_error(fmt, args...)						\
1057 	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1058 
1059 #define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1060 
1061 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1062 {
1063 	if (time_after(at, now))
1064 		return jiffies_to_msecs(at - now);
1065 	else
1066 		return -(long)jiffies_to_msecs(now - at);
1067 }
1068 
1069 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1070 static u32 higher_bits(u32 flags)
1071 {
1072 	return ~((1 << fls(flags)) - 1);
1073 }
1074 
1075 /* return the mask with only the highest bit set */
1076 static u32 highest_bit(u32 flags)
1077 {
1078 	int bit = fls(flags);
1079 	return ((u64)1 << bit) >> 1;
1080 }
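/*
 * For example, higher_bits(0x10) returns a mask with bits [5, 31] set
 * (0xffffffe0) and highest_bit(0x13) returns 0x10.
 */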
1081 
1082 static bool u32_before(u32 a, u32 b)
1083 {
1084 	return (s32)(a - b) < 0;
1085 }
1086 
1087 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1088 {
1089 	return global_dsqs[cpu_to_node(task_cpu(p))];
1090 }
1091 
1092 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1093 {
1094 	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1095 }
1096 
1097 /*
1098  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1099  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1100  * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1101  * whether it's running from an allowed context.
1102  *
1103  * @mask is constant, always inline to cull the mask calculations.
1104  */
1105 static __always_inline void scx_kf_allow(u32 mask)
1106 {
1107 	/* nesting is allowed only in increasing scx_kf_mask order */
1108 	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1109 		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1110 		  current->scx.kf_mask, mask);
1111 	current->scx.kf_mask |= mask;
1112 	barrier();
1113 }
1114 
1115 static void scx_kf_disallow(u32 mask)
1116 {
1117 	barrier();
1118 	current->scx.kf_mask &= ~mask;
1119 }
1120 
1121 #define SCX_CALL_OP(mask, op, args...)						\
1122 do {										\
1123 	if (mask) {								\
1124 		scx_kf_allow(mask);						\
1125 		scx_ops.op(args);						\
1126 		scx_kf_disallow(mask);						\
1127 	} else {								\
1128 		scx_ops.op(args);						\
1129 	}									\
1130 } while (0)
1131 
1132 #define SCX_CALL_OP_RET(mask, op, args...)					\
1133 ({										\
1134 	__typeof__(scx_ops.op(args)) __ret;					\
1135 	if (mask) {								\
1136 		scx_kf_allow(mask);						\
1137 		__ret = scx_ops.op(args);					\
1138 		scx_kf_disallow(mask);						\
1139 	} else {								\
1140 		__ret = scx_ops.op(args);					\
1141 	}									\
1142 	__ret;									\
1143 })
1144 
1145 /*
1146  * Some kfuncs are allowed only on the tasks that are subjects of the
1147  * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1148  * restrictions, the following SCX_CALL_OP_*() variants should be used when
1149  * invoking scx_ops operations that take task arguments. These can only be used
1150  * for non-nesting operations due to the way the tasks are tracked.
1151  *
1152  * kfuncs which can only operate on such tasks can in turn use
1153  * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1154  * the specific task.
1155  */
1156 #define SCX_CALL_OP_TASK(mask, op, task, args...)				\
1157 do {										\
1158 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1159 	current->scx.kf_tasks[0] = task;					\
1160 	SCX_CALL_OP(mask, op, task, ##args);					\
1161 	current->scx.kf_tasks[0] = NULL;					\
1162 } while (0)
1163 
1164 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...)				\
1165 ({										\
1166 	__typeof__(scx_ops.op(task, ##args)) __ret;				\
1167 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1168 	current->scx.kf_tasks[0] = task;					\
1169 	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);			\
1170 	current->scx.kf_tasks[0] = NULL;					\
1171 	__ret;									\
1172 })
1173 
1174 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)			\
1175 ({										\
1176 	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
1177 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1178 	current->scx.kf_tasks[0] = task0;					\
1179 	current->scx.kf_tasks[1] = task1;					\
1180 	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);		\
1181 	current->scx.kf_tasks[0] = NULL;					\
1182 	current->scx.kf_tasks[1] = NULL;					\
1183 	__ret;									\
1184 })
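/*
 * For illustration, invoking a task-argument op from the core looks roughly
 * like the following sketch (SCX_KF_REST stands in for an scx_kf_mask bit
 * defined elsewhere):
 *
 *	if (SCX_HAS_OP(enable))
 *		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
 */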
1185 
1186 /* @mask is constant, always inline to cull unnecessary branches */
1187 static __always_inline bool scx_kf_allowed(u32 mask)
1188 {
1189 	if (unlikely(!(current->scx.kf_mask & mask))) {
1190 		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1191 			      mask, current->scx.kf_mask);
1192 		return false;
1193 	}
1194 
1195 	/*
1196 	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1197 	 * DISPATCH must not be called if we're running DEQUEUE which is nested
1198 	 * inside ops.dispatch(). We don't need to check boundaries for any
1199 	 * blocking kfuncs as the verifier ensures they're only called from
1200 	 * sleepable progs.
1201 	 */
1202 	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1203 		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1204 		scx_ops_error("cpu_release kfunc called from a nested operation");
1205 		return false;
1206 	}
1207 
1208 	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1209 		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1210 		scx_ops_error("dispatch kfunc called from a nested operation");
1211 		return false;
1212 	}
1213 
1214 	return true;
1215 }
1216 
1217 /* see SCX_CALL_OP_TASK() */
1218 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1219 							struct task_struct *p)
1220 {
1221 	if (!scx_kf_allowed(mask))
1222 		return false;
1223 
1224 	if (unlikely((p != current->scx.kf_tasks[0] &&
1225 		      p != current->scx.kf_tasks[1]))) {
1226 		scx_ops_error("called on a task not being operated on");
1227 		return false;
1228 	}
1229 
1230 	return true;
1231 }
1232 
1233 static bool scx_kf_allowed_if_unlocked(void)
1234 {
1235 	return !current->scx.kf_mask;
1236 }
1237 
1238 /**
1239  * nldsq_next_task - Iterate to the next task in a non-local DSQ
1240  * @dsq: user dsq being iterated
1241  * @cur: current position, %NULL to start iteration
1242  * @rev: walk backwards
1243  *
1244  * Returns %NULL when iteration is finished.
1245  */
1246 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1247 					   struct task_struct *cur, bool rev)
1248 {
1249 	struct list_head *list_node;
1250 	struct scx_dsq_list_node *dsq_lnode;
1251 
1252 	lockdep_assert_held(&dsq->lock);
1253 
1254 	if (cur)
1255 		list_node = &cur->scx.dsq_list.node;
1256 	else
1257 		list_node = &dsq->list;
1258 
1259 	/* find the next task, need to skip BPF iteration cursors */
1260 	do {
1261 		if (rev)
1262 			list_node = list_node->prev;
1263 		else
1264 			list_node = list_node->next;
1265 
1266 		if (list_node == &dsq->list)
1267 			return NULL;
1268 
1269 		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1270 					 node);
1271 	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1272 
1273 	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1274 }
1275 
1276 #define nldsq_for_each_task(p, dsq)						\
1277 	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
1278 	     (p) = nldsq_next_task((dsq), (p), false))
1279 
1280 
1281 /*
1282  * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1283  * dispatch order. BPF-visible iterator is opaque and larger to allow future
1284  * changes without breaking backward compatibility. Can be used with
1285  * bpf_for_each(). See bpf_iter_scx_dsq_*().
1286  */
1287 enum scx_dsq_iter_flags {
1288 	/* iterate in the reverse dispatch order */
1289 	SCX_DSQ_ITER_REV		= 1U << 16,
1290 
1291 	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
1292 	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
1293 
1294 	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
1295 	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
1296 					  __SCX_DSQ_ITER_HAS_SLICE |
1297 					  __SCX_DSQ_ITER_HAS_VTIME,
1298 };
1299 
1300 struct bpf_iter_scx_dsq_kern {
1301 	struct scx_dsq_list_node	cursor;
1302 	struct scx_dispatch_q		*dsq;
1303 	u64				slice;
1304 	u64				vtime;
1305 } __attribute__((aligned(8)));
1306 
1307 struct bpf_iter_scx_dsq {
1308 	u64				__opaque[6];
1309 } __attribute__((aligned(8)));
1310 
1311 
1312 /*
1313  * SCX task iterator.
1314  */
1315 struct scx_task_iter {
1316 	struct sched_ext_entity		cursor;
1317 	struct task_struct		*locked;
1318 	struct rq			*rq;
1319 	struct rq_flags			rf;
1320 	u32				cnt;
1321 };
1322 
1323 /**
1324  * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1325  * @iter: iterator to init
1326  *
1327  * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1328  * must eventually be stopped with scx_task_iter_stop().
1329  *
1330  * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1331  * between this and the first next() call or between any two next() calls. If
1332  * the locks are released between two next() calls, the caller is responsible
1333  * for ensuring that the task being iterated remains accessible either through
1334  * RCU read lock or obtaining a reference count.
1335  *
1336  * All tasks which existed when the iteration started are guaranteed to be
1337  * visited as long as they still exist.
1338  */
1339 static void scx_task_iter_start(struct scx_task_iter *iter)
1340 {
1341 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1342 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1343 
1344 	spin_lock_irq(&scx_tasks_lock);
1345 
1346 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1347 	list_add(&iter->cursor.tasks_node, &scx_tasks);
1348 	iter->locked = NULL;
1349 	iter->cnt = 0;
1350 }
1351 
1352 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1353 {
1354 	if (iter->locked) {
1355 		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1356 		iter->locked = NULL;
1357 	}
1358 }
1359 
1360 /**
1361  * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1362  * @iter: iterator to unlock
1363  *
1364  * If @iter is in the middle of a locked iteration, it may be locking the rq of
1365  * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1366  * This function can be safely called anytime during an iteration.
1367  */
1368 static void scx_task_iter_unlock(struct scx_task_iter *iter)
1369 {
1370 	__scx_task_iter_rq_unlock(iter);
1371 	spin_unlock_irq(&scx_tasks_lock);
1372 }
1373 
1374 /**
1375  * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1376  * @iter: iterator to re-lock
1377  *
1378  * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1379  * doesn't re-lock the rq lock. Must be called before other iterator operations.
1380  */
1381 static void scx_task_iter_relock(struct scx_task_iter *iter)
1382 {
1383 	spin_lock_irq(&scx_tasks_lock);
1384 }
1385 
1386 /**
1387  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1388  * @iter: iterator to exit
1389  *
1390  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1391  * which is released on return. If the iterator holds a task's rq lock, that rq
1392  * lock is also released. See scx_task_iter_start() for details.
1393  */
1394 static void scx_task_iter_stop(struct scx_task_iter *iter)
1395 {
1396 	list_del_init(&iter->cursor.tasks_node);
1397 	scx_task_iter_unlock(iter);
1398 }
1399 
1400 /**
1401  * scx_task_iter_next - Next task
1402  * @iter: iterator to walk
1403  *
1404  * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1405  * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
1406  * stalls by holding scx_tasks_lock for too long.
1407  */
1408 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1409 {
1410 	struct list_head *cursor = &iter->cursor.tasks_node;
1411 	struct sched_ext_entity *pos;
1412 
1413 	if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
1414 		scx_task_iter_unlock(iter);
1415 		cond_resched();
1416 		scx_task_iter_relock(iter);
1417 	}
1418 
1419 	list_for_each_entry(pos, cursor, tasks_node) {
1420 		if (&pos->tasks_node == &scx_tasks)
1421 			return NULL;
1422 		if (!(pos->flags & SCX_TASK_CURSOR)) {
1423 			list_move(cursor, &pos->tasks_node);
1424 			return container_of(pos, struct task_struct, scx);
1425 		}
1426 	}
1427 
1428 	/* can't happen, should always terminate at scx_tasks above */
1429 	BUG();
1430 }
1431 
1432 /**
1433  * scx_task_iter_next_locked - Next non-idle task with its rq locked
1434  * @iter: iterator to walk
1435  *
1436  * Visit the next non-idle task with its rq lock held. See
1437  * scx_task_iter_start() for details on the iteration and locking
1438  * guarantees.
1439  */
1440 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1441 {
1442 	struct task_struct *p;
1443 
1444 	__scx_task_iter_rq_unlock(iter);
1445 
1446 	while ((p = scx_task_iter_next(iter))) {
1447 		/*
1448 		 * scx_task_iter is used to prepare and move tasks into SCX
1449 		 * while loading the BPF scheduler and vice-versa while
1450 		 * unloading. The init_tasks ("swappers") should be excluded
1451 		 * from the iteration because:
1452 		 *
1453 		 * - It's unsafe to use __setscheduler_prio() on an init_task to
1454 		 *   determine the sched_class to use as it won't preserve its
1455 		 *   idle_sched_class.
1456 		 *
1457 		 * - ops.init/exit_task() can easily be confused if called with
1458 		 *   init_tasks as they, e.g., share PID 0.
1459 		 *
1460 		 * As init_tasks are never scheduled through SCX, they can be
1461 		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1462 		 * doesn't work here:
1463 		 *
1464 		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1465 		 *   yet been onlined.
1466 		 *
1467 		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1468 		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
1469 		 *
1470 		 * Test for idle_sched_class as only init_tasks are on it.
1471 		 */
1472 		if (p->sched_class != &idle_sched_class)
1473 			break;
1474 	}
1475 	if (!p)
1476 		return NULL;
1477 
1478 	iter->rq = task_rq_lock(p, &iter->rf);
1479 	iter->locked = p;
1480 
1481 	return p;
1482 }
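/*
 * For illustration, a sketch of the typical iteration pattern using the
 * helpers above:
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti))) {
 *		... operate on @p with its rq lock held ...
 *	}
 *	scx_task_iter_stop(&sti);
 */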
1483 
1484 /*
1485  * Collection of event counters. Event types are placed in descending order.
1486  */
1487 struct scx_event_stats {
1488 	/*
1489 	 * If ops.select_cpu() returns a CPU which can't be used by the task,
1490 	 * the core scheduler code silently picks a fallback CPU.
1491 	 */
1492 	s64		SCX_EV_SELECT_CPU_FALLBACK;
1493 
1494 	/*
1495 	 * When dispatching to a local DSQ, the CPU may have gone offline in
1496 	 * the meantime. In this case, the task is bounced to the global DSQ.
1497 	 */
1498 	s64		SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
1499 
1500 	/*
1501 	 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
1502 	 * continued to run because there were no other tasks on the CPU.
1503 	 */
1504 	s64		SCX_EV_DISPATCH_KEEP_LAST;
1505 
1506 	/*
1507 	 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
1508 	 * is dispatched to a local DSQ when exiting.
1509 	 */
1510 	s64		SCX_EV_ENQ_SKIP_EXITING;
1511 
1512 	/*
1513 	 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
1514 	 * migration disabled task skips ops.enqueue() and is dispatched to its
1515 	 * local DSQ.
1516 	 */
1517 	s64		SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;
1518 
1519 	/*
1520 	 * The total number of tasks enqueued (or pick_task-ed) with a
1521 	 * default time slice (SCX_SLICE_DFL).
1522 	 */
1523 	s64		SCX_EV_ENQ_SLICE_DFL;
1524 
1525 	/*
1526 	 * The total duration of bypass modes in nanoseconds.
1527 	 */
1528 	s64		SCX_EV_BYPASS_DURATION;
1529 
1530 	/*
1531 	 * The number of tasks dispatched in the bypassing mode.
1532 	 */
1533 	s64		SCX_EV_BYPASS_DISPATCH;
1534 
1535 	/*
1536 	 * The number of times the bypassing mode has been activated.
1537 	 */
1538 	s64		SCX_EV_BYPASS_ACTIVATE;
1539 };
1540 
1541 /*
1542  * The event counter is organized by a per-CPU variable to minimize the
1543  * accounting overhead without synchronization. A system-wide view on the
1544  * event counter is constructed when requested by scx_bpf_get_event_stat().
1545  */
1546 static DEFINE_PER_CPU(struct scx_event_stats, event_stats_cpu);
1547 
1548 /**
1549  * scx_add_event - Increase an event counter for 'name' by 'cnt'
1550  * @name: an event name defined in struct scx_event_stats
1551  * @cnt: the number of times the event occurred
1552  *
1553  * This can be used when preemption is not disabled.
1554  */
1555 #define scx_add_event(name, cnt) do {						\
1556 	this_cpu_add(event_stats_cpu.name, cnt);				\
1557 	trace_sched_ext_event(#name, cnt);					\
1558 } while(0)
1559 
1560 /**
1561  * __scx_add_event - Increase an event counter for 'name' by 'cnt'
1562  * @name: an event name defined in struct scx_event_stats
1563  * @cnt: the number of times the event occurred
1564  *
1565  * This should be used only when preemption is disabled.
1566  */
1567 #define __scx_add_event(name, cnt) do {						\
1568 	__this_cpu_add(event_stats_cpu.name, cnt);				\
1569 	trace_sched_ext_event(#name, cnt);					\
1570 } while(0)
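/*
 * For example, a preemption-disabled path such as the enqueue path would
 * account a default-slice enqueue with:
 *
 *	__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
 */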
1571 
1572 /**
1573  * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
1574  * @dst_e: destination event stats
1575  * @src_e: source event stats
1576  * @kind: a kind of event to be aggregated
1577  */
1578 #define scx_agg_event(dst_e, src_e, kind) do {					\
1579 	(dst_e)->kind += READ_ONCE((src_e)->kind);				\
1580 } while (0)
1581 
1582 /**
1583  * scx_dump_event - Dump an event 'kind' in 'events' to 's'
1584  * @s: output seq_buf
1585  * @events: event stats
1586  * @kind: a kind of event to dump
1587  */
1588 #define scx_dump_event(s, events, kind) do {					\
1589 	dump_line(&(s), "%40s: %16lld", #kind, (events)->kind);			\
1590 } while (0)
1591 
1592 
1593 static void scx_bpf_events(struct scx_event_stats *events, size_t events__sz);
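
/*
 * Illustrative sketch only (the helper below is hypothetical and unused): a
 * system-wide view can be built by summing every per-CPU counter with
 * scx_agg_event(), which is what scx_bpf_events() is expected to do.
 */
static inline void scx_event_stats_sum_example(struct scx_event_stats *sum)
{
	int cpu;

	memset(sum, 0, sizeof(*sum));
	for_each_possible_cpu(cpu) {
		struct scx_event_stats *e = per_cpu_ptr(&event_stats_cpu, cpu);

		scx_agg_event(sum, e, SCX_EV_SELECT_CPU_FALLBACK);
		scx_agg_event(sum, e, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
		scx_agg_event(sum, e, SCX_EV_DISPATCH_KEEP_LAST);
		scx_agg_event(sum, e, SCX_EV_ENQ_SKIP_EXITING);
		scx_agg_event(sum, e, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
		scx_agg_event(sum, e, SCX_EV_ENQ_SLICE_DFL);
		scx_agg_event(sum, e, SCX_EV_BYPASS_DURATION);
		scx_agg_event(sum, e, SCX_EV_BYPASS_DISPATCH);
		scx_agg_event(sum, e, SCX_EV_BYPASS_ACTIVATE);
	}
}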
1594 
1595 static enum scx_ops_enable_state scx_ops_enable_state(void)
1596 {
1597 	return atomic_read(&scx_ops_enable_state_var);
1598 }
1599 
1600 static enum scx_ops_enable_state
1601 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1602 {
1603 	return atomic_xchg(&scx_ops_enable_state_var, to);
1604 }
1605 
1606 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1607 					enum scx_ops_enable_state from)
1608 {
1609 	int from_v = from;
1610 
1611 	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1612 }
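
/*
 * Illustrative sketch only (hypothetical helper, not used by the scheduler):
 * a guarded transition names the state it expects to be in and the state it
 * wants to move to, so concurrent enable/disable attempts can't both succeed.
 * SCX_OPS_ENABLED and SCX_OPS_DISABLING are the enable states defined earlier
 * in this file.
 */
static inline bool scx_ops_example_start_disable(void)
{
	return scx_ops_tryset_enable_state(SCX_OPS_DISABLING, SCX_OPS_ENABLED);
}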
1613 
1614 static bool scx_rq_bypassing(struct rq *rq)
1615 {
1616 	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1617 }
1618 
1619 /**
1620  * wait_ops_state - Busy-wait the specified ops state to end
1621  * @p: target task
1622  * @opss: state to wait the end of
1623  *
1624  * Busy-wait for @p to transition out of @opss. This can only be used when the
1625  * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
1626  * has load_acquire semantics to ensure that the caller can see the updates made
1627  * in the enqueueing and dispatching paths.
1628  */
1629 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1630 {
1631 	do {
1632 		cpu_relax();
1633 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1634 }
1635 
1636 /**
1637  * ops_cpu_valid - Verify a cpu number
1638  * @cpu: cpu number which came from a BPF ops
1639  * @where: extra information reported on error
1640  *
1641  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1642  * Verify that it is in range and one of the possible cpus. If invalid, trigger
1643  * an ops error.
1644  */
1645 static bool ops_cpu_valid(s32 cpu, const char *where)
1646 {
1647 	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1648 		return true;
1649 	} else {
1650 		scx_ops_error("invalid CPU %d%s%s", cpu,
1651 			      where ? " " : "", where ?: "");
1652 		return false;
1653 	}
1654 }
1655 
1656 /**
1657  * ops_sanitize_err - Sanitize a -errno value
1658  * @ops_name: operation to blame on failure
1659  * @err: -errno value to sanitize
1660  *
1661  * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1662  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1663  * cause misbehaviors. For example, a large negative return from
1664  * ops.init_task() triggers an oops when passed up the call chain because the
1665  * value fails the IS_ERR() test after being encoded with ERR_PTR() and is
1666  * then treated as a valid pointer.
1667  */
1668 static int ops_sanitize_err(const char *ops_name, s32 err)
1669 {
1670 	if (err < 0 && err >= -MAX_ERRNO)
1671 		return err;
1672 
1673 	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1674 	return -EPROTO;
1675 }
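
/*
 * Illustration of the failure mode described above (hypothetical helper,
 * unused): only values in [-MAX_ERRNO, -1] survive the ERR_PTR()/IS_ERR()
 * round trip. This returns true for -EINVAL but false for e.g. -70000, which
 * is why rogue values are converted to -%EPROTO before travelling up.
 */
static inline bool scx_errno_is_recognizable_example(long err)
{
	return IS_ERR(ERR_PTR(err));
}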
1676 
1677 static void run_deferred(struct rq *rq)
1678 {
1679 	process_ddsp_deferred_locals(rq);
1680 }
1681 
1682 #ifdef CONFIG_SMP
1683 static void deferred_bal_cb_workfn(struct rq *rq)
1684 {
1685 	run_deferred(rq);
1686 }
1687 #endif
1688 
1689 static void deferred_irq_workfn(struct irq_work *irq_work)
1690 {
1691 	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1692 
1693 	raw_spin_rq_lock(rq);
1694 	run_deferred(rq);
1695 	raw_spin_rq_unlock(rq);
1696 }
1697 
1698 /**
1699  * schedule_deferred - Schedule execution of deferred actions on an rq
1700  * @rq: target rq
1701  *
1702  * Schedule execution of deferred actions on @rq. Must be called with @rq
1703  * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1704  * can unlock @rq to e.g. migrate tasks to other rqs.
1705  */
1706 static void schedule_deferred(struct rq *rq)
1707 {
1708 	lockdep_assert_rq_held(rq);
1709 
1710 #ifdef CONFIG_SMP
1711 	/*
1712 	 * If in the middle of waking up a task, task_woken_scx() will be called
1713 	 * afterwards which will then run the deferred actions, no need to
1714 	 * schedule anything.
1715 	 */
1716 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1717 		return;
1718 
1719 	/*
1720 	 * If in balance, the balance callbacks will be called before rq lock is
1721 	 * released. Schedule one.
1722 	 */
1723 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1724 		queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1725 				       deferred_bal_cb_workfn);
1726 		return;
1727 	}
1728 #endif
1729 	/*
1730 	 * No scheduler hooks available. Queue an irq work instead. It runs when
1731 	 * IRQs are re-enabled, which may take a bit longer than the scheduler
1732 	 * hooks. The WAKEUP and BALANCE paths above should cover most cases and
1733 	 * the time until IRQs are re-enabled shouldn't be long.
1734 	 */
1735 	irq_work_queue(&rq->scx.deferred_irq_work);
1736 }
1737 
1738 /**
1739  * touch_core_sched - Update timestamp used for core-sched task ordering
1740  * @rq: rq to read clock from, must be locked
1741  * @p: task to update the timestamp for
1742  *
1743  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1744  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1745  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1746  * exhaustion).
1747  */
1748 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1749 {
1750 	lockdep_assert_rq_held(rq);
1751 
1752 #ifdef CONFIG_SCHED_CORE
1753 	/*
1754 	 * It's okay to update the timestamp spuriously. Use
1755 	 * sched_core_disabled() which is cheaper than enabled().
1756 	 *
1757 	 * As this is used to determine ordering between tasks of sibling CPUs,
1758 	 * it may be better to use a per-core dispatch sequence instead.
1759 	 */
1760 	if (!sched_core_disabled())
1761 		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1762 #endif
1763 }
1764 
1765 /**
1766  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1767  * @rq: rq to read clock from, must be locked
1768  * @p: task being dispatched
1769  *
1770  * If the BPF scheduler implements custom core-sched ordering via
1771  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1772  * ordering within each local DSQ. This function is called from dispatch paths
1773  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1774  */
1775 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1776 {
1777 	lockdep_assert_rq_held(rq);
1778 
1779 #ifdef CONFIG_SCHED_CORE
1780 	if (SCX_HAS_OP(core_sched_before))
1781 		touch_core_sched(rq, p);
1782 #endif
1783 }
1784 
1785 static void update_curr_scx(struct rq *rq)
1786 {
1787 	struct task_struct *curr = rq->curr;
1788 	s64 delta_exec;
1789 
1790 	delta_exec = update_curr_common(rq);
1791 	if (unlikely(delta_exec <= 0))
1792 		return;
1793 
1794 	if (curr->scx.slice != SCX_SLICE_INF) {
1795 		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1796 		if (!curr->scx.slice)
1797 			touch_core_sched(rq, curr);
1798 	}
1799 }
1800 
1801 static bool scx_dsq_priq_less(struct rb_node *node_a,
1802 			      const struct rb_node *node_b)
1803 {
1804 	const struct task_struct *a =
1805 		container_of(node_a, struct task_struct, scx.dsq_priq);
1806 	const struct task_struct *b =
1807 		container_of(node_b, struct task_struct, scx.dsq_priq);
1808 
1809 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1810 }
1811 
1812 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1813 {
1814 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1815 	WRITE_ONCE(dsq->nr, dsq->nr + delta);
1816 }
1817 
1818 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1819 			     u64 enq_flags)
1820 {
1821 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
1822 
1823 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1824 	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1825 		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
1826 
1827 	if (!is_local) {
1828 		raw_spin_lock(&dsq->lock);
1829 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1830 			scx_ops_error("attempting to dispatch to a destroyed dsq");
1831 			/* fall back to the global dsq */
1832 			raw_spin_unlock(&dsq->lock);
1833 			dsq = find_global_dsq(p);
1834 			raw_spin_lock(&dsq->lock);
1835 		}
1836 	}
1837 
1838 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1839 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1840 		/*
1841 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1842 		 * their FIFO queues. To avoid confusion and to keep
1843 		 * FIFO-dispatched tasks from accidentally starving
1844 		 * vtime-dispatched ones, disallow any built-in DSQ from doing
1845 		 * vtime ordering of tasks.
1846 		 */
1847 		scx_ops_error("cannot use vtime ordering for built-in DSQs");
1848 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1849 	}
1850 
1851 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1852 		struct rb_node *rbp;
1853 
1854 		/*
1855 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1856 		 * linked to both the rbtree and list on PRIQs, this can only be
1857 		 * tested easily when adding the first task.
1858 		 */
1859 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1860 			     nldsq_next_task(dsq, NULL, false)))
1861 			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1862 				      dsq->id);
1863 
1864 		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1865 		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1866 
1867 		/*
1868 		 * Find the previous task and insert after it on the list so
1869 		 * that @dsq->list is vtime ordered.
1870 		 */
1871 		rbp = rb_prev(&p->scx.dsq_priq);
1872 		if (rbp) {
1873 			struct task_struct *prev =
1874 				container_of(rbp, struct task_struct,
1875 					     scx.dsq_priq);
1876 			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1877 		} else {
1878 			list_add(&p->scx.dsq_list.node, &dsq->list);
1879 		}
1880 	} else {
1881 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1882 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1883 			scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1884 				      dsq->id);
1885 
1886 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1887 			list_add(&p->scx.dsq_list.node, &dsq->list);
1888 		else
1889 			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1890 	}
1891 
1892 	/* seq records the order tasks are queued, used by BPF DSQ iterator */
1893 	dsq->seq++;
1894 	p->scx.dsq_seq = dsq->seq;
1895 
1896 	dsq_mod_nr(dsq, 1);
1897 	p->scx.dsq = dsq;
1898 
1899 	/*
1900 	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1901 	 * direct dispatch path, but we clear them here because the direct
1902 	 * dispatch verdict may be overridden on the enqueue path during e.g.
1903 	 * bypass.
1904 	 */
1905 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1906 	p->scx.ddsp_enq_flags = 0;
1907 
1908 	/*
1909 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1910 	 * match waiters' load_acquire.
1911 	 */
1912 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1913 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1914 
1915 	if (is_local) {
1916 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1917 		bool preempt = false;
1918 
1919 		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1920 		    rq->curr->sched_class == &ext_sched_class) {
1921 			rq->curr->scx.slice = 0;
1922 			preempt = true;
1923 		}
1924 
1925 		if (preempt || sched_class_above(&ext_sched_class,
1926 						 rq->curr->sched_class))
1927 			resched_curr(rq);
1928 	} else {
1929 		raw_spin_unlock(&dsq->lock);
1930 	}
1931 }
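
/*
 * Illustrative BPF-side counterpart, not part of this file: a scheduler
 * inserts into a user DSQ either in FIFO order or in vtime order via the
 * vtime variant, which sets %SCX_ENQ_DSQ_PRIQ internally. Mixing the two on
 * one DSQ, or using vtime ordering on a built-in DSQ, trips the errors above.
 * scx_bpf_dsq_insert_vtime() and the MY_DSQ/fifo_mode names are assumptions
 * made for this sketch.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		if (fifo_mode)
 *			scx_bpf_dsq_insert(p, MY_DSQ, SCX_SLICE_DFL, enq_flags);
 *		else
 *			scx_bpf_dsq_insert_vtime(p, MY_DSQ, SCX_SLICE_DFL,
 *						 p->scx.dsq_vtime, enq_flags);
 *	}
 */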
1932 
1933 static void task_unlink_from_dsq(struct task_struct *p,
1934 				 struct scx_dispatch_q *dsq)
1935 {
1936 	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1937 
1938 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1939 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
1940 		RB_CLEAR_NODE(&p->scx.dsq_priq);
1941 		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1942 	}
1943 
1944 	list_del_init(&p->scx.dsq_list.node);
1945 	dsq_mod_nr(dsq, -1);
1946 }
1947 
1948 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1949 {
1950 	struct scx_dispatch_q *dsq = p->scx.dsq;
1951 	bool is_local = dsq == &rq->scx.local_dsq;
1952 
1953 	if (!dsq) {
1954 		/*
1955 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1956 		 * Unlinking is all that's needed to cancel.
1957 		 */
1958 		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1959 			list_del_init(&p->scx.dsq_list.node);
1960 
1961 		/*
1962 		 * When dispatching directly from the BPF scheduler to a local
1963 		 * DSQ, the task isn't associated with any DSQ but
1964 		 * @p->scx.holding_cpu may be set under the protection of
1965 		 * %SCX_OPSS_DISPATCHING.
1966 		 */
1967 		if (p->scx.holding_cpu >= 0)
1968 			p->scx.holding_cpu = -1;
1969 
1970 		return;
1971 	}
1972 
1973 	if (!is_local)
1974 		raw_spin_lock(&dsq->lock);
1975 
1976 	/*
1977 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1978 	 * change underneath us.
1979 	 */
1980 	if (p->scx.holding_cpu < 0) {
1981 		/* @p must still be on @dsq, dequeue */
1982 		task_unlink_from_dsq(p, dsq);
1983 	} else {
1984 		/*
1985 		 * We're racing against dispatch_to_local_dsq() which already
1986 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1987 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1988 		 * the race.
1989 		 */
1990 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1991 		p->scx.holding_cpu = -1;
1992 	}
1993 	p->scx.dsq = NULL;
1994 
1995 	if (!is_local)
1996 		raw_spin_unlock(&dsq->lock);
1997 }
1998 
1999 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
2000 						    struct task_struct *p)
2001 {
2002 	struct scx_dispatch_q *dsq;
2003 
2004 	if (dsq_id == SCX_DSQ_LOCAL)
2005 		return &rq->scx.local_dsq;
2006 
2007 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
2008 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
2009 
2010 		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
2011 			return find_global_dsq(p);
2012 
2013 		return &cpu_rq(cpu)->scx.local_dsq;
2014 	}
2015 
2016 	if (dsq_id == SCX_DSQ_GLOBAL)
2017 		dsq = find_global_dsq(p);
2018 	else
2019 		dsq = find_user_dsq(dsq_id);
2020 
2021 	if (unlikely(!dsq)) {
2022 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
2023 			      dsq_id, p->comm, p->pid);
2024 		return find_global_dsq(p);
2025 	}
2026 
2027 	return dsq;
2028 }
2029 
2030 static void mark_direct_dispatch(struct task_struct *ddsp_task,
2031 				 struct task_struct *p, u64 dsq_id,
2032 				 u64 enq_flags)
2033 {
2034 	/*
2035 	 * Mark that dispatch already happened from ops.select_cpu() or
2036 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
2037 	 * which can never match a valid task pointer.
2038 	 */
2039 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
2040 
2041 	/* @p must match the task on the enqueue path */
2042 	if (unlikely(p != ddsp_task)) {
2043 		if (IS_ERR(ddsp_task))
2044 			scx_ops_error("%s[%d] already direct-dispatched",
2045 				      p->comm, p->pid);
2046 		else
2047 			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
2048 				      ddsp_task->comm, ddsp_task->pid,
2049 				      p->comm, p->pid);
2050 		return;
2051 	}
2052 
2053 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
2054 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
2055 
2056 	p->scx.ddsp_dsq_id = dsq_id;
2057 	p->scx.ddsp_enq_flags = enq_flags;
2058 }
2059 
2060 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
2061 {
2062 	struct rq *rq = task_rq(p);
2063 	struct scx_dispatch_q *dsq =
2064 		find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2065 
2066 	touch_core_sched_dispatch(rq, p);
2067 
2068 	p->scx.ddsp_enq_flags |= enq_flags;
2069 
2070 	/*
2071 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
2072 	 * double lock a remote rq and enqueue to its local DSQ. For
2073 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
2074 	 * the enqueue so that it's executed when @rq can be unlocked.
2075 	 */
2076 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
2077 		unsigned long opss;
2078 
2079 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
2080 
2081 		switch (opss & SCX_OPSS_STATE_MASK) {
2082 		case SCX_OPSS_NONE:
2083 			break;
2084 		case SCX_OPSS_QUEUEING:
2085 			/*
2086 			 * As @p was never passed to the BPF side, _release is
2087 			 * not strictly necessary. Still do it for consistency.
2088 			 */
2089 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2090 			break;
2091 		default:
2092 			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
2093 				  p->comm, p->pid, opss);
2094 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2095 			break;
2096 		}
2097 
2098 		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
2099 		list_add_tail(&p->scx.dsq_list.node,
2100 			      &rq->scx.ddsp_deferred_locals);
2101 		schedule_deferred(rq);
2102 		return;
2103 	}
2104 
2105 	dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
2106 }
2107 
2108 static bool scx_rq_online(struct rq *rq)
2109 {
2110 	/*
2111 	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
2112 	 * the online state as seen from the BPF scheduler. cpu_active() test
2113 	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
2114 	 * stay set until the current scheduling operation is complete even if
2115 	 * we aren't locking @rq.
2116 	 */
2117 	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
2118 }
2119 
2120 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
2121 			    int sticky_cpu)
2122 {
2123 	struct task_struct **ddsp_taskp;
2124 	unsigned long qseq;
2125 
2126 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
2127 
2128 	/* rq migration */
2129 	if (sticky_cpu == cpu_of(rq))
2130 		goto local_norefill;
2131 
2132 	/*
2133 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2134 	 * is offline and are just running the hotplug path. Don't bother the
2135 	 * BPF scheduler.
2136 	 */
2137 	if (!scx_rq_online(rq))
2138 		goto local;
2139 
2140 	if (scx_rq_bypassing(rq)) {
2141 		__scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
2142 		goto global;
2143 	}
2144 
2145 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2146 		goto direct;
2147 
2148 	/* see %SCX_OPS_ENQ_EXITING */
2149 	if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
2150 	    unlikely(p->flags & PF_EXITING)) {
2151 		__scx_add_event(SCX_EV_ENQ_SKIP_EXITING, 1);
2152 		goto local;
2153 	}
2154 
2155 	/* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
2156 	if (!static_branch_unlikely(&scx_ops_enq_migration_disabled) &&
2157 	    is_migration_disabled(p)) {
2158 		__scx_add_event(SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
2159 		goto local;
2160 	}
2161 
2162 	if (!SCX_HAS_OP(enqueue))
2163 		goto global;
2164 
2165 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2166 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2167 
2168 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2169 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2170 
2171 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2172 	WARN_ON_ONCE(*ddsp_taskp);
2173 	*ddsp_taskp = p;
2174 
2175 	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
2176 
2177 	*ddsp_taskp = NULL;
2178 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2179 		goto direct;
2180 
2181 	/*
2182 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2183 	 * dequeue may be waiting. The store_release matches their load_acquire.
2184 	 */
2185 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2186 	return;
2187 
2188 direct:
2189 	direct_dispatch(p, enq_flags);
2190 	return;
2191 
2192 local:
2193 	/*
2194 	 * For task-ordering, slice refill must be treated as implying the end
2195 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2196 	 * higher priority it becomes from scx_prio_less()'s POV.
2197 	 */
2198 	touch_core_sched(rq, p);
2199 	p->scx.slice = SCX_SLICE_DFL;
2200 	__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
2201 local_norefill:
2202 	dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2203 	return;
2204 
2205 global:
2206 	touch_core_sched(rq, p);	/* see the comment in local: */
2207 	p->scx.slice = SCX_SLICE_DFL;
2208 	__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
2209 	dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2210 }
2211 
2212 static bool task_runnable(const struct task_struct *p)
2213 {
2214 	return !list_empty(&p->scx.runnable_node);
2215 }
2216 
2217 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2218 {
2219 	lockdep_assert_rq_held(rq);
2220 
2221 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2222 		p->scx.runnable_at = jiffies;
2223 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2224 	}
2225 
2226 	/*
2227 	 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2228 	 * appended to the runnable_list.
2229 	 */
2230 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2231 }
2232 
2233 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2234 {
2235 	list_del_init(&p->scx.runnable_node);
2236 	if (reset_runnable_at)
2237 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2238 }
2239 
2240 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2241 {
2242 	int sticky_cpu = p->scx.sticky_cpu;
2243 
2244 	if (enq_flags & ENQUEUE_WAKEUP)
2245 		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2246 
2247 	enq_flags |= rq->scx.extra_enq_flags;
2248 
2249 	if (sticky_cpu >= 0)
2250 		p->scx.sticky_cpu = -1;
2251 
2252 	/*
2253 	 * Restoring a running task will be immediately followed by
2254 	 * set_next_task_scx() which expects the task to not be on the BPF
2255 	 * scheduler as tasks can only start running through local DSQs. Force
2256 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2257 	 */
2258 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2259 		sticky_cpu = cpu_of(rq);
2260 
2261 	if (p->scx.flags & SCX_TASK_QUEUED) {
2262 		WARN_ON_ONCE(!task_runnable(p));
2263 		goto out;
2264 	}
2265 
2266 	set_task_runnable(rq, p);
2267 	p->scx.flags |= SCX_TASK_QUEUED;
2268 	rq->scx.nr_running++;
2269 	add_nr_running(rq, 1);
2270 
2271 	if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2272 		SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2273 
2274 	if (enq_flags & SCX_ENQ_WAKEUP)
2275 		touch_core_sched(rq, p);
2276 
2277 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2278 out:
2279 	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2280 
2281 	if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
2282 	    unlikely(cpu_of(rq) != p->scx.selected_cpu))
2283 		__scx_add_event(SCX_EV_SELECT_CPU_FALLBACK, 1);
2284 }
2285 
2286 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2287 {
2288 	unsigned long opss;
2289 
2290 	/* dequeue is always temporary, don't reset runnable_at */
2291 	clr_task_runnable(p, false);
2292 
2293 	/* acquire ensures that we see the preceding updates on QUEUED */
2294 	opss = atomic_long_read_acquire(&p->scx.ops_state);
2295 
2296 	switch (opss & SCX_OPSS_STATE_MASK) {
2297 	case SCX_OPSS_NONE:
2298 		break;
2299 	case SCX_OPSS_QUEUEING:
2300 		/*
2301 		 * QUEUEING is started and finished while holding @p's rq lock.
2302 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2303 		 */
2304 		BUG();
2305 	case SCX_OPSS_QUEUED:
2306 		if (SCX_HAS_OP(dequeue))
2307 			SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2308 
2309 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2310 					    SCX_OPSS_NONE))
2311 			break;
2312 		fallthrough;
2313 	case SCX_OPSS_DISPATCHING:
2314 		/*
2315 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
2316 		 * wait for the transfer to complete so that @p doesn't get
2317 		 * added to its DSQ after dequeueing is complete.
2318 		 *
2319 		 * As we're waiting on DISPATCHING with the rq locked, the
2320 		 * dispatching side shouldn't try to lock the rq while
2321 		 * DISPATCHING is set. See dispatch_to_local_dsq().
2322 		 *
2323 		 * DISPATCHING shouldn't have qseq set and control can reach
2324 		 * here with NONE @opss from the above QUEUED case block.
2325 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2326 		 */
2327 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
2328 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2329 		break;
2330 	}
2331 }
2332 
2333 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2334 {
2335 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2336 		WARN_ON_ONCE(task_runnable(p));
2337 		return true;
2338 	}
2339 
2340 	ops_dequeue(p, deq_flags);
2341 
2342 	/*
2343 	 * A currently running task which is going off @rq first gets dequeued
2344 	 * and then stops running. As we want running <-> stopping transitions
2345 	 * to be contained within runnable <-> quiescent transitions, trigger
2346 	 * ->stopping() early here instead of in put_prev_task_scx().
2347 	 *
2348 	 * @p may go through multiple stopping <-> running transitions between
2349 	 * here and put_prev_task_scx() if task attribute changes occur while
2350 	 * balance_scx() leaves @rq unlocked. However, they don't contain any
2351 	 * information meaningful to the BPF scheduler and can be suppressed by
2352 	 * skipping the callbacks if the task is !QUEUED.
2353 	 */
2354 	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2355 		update_curr_scx(rq);
2356 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2357 	}
2358 
2359 	if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2360 		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2361 
2362 	if (deq_flags & SCX_DEQ_SLEEP)
2363 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2364 	else
2365 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2366 
2367 	p->scx.flags &= ~SCX_TASK_QUEUED;
2368 	rq->scx.nr_running--;
2369 	sub_nr_running(rq, 1);
2370 
2371 	dispatch_dequeue(rq, p);
2372 	return true;
2373 }
2374 
2375 static void yield_task_scx(struct rq *rq)
2376 {
2377 	struct task_struct *p = rq->curr;
2378 
2379 	if (SCX_HAS_OP(yield))
2380 		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2381 	else
2382 		p->scx.slice = 0;
2383 }
2384 
2385 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2386 {
2387 	struct task_struct *from = rq->curr;
2388 
2389 	if (SCX_HAS_OP(yield))
2390 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2391 	else
2392 		return false;
2393 }
2394 
2395 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2396 					 struct scx_dispatch_q *src_dsq,
2397 					 struct rq *dst_rq)
2398 {
2399 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2400 
2401 	/* @dsq is locked and @p is on @dst_rq */
2402 	lockdep_assert_held(&src_dsq->lock);
2403 	lockdep_assert_rq_held(dst_rq);
2404 
2405 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2406 
2407 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2408 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2409 	else
2410 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2411 
2412 	dsq_mod_nr(dst_dsq, 1);
2413 	p->scx.dsq = dst_dsq;
2414 }
2415 
2416 #ifdef CONFIG_SMP
2417 /**
2418  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2419  * @p: task to move
2420  * @enq_flags: %SCX_ENQ_*
2421  * @src_rq: rq to move the task from, locked on entry, released on return
2422  * @dst_rq: rq to move the task into, locked on return
2423  *
2424  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2425  */
2426 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2427 					  struct rq *src_rq, struct rq *dst_rq)
2428 {
2429 	lockdep_assert_rq_held(src_rq);
2430 
2431 	/* the following marks @p MIGRATING which excludes dequeue */
2432 	deactivate_task(src_rq, p, 0);
2433 	set_task_cpu(p, cpu_of(dst_rq));
2434 	p->scx.sticky_cpu = cpu_of(dst_rq);
2435 
2436 	raw_spin_rq_unlock(src_rq);
2437 	raw_spin_rq_lock(dst_rq);
2438 
2439 	/*
2440 	 * We want to pass scx-specific enq_flags but activate_task() will
2441 	 * truncate the upper 32 bits. As we own @rq, we can pass them through
2442 	 * @rq->scx.extra_enq_flags instead.
2443 	 */
2444 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2445 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2446 	dst_rq->scx.extra_enq_flags = enq_flags;
2447 	activate_task(dst_rq, p, 0);
2448 	dst_rq->scx.extra_enq_flags = 0;
2449 }
2450 
2451 /*
2452  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2453  * differences:
2454  *
2455  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2456  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2457  *   this CPU?".
2458  *
2459  *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2460  *   must be allowed to finish on the CPU that it's currently on regardless of
2461  *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2462  *   BPF scheduler shouldn't attempt to migrate a task which has migration
2463  *   disabled.
2464  *
2465  * - The BPF scheduler is bypassed while the rq is offline and we can always say
2466  *   no to migrations initiated by the BPF scheduler while offline.
2467  *
2468  * The caller must ensure that @p and @rq are on different CPUs.
2469  */
2470 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2471 				      bool enforce)
2472 {
2473 	int cpu = cpu_of(rq);
2474 
2475 	WARN_ON_ONCE(task_cpu(p) == cpu);
2476 
2477 	/*
2478 	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2479 	 * the pinned CPU in migrate_disable_switch() while @p is being switched
2480 	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2481 	 * updated and thus another CPU may see @p on a DSQ in between, leading to
2482 	 * @p passing the below task_allowed_on_cpu() check while migration is
2483 	 * disabled.
2484 	 *
2485 	 * Test the migration disabled state first as the race window is narrow
2486 	 * and the BPF scheduler failing to check migration disabled state can
2487 	 * easily be masked if task_allowed_on_cpu() is done first.
2488 	 */
2489 	if (unlikely(is_migration_disabled(p))) {
2490 		if (enforce)
2491 			scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2492 				      p->comm, p->pid, task_cpu(p), cpu);
2493 		return false;
2494 	}
2495 
2496 	/*
2497 	 * We don't require the BPF scheduler to avoid dispatching to offline
2498 	 * CPUs mostly for convenience but also because CPUs can go offline
2499 	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2500 	 * picked CPU is outside the allowed mask.
2501 	 */
2502 	if (!task_allowed_on_cpu(p, cpu)) {
2503 		if (enforce)
2504 			scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2505 				      cpu, p->comm, p->pid);
2506 		return false;
2507 	}
2508 
2509 	if (!scx_rq_online(rq)) {
2510 		if (enforce)
2511 			__scx_add_event(SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
2512 		return false;
2513 	}
2514 
2515 	return true;
2516 }
2517 
2518 /**
2519  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2520  * @p: target task
2521  * @dsq: locked DSQ @p is currently on
2522  * @src_rq: rq @p is currently on, stable with @dsq locked
2523  *
2524  * Called with @dsq locked but no rq's locked. We want to move @p to a different
2525  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2526  * required when transferring into a local DSQ. Even when transferring into a
2527  * non-local DSQ, it's better to use the same mechanism to protect against
2528  * dequeues and maintain the invariant that @p->scx.dsq can only change while
2529  * @src_rq is locked, which e.g. scx_dump_task() depends on.
2530  *
2531  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2532  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2533  * this may race with dequeue, which can't drop the rq lock or fail, do a little
2534  * dancing from our side.
2535  *
2536  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2537  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2538  * would be cleared to -1. While other cpus may have updated it to different
2539  * values afterwards, as this operation can't be preempted or recurse, the
2540  * holding_cpu can never become this CPU again before we're done. Thus, we can
2541  * tell whether we lost to dequeue by testing whether the holding_cpu still
2542  * points to this CPU. See dispatch_dequeue() for the counterpart.
2543  *
2544  * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2545  * still valid. %false if lost to dequeue.
2546  */
2547 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2548 				       struct scx_dispatch_q *dsq,
2549 				       struct rq *src_rq)
2550 {
2551 	s32 cpu = raw_smp_processor_id();
2552 
2553 	lockdep_assert_held(&dsq->lock);
2554 
2555 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2556 	task_unlink_from_dsq(p, dsq);
2557 	p->scx.holding_cpu = cpu;
2558 
2559 	raw_spin_unlock(&dsq->lock);
2560 	raw_spin_rq_lock(src_rq);
2561 
2562 	/* task_rq couldn't have changed if we're still the holding cpu */
2563 	return likely(p->scx.holding_cpu == cpu) &&
2564 		!WARN_ON_ONCE(src_rq != task_rq(p));
2565 }
2566 
2567 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2568 				struct scx_dispatch_q *dsq, struct rq *src_rq)
2569 {
2570 	raw_spin_rq_unlock(this_rq);
2571 
2572 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2573 		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2574 		return true;
2575 	} else {
2576 		raw_spin_rq_unlock(src_rq);
2577 		raw_spin_rq_lock(this_rq);
2578 		return false;
2579 	}
2580 }
2581 #else	/* CONFIG_SMP */
2582 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2583 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool enforce) { return false; }
2584 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2585 #endif	/* CONFIG_SMP */
2586 
2587 /**
2588  * move_task_between_dsqs() - Move a task from one DSQ to another
2589  * @p: target task
2590  * @enq_flags: %SCX_ENQ_*
2591  * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2592  * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2593  *
2594  * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2595  * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2596  * will change. As @p's task_rq is locked, this function doesn't need to use the
2597  * holding_cpu mechanism.
2598  *
2599  * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2600  * return value, is locked.
2601  */
2602 static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
2603 					 struct scx_dispatch_q *src_dsq,
2604 					 struct scx_dispatch_q *dst_dsq)
2605 {
2606 	struct rq *src_rq = task_rq(p), *dst_rq;
2607 
2608 	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2609 	lockdep_assert_held(&src_dsq->lock);
2610 	lockdep_assert_rq_held(src_rq);
2611 
2612 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2613 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2614 		if (src_rq != dst_rq &&
2615 		    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2616 			dst_dsq = find_global_dsq(p);
2617 			dst_rq = src_rq;
2618 		}
2619 	} else {
2620 		/* no need to migrate if destination is a non-local DSQ */
2621 		dst_rq = src_rq;
2622 	}
2623 
2624 	/*
2625 	 * Move @p into @dst_dsq. If @dst_dsq is the local DSQ of a different
2626 	 * CPU, @p will be migrated.
2627 	 */
2628 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2629 		/* @p is going from a non-local DSQ to a local DSQ */
2630 		if (src_rq == dst_rq) {
2631 			task_unlink_from_dsq(p, src_dsq);
2632 			move_local_task_to_local_dsq(p, enq_flags,
2633 						     src_dsq, dst_rq);
2634 			raw_spin_unlock(&src_dsq->lock);
2635 		} else {
2636 			raw_spin_unlock(&src_dsq->lock);
2637 			move_remote_task_to_local_dsq(p, enq_flags,
2638 						      src_rq, dst_rq);
2639 		}
2640 	} else {
2641 		/*
2642 		 * @p is going from a non-local DSQ to a non-local DSQ. As
2643 		 * @src_dsq is already locked, do an abbreviated dequeue.
2644 		 */
2645 		task_unlink_from_dsq(p, src_dsq);
2646 		p->scx.dsq = NULL;
2647 		raw_spin_unlock(&src_dsq->lock);
2648 
2649 		dispatch_enqueue(dst_dsq, p, enq_flags);
2650 	}
2651 
2652 	return dst_rq;
2653 }
2654 
2655 /*
2656  * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2657  * banging on the same DSQ on a large NUMA system to the point where switching
2658  * to the bypass mode can take a long time. Inject artificial delays while the
2659  * bypass mode is switching to guarantee timely completion.
2660  */
2661 static void scx_ops_breather(struct rq *rq)
2662 {
2663 	u64 until;
2664 
2665 	lockdep_assert_rq_held(rq);
2666 
2667 	if (likely(!atomic_read(&scx_ops_breather_depth)))
2668 		return;
2669 
2670 	raw_spin_rq_unlock(rq);
2671 
2672 	until = ktime_get_ns() + NSEC_PER_MSEC;
2673 
2674 	do {
2675 		int cnt = 1024;
2676 		while (atomic_read(&scx_ops_breather_depth) && --cnt)
2677 			cpu_relax();
2678 	} while (atomic_read(&scx_ops_breather_depth) &&
2679 		 time_before64(ktime_get_ns(), until));
2680 
2681 	raw_spin_rq_lock(rq);
2682 }
2683 
2684 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2685 {
2686 	struct task_struct *p;
2687 retry:
2688 	/*
2689 	 * This retry loop can repeatedly race against scx_ops_bypass()
2690 	 * dequeueing tasks from @dsq trying to put the system into the bypass
2691 	 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can
2692 	 * live-lock the machine into soft lockups. Give a breather.
2693 	 */
2694 	scx_ops_breather(rq);
2695 
2696 	/*
2697 	 * The caller can't expect to successfully consume a task if the task's
2698 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
2699 	 * @dsq->list without locking and skip if it seems empty.
2700 	 */
2701 	if (list_empty(&dsq->list))
2702 		return false;
2703 
2704 	raw_spin_lock(&dsq->lock);
2705 
2706 	nldsq_for_each_task(p, dsq) {
2707 		struct rq *task_rq = task_rq(p);
2708 
2709 		if (rq == task_rq) {
2710 			task_unlink_from_dsq(p, dsq);
2711 			move_local_task_to_local_dsq(p, 0, dsq, rq);
2712 			raw_spin_unlock(&dsq->lock);
2713 			return true;
2714 		}
2715 
2716 		if (task_can_run_on_remote_rq(p, rq, false)) {
2717 			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2718 				return true;
2719 			goto retry;
2720 		}
2721 	}
2722 
2723 	raw_spin_unlock(&dsq->lock);
2724 	return false;
2725 }
2726 
2727 static bool consume_global_dsq(struct rq *rq)
2728 {
2729 	int node = cpu_to_node(cpu_of(rq));
2730 
2731 	return consume_dispatch_q(rq, global_dsqs[node]);
2732 }
2733 
2734 /**
2735  * dispatch_to_local_dsq - Dispatch a task to a local dsq
2736  * @rq: current rq which is locked
2737  * @dst_dsq: destination DSQ
2738  * @p: task to dispatch
2739  * @enq_flags: %SCX_ENQ_*
2740  *
2741  * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2742  * DSQ. This function performs all the synchronization dancing needed because
2743  * local DSQs are protected with rq locks.
2744  *
2745  * The caller must have exclusive ownership of @p (e.g. through
2746  * %SCX_OPSS_DISPATCHING).
2747  */
2748 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2749 				  struct task_struct *p, u64 enq_flags)
2750 {
2751 	struct rq *src_rq = task_rq(p);
2752 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2753 #ifdef CONFIG_SMP
2754 	struct rq *locked_rq = rq;
2755 #endif
2756 
2757 	/*
2758 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
2759 	 * be dequeued, its task_rq and cpus_allowed are stable too.
2760 	 *
2761 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
2762 	 */
2763 	if (rq == src_rq && rq == dst_rq) {
2764 		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2765 		return;
2766 	}
2767 
2768 #ifdef CONFIG_SMP
2769 	if (src_rq != dst_rq &&
2770 	    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2771 		dispatch_enqueue(find_global_dsq(p), p,
2772 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
2773 		return;
2774 	}
2775 
2776 	/*
2777 	 * @p is on a possibly remote @src_rq which we need to lock to move the
2778 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2779 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
2780 	 * DISPATCHING.
2781 	 *
2782 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2783 	 * we're moving from a DSQ and use the same mechanism - mark the task
2784 	 * under transfer with holding_cpu, release DISPATCHING and then follow
2785 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
2786 	 */
2787 	p->scx.holding_cpu = raw_smp_processor_id();
2788 
2789 	/* store_release ensures that dequeue sees the above */
2790 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2791 
2792 	/* switch to @src_rq lock */
2793 	if (locked_rq != src_rq) {
2794 		raw_spin_rq_unlock(locked_rq);
2795 		locked_rq = src_rq;
2796 		raw_spin_rq_lock(src_rq);
2797 	}
2798 
2799 	/* task_rq couldn't have changed if we're still the holding cpu */
2800 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2801 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
2802 		/*
2803 		 * If @p is staying on the same rq, there's no need to go
2804 		 * through the full deactivate/activate cycle. Optimize by
2805 		 * abbreviating move_remote_task_to_local_dsq().
2806 		 */
2807 		if (src_rq == dst_rq) {
2808 			p->scx.holding_cpu = -1;
2809 			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2810 		} else {
2811 			move_remote_task_to_local_dsq(p, enq_flags,
2812 						      src_rq, dst_rq);
2813 			/* task has been moved to dst_rq, which is now locked */
2814 			locked_rq = dst_rq;
2815 		}
2816 
2817 		/* preempt the destination CPU if it's running a lower class, e.g. idle */
2818 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2819 			resched_curr(dst_rq);
2820 	}
2821 
2822 	/* switch back to @rq lock */
2823 	if (locked_rq != rq) {
2824 		raw_spin_rq_unlock(locked_rq);
2825 		raw_spin_rq_lock(rq);
2826 	}
2827 #else	/* CONFIG_SMP */
2828 	BUG();	/* control cannot reach here on UP */
2829 #endif	/* CONFIG_SMP */
2830 }
2831 
2832 /**
2833  * finish_dispatch - Asynchronously finish dispatching a task
2834  * @rq: current rq which is locked
2835  * @p: task to finish dispatching
2836  * @qseq_at_dispatch: qseq when @p started getting dispatched
2837  * @dsq_id: destination DSQ ID
2838  * @enq_flags: %SCX_ENQ_*
2839  *
2840  * Dispatching to local DSQs may need to wait for queueing to complete or
2841  * require rq lock dancing. As we don't wanna do either while inside
2842  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2843  * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2844  * task and its qseq. Once ops.dispatch() returns, this function is called to
2845  * finish up.
2846  *
2847  * There is no guarantee that @p is still valid for dispatching or even that it
2848  * was valid in the first place. Make sure that the task is still owned by the
2849  * BPF scheduler and claim the ownership before dispatching.
2850  */
2851 static void finish_dispatch(struct rq *rq, struct task_struct *p,
2852 			    unsigned long qseq_at_dispatch,
2853 			    u64 dsq_id, u64 enq_flags)
2854 {
2855 	struct scx_dispatch_q *dsq;
2856 	unsigned long opss;
2857 
2858 	touch_core_sched_dispatch(rq, p);
2859 retry:
2860 	/*
2861 	 * No need for _acquire here. @p is accessed only after a successful
2862 	 * try_cmpxchg to DISPATCHING.
2863 	 */
2864 	opss = atomic_long_read(&p->scx.ops_state);
2865 
2866 	switch (opss & SCX_OPSS_STATE_MASK) {
2867 	case SCX_OPSS_DISPATCHING:
2868 	case SCX_OPSS_NONE:
2869 		/* someone else already got to it */
2870 		return;
2871 	case SCX_OPSS_QUEUED:
2872 		/*
2873 		 * If qseq doesn't match, @p has gone through at least one
2874 		 * dispatch/dequeue and re-enqueue cycle between
2875 		 * scx_bpf_dsq_insert() and here and we have no claim on it.
2876 		 */
2877 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2878 			return;
2879 
2880 		/*
2881 		 * While we know @p is accessible, we don't yet have a claim on
2882 		 * it - the BPF scheduler is allowed to dispatch tasks
2883 		 * spuriously and there can be a racing dequeue attempt. Let's
2884 		 * claim @p by atomically transitioning it from QUEUED to
2885 		 * DISPATCHING.
2886 		 */
2887 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2888 						   SCX_OPSS_DISPATCHING)))
2889 			break;
2890 		goto retry;
2891 	case SCX_OPSS_QUEUEING:
2892 		/*
2893 		 * do_enqueue_task() is in the process of transferring the task
2894 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
2895 		 * holding any kernel or BPF resource that the enqueue path may
2896 		 * depend upon, it's safe to wait.
2897 		 */
2898 		wait_ops_state(p, opss);
2899 		goto retry;
2900 	}
2901 
2902 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2903 
2904 	dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2905 
2906 	if (dsq->id == SCX_DSQ_LOCAL)
2907 		dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2908 	else
2909 		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2910 }
2911 
2912 static void flush_dispatch_buf(struct rq *rq)
2913 {
2914 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2915 	u32 u;
2916 
2917 	for (u = 0; u < dspc->cursor; u++) {
2918 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2919 
2920 		finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2921 				ent->enq_flags);
2922 	}
2923 
2924 	dspc->nr_tasks += dspc->cursor;
2925 	dspc->cursor = 0;
2926 }
2927 
2928 static int balance_one(struct rq *rq, struct task_struct *prev)
2929 {
2930 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2931 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
2932 	bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2933 	int nr_loops = SCX_DSP_MAX_LOOPS;
2934 
2935 	lockdep_assert_rq_held(rq);
2936 	rq->scx.flags |= SCX_RQ_IN_BALANCE;
2937 	rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
2938 
2939 	if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2940 	    unlikely(rq->scx.cpu_released)) {
2941 		/*
2942 		 * If the previous sched_class for the current CPU was not SCX,
2943 		 * notify the BPF scheduler that it again has control of the
2944 		 * core. This callback complements ->cpu_release(), which is
2945 		 * emitted in switch_class().
2946 		 */
2947 		if (SCX_HAS_OP(cpu_acquire))
2948 			SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
2949 		rq->scx.cpu_released = false;
2950 	}
2951 
2952 	if (prev_on_scx) {
2953 		update_curr_scx(rq);
2954 
2955 		/*
2956 		 * If @prev is runnable & has slice left, it has priority and
2957 		 * fetching more just increases latency for the fetched tasks.
2958 		 * Tell pick_task_scx() to keep running @prev. If the BPF
2959 		 * scheduler wants to handle this explicitly, it should
2960 		 * implement ->cpu_release().
2961 		 *
2962 		 * See scx_ops_disable_workfn() for the explanation on the
2963 		 * bypassing test.
2964 		 */
2965 		if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2966 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2967 			goto has_tasks;
2968 		}
2969 	}
2970 
2971 	/* if there already are tasks to run, nothing to do */
2972 	if (rq->scx.local_dsq.nr)
2973 		goto has_tasks;
2974 
2975 	if (consume_global_dsq(rq))
2976 		goto has_tasks;
2977 
2978 	if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2979 		goto no_tasks;
2980 
2981 	dspc->rq = rq;
2982 
2983 	/*
2984 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2985 	 * the local DSQ might still end up empty after a successful
2986 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2987 	 * produced some tasks, retry. The BPF scheduler may depend on this
2988 	 * looping behavior to simplify its implementation.
2989 	 */
2990 	do {
2991 		dspc->nr_tasks = 0;
2992 
2993 		SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2994 			    prev_on_scx ? prev : NULL);
2995 
2996 		flush_dispatch_buf(rq);
2997 
2998 		if (prev_on_rq && prev->scx.slice) {
2999 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
3000 			goto has_tasks;
3001 		}
3002 		if (rq->scx.local_dsq.nr)
3003 			goto has_tasks;
3004 		if (consume_global_dsq(rq))
3005 			goto has_tasks;
3006 
3007 		/*
3008 		 * ops.dispatch() can trap us in this loop by repeatedly
3009 		 * dispatching ineligible tasks. Break out once in a while to
3010 		 * allow the watchdog to run. As IRQs can't be enabled in
3011 		 * balance(), we want to complete this scheduling cycle and then
3012 		 * start a new one. IOW, we want to call resched_curr() on the
3013 		 * next, most likely idle, task, not the current one. Use
3014 		 * scx_bpf_kick_cpu() for deferred kicking.
3015 		 */
3016 		if (unlikely(!--nr_loops)) {
3017 			scx_bpf_kick_cpu(cpu_of(rq), 0);
3018 			break;
3019 		}
3020 	} while (dspc->nr_tasks);
3021 
3022 no_tasks:
3023 	/*
3024 	 * Didn't find another task to run. Keep running @prev unless
3025 	 * %SCX_OPS_ENQ_LAST is in effect.
3026 	 */
3027 	if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
3028 	     scx_rq_bypassing(rq))) {
3029 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
3030 		__scx_add_event(SCX_EV_DISPATCH_KEEP_LAST, 1);
3031 		goto has_tasks;
3032 	}
3033 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
3034 	return false;
3035 
3036 has_tasks:
3037 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
3038 	return true;
3039 }
3040 
3041 static int balance_scx(struct rq *rq, struct task_struct *prev,
3042 		       struct rq_flags *rf)
3043 {
3044 	int ret;
3045 
3046 	rq_unpin_lock(rq, rf);
3047 
3048 	ret = balance_one(rq, prev);
3049 
3050 #ifdef CONFIG_SCHED_SMT
3051 	/*
3052 	 * When core-sched is enabled, this ops.balance() call will be followed
3053 	 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
3054 	 * siblings too.
3055 	 */
3056 	if (sched_core_enabled(rq)) {
3057 		const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
3058 		int scpu;
3059 
3060 		for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
3061 			struct rq *srq = cpu_rq(scpu);
3062 			struct task_struct *sprev = srq->curr;
3063 
3064 			WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
3065 			update_rq_clock(srq);
3066 			balance_one(srq, sprev);
3067 		}
3068 	}
3069 #endif
3070 	rq_repin_lock(rq, rf);
3071 
3072 	return ret;
3073 }
3074 
3075 static void process_ddsp_deferred_locals(struct rq *rq)
3076 {
3077 	struct task_struct *p;
3078 
3079 	lockdep_assert_rq_held(rq);
3080 
3081 	/*
3082 	 * Now that @rq can be unlocked, execute the deferred enqueueing of
3083 	 * tasks directly dispatched to the local DSQs of other CPUs. See
3084 	 * direct_dispatch(). Keep popping from the head instead of using
3085 	 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
3086 	 * temporarily.
3087 	 */
3088 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
3089 				struct task_struct, scx.dsq_list.node))) {
3090 		struct scx_dispatch_q *dsq;
3091 
3092 		list_del_init(&p->scx.dsq_list.node);
3093 
3094 		dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
3095 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
3096 			dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
3097 	}
3098 }
3099 
3100 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
3101 {
3102 	if (p->scx.flags & SCX_TASK_QUEUED) {
3103 		/*
3104 		 * Core-sched might decide to execute @p before it is
3105 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
3106 		 */
3107 		ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
3108 		dispatch_dequeue(rq, p);
3109 	}
3110 
3111 	p->se.exec_start = rq_clock_task(rq);
3112 
3113 	/* see dequeue_task_scx() on why we skip when !QUEUED */
3114 	if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
3115 		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
3116 
3117 	clr_task_runnable(p, true);
3118 
3119 	/*
3120 	 * @p is getting newly scheduled or got kicked after someone updated its
3121 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
3122 	 */
3123 	if ((p->scx.slice == SCX_SLICE_INF) !=
3124 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
3125 		if (p->scx.slice == SCX_SLICE_INF)
3126 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
3127 		else
3128 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
3129 
3130 		sched_update_tick_dependency(rq);
3131 
3132 		/*
3133 		 * For now, let's refresh the load_avgs just when transitioning
3134 		 * in and out of nohz. In the future, we might want to add a
3135 		 * mechanism which calls the following periodically on
3136 		 * tick-stopped CPUs.
3137 		 */
3138 		update_other_load_avgs(rq);
3139 	}
3140 }
3141 
3142 static enum scx_cpu_preempt_reason
3143 preempt_reason_from_class(const struct sched_class *class)
3144 {
3145 #ifdef CONFIG_SMP
3146 	if (class == &stop_sched_class)
3147 		return SCX_CPU_PREEMPT_STOP;
3148 #endif
3149 	if (class == &dl_sched_class)
3150 		return SCX_CPU_PREEMPT_DL;
3151 	if (class == &rt_sched_class)
3152 		return SCX_CPU_PREEMPT_RT;
3153 	return SCX_CPU_PREEMPT_UNKNOWN;
3154 }
3155 
3156 static void switch_class(struct rq *rq, struct task_struct *next)
3157 {
3158 	const struct sched_class *next_class = next->sched_class;
3159 
3160 #ifdef CONFIG_SMP
3161 	/*
3162 	 * Pairs with the smp_load_acquire() issued by a CPU in
3163 	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
3164 	 * resched.
3165 	 */
3166 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
3167 #endif
3168 	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
3169 		return;
3170 
3171 	/*
3172 	 * The callback is conceptually meant to convey that the CPU is no
3173 	 * longer under the control of SCX. Therefore, don't invoke the callback
3174 	 * if the next class is below SCX (in which case the BPF scheduler has
3175 	 * actively decided not to schedule any tasks on the CPU).
3176 	 */
3177 	if (sched_class_above(&ext_sched_class, next_class))
3178 		return;
3179 
3180 	/*
3181 	 * At this point we know that SCX was preempted by a higher priority
3182 	 * sched_class, so invoke the ->cpu_release() callback if we have not
3183 	 * done so already. We only send the callback once between SCX being
3184 	 * preempted, and it regaining control of the CPU.
3185 	 *
3186 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3187 	 * next time that balance_scx() is invoked.
3188 	 */
3189 	if (!rq->scx.cpu_released) {
3190 		if (SCX_HAS_OP(cpu_release)) {
3191 			struct scx_cpu_release_args args = {
3192 				.reason = preempt_reason_from_class(next_class),
3193 				.task = next,
3194 			};
3195 
3196 			SCX_CALL_OP(SCX_KF_CPU_RELEASE,
3197 				    cpu_release, cpu_of(rq), &args);
3198 		}
3199 		rq->scx.cpu_released = true;
3200 	}
3201 }
3202 
3203 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3204 			      struct task_struct *next)
3205 {
3206 	update_curr_scx(rq);
3207 
3208 	/* see dequeue_task_scx() on why we skip when !QUEUED */
3209 	if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3210 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
3211 
3212 	if (p->scx.flags & SCX_TASK_QUEUED) {
3213 		set_task_runnable(rq, p);
3214 
3215 		/*
3216 		 * If @p has slice left and is being put, @p is getting
3217 		 * preempted by a higher priority scheduler class or core-sched
3218 		 * forcing a different task. Leave it at the head of the local
3219 		 * DSQ.
3220 		 */
3221 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
3222 			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3223 			goto switch_class;
3224 		}
3225 
3226 		/*
3227 		 * If @p is runnable but we're about to enter a lower
3228 		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3229 		 * ops.enqueue() that @p is the only one available for this cpu,
3230 		 * which should trigger an explicit follow-up scheduling event.
3231 		 */
3232 		if (sched_class_above(&ext_sched_class, next->sched_class)) {
3233 			WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
3234 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3235 		} else {
3236 			do_enqueue_task(rq, p, 0, -1);
3237 		}
3238 	}
3239 
3240 switch_class:
3241 	if (next && next->sched_class != &ext_sched_class)
3242 		switch_class(rq, next);
3243 }
3244 
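/* peek at the first task queued on @rq's local DSQ without removing it */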
3245 static struct task_struct *first_local_task(struct rq *rq)
3246 {
3247 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
3248 					struct task_struct, scx.dsq_list.node);
3249 }
3250 
3251 static struct task_struct *pick_task_scx(struct rq *rq)
3252 {
3253 	struct task_struct *prev = rq->curr;
3254 	struct task_struct *p;
3255 	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3256 	bool kick_idle = false;
3257 
3258 	/*
3259 	 * WORKAROUND:
3260 	 *
3261 	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
3262 	 * have gone through balance_scx(). Unfortunately, there currently is a
3263 	 * bug where fair could say yes on balance() but no on pick_task(),
3264 	 * which then ends up calling pick_task_scx() without preceding
3265 	 * balance_scx().
3266 	 *
3267 	 * Keep running @prev if possible and avoid stalling from entering idle
3268 	 * without balancing.
3269 	 *
3270 	 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3271 	 * if pick_task_scx() is called without preceding balance_scx().
3272 	 */
3273 	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3274 		if (prev->scx.flags & SCX_TASK_QUEUED) {
3275 			keep_prev = true;
3276 		} else {
3277 			keep_prev = false;
3278 			kick_idle = true;
3279 		}
3280 	} else if (unlikely(keep_prev &&
3281 			    prev->sched_class != &ext_sched_class)) {
3282 		/*
3283 		 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
3284 		 * conditional on scx_enabled() and may have been skipped.
3285 		 */
3286 		WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
3287 		keep_prev = false;
3288 	}
3289 
3290 	/*
3291 	 * If balance_scx() is telling us to keep running @prev, replenish slice
3292 	 * if necessary and keep running @prev. Otherwise, pop the first one
3293 	 * from the local DSQ.
3294 	 */
3295 	if (keep_prev) {
3296 		p = prev;
3297 		if (!p->scx.slice) {
3298 			p->scx.slice = SCX_SLICE_DFL;
3299 			__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
3300 		}
3301 	} else {
3302 		p = first_local_task(rq);
3303 		if (!p) {
3304 			if (kick_idle)
3305 				scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3306 			return NULL;
3307 		}
3308 
3309 		if (unlikely(!p->scx.slice)) {
3310 			if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
3311 				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3312 						p->comm, p->pid, __func__);
3313 				scx_warned_zero_slice = true;
3314 			}
3315 			p->scx.slice = SCX_SLICE_DFL;
3316 			__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
3317 		}
3318 	}
3319 
3320 	return p;
3321 }
3322 
3323 #ifdef CONFIG_SCHED_CORE
3324 /**
3325  * scx_prio_less - Task ordering for core-sched
3326  * @a: task A
3327  * @b: task B
3328  * @in_fi: in forced idle state
3329  *
3330  * Core-sched is implemented as an additional scheduling layer on top of the
3331  * usual sched_class'es and needs to find out the expected task ordering. For
3332  * SCX, core-sched calls this function to interrogate the task ordering.
3333  *
3334  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3335  * to implement the default task ordering. The older the timestamp, the higher
3336  * priority the task - the global FIFO ordering matching the default scheduling
3337  * behavior.
3338  *
3339  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3340  * implement FIFO ordering within each local DSQ. See pick_task_scx().
3341  */
3342 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3343 		   bool in_fi)
3344 {
3345 	/*
3346 	 * The const qualifiers are dropped from task_struct pointers when
3347 	 * calling ops.core_sched_before(). Accesses are controlled by the
3348 	 * verifier.
3349 	 */
3350 	if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3351 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3352 					      (struct task_struct *)a,
3353 					      (struct task_struct *)b);
3354 	else
3355 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3356 }
3357 #endif	/* CONFIG_SCHED_CORE */
3358 
3359 #ifdef CONFIG_SMP
3360 
3361 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3362 {
3363 	bool rq_bypass;
3364 
3365 	/*
3366 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3367 	 * can be a good migration opportunity with low cache and memory
3368 	 * footprint. Returning a CPU different than @prev_cpu triggers
3369 	 * immediate rq migration. However, for SCX, as the current rq
3370 	 * association doesn't dictate where the task is going to run, this
3371 	 * doesn't fit well. If necessary, we can later add a dedicated method
3372 	 * which can decide to preempt self to force it through the regular
3373 	 * scheduling path.
3374 	 */
3375 	if (unlikely(wake_flags & WF_EXEC))
3376 		return prev_cpu;
3377 
3378 	rq_bypass = scx_rq_bypassing(task_rq(p));
3379 	if (SCX_HAS_OP(select_cpu) && !rq_bypass) {
3380 		s32 cpu;
3381 		struct task_struct **ddsp_taskp;
3382 
3383 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3384 		WARN_ON_ONCE(*ddsp_taskp);
3385 		*ddsp_taskp = p;
3386 
3387 		cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3388 					   select_cpu, p, prev_cpu, wake_flags);
3389 		p->scx.selected_cpu = cpu;
3390 		*ddsp_taskp = NULL;
3391 		if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3392 			return cpu;
3393 		else
3394 			return prev_cpu;
3395 	} else {
3396 		s32 cpu;
3397 
3398 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
3399 		if (cpu >= 0) {
3400 			p->scx.slice = SCX_SLICE_DFL;
3401 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3402 			__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
3403 		} else {
3404 			cpu = prev_cpu;
3405 		}
3406 		p->scx.selected_cpu = cpu;
3407 
3408 		if (rq_bypass)
3409 			__scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
3410 		return cpu;
3411 	}
3412 }
3413 
3414 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3415 {
3416 	run_deferred(rq);
3417 }
3418 
3419 static void set_cpus_allowed_scx(struct task_struct *p,
3420 				 struct affinity_context *ac)
3421 {
3422 	set_cpus_allowed_common(p, ac);
3423 
3424 	/*
3425 	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3426 	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3427 	 * scheduler the effective one.
3428 	 *
3429 	 * Fine-grained memory write control is enforced by BPF making the const
3430 	 * designation pointless. Cast it away when calling the operation.
3431 	 */
3432 	if (SCX_HAS_OP(set_cpumask))
3433 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3434 				 (struct cpumask *)p->cpus_ptr);
3435 }
3436 
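/*
 * CPU hotplug notification. Invoke ops.cpu_online()/ops.cpu_offline() if
 * implemented; otherwise, exit the BPF scheduler with
 * %SCX_ECODE_ACT_RESTART | %SCX_ECODE_RSN_HOTPLUG so that it can be reloaded
 * against the new CPU topology.
 */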
3437 static void handle_hotplug(struct rq *rq, bool online)
3438 {
3439 	int cpu = cpu_of(rq);
3440 
3441 	atomic_long_inc(&scx_hotplug_seq);
3442 
3443 	if (scx_enabled())
3444 		scx_idle_update_selcpu_topology(&scx_ops);
3445 
3446 	if (online && SCX_HAS_OP(cpu_online))
3447 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3448 	else if (!online && SCX_HAS_OP(cpu_offline))
3449 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3450 	else
3451 		scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3452 			     "cpu %d going %s, exiting scheduler", cpu,
3453 			     online ? "online" : "offline");
3454 }
3455 
3456 void scx_rq_activate(struct rq *rq)
3457 {
3458 	handle_hotplug(rq, true);
3459 }
3460 
3461 void scx_rq_deactivate(struct rq *rq)
3462 {
3463 	handle_hotplug(rq, false);
3464 }
3465 
3466 static void rq_online_scx(struct rq *rq)
3467 {
3468 	rq->scx.flags |= SCX_RQ_ONLINE;
3469 }
3470 
3471 static void rq_offline_scx(struct rq *rq)
3472 {
3473 	rq->scx.flags &= ~SCX_RQ_ONLINE;
3474 }
3475 
3476 #endif	/* CONFIG_SMP */
3477 
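/*
 * Return %true and abort the BPF scheduler with %SCX_EXIT_ERROR_STALL if any
 * task on @rq's runnable_list has stayed runnable for longer than
 * scx_watchdog_timeout without getting to run.
 */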
3478 static bool check_rq_for_timeouts(struct rq *rq)
3479 {
3480 	struct task_struct *p;
3481 	struct rq_flags rf;
3482 	bool timed_out = false;
3483 
3484 	rq_lock_irqsave(rq, &rf);
3485 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3486 		unsigned long last_runnable = p->scx.runnable_at;
3487 
3488 		if (unlikely(time_after(jiffies,
3489 					last_runnable + scx_watchdog_timeout))) {
3490 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3491 
3492 			scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3493 					   "%s[%d] failed to run for %u.%03us",
3494 					   p->comm, p->pid,
3495 					   dur_ms / 1000, dur_ms % 1000);
3496 			timed_out = true;
3497 			break;
3498 		}
3499 	}
3500 	rq_unlock_irqrestore(rq, &rf);
3501 
3502 	return timed_out;
3503 }
3504 
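/*
 * Watchdog work: note the check-in time, scan all online CPUs for stalled
 * runnable tasks and re-arm itself at half the watchdog timeout.
 */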
3505 static void scx_watchdog_workfn(struct work_struct *work)
3506 {
3507 	int cpu;
3508 
3509 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3510 
3511 	for_each_online_cpu(cpu) {
3512 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3513 			break;
3514 
3515 		cond_resched();
3516 	}
3517 	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3518 			   scx_watchdog_timeout / 2);
3519 }
3520 
3521 void scx_tick(struct rq *rq)
3522 {
3523 	unsigned long last_check;
3524 
3525 	if (!scx_enabled())
3526 		return;
3527 
3528 	last_check = READ_ONCE(scx_watchdog_timestamp);
3529 	if (unlikely(time_after(jiffies,
3530 				last_check + READ_ONCE(scx_watchdog_timeout)))) {
3531 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3532 
3533 		scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3534 				   "watchdog failed to check in for %u.%03us",
3535 				   dur_ms / 1000, dur_ms % 1000);
3536 	}
3537 
3538 	update_other_load_avgs(rq);
3539 }
3540 
3541 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3542 {
3543 	update_curr_scx(rq);
3544 
3545 	/*
3546 	 * While disabling, always resched and refresh core-sched timestamp as
3547 	 * we can't trust the slice management or ops.core_sched_before().
3548 	 */
3549 	if (scx_rq_bypassing(rq)) {
3550 		curr->scx.slice = 0;
3551 		touch_core_sched(rq, curr);
3552 	} else if (SCX_HAS_OP(tick)) {
3553 		SCX_CALL_OP_TASK(SCX_KF_REST, tick, curr);
3554 	}
3555 
3556 	if (!curr->scx.slice)
3557 		resched_curr(rq);
3558 }
3559 
3560 #ifdef CONFIG_EXT_GROUP_SCHED
3561 static struct cgroup *tg_cgrp(struct task_group *tg)
3562 {
3563 	/*
3564 	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3565 	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3566 	 * root cgroup.
3567 	 */
3568 	if (tg && tg->css.cgroup)
3569 		return tg->css.cgroup;
3570 	else
3571 		return &cgrp_dfl_root.cgrp;
3572 }
3573 
3574 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
3575 
3576 #else	/* CONFIG_EXT_GROUP_SCHED */
3577 
3578 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3579 
3580 #endif	/* CONFIG_EXT_GROUP_SCHED */
3581 
3582 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3583 {
3584 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3585 }
3586 
3587 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3588 {
3589 	enum scx_task_state prev_state = scx_get_task_state(p);
3590 	bool warn = false;
3591 
3592 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3593 
3594 	switch (state) {
3595 	case SCX_TASK_NONE:
3596 		break;
3597 	case SCX_TASK_INIT:
3598 		warn = prev_state != SCX_TASK_NONE;
3599 		break;
3600 	case SCX_TASK_READY:
3601 		warn = prev_state == SCX_TASK_NONE;
3602 		break;
3603 	case SCX_TASK_ENABLED:
3604 		warn = prev_state != SCX_TASK_READY;
3605 		break;
3606 	default:
3607 		warn = true;
3608 		return;
3609 	}
3610 
3611 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3612 		  prev_state, state, p->comm, p->pid);
3613 
3614 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
3615 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3616 }
3617 
3618 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3619 {
3620 	int ret;
3621 
3622 	p->scx.disallow = false;
3623 
3624 	if (SCX_HAS_OP(init_task)) {
3625 		struct scx_init_task_args args = {
3626 			SCX_INIT_TASK_ARGS_CGROUP(tg)
3627 			.fork = fork,
3628 		};
3629 
3630 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3631 		if (unlikely(ret)) {
3632 			ret = ops_sanitize_err("init_task", ret);
3633 			return ret;
3634 		}
3635 	}
3636 
3637 	scx_set_task_state(p, SCX_TASK_INIT);
3638 
3639 	if (p->scx.disallow) {
3640 		if (!fork) {
3641 			struct rq *rq;
3642 			struct rq_flags rf;
3643 
3644 			rq = task_rq_lock(p, &rf);
3645 
3646 			/*
3647 			 * We're in the load path and @p->policy will be applied
3648 			 * right after. Reverting @p->policy here and rejecting
3649 			 * %SCHED_EXT transitions from scx_check_setscheduler()
3650 			 * guarantees that if ops.init_task() sets @p->disallow,
3651 			 * @p can never be in SCX.
3652 			 */
3653 			if (p->policy == SCHED_EXT) {
3654 				p->policy = SCHED_NORMAL;
3655 				atomic_long_inc(&scx_nr_rejected);
3656 			}
3657 
3658 			task_rq_unlock(rq, p, &rf);
3659 		} else if (p->policy == SCHED_EXT) {
3660 			scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
3661 				      p->comm, p->pid);
3662 		}
3663 	}
3664 
3665 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3666 	return 0;
3667 }
3668 
3669 static void scx_ops_enable_task(struct task_struct *p)
3670 {
3671 	u32 weight;
3672 
3673 	lockdep_assert_rq_held(task_rq(p));
3674 
3675 	/*
3676 	 * Set the weight before calling ops.enable() so that the scheduler
3677 	 * doesn't see a stale value if they inspect the task struct.
3678 	 * doesn't see a stale value when it inspects the task struct.
3679 	if (task_has_idle_policy(p))
3680 		weight = WEIGHT_IDLEPRIO;
3681 	else
3682 		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3683 
3684 	p->scx.weight = sched_weight_to_cgroup(weight);
3685 
3686 	if (SCX_HAS_OP(enable))
3687 		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
3688 	scx_set_task_state(p, SCX_TASK_ENABLED);
3689 
3690 	if (SCX_HAS_OP(set_weight))
3691 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3692 }
3693 
3694 static void scx_ops_disable_task(struct task_struct *p)
3695 {
3696 	lockdep_assert_rq_held(task_rq(p));
3697 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3698 
3699 	if (SCX_HAS_OP(disable))
3700 		SCX_CALL_OP_TASK(SCX_KF_REST, disable, p);
3701 	scx_set_task_state(p, SCX_TASK_READY);
3702 }
3703 
3704 static void scx_ops_exit_task(struct task_struct *p)
3705 {
3706 	struct scx_exit_task_args args = {
3707 		.cancelled = false,
3708 	};
3709 
3710 	lockdep_assert_rq_held(task_rq(p));
3711 
3712 	switch (scx_get_task_state(p)) {
3713 	case SCX_TASK_NONE:
3714 		return;
3715 	case SCX_TASK_INIT:
3716 		args.cancelled = true;
3717 		break;
3718 	case SCX_TASK_READY:
3719 		break;
3720 	case SCX_TASK_ENABLED:
3721 		scx_ops_disable_task(p);
3722 		break;
3723 	default:
3724 		WARN_ON_ONCE(true);
3725 		return;
3726 	}
3727 
3728 	if (SCX_HAS_OP(exit_task))
3729 		SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args);
3730 	scx_set_task_state(p, SCX_TASK_NONE);
3731 }
3732 
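/* initialize the sched_ext part of a task's scheduling entity to its default, unqueued state */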
3733 void init_scx_entity(struct sched_ext_entity *scx)
3734 {
3735 	memset(scx, 0, sizeof(*scx));
3736 	INIT_LIST_HEAD(&scx->dsq_list.node);
3737 	RB_CLEAR_NODE(&scx->dsq_priq);
3738 	scx->sticky_cpu = -1;
3739 	scx->holding_cpu = -1;
3740 	INIT_LIST_HEAD(&scx->runnable_node);
3741 	scx->runnable_at = jiffies;
3742 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3743 	scx->slice = SCX_SLICE_DFL;
3744 }
3745 
3746 void scx_pre_fork(struct task_struct *p)
3747 {
3748 	/*
3749 	 * BPF scheduler enable/disable paths want to be able to iterate and
3750 	 * update all tasks which can become complex when racing forks. As
3751 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
3752 	 * exclude forks.
3753 	 */
3754 	percpu_down_read(&scx_fork_rwsem);
3755 }
3756 
3757 int scx_fork(struct task_struct *p)
3758 {
3759 	percpu_rwsem_assert_held(&scx_fork_rwsem);
3760 
3761 	if (scx_ops_init_task_enabled)
3762 		return scx_ops_init_task(p, task_group(p), true);
3763 	else
3764 		return 0;
3765 }
3766 
3767 void scx_post_fork(struct task_struct *p)
3768 {
3769 	if (scx_ops_init_task_enabled) {
3770 		scx_set_task_state(p, SCX_TASK_READY);
3771 
3772 		/*
3773 		 * Enable the task immediately if it's running on sched_ext.
3774 		 * Otherwise, it'll be enabled in switching_to_scx() if and
3775 		 * when it's ever configured to run with a SCHED_EXT policy.
3776 		 */
3777 		if (p->sched_class == &ext_sched_class) {
3778 			struct rq_flags rf;
3779 			struct rq *rq;
3780 
3781 			rq = task_rq_lock(p, &rf);
3782 			scx_ops_enable_task(p);
3783 			task_rq_unlock(rq, p, &rf);
3784 		}
3785 	}
3786 
3787 	spin_lock_irq(&scx_tasks_lock);
3788 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
3789 	spin_unlock_irq(&scx_tasks_lock);
3790 
3791 	percpu_up_read(&scx_fork_rwsem);
3792 }
3793 
3794 void scx_cancel_fork(struct task_struct *p)
3795 {
3796 	if (scx_enabled()) {
3797 		struct rq *rq;
3798 		struct rq_flags rf;
3799 
3800 		rq = task_rq_lock(p, &rf);
3801 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3802 		scx_ops_exit_task(p);
3803 		task_rq_unlock(rq, p, &rf);
3804 	}
3805 
3806 	percpu_up_read(&scx_fork_rwsem);
3807 }
3808 
3809 void sched_ext_free(struct task_struct *p)
3810 {
3811 	unsigned long flags;
3812 
3813 	spin_lock_irqsave(&scx_tasks_lock, flags);
3814 	list_del_init(&p->scx.tasks_node);
3815 	spin_unlock_irqrestore(&scx_tasks_lock, flags);
3816 
3817 	/*
3818 	 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
3819 	 * ENABLED transitions can't race us. Disable ops for @p.
3820 	 */
3821 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
3822 		struct rq_flags rf;
3823 		struct rq *rq;
3824 
3825 		rq = task_rq_lock(p, &rf);
3826 		scx_ops_exit_task(p);
3827 		task_rq_unlock(rq, p, &rf);
3828 	}
3829 }
3830 
3831 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3832 			      const struct load_weight *lw)
3833 {
3834 	lockdep_assert_rq_held(task_rq(p));
3835 
3836 	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3837 	if (SCX_HAS_OP(set_weight))
3838 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3839 }
3840 
3841 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
3842 {
3843 }
3844 
3845 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3846 {
3847 	scx_ops_enable_task(p);
3848 
3849 	/*
3850 	 * set_cpus_allowed_scx() is not called while @p is associated with a
3851 	 * different scheduler class. Keep the BPF scheduler up-to-date.
3852 	 */
3853 	if (SCX_HAS_OP(set_cpumask))
3854 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3855 				 (struct cpumask *)p->cpus_ptr);
3856 }
3857 
3858 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3859 {
3860 	scx_ops_disable_task(p);
3861 }
3862 
3863 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
3864 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3865 
3866 int scx_check_setscheduler(struct task_struct *p, int policy)
3867 {
3868 	lockdep_assert_rq_held(task_rq(p));
3869 
3870 	/* if disallow, reject transitioning into SCX */
3871 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3872 	    p->policy != policy && policy == SCHED_EXT)
3873 		return -EACCES;
3874 
3875 	return 0;
3876 }
3877 
3878 #ifdef CONFIG_NO_HZ_FULL
3879 bool scx_can_stop_tick(struct rq *rq)
3880 {
3881 	struct task_struct *p = rq->curr;
3882 
3883 	if (scx_rq_bypassing(rq))
3884 		return false;
3885 
3886 	if (p->sched_class != &ext_sched_class)
3887 		return true;
3888 
3889 	/*
3890 	 * @rq can dispatch from different DSQs, so we can't tell whether it
3891 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
3892 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
3893 	 */
3894 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
3895 }
3896 #endif
3897 
3898 #ifdef CONFIG_EXT_GROUP_SCHED
3899 
3900 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
3901 static bool scx_cgroup_enabled;
3902 static bool cgroup_warned_missing_weight;
3903 static bool cgroup_warned_missing_idle;
3904 
3905 static void scx_cgroup_warn_missing_weight(struct task_group *tg)
3906 {
3907 	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
3908 	    cgroup_warned_missing_weight)
3909 		return;
3910 
3911 	if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
3912 		return;
3913 
3914 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
3915 		scx_ops.name);
3916 	cgroup_warned_missing_weight = true;
3917 }
3918 
3919 static void scx_cgroup_warn_missing_idle(struct task_group *tg)
3920 {
3921 	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
3922 		return;
3923 
3924 	if (!tg->idle)
3925 		return;
3926 
3927 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
3928 		scx_ops.name);
3929 	cgroup_warned_missing_idle = true;
3930 }
3931 
3932 int scx_tg_online(struct task_group *tg)
3933 {
3934 	int ret = 0;
3935 
3936 	WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
3937 
3938 	percpu_down_read(&scx_cgroup_rwsem);
3939 
3940 	scx_cgroup_warn_missing_weight(tg);
3941 
3942 	if (scx_cgroup_enabled) {
3943 		if (SCX_HAS_OP(cgroup_init)) {
3944 			struct scx_cgroup_init_args args =
3945 				{ .weight = tg->scx_weight };
3946 
3947 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
3948 					      tg->css.cgroup, &args);
3949 			if (ret)
3950 				ret = ops_sanitize_err("cgroup_init", ret);
3951 		}
3952 		if (ret == 0)
3953 			tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
3954 	} else {
3955 		tg->scx_flags |= SCX_TG_ONLINE;
3956 	}
3957 
3958 	percpu_up_read(&scx_cgroup_rwsem);
3959 	return ret;
3960 }
3961 
3962 void scx_tg_offline(struct task_group *tg)
3963 {
3964 	WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
3965 
3966 	percpu_down_read(&scx_cgroup_rwsem);
3967 
3968 	if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
3969 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
3970 	tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
3971 
3972 	percpu_up_read(&scx_cgroup_rwsem);
3973 }
3974 
3975 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
3976 {
3977 	struct cgroup_subsys_state *css;
3978 	struct task_struct *p;
3979 	int ret;
3980 
3981 	/* released in scx_finish/cancel_attach() */
3982 	percpu_down_read(&scx_cgroup_rwsem);
3983 
3984 	if (!scx_cgroup_enabled)
3985 		return 0;
3986 
3987 	cgroup_taskset_for_each(p, css, tset) {
3988 		struct cgroup *from = tg_cgrp(task_group(p));
3989 		struct cgroup *to = tg_cgrp(css_tg(css));
3990 
3991 		WARN_ON_ONCE(p->scx.cgrp_moving_from);
3992 
3993 		/*
3994 		 * sched_move_task() omits identity migrations. Let's match the
3995 		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
3996 		 * always match one-to-one.
3997 		 */
3998 		if (from == to)
3999 			continue;
4000 
4001 		if (SCX_HAS_OP(cgroup_prep_move)) {
4002 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
4003 					      p, from, css->cgroup);
4004 			if (ret)
4005 				goto err;
4006 		}
4007 
4008 		p->scx.cgrp_moving_from = from;
4009 	}
4010 
4011 	return 0;
4012 
4013 err:
4014 	cgroup_taskset_for_each(p, css, tset) {
4015 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4016 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4017 				    p->scx.cgrp_moving_from, css->cgroup);
4018 		p->scx.cgrp_moving_from = NULL;
4019 	}
4020 
4021 	percpu_up_read(&scx_cgroup_rwsem);
4022 	return ops_sanitize_err("cgroup_prep_move", ret);
4023 }
4024 
4025 void scx_cgroup_move_task(struct task_struct *p)
4026 {
4027 	if (!scx_cgroup_enabled)
4028 		return;
4029 
4030 	/*
4031 	 * @p must have ops.cgroup_prep_move() called on it and thus
4032 	 * cgrp_moving_from set.
4033 	 */
4034 	if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4035 		SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
4036 			p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
4037 	p->scx.cgrp_moving_from = NULL;
4038 }
4039 
4040 void scx_cgroup_finish_attach(void)
4041 {
4042 	percpu_up_read(&scx_cgroup_rwsem);
4043 }
4044 
4045 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4046 {
4047 	struct cgroup_subsys_state *css;
4048 	struct task_struct *p;
4049 
4050 	if (!scx_cgroup_enabled)
4051 		goto out_unlock;
4052 
4053 	cgroup_taskset_for_each(p, css, tset) {
4054 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4055 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4056 				    p->scx.cgrp_moving_from, css->cgroup);
4057 		p->scx.cgrp_moving_from = NULL;
4058 	}
4059 out_unlock:
4060 	percpu_up_read(&scx_cgroup_rwsem);
4061 }
4062 
4063 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4064 {
4065 	percpu_down_read(&scx_cgroup_rwsem);
4066 
4067 	if (scx_cgroup_enabled && tg->scx_weight != weight) {
4068 		if (SCX_HAS_OP(cgroup_set_weight))
4069 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
4070 				    tg_cgrp(tg), weight);
4071 		tg->scx_weight = weight;
4072 	}
4073 
4074 	percpu_up_read(&scx_cgroup_rwsem);
4075 }
4076 
4077 void scx_group_set_idle(struct task_group *tg, bool idle)
4078 {
4079 	percpu_down_read(&scx_cgroup_rwsem);
4080 	scx_cgroup_warn_missing_idle(tg);
4081 	percpu_up_read(&scx_cgroup_rwsem);
4082 }
4083 
4084 static void scx_cgroup_lock(void)
4085 {
4086 	percpu_down_write(&scx_cgroup_rwsem);
4087 }
4088 
4089 static void scx_cgroup_unlock(void)
4090 {
4091 	percpu_up_write(&scx_cgroup_rwsem);
4092 }
4093 
4094 #else	/* CONFIG_EXT_GROUP_SCHED */
4095 
4096 static inline void scx_cgroup_lock(void) {}
4097 static inline void scx_cgroup_unlock(void) {}
4098 
4099 #endif	/* CONFIG_EXT_GROUP_SCHED */
4100 
4101 /*
4102  * Omitted operations:
4103  *
4104  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4105  *   isn't tied to the CPU at that point. Preemption is implemented by resetting
4106  *   the victim task's slice to 0 and triggering reschedule on the target CPU.
4107  *
4108  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4109  *
4110  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4111  *   their current sched_class. Call them directly from sched core instead.
4112  */
4113 DEFINE_SCHED_CLASS(ext) = {
4114 	.enqueue_task		= enqueue_task_scx,
4115 	.dequeue_task		= dequeue_task_scx,
4116 	.yield_task		= yield_task_scx,
4117 	.yield_to_task		= yield_to_task_scx,
4118 
4119 	.wakeup_preempt		= wakeup_preempt_scx,
4120 
4121 	.balance		= balance_scx,
4122 	.pick_task		= pick_task_scx,
4123 
4124 	.put_prev_task		= put_prev_task_scx,
4125 	.set_next_task		= set_next_task_scx,
4126 
4127 #ifdef CONFIG_SMP
4128 	.select_task_rq		= select_task_rq_scx,
4129 	.task_woken		= task_woken_scx,
4130 	.set_cpus_allowed	= set_cpus_allowed_scx,
4131 
4132 	.rq_online		= rq_online_scx,
4133 	.rq_offline		= rq_offline_scx,
4134 #endif
4135 
4136 	.task_tick		= task_tick_scx,
4137 
4138 	.switching_to		= switching_to_scx,
4139 	.switched_from		= switched_from_scx,
4140 	.switched_to		= switched_to_scx,
4141 	.reweight_task		= reweight_task_scx,
4142 	.prio_changed		= prio_changed_scx,
4143 
4144 	.update_curr		= update_curr_scx,
4145 
4146 #ifdef CONFIG_UCLAMP_TASK
4147 	.uclamp_enabled		= 1,
4148 #endif
4149 };
4150 
4151 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4152 {
4153 	memset(dsq, 0, sizeof(*dsq));
4154 
4155 	raw_spin_lock_init(&dsq->lock);
4156 	INIT_LIST_HEAD(&dsq->list);
4157 	dsq->id = dsq_id;
4158 }
4159 
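/*
 * Allocate a user DSQ with @dsq_id on NUMA @node and add it to dsq_hash.
 * Builtin DSQ IDs are rejected.
 */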
4160 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
4161 {
4162 	struct scx_dispatch_q *dsq;
4163 	int ret;
4164 
4165 	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
4166 		return ERR_PTR(-EINVAL);
4167 
4168 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4169 	if (!dsq)
4170 		return ERR_PTR(-ENOMEM);
4171 
4172 	init_dsq(dsq, dsq_id);
4173 
4174 	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
4175 				     dsq_hash_params);
4176 	if (ret) {
4177 		kfree(dsq);
4178 		return ERR_PTR(ret);
4179 	}
4180 	return dsq;
4181 }
4182 
4183 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4184 {
4185 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4186 	struct scx_dispatch_q *dsq, *tmp_dsq;
4187 
4188 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4189 		kfree_rcu(dsq, rcu);
4190 }
4191 
4192 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4193 
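/*
 * Remove the user DSQ matching @dsq_id from dsq_hash and schedule it for
 * freeing. Destroying a DSQ which still has tasks queued is an error.
 */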
4194 static void destroy_dsq(u64 dsq_id)
4195 {
4196 	struct scx_dispatch_q *dsq;
4197 	unsigned long flags;
4198 
4199 	rcu_read_lock();
4200 
4201 	dsq = find_user_dsq(dsq_id);
4202 	if (!dsq)
4203 		goto out_unlock_rcu;
4204 
4205 	raw_spin_lock_irqsave(&dsq->lock, flags);
4206 
4207 	if (dsq->nr) {
4208 		scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4209 			      dsq->id, dsq->nr);
4210 		goto out_unlock_dsq;
4211 	}
4212 
4213 	if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4214 		goto out_unlock_dsq;
4215 
4216 	/*
4217 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4218 	 * queueing more tasks. As this function can be called from anywhere,
4219 	 * freeing is bounced through an irq work to avoid nesting RCU
4220 	 * operations inside scheduler locks.
4221 	 */
4222 	dsq->id = SCX_DSQ_INVALID;
4223 	llist_add(&dsq->free_node, &dsqs_to_free);
4224 	irq_work_queue(&free_dsq_irq_work);
4225 
4226 out_unlock_dsq:
4227 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
4228 out_unlock_rcu:
4229 	rcu_read_unlock();
4230 }
4231 
4232 #ifdef CONFIG_EXT_GROUP_SCHED
4233 static void scx_cgroup_exit(void)
4234 {
4235 	struct cgroup_subsys_state *css;
4236 
4237 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4238 
4239 	scx_cgroup_enabled = false;
4240 
4241 	/*
4242 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4243 	 * cgroups and exit all the inited ones, all online cgroups are exited.
4244 	 */
4245 	rcu_read_lock();
4246 	css_for_each_descendant_post(css, &root_task_group.css) {
4247 		struct task_group *tg = css_tg(css);
4248 
4249 		if (!(tg->scx_flags & SCX_TG_INITED))
4250 			continue;
4251 		tg->scx_flags &= ~SCX_TG_INITED;
4252 
4253 		if (!scx_ops.cgroup_exit)
4254 			continue;
4255 
4256 		if (WARN_ON_ONCE(!css_tryget(css)))
4257 			continue;
4258 		rcu_read_unlock();
4259 
4260 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4261 
4262 		rcu_read_lock();
4263 		css_put(css);
4264 	}
4265 	rcu_read_unlock();
4266 }
4267 
4268 static int scx_cgroup_init(void)
4269 {
4270 	struct cgroup_subsys_state *css;
4271 	int ret;
4272 
4273 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4274 
4275 	cgroup_warned_missing_weight = false;
4276 	cgroup_warned_missing_idle = false;
4277 
4278 	/*
4279 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4280 	 * cgroups and init, all online cgroups are initialized.
4281 	 */
4282 	rcu_read_lock();
4283 	css_for_each_descendant_pre(css, &root_task_group.css) {
4284 		struct task_group *tg = css_tg(css);
4285 		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4286 
4287 		scx_cgroup_warn_missing_weight(tg);
4288 		scx_cgroup_warn_missing_idle(tg);
4289 
4290 		if ((tg->scx_flags &
4291 		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4292 			continue;
4293 
4294 		if (!scx_ops.cgroup_init) {
4295 			tg->scx_flags |= SCX_TG_INITED;
4296 			continue;
4297 		}
4298 
4299 		if (WARN_ON_ONCE(!css_tryget(css)))
4300 			continue;
4301 		rcu_read_unlock();
4302 
4303 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4304 				      css->cgroup, &args);
4305 		if (ret) {
4306 			css_put(css);
4307 			scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4308 			return ret;
4309 		}
4310 		tg->scx_flags |= SCX_TG_INITED;
4311 
4312 		rcu_read_lock();
4313 		css_put(css);
4314 	}
4315 	rcu_read_unlock();
4316 
4317 	WARN_ON_ONCE(scx_cgroup_enabled);
4318 	scx_cgroup_enabled = true;
4319 
4320 	return 0;
4321 }
4322 
4323 #else
4324 static void scx_cgroup_exit(void) {}
4325 static int scx_cgroup_init(void) { return 0; }
4326 #endif
4327 
4328 
4329 /********************************************************************************
4330  * Sysfs interface and ops enable/disable.
4331  */
4332 
4333 #define SCX_ATTR(_name)								\
4334 	static struct kobj_attribute scx_attr_##_name = {			\
4335 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
4336 		.show = scx_attr_##_name##_show,				\
4337 	}
4338 
4339 static ssize_t scx_attr_state_show(struct kobject *kobj,
4340 				   struct kobj_attribute *ka, char *buf)
4341 {
4342 	return sysfs_emit(buf, "%s\n",
4343 			  scx_ops_enable_state_str[scx_ops_enable_state()]);
4344 }
4345 SCX_ATTR(state);
4346 
4347 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4348 					struct kobj_attribute *ka, char *buf)
4349 {
4350 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4351 }
4352 SCX_ATTR(switch_all);
4353 
4354 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4355 					 struct kobj_attribute *ka, char *buf)
4356 {
4357 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4358 }
4359 SCX_ATTR(nr_rejected);
4360 
4361 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4362 					 struct kobj_attribute *ka, char *buf)
4363 {
4364 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4365 }
4366 SCX_ATTR(hotplug_seq);
4367 
4368 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4369 					struct kobj_attribute *ka, char *buf)
4370 {
4371 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4372 }
4373 SCX_ATTR(enable_seq);
4374 
4375 static struct attribute *scx_global_attrs[] = {
4376 	&scx_attr_state.attr,
4377 	&scx_attr_switch_all.attr,
4378 	&scx_attr_nr_rejected.attr,
4379 	&scx_attr_hotplug_seq.attr,
4380 	&scx_attr_enable_seq.attr,
4381 	NULL,
4382 };
4383 
4384 static const struct attribute_group scx_global_attr_group = {
4385 	.attrs = scx_global_attrs,
4386 };
4387 
4388 static void scx_kobj_release(struct kobject *kobj)
4389 {
4390 	kfree(kobj);
4391 }
4392 
4393 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4394 				 struct kobj_attribute *ka, char *buf)
4395 {
4396 	return sysfs_emit(buf, "%s\n", scx_ops.name);
4397 }
4398 SCX_ATTR(ops);
4399 
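/* append one "<event name> <count>" line for @kind to @buf at offset @at */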
4400 #define scx_attr_event_show(buf, at, events, kind) ({				\
4401 	sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind);		\
4402 })
4403 
4404 static ssize_t scx_attr_events_show(struct kobject *kobj,
4405 				    struct kobj_attribute *ka, char *buf)
4406 {
4407 	struct scx_event_stats events;
4408 	int at = 0;
4409 
4410 	scx_bpf_events(&events, sizeof(events));
4411 	at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
4412 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4413 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
4414 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
4415 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4416 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SLICE_DFL);
4417 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
4418 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
4419 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
4420 	return at;
4421 }
4422 SCX_ATTR(events);
4423 
4424 static struct attribute *scx_sched_attrs[] = {
4425 	&scx_attr_ops.attr,
4426 	&scx_attr_events.attr,
4427 	NULL,
4428 };
4429 ATTRIBUTE_GROUPS(scx_sched);
4430 
4431 static const struct kobj_type scx_ktype = {
4432 	.release = scx_kobj_release,
4433 	.sysfs_ops = &kobj_sysfs_ops,
4434 	.default_groups = scx_sched_groups,
4435 };
4436 
4437 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4438 {
4439 	return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4440 }
4441 
4442 static const struct kset_uevent_ops scx_uevent_ops = {
4443 	.uevent = scx_uevent,
4444 };
4445 
4446 /*
4447  * Used by sched_fork() and __setscheduler_prio() to pick the matching
4448  * sched_class. dl/rt are already handled.
4449  */
4450 bool task_should_scx(int policy)
4451 {
4452 	if (!scx_enabled() ||
4453 	    unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4454 		return false;
4455 	if (READ_ONCE(scx_switching_all))
4456 		return true;
4457 	return policy == SCHED_EXT;
4458 }
4459 
4460 /**
4461  * scx_softlockup - sched_ext softlockup handler
4462  * @dur_s: number of seconds of CPU stuck due to soft lockup
4463  *
4464  * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4465  * live-lock the system by making many CPUs target the same DSQ to the point
4466  * where soft-lockup detection triggers. This function is called from
4467  * soft-lockup watchdog when the triggering point is close and tries to unjam
4468  * the system by enabling the breather and aborting the BPF scheduler.
4469  */
4470 void scx_softlockup(u32 dur_s)
4471 {
4472 	switch (scx_ops_enable_state()) {
4473 	case SCX_OPS_ENABLING:
4474 	case SCX_OPS_ENABLED:
4475 		break;
4476 	default:
4477 		return;
4478 	}
4479 
4480 	/* allow only one instance, cleared at the end of scx_ops_bypass() */
4481 	if (test_and_set_bit(0, &scx_in_softlockup))
4482 		return;
4483 
4484 	printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4485 			smp_processor_id(), dur_s, scx_ops.name);
4486 
4487 	/*
4488 	 * Some CPUs may be trapped in the dispatch paths. Enable breather
4489 	 * immediately; otherwise, we might not even be able to get to
4490 	 * scx_ops_bypass().
4491 	 */
4492 	atomic_inc(&scx_ops_breather_depth);
4493 
4494 	scx_ops_error("soft lockup - CPU#%d stuck for %us",
4495 		      smp_processor_id(), dur_s);
4496 }
4497 
4498 static void scx_clear_softlockup(void)
4499 {
4500 	if (test_and_clear_bit(0, &scx_in_softlockup))
4501 		atomic_dec(&scx_ops_breather_depth);
4502 }
4503 
4504 /**
4505  * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4506  * @bypass: true for bypass, false for unbypass
4507  *
4508  * Bypassing guarantees that all runnable tasks make forward progress without
4509  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4510  * be held by tasks that the BPF scheduler is forgetting to run, which
4511  * unfortunately also excludes toggling the static branches.
4512  *
4513  * Let's work around by overriding a couple ops and modifying behaviors based on
4514  * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4515  * to force global FIFO scheduling.
4516  *
4517  * - ops.select_cpu() is ignored and the default select_cpu() is used.
4518  *
4519  * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4520  *   %SCX_OPS_ENQ_LAST is also ignored.
4521  *
4522  * - ops.dispatch() is ignored.
4523  *
4524  * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4525  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
4526  *   the tail of the queue with core_sched_at touched.
4527  *
4528  * - pick_next_task() suppresses zero slice warning.
4529  *
4530  * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4531  *   operations.
4532  *
4533  * - scx_prio_less() reverts to the default core_sched_at order.
4534  */
4535 static void scx_ops_bypass(bool bypass)
4536 {
4537 	static DEFINE_RAW_SPINLOCK(bypass_lock);
4538 	static unsigned long bypass_timestamp;
4539 
4540 	int cpu;
4541 	unsigned long flags;
4542 
4543 	raw_spin_lock_irqsave(&bypass_lock, flags);
4544 	if (bypass) {
4545 		scx_ops_bypass_depth++;
4546 		WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
4547 		if (scx_ops_bypass_depth != 1)
4548 			goto unlock;
4549 		bypass_timestamp = ktime_get_ns();
4550 		scx_add_event(SCX_EV_BYPASS_ACTIVATE, 1);
4551 	} else {
4552 		scx_ops_bypass_depth--;
4553 		WARN_ON_ONCE(scx_ops_bypass_depth < 0);
4554 		if (scx_ops_bypass_depth != 0)
4555 			goto unlock;
4556 		scx_add_event(SCX_EV_BYPASS_DURATION,
4557 			      ktime_get_ns() - bypass_timestamp);
4558 	}
4559 
4560 	atomic_inc(&scx_ops_breather_depth);
4561 
4562 	/*
4563 	 * No task property is changing. We just need to make sure all currently
4564 	 * queued tasks are re-queued according to the new scx_rq_bypassing()
4565 	 * state. As an optimization, walk each rq's runnable_list instead of
4566 	 * the scx_tasks list.
4567 	 *
4568 	 * This function can't trust the scheduler and thus can't use
4569 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
4570 	 */
4571 	for_each_possible_cpu(cpu) {
4572 		struct rq *rq = cpu_rq(cpu);
4573 		struct task_struct *p, *n;
4574 
4575 		raw_spin_rq_lock(rq);
4576 
4577 		if (bypass) {
4578 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4579 			rq->scx.flags |= SCX_RQ_BYPASSING;
4580 		} else {
4581 			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4582 			rq->scx.flags &= ~SCX_RQ_BYPASSING;
4583 		}
4584 
4585 		/*
4586 		 * We need to guarantee that no tasks are on the BPF scheduler
4587 		 * while bypassing. Either we see enabled or the enable path
4588 		 * sees scx_rq_bypassing() before moving tasks to SCX.
4589 		 */
4590 		if (!scx_enabled()) {
4591 			raw_spin_rq_unlock(rq);
4592 			continue;
4593 		}
4594 
4595 		/*
4596 		 * The use of list_for_each_entry_safe_reverse() is required
4597 		 * because each task is going to be removed from and added back
4598 		 * to the runnable_list during iteration. Because they're added
4599 		 * to the tail of the list, safe reverse iteration can still
4600 		 * visit all nodes.
4601 		 */
4602 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4603 						 scx.runnable_node) {
4604 			struct sched_enq_and_set_ctx ctx;
4605 
4606 			/* cycling deq/enq is enough, see the function comment */
4607 			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4608 			sched_enq_and_set_task(&ctx);
4609 		}
4610 
4611 		/* resched to restore ticks and idle state */
4612 		if (cpu_online(cpu) || cpu == smp_processor_id())
4613 			resched_curr(rq);
4614 
4615 		raw_spin_rq_unlock(rq);
4616 	}
4617 
4618 	atomic_dec(&scx_ops_breather_depth);
4619 unlock:
4620 	raw_spin_unlock_irqrestore(&bypass_lock, flags);
4621 	scx_clear_softlockup();
4622 }
4623 
4624 static void free_exit_info(struct scx_exit_info *ei)
4625 {
4626 	kfree(ei->dump);
4627 	kfree(ei->msg);
4628 	kfree(ei->bt);
4629 	kfree(ei);
4630 }
4631 
4632 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4633 {
4634 	struct scx_exit_info *ei;
4635 
4636 	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4637 	if (!ei)
4638 		return NULL;
4639 
4640 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4641 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4642 	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4643 
4644 	if (!ei->bt || !ei->msg || !ei->dump) {
4645 		free_exit_info(ei);
4646 		return NULL;
4647 	}
4648 
4649 	return ei;
4650 }
4651 
4652 static const char *scx_exit_reason(enum scx_exit_kind kind)
4653 {
4654 	switch (kind) {
4655 	case SCX_EXIT_UNREG:
4656 		return "unregistered from user space";
4657 	case SCX_EXIT_UNREG_BPF:
4658 		return "unregistered from BPF";
4659 	case SCX_EXIT_UNREG_KERN:
4660 		return "unregistered from the main kernel";
4661 	case SCX_EXIT_SYSRQ:
4662 		return "disabled by sysrq-S";
4663 	case SCX_EXIT_ERROR:
4664 		return "runtime error";
4665 	case SCX_EXIT_ERROR_BPF:
4666 		return "scx_bpf_error";
4667 	case SCX_EXIT_ERROR_STALL:
4668 		return "runnable task stall";
4669 	default:
4670 		return "<UNKNOWN>";
4671 	}
4672 }
4673 
4674 static void scx_ops_disable_workfn(struct kthread_work *work)
4675 {
4676 	struct scx_exit_info *ei = scx_exit_info;
4677 	struct scx_task_iter sti;
4678 	struct task_struct *p;
4679 	struct rhashtable_iter rht_iter;
4680 	struct scx_dispatch_q *dsq;
4681 	int i, kind, cpu;
4682 
4683 	kind = atomic_read(&scx_exit_kind);
4684 	while (true) {
4685 		/*
4686 		 * NONE indicates that a new scx_ops has been registered since
4687 		 * disable was scheduled - don't kill the new ops. DONE
4688 		 * indicates that the ops has already been disabled.
4689 		 */
4690 		if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
4691 			return;
4692 		if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
4693 			break;
4694 	}
4695 	ei->kind = kind;
4696 	ei->reason = scx_exit_reason(ei->kind);
4697 
4698 	/* guarantee forward progress by bypassing scx_ops */
4699 	scx_ops_bypass(true);
4700 
4701 	switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
4702 	case SCX_OPS_DISABLING:
4703 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4704 		break;
4705 	case SCX_OPS_DISABLED:
4706 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
4707 			scx_exit_info->msg);
4708 		WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4709 			     SCX_OPS_DISABLING);
4710 		goto done;
4711 	default:
4712 		break;
4713 	}
4714 
4715 	/*
4716 	 * Here, every runnable task is guaranteed to make forward progress and
4717 	 * we can safely use blocking synchronization constructs. Actually
4718 	 * disable ops.
4719 	 */
4720 	mutex_lock(&scx_ops_enable_mutex);
4721 
4722 	static_branch_disable(&__scx_switched_all);
4723 	WRITE_ONCE(scx_switching_all, false);
4724 
4725 	/*
4726 	 * Shut down cgroup support before tasks so that the cgroup attach path
4727 	 * doesn't race against scx_ops_exit_task().
4728 	 */
4729 	scx_cgroup_lock();
4730 	scx_cgroup_exit();
4731 	scx_cgroup_unlock();
4732 
4733 	/*
4734 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4735 	 * must be switched out and exited synchronously.
4736 	 */
4737 	percpu_down_write(&scx_fork_rwsem);
4738 
4739 	scx_ops_init_task_enabled = false;
4740 
4741 	scx_task_iter_start(&sti);
4742 	while ((p = scx_task_iter_next_locked(&sti))) {
4743 		const struct sched_class *old_class = p->sched_class;
4744 		const struct sched_class *new_class =
4745 			__setscheduler_class(p->policy, p->prio);
4746 		struct sched_enq_and_set_ctx ctx;
4747 
4748 		if (old_class != new_class && p->se.sched_delayed)
4749 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
4750 
4751 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4752 
4753 		p->sched_class = new_class;
4754 		check_class_changing(task_rq(p), p, old_class);
4755 
4756 		sched_enq_and_set_task(&ctx);
4757 
4758 		check_class_changed(task_rq(p), p, old_class, p->prio);
4759 		scx_ops_exit_task(p);
4760 	}
4761 	scx_task_iter_stop(&sti);
4762 	percpu_up_write(&scx_fork_rwsem);
4763 
4764 	/*
4765 	 * Invalidate all the rq clocks to prevent getting outdated
4766 	 * rq clocks from a previous scx scheduler.
4767 	 */
4768 	for_each_possible_cpu(cpu) {
4769 		struct rq *rq = cpu_rq(cpu);
4770 		scx_rq_clock_invalidate(rq);
4771 	}
4772 
4773 	/* no task is on scx, turn off all the switches and flush in-progress calls */
4774 	static_branch_disable(&__scx_ops_enabled);
4775 	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
4776 		static_branch_disable(&scx_has_op[i]);
4777 	static_branch_disable(&scx_ops_allow_queued_wakeup);
4778 	static_branch_disable(&scx_ops_enq_last);
4779 	static_branch_disable(&scx_ops_enq_exiting);
4780 	static_branch_disable(&scx_ops_enq_migration_disabled);
4781 	static_branch_disable(&scx_ops_cpu_preempt);
4782 	scx_idle_disable();
4783 	synchronize_rcu();
4784 
4785 	if (ei->kind >= SCX_EXIT_ERROR) {
4786 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4787 		       scx_ops.name, ei->reason);
4788 
4789 		if (ei->msg[0] != '\0')
4790 			pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
4791 #ifdef CONFIG_STACKTRACE
4792 		stack_trace_print(ei->bt, ei->bt_len, 2);
4793 #endif
4794 	} else {
4795 		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4796 			scx_ops.name, ei->reason);
4797 	}
4798 
4799 	if (scx_ops.exit)
4800 		SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
4801 
4802 	cancel_delayed_work_sync(&scx_watchdog_work);
4803 
4804 	/*
4805 	 * Delete the kobject from the hierarchy eagerly in addition to just
4806 	 * dropping a reference. Otherwise, if the object is deleted
4807 	 * asynchronously, sysfs could observe an object of the same name still
4808 	 * in the hierarchy when another scheduler is loaded.
4809 	 */
4810 	kobject_del(scx_root_kobj);
4811 	kobject_put(scx_root_kobj);
4812 	scx_root_kobj = NULL;
4813 
4814 	memset(&scx_ops, 0, sizeof(scx_ops));
4815 
4816 	rhashtable_walk_enter(&dsq_hash, &rht_iter);
4817 	do {
4818 		rhashtable_walk_start(&rht_iter);
4819 
4820 		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
4821 			destroy_dsq(dsq->id);
4822 
4823 		rhashtable_walk_stop(&rht_iter);
4824 	} while (dsq == ERR_PTR(-EAGAIN));
4825 	rhashtable_walk_exit(&rht_iter);
4826 
4827 	free_percpu(scx_dsp_ctx);
4828 	scx_dsp_ctx = NULL;
4829 	scx_dsp_max_batch = 0;
4830 
4831 	free_exit_info(scx_exit_info);
4832 	scx_exit_info = NULL;
4833 
4834 	mutex_unlock(&scx_ops_enable_mutex);
4835 
4836 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4837 		     SCX_OPS_DISABLING);
4838 done:
4839 	scx_ops_bypass(false);
4840 }
4841 
4842 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
4843 
4844 static void schedule_scx_ops_disable_work(void)
4845 {
4846 	struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
4847 
4848 	/*
4849 	 * We may be called spuriously before the first bpf_sched_ext_reg(). If
4850 	 * scx_ops_helper isn't set up yet, there's nothing to do.
4851 	 */
4852 	if (helper)
4853 		kthread_queue_work(helper, &scx_ops_disable_work);
4854 }
4855 
4856 static void scx_ops_disable(enum scx_exit_kind kind)
4857 {
4858 	int none = SCX_EXIT_NONE;
4859 
4860 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
4861 		kind = SCX_EXIT_ERROR;
4862 
4863 	atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
4864 
4865 	schedule_scx_ops_disable_work();
4866 }
4867 
4868 static void dump_newline(struct seq_buf *s)
4869 {
4870 	trace_sched_ext_dump("");
4871 
4872 	/* @s may be zero sized and seq_buf triggers WARN if so */
4873 	if (s->size)
4874 		seq_buf_putc(s, '\n');
4875 }
4876 
4877 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
4878 {
4879 	va_list args;
4880 
4881 #ifdef CONFIG_TRACEPOINTS
4882 	if (trace_sched_ext_dump_enabled()) {
4883 		/* protected by scx_dump_state()::dump_lock */
4884 		static char line_buf[SCX_EXIT_MSG_LEN];
4885 
4886 		va_start(args, fmt);
4887 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
4888 		va_end(args);
4889 
4890 		trace_sched_ext_dump(line_buf);
4891 	}
4892 #endif
4893 	/* @s may be zero sized and seq_buf triggers WARN if so */
4894 	if (s->size) {
4895 		va_start(args, fmt);
4896 		seq_buf_vprintf(s, fmt, args);
4897 		va_end(args);
4898 
4899 		seq_buf_putc(s, '\n');
4900 	}
4901 }
4902 
4903 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
4904 			     const unsigned long *bt, unsigned int len)
4905 {
4906 	unsigned int i;
4907 
4908 	for (i = 0; i < len; i++)
4909 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
4910 }
4911 
4912 static void ops_dump_init(struct seq_buf *s, const char *prefix)
4913 {
4914 	struct scx_dump_data *dd = &scx_dump_data;
4915 
4916 	lockdep_assert_irqs_disabled();
4917 
4918 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
4919 	dd->first = true;
4920 	dd->cursor = 0;
4921 	dd->s = s;
4922 	dd->prefix = prefix;
4923 }
4924 
4925 static void ops_dump_flush(void)
4926 {
4927 	struct scx_dump_data *dd = &scx_dump_data;
4928 	char *line = dd->buf.line;
4929 
4930 	if (!dd->cursor)
4931 		return;
4932 
4933 	/*
4934 	 * There's something to flush and this is the first line. Insert a blank
4935 	 * line to distinguish ops dump.
4936 	 */
4937 	if (dd->first) {
4938 		dump_newline(dd->s);
4939 		dd->first = false;
4940 	}
4941 
4942 	/*
4943 	 * There may be multiple lines in $line. Scan and emit each line
4944 	 * separately.
4945 	 */
4946 	while (true) {
4947 		char *end = line;
4948 		char c;
4949 
4950 		while (*end != '\n' && *end != '\0')
4951 			end++;
4952 
4953 		/*
4954 		 * If $line overflowed, it may not have a newline at the end.
4955 		 * Always emit with a newline.
4956 		 */
4957 		c = *end;
4958 		*end = '\0';
4959 		dump_line(dd->s, "%s%s", dd->prefix, line);
4960 		if (c == '\0')
4961 			break;
4962 
4963 		/* move to the next line */
4964 		end++;
4965 		if (*end == '\0')
4966 			break;
4967 		line = end;
4968 	}
4969 
4970 	dd->cursor = 0;
4971 }
4972 
4973 static void ops_dump_exit(void)
4974 {
4975 	ops_dump_flush();
4976 	scx_dump_data.cpu = -1;
4977 }
4978 
4979 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
4980 			  struct task_struct *p, char marker)
4981 {
4982 	static unsigned long bt[SCX_EXIT_BT_LEN];
4983 	char dsq_id_buf[19] = "(n/a)";
4984 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
4985 	unsigned int bt_len = 0;
4986 
4987 	if (p->scx.dsq)
4988 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
4989 			  (unsigned long long)p->scx.dsq->id);
4990 
4991 	dump_newline(s);
4992 	dump_line(s, " %c%c %s[%d] %+ldms",
4993 		  marker, task_state_to_char(p), p->comm, p->pid,
4994 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
4995 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
4996 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
4997 		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
4998 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
4999 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s",
5000 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
5001 	dump_line(s, "      dsq_vtime=%llu slice=%llu weight=%u",
5002 		  p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
5003 	dump_line(s, "      cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5004 
5005 	if (SCX_HAS_OP(dump_task)) {
5006 		ops_dump_init(s, "    ");
5007 		SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
5008 		ops_dump_exit();
5009 	}
5010 
5011 #ifdef CONFIG_STACKTRACE
5012 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5013 #endif
5014 	if (bt_len) {
5015 		dump_newline(s);
5016 		dump_stack_trace(s, "    ", bt, bt_len);
5017 	}
5018 }
5019 
5020 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5021 {
5022 	static DEFINE_SPINLOCK(dump_lock);
5023 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5024 	struct scx_dump_ctx dctx = {
5025 		.kind = ei->kind,
5026 		.exit_code = ei->exit_code,
5027 		.reason = ei->reason,
5028 		.at_ns = ktime_get_ns(),
5029 		.at_jiffies = jiffies,
5030 	};
5031 	struct seq_buf s;
5032 	struct scx_event_stats events;
5033 	unsigned long flags;
5034 	char *buf;
5035 	int cpu;
5036 
5037 	spin_lock_irqsave(&dump_lock, flags);
5038 
5039 	seq_buf_init(&s, ei->dump, dump_len);
5040 
5041 	if (ei->kind == SCX_EXIT_NONE) {
5042 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
5043 	} else {
5044 		dump_line(&s, "%s[%d] triggered exit kind %d:",
5045 			  current->comm, current->pid, ei->kind);
5046 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
5047 		dump_newline(&s);
5048 		dump_line(&s, "Backtrace:");
5049 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
5050 	}
5051 
5052 	if (SCX_HAS_OP(dump)) {
5053 		ops_dump_init(&s, "");
5054 		SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
5055 		ops_dump_exit();
5056 	}
5057 
5058 	dump_newline(&s);
5059 	dump_line(&s, "CPU states");
5060 	dump_line(&s, "----------");
5061 
5062 	for_each_possible_cpu(cpu) {
5063 		struct rq *rq = cpu_rq(cpu);
5064 		struct rq_flags rf;
5065 		struct task_struct *p;
5066 		struct seq_buf ns;
5067 		size_t avail, used;
5068 		bool idle;
5069 
5070 		rq_lock(rq, &rf);
5071 
5072 		idle = list_empty(&rq->scx.runnable_list) &&
5073 			rq->curr->sched_class == &idle_sched_class;
5074 
5075 		if (idle && !SCX_HAS_OP(dump_cpu))
5076 			goto next;
5077 
5078 		/*
5079 		 * We don't yet know whether ops.dump_cpu() will produce output
5080 		 * and we may want to skip the default CPU dump if it doesn't.
5081 		 * Use a nested seq_buf to generate the standard dump so that we
5082 		 * can decide whether to commit later.
5083 		 */
5084 		avail = seq_buf_get_buf(&s, &buf);
5085 		seq_buf_init(&ns, buf, avail);
5086 
5087 		dump_newline(&ns);
5088 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5089 			  cpu, rq->scx.nr_running, rq->scx.flags,
5090 			  rq->scx.cpu_released, rq->scx.ops_qseq,
5091 			  rq->scx.pnt_seq);
5092 		dump_line(&ns, "          curr=%s[%d] class=%ps",
5093 			  rq->curr->comm, rq->curr->pid,
5094 			  rq->curr->sched_class);
5095 		if (!cpumask_empty(rq->scx.cpus_to_kick))
5096 			dump_line(&ns, "  cpus_to_kick   : %*pb",
5097 				  cpumask_pr_args(rq->scx.cpus_to_kick));
5098 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5099 			dump_line(&ns, "  idle_to_kick   : %*pb",
5100 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5101 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
5102 			dump_line(&ns, "  cpus_to_preempt: %*pb",
5103 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
5104 		if (!cpumask_empty(rq->scx.cpus_to_wait))
5105 			dump_line(&ns, "  cpus_to_wait   : %*pb",
5106 				  cpumask_pr_args(rq->scx.cpus_to_wait));
5107 
5108 		used = seq_buf_used(&ns);
5109 		if (SCX_HAS_OP(dump_cpu)) {
5110 			ops_dump_init(&ns, "  ");
5111 			SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
5112 			ops_dump_exit();
5113 		}
5114 
5115 		/*
5116 		 * If idle && nothing generated by ops.dump_cpu(), there's
5117 		 * nothing interesting. Skip.
5118 		 */
5119 		if (idle && used == seq_buf_used(&ns))
5120 			goto next;
5121 
5122 		/*
5123 		 * $s may already have overflowed when $ns was created. If so,
5124 		 * calling commit on it will trigger BUG.
5125 		 */
5126 		if (avail) {
5127 			seq_buf_commit(&s, seq_buf_used(&ns));
5128 			if (seq_buf_has_overflowed(&ns))
5129 				seq_buf_set_overflow(&s);
5130 		}
5131 
5132 		if (rq->curr->sched_class == &ext_sched_class)
5133 			scx_dump_task(&s, &dctx, rq->curr, '*');
5134 
5135 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5136 			scx_dump_task(&s, &dctx, p, ' ');
5137 	next:
5138 		rq_unlock(rq, &rf);
5139 	}
5140 
5141 	dump_newline(&s);
5142 	dump_line(&s, "Event counters");
5143 	dump_line(&s, "--------------");
5144 
5145 	scx_bpf_events(&events, sizeof(events));
5146 	scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
5147 	scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
5148 	scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
5149 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
5150 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
5151 	scx_dump_event(s, &events, SCX_EV_ENQ_SLICE_DFL);
5152 	scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
5153 	scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
5154 	scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
5155 
5156 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5157 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5158 		       trunc_marker, sizeof(trunc_marker));
5159 
5160 	spin_unlock_irqrestore(&dump_lock, flags);
5161 }
5162 
5163 static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
5164 {
5165 	struct scx_exit_info *ei = scx_exit_info;
5166 
5167 	if (ei->kind >= SCX_EXIT_ERROR)
5168 		scx_dump_state(ei, scx_ops.exit_dump_len);
5169 
5170 	schedule_scx_ops_disable_work();
5171 }
5172 
5173 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
5174 
5175 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
5176 					     s64 exit_code,
5177 					     const char *fmt, ...)
5178 {
5179 	struct scx_exit_info *ei = scx_exit_info;
5180 	int none = SCX_EXIT_NONE;
5181 	va_list args;
5182 
5183 	if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
5184 		return;
5185 
5186 	ei->exit_code = exit_code;
5187 #ifdef CONFIG_STACKTRACE
5188 	if (kind >= SCX_EXIT_ERROR)
5189 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5190 #endif
5191 	va_start(args, fmt);
5192 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5193 	va_end(args);
5194 
5195 	/*
5196 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5197 	 * in scx_ops_disable_workfn().
5198 	 */
5199 	ei->kind = kind;
5200 	ei->reason = scx_exit_reason(ei->kind);
5201 
5202 	irq_work_queue(&scx_ops_error_irq_work);
5203 }
5204 
5205 static struct kthread_worker *scx_create_rt_helper(const char *name)
5206 {
5207 	struct kthread_worker *helper;
5208 
5209 	helper = kthread_run_worker(0, name);
5210 	if (helper)
5211 		sched_set_fifo(helper->task);
5212 	return helper;
5213 }
5214 
5215 static void check_hotplug_seq(const struct sched_ext_ops *ops)
5216 {
5217 	unsigned long long global_hotplug_seq;
5218 
5219 	/*
5220 	 * If a hotplug event has occurred between when a scheduler was
5221 	 * initialized and when we were able to attach, exit and notify user
5222 	 * space about it.
5223 	 */
5224 	if (ops->hotplug_seq) {
5225 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5226 		if (ops->hotplug_seq != global_hotplug_seq) {
5227 			scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5228 				     "expected hotplug seq %llu did not match actual %llu",
5229 				     ops->hotplug_seq, global_hotplug_seq);
5230 		}
5231 	}
5232 }
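/*
 * Illustrative sketch, not part of this file: how a user-space loader might
 * cooperate with the check above. Before loading, it samples the current
 * hotplug sequence number and stores it in the ops; if the scheduler is later
 * torn down with %SCX_ECODE_ACT_RESTART | %SCX_ECODE_RSN_HOTPLUG (as done on a
 * mismatch above), the loader simply reloads. The sysfs path and helper names
 * below are assumptions of the sketch.
 *
 *	u64 seq = read_u64_file("/sys/kernel/sched_ext/hotplug_seq");
 *
 *	skel->struct_ops.example_ops->hotplug_seq = seq;
 *	load_and_attach(skel);
 *	...
 *	if (exit_code & SCX_ECODE_ACT_RESTART)
 *		goto restart;
 */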
5233 
5234 static int validate_ops(const struct sched_ext_ops *ops)
5235 {
5236 	/*
5237 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5238 	 * ops.enqueue() callback isn't implemented.
5239 	 */
5240 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5241 		scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5242 		return -EINVAL;
5243 	}
5244 
5245 	/*
5246 	 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
5247 	 * selection policy to be enabled.
5248 	 */
5249 	if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
5250 	    (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
5251 		scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
5252 		return -EINVAL;
5253 	}
5254 
5255 	return 0;
5256 }
5257 
5258 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5259 {
5260 	struct scx_task_iter sti;
5261 	struct task_struct *p;
5262 	unsigned long timeout;
5263 	int i, cpu, node, ret;
5264 
5265 	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5266 			   cpu_possible_mask)) {
5267 		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5268 		return -EINVAL;
5269 	}
5270 
5271 	mutex_lock(&scx_ops_enable_mutex);
5272 
5273 	/*
5274 	 * Clear event counters so a new scx scheduler gets
5275 	 * fresh event counter values.
5276 	 */
5277 	for_each_possible_cpu(cpu) {
5278 		struct scx_event_stats *e = per_cpu_ptr(&event_stats_cpu, cpu);
5279 		memset(e, 0, sizeof(*e));
5280 	}
5281 
5282 	if (!scx_ops_helper) {
5283 		WRITE_ONCE(scx_ops_helper,
5284 			   scx_create_rt_helper("sched_ext_ops_helper"));
5285 		if (!scx_ops_helper) {
5286 			ret = -ENOMEM;
5287 			goto err_unlock;
5288 		}
5289 	}
5290 
5291 	if (!global_dsqs) {
5292 		struct scx_dispatch_q **dsqs;
5293 
5294 		dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
5295 		if (!dsqs) {
5296 			ret = -ENOMEM;
5297 			goto err_unlock;
5298 		}
5299 
5300 		for_each_node_state(node, N_POSSIBLE) {
5301 			struct scx_dispatch_q *dsq;
5302 
5303 			dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5304 			if (!dsq) {
5305 				for_each_node_state(node, N_POSSIBLE)
5306 					kfree(dsqs[node]);
5307 				kfree(dsqs);
5308 				ret = -ENOMEM;
5309 				goto err_unlock;
5310 			}
5311 
5312 			init_dsq(dsq, SCX_DSQ_GLOBAL);
5313 			dsqs[node] = dsq;
5314 		}
5315 
5316 		global_dsqs = dsqs;
5317 	}
5318 
5319 	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5320 		ret = -EBUSY;
5321 		goto err_unlock;
5322 	}
5323 
5324 	scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5325 	if (!scx_root_kobj) {
5326 		ret = -ENOMEM;
5327 		goto err_unlock;
5328 	}
5329 
5330 	scx_root_kobj->kset = scx_kset;
5331 	ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5332 	if (ret < 0)
5333 		goto err;
5334 
5335 	scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5336 	if (!scx_exit_info) {
5337 		ret = -ENOMEM;
5338 		goto err_del;
5339 	}
5340 
5341 	/*
5342 	 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5343 	 * disable path. Failure triggers full disabling from here on.
5344 	 */
5345 	scx_ops = *ops;
5346 
5347 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5348 		     SCX_OPS_DISABLED);
5349 
5350 	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5351 	scx_warned_zero_slice = false;
5352 
5353 	atomic_long_set(&scx_nr_rejected, 0);
5354 
5355 	for_each_possible_cpu(cpu)
5356 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5357 
5358 	/*
5359 	 * Keep CPUs stable during enable so that the BPF scheduler can track
5360 	 * online CPUs by watching ->on/offline_cpu() after ->init().
5361 	 */
5362 	cpus_read_lock();
5363 
5364 	if (scx_ops.init) {
5365 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5366 		if (ret) {
5367 			ret = ops_sanitize_err("init", ret);
5368 			cpus_read_unlock();
5369 			scx_ops_error("ops.init() failed (%d)", ret);
5370 			goto err_disable;
5371 		}
5372 	}
5373 
5374 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5375 		if (((void (**)(void))ops)[i])
5376 			static_branch_enable_cpuslocked(&scx_has_op[i]);
5377 
5378 	check_hotplug_seq(ops);
5379 	scx_idle_update_selcpu_topology(ops);
5380 
5381 	cpus_read_unlock();
5382 
5383 	ret = validate_ops(ops);
5384 	if (ret)
5385 		goto err_disable;
5386 
5387 	WARN_ON_ONCE(scx_dsp_ctx);
5388 	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5389 	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5390 						   scx_dsp_max_batch),
5391 				     __alignof__(struct scx_dsp_ctx));
5392 	if (!scx_dsp_ctx) {
5393 		ret = -ENOMEM;
5394 		goto err_disable;
5395 	}
5396 
5397 	if (ops->timeout_ms)
5398 		timeout = msecs_to_jiffies(ops->timeout_ms);
5399 	else
5400 		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5401 
5402 	WRITE_ONCE(scx_watchdog_timeout, timeout);
5403 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5404 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5405 			   scx_watchdog_timeout / 2);
5406 
5407 	/*
5408 	 * Once __scx_ops_enabled is set, %current can be switched to SCX
5409 	 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5410 	 * userspace scheduling) may not function correctly before all tasks are
5411 	 * switched. Init in bypass mode to guarantee forward progress.
5412 	 */
5413 	scx_ops_bypass(true);
5414 
5415 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5416 		if (((void (**)(void))ops)[i])
5417 			static_branch_enable(&scx_has_op[i]);
5418 
5419 	if (ops->flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
5420 		static_branch_enable(&scx_ops_allow_queued_wakeup);
5421 	if (ops->flags & SCX_OPS_ENQ_LAST)
5422 		static_branch_enable(&scx_ops_enq_last);
5423 	if (ops->flags & SCX_OPS_ENQ_EXITING)
5424 		static_branch_enable(&scx_ops_enq_exiting);
5425 	if (ops->flags & SCX_OPS_ENQ_MIGRATION_DISABLED)
5426 		static_branch_enable(&scx_ops_enq_migration_disabled);
5427 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5428 		static_branch_enable(&scx_ops_cpu_preempt);
5429 
5430 	scx_idle_enable(ops);
5431 
5432 	/*
5433 	 * Lock out forks, cgroup on/offlining and moves before opening the
5434 	 * floodgate so that they don't wander into the operations prematurely.
5435 	 */
5436 	percpu_down_write(&scx_fork_rwsem);
5437 
5438 	WARN_ON_ONCE(scx_ops_init_task_enabled);
5439 	scx_ops_init_task_enabled = true;
5440 
5441 	/*
5442 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5443 	 * preventing new tasks from being added. No need to exclude tasks
5444 	 * leaving as sched_ext_free() can handle both prepped and enabled
5445 	 * tasks. Prep all tasks first and then enable them with preemption
5446 	 * disabled.
5447 	 *
5448 	 * All cgroups should be initialized before scx_ops_init_task() so that
5449 	 * the BPF scheduler can reliably track each task's cgroup membership
5450 	 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5451 	 * migrations while tasks are being initialized so that
5452 	 * scx_cgroup_can_attach() never sees uninitialized tasks.
5453 	 */
5454 	scx_cgroup_lock();
5455 	ret = scx_cgroup_init();
5456 	if (ret)
5457 		goto err_disable_unlock_all;
5458 
5459 	scx_task_iter_start(&sti);
5460 	while ((p = scx_task_iter_next_locked(&sti))) {
5461 		/*
5462 		 * @p may already be dead, have lost all its usage counts and
5463 		 * be waiting for RCU grace period before being freed. @p can't
5464 		 * be initialized for SCX in such cases and should be ignored.
5465 		 */
5466 		if (!tryget_task_struct(p))
5467 			continue;
5468 
5469 		scx_task_iter_unlock(&sti);
5470 
5471 		ret = scx_ops_init_task(p, task_group(p), false);
5472 		if (ret) {
5473 			put_task_struct(p);
5474 			scx_task_iter_relock(&sti);
5475 			scx_task_iter_stop(&sti);
5476 			scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5477 				      ret, p->comm, p->pid);
5478 			goto err_disable_unlock_all;
5479 		}
5480 
5481 		scx_set_task_state(p, SCX_TASK_READY);
5482 
5483 		put_task_struct(p);
5484 		scx_task_iter_relock(&sti);
5485 	}
5486 	scx_task_iter_stop(&sti);
5487 	scx_cgroup_unlock();
5488 	percpu_up_write(&scx_fork_rwsem);
5489 
5490 	/*
5491 	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5492 	 * all eligible tasks.
5493 	 */
5494 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5495 	static_branch_enable(&__scx_ops_enabled);
5496 
5497 	/*
5498 	 * We're fully committed and can't fail. The task READY -> ENABLED
5499 	 * transitions here are synchronized against sched_ext_free() through
5500 	 * scx_tasks_lock.
5501 	 */
5502 	percpu_down_write(&scx_fork_rwsem);
5503 	scx_task_iter_start(&sti);
5504 	while ((p = scx_task_iter_next_locked(&sti))) {
5505 		const struct sched_class *old_class = p->sched_class;
5506 		const struct sched_class *new_class =
5507 			__setscheduler_class(p->policy, p->prio);
5508 		struct sched_enq_and_set_ctx ctx;
5509 
5510 		if (old_class != new_class && p->se.sched_delayed)
5511 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5512 
5513 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5514 
5515 		p->scx.slice = SCX_SLICE_DFL;
5516 		p->sched_class = new_class;
5517 		check_class_changing(task_rq(p), p, old_class);
5518 
5519 		sched_enq_and_set_task(&ctx);
5520 
5521 		check_class_changed(task_rq(p), p, old_class, p->prio);
5522 	}
5523 	scx_task_iter_stop(&sti);
5524 	percpu_up_write(&scx_fork_rwsem);
5525 
5526 	scx_ops_bypass(false);
5527 
5528 	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5529 		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5530 		goto err_disable;
5531 	}
5532 
5533 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5534 		static_branch_enable(&__scx_switched_all);
5535 
5536 	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5537 		scx_ops.name, scx_switched_all() ? "" : " (partial)");
5538 	kobject_uevent(scx_root_kobj, KOBJ_ADD);
5539 	mutex_unlock(&scx_ops_enable_mutex);
5540 
5541 	atomic_long_inc(&scx_enable_seq);
5542 
5543 	return 0;
5544 
5545 err_del:
5546 	kobject_del(scx_root_kobj);
5547 err:
5548 	kobject_put(scx_root_kobj);
5549 	scx_root_kobj = NULL;
5550 	if (scx_exit_info) {
5551 		free_exit_info(scx_exit_info);
5552 		scx_exit_info = NULL;
5553 	}
5554 err_unlock:
5555 	mutex_unlock(&scx_ops_enable_mutex);
5556 	return ret;
5557 
5558 err_disable_unlock_all:
5559 	scx_cgroup_unlock();
5560 	percpu_up_write(&scx_fork_rwsem);
5561 	scx_ops_bypass(false);
5562 err_disable:
5563 	mutex_unlock(&scx_ops_enable_mutex);
5564 	/*
5565 	 * Returning an error code here would not pass all the error information
5566 	 * to userspace. Record the errno using scx_ops_error() for cases where
5567 	 * scx_ops_error() wasn't already invoked, and exit indicating success so
5568 	 * that the error is reported through ops.exit() with all the details.
5569 	 *
5570 	 * Flush scx_ops_disable_work to ensure that error is reported before
5571 	 * init completion.
5572 	 */
5573 	scx_ops_error("scx_ops_enable() failed (%d)", ret);
5574 	kthread_flush_work(&scx_ops_disable_work);
5575 	return 0;
5576 }
5577 
5578 
5579 /********************************************************************************
5580  * bpf_struct_ops plumbing.
5581  */
5582 #include <linux/bpf_verifier.h>
5583 #include <linux/bpf.h>
5584 #include <linux/btf.h>
5585 
5586 static const struct btf_type *task_struct_type;
5587 
5588 static bool bpf_scx_is_valid_access(int off, int size,
5589 				    enum bpf_access_type type,
5590 				    const struct bpf_prog *prog,
5591 				    struct bpf_insn_access_aux *info)
5592 {
5593 	if (type != BPF_READ)
5594 		return false;
5595 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5596 		return false;
5597 	if (off % size != 0)
5598 		return false;
5599 
5600 	return btf_ctx_access(off, size, type, prog, info);
5601 }
5602 
5603 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5604 				     const struct bpf_reg_state *reg, int off,
5605 				     int size)
5606 {
5607 	const struct btf_type *t;
5608 
5609 	t = btf_type_by_id(reg->btf, reg->btf_id);
5610 	if (t == task_struct_type) {
5611 		if (off >= offsetof(struct task_struct, scx.slice) &&
5612 		    off + size <= offsetofend(struct task_struct, scx.slice))
5613 			return SCALAR_VALUE;
5614 		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5615 		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5616 			return SCALAR_VALUE;
5617 		if (off >= offsetof(struct task_struct, scx.disallow) &&
5618 		    off + size <= offsetofend(struct task_struct, scx.disallow))
5619 			return SCALAR_VALUE;
5620 	}
5621 
5622 	return -EACCES;
5623 }
5624 
5625 static const struct bpf_func_proto *
5626 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5627 {
5628 	switch (func_id) {
5629 	case BPF_FUNC_task_storage_get:
5630 		return &bpf_task_storage_get_proto;
5631 	case BPF_FUNC_task_storage_delete:
5632 		return &bpf_task_storage_delete_proto;
5633 	default:
5634 		return bpf_base_func_proto(func_id, prog);
5635 	}
5636 }
5637 
5638 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5639 	.get_func_proto = bpf_scx_get_func_proto,
5640 	.is_valid_access = bpf_scx_is_valid_access,
5641 	.btf_struct_access = bpf_scx_btf_struct_access,
5642 };
5643 
5644 static int bpf_scx_init_member(const struct btf_type *t,
5645 			       const struct btf_member *member,
5646 			       void *kdata, const void *udata)
5647 {
5648 	const struct sched_ext_ops *uops = udata;
5649 	struct sched_ext_ops *ops = kdata;
5650 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5651 	int ret;
5652 
5653 	switch (moff) {
5654 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
5655 		if (*(u32 *)(udata + moff) > INT_MAX)
5656 			return -E2BIG;
5657 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
5658 		return 1;
5659 	case offsetof(struct sched_ext_ops, flags):
5660 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5661 			return -EINVAL;
5662 		ops->flags = *(u64 *)(udata + moff);
5663 		return 1;
5664 	case offsetof(struct sched_ext_ops, name):
5665 		ret = bpf_obj_name_cpy(ops->name, uops->name,
5666 				       sizeof(ops->name));
5667 		if (ret < 0)
5668 			return ret;
5669 		if (ret == 0)
5670 			return -EINVAL;
5671 		return 1;
5672 	case offsetof(struct sched_ext_ops, timeout_ms):
5673 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5674 		    SCX_WATCHDOG_MAX_TIMEOUT)
5675 			return -E2BIG;
5676 		ops->timeout_ms = *(u32 *)(udata + moff);
5677 		return 1;
5678 	case offsetof(struct sched_ext_ops, exit_dump_len):
5679 		ops->exit_dump_len =
5680 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5681 		return 1;
5682 	case offsetof(struct sched_ext_ops, hotplug_seq):
5683 		ops->hotplug_seq = *(u64 *)(udata + moff);
5684 		return 1;
5685 	}
5686 
5687 	return 0;
5688 }
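/*
 * Illustrative sketch, not part of this file: the members handled above are
 * the ones a BPF scheduler typically sets directly in its struct_ops map.
 * Scheduler and callback names below are hypothetical; the flags are real
 * %SCX_OPS_* values validated against %SCX_OPS_ALL_FLAGS above.
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.enqueue	= (void *)example_enqueue,
 *		.dispatch	= (void *)example_dispatch,
 *		.flags		= SCX_OPS_ENQ_LAST | SCX_OPS_SWITCH_PARTIAL,
 *		.timeout_ms	= 5000,
 *		.exit_dump_len	= 65536,
 *		.name		= "example",
 *	};
 */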
5689 
5690 static int bpf_scx_check_member(const struct btf_type *t,
5691 				const struct btf_member *member,
5692 				const struct bpf_prog *prog)
5693 {
5694 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5695 
5696 	switch (moff) {
5697 	case offsetof(struct sched_ext_ops, init_task):
5698 #ifdef CONFIG_EXT_GROUP_SCHED
5699 	case offsetof(struct sched_ext_ops, cgroup_init):
5700 	case offsetof(struct sched_ext_ops, cgroup_exit):
5701 	case offsetof(struct sched_ext_ops, cgroup_prep_move):
5702 #endif
5703 	case offsetof(struct sched_ext_ops, cpu_online):
5704 	case offsetof(struct sched_ext_ops, cpu_offline):
5705 	case offsetof(struct sched_ext_ops, init):
5706 	case offsetof(struct sched_ext_ops, exit):
5707 		break;
5708 	default:
5709 		if (prog->sleepable)
5710 			return -EINVAL;
5711 	}
5712 
5713 	return 0;
5714 }
5715 
5716 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5717 {
5718 	return scx_ops_enable(kdata, link);
5719 }
5720 
5721 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5722 {
5723 	scx_ops_disable(SCX_EXIT_UNREG);
5724 	kthread_flush_work(&scx_ops_disable_work);
5725 }
5726 
5727 static int bpf_scx_init(struct btf *btf)
5728 {
5729 	task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5730 
5731 	return 0;
5732 }
5733 
5734 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5735 {
5736 	/*
5737 	 * sched_ext does not support updating the actively-loaded BPF
5738 	 * scheduler, as registering a BPF scheduler can always fail if the
5739 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5740 	 * etc. Similarly, we can always race with unregistration happening
5741 	 * elsewhere, such as with sysrq.
5742 	 */
5743 	return -EOPNOTSUPP;
5744 }
5745 
5746 static int bpf_scx_validate(void *kdata)
5747 {
5748 	return 0;
5749 }
5750 
5751 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5752 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
5753 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
5754 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
5755 static void sched_ext_ops__tick(struct task_struct *p) {}
5756 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
5757 static void sched_ext_ops__running(struct task_struct *p) {}
5758 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
5759 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
5760 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
5761 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
5762 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
5763 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
5764 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
5765 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
5766 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
5767 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5768 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
5769 static void sched_ext_ops__enable(struct task_struct *p) {}
5770 static void sched_ext_ops__disable(struct task_struct *p) {}
5771 #ifdef CONFIG_EXT_GROUP_SCHED
5772 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5773 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
5774 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5775 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5776 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5777 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
5778 #endif
5779 static void sched_ext_ops__cpu_online(s32 cpu) {}
5780 static void sched_ext_ops__cpu_offline(s32 cpu) {}
5781 static s32 sched_ext_ops__init(void) { return -EINVAL; }
5782 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
5783 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
5784 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5785 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5786 
5787 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5788 	.select_cpu		= sched_ext_ops__select_cpu,
5789 	.enqueue		= sched_ext_ops__enqueue,
5790 	.dequeue		= sched_ext_ops__dequeue,
5791 	.dispatch		= sched_ext_ops__dispatch,
5792 	.tick			= sched_ext_ops__tick,
5793 	.runnable		= sched_ext_ops__runnable,
5794 	.running		= sched_ext_ops__running,
5795 	.stopping		= sched_ext_ops__stopping,
5796 	.quiescent		= sched_ext_ops__quiescent,
5797 	.yield			= sched_ext_ops__yield,
5798 	.core_sched_before	= sched_ext_ops__core_sched_before,
5799 	.set_weight		= sched_ext_ops__set_weight,
5800 	.set_cpumask		= sched_ext_ops__set_cpumask,
5801 	.update_idle		= sched_ext_ops__update_idle,
5802 	.cpu_acquire		= sched_ext_ops__cpu_acquire,
5803 	.cpu_release		= sched_ext_ops__cpu_release,
5804 	.init_task		= sched_ext_ops__init_task,
5805 	.exit_task		= sched_ext_ops__exit_task,
5806 	.enable			= sched_ext_ops__enable,
5807 	.disable		= sched_ext_ops__disable,
5808 #ifdef CONFIG_EXT_GROUP_SCHED
5809 	.cgroup_init		= sched_ext_ops__cgroup_init,
5810 	.cgroup_exit		= sched_ext_ops__cgroup_exit,
5811 	.cgroup_prep_move	= sched_ext_ops__cgroup_prep_move,
5812 	.cgroup_move		= sched_ext_ops__cgroup_move,
5813 	.cgroup_cancel_move	= sched_ext_ops__cgroup_cancel_move,
5814 	.cgroup_set_weight	= sched_ext_ops__cgroup_set_weight,
5815 #endif
5816 	.cpu_online		= sched_ext_ops__cpu_online,
5817 	.cpu_offline		= sched_ext_ops__cpu_offline,
5818 	.init			= sched_ext_ops__init,
5819 	.exit			= sched_ext_ops__exit,
5820 	.dump			= sched_ext_ops__dump,
5821 	.dump_cpu		= sched_ext_ops__dump_cpu,
5822 	.dump_task		= sched_ext_ops__dump_task,
5823 };
5824 
5825 static struct bpf_struct_ops bpf_sched_ext_ops = {
5826 	.verifier_ops = &bpf_scx_verifier_ops,
5827 	.reg = bpf_scx_reg,
5828 	.unreg = bpf_scx_unreg,
5829 	.check_member = bpf_scx_check_member,
5830 	.init_member = bpf_scx_init_member,
5831 	.init = bpf_scx_init,
5832 	.update = bpf_scx_update,
5833 	.validate = bpf_scx_validate,
5834 	.name = "sched_ext_ops",
5835 	.owner = THIS_MODULE,
5836 	.cfi_stubs = &__bpf_ops_sched_ext_ops
5837 };
5838 
5839 
5840 /********************************************************************************
5841  * System integration and init.
5842  */
5843 
5844 static void sysrq_handle_sched_ext_reset(u8 key)
5845 {
5846 	if (scx_ops_helper)
5847 		scx_ops_disable(SCX_EXIT_SYSRQ);
5848 	else
5849 		pr_info("sched_ext: BPF scheduler not yet used\n");
5850 }
5851 
5852 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
5853 	.handler	= sysrq_handle_sched_ext_reset,
5854 	.help_msg	= "reset-sched-ext(S)",
5855 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
5856 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5857 };
5858 
5859 static void sysrq_handle_sched_ext_dump(u8 key)
5860 {
5861 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
5862 
5863 	if (scx_enabled())
5864 		scx_dump_state(&ei, 0);
5865 }
5866 
5867 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
5868 	.handler	= sysrq_handle_sched_ext_dump,
5869 	.help_msg	= "dump-sched-ext(D)",
5870 	.action_msg	= "Trigger sched_ext debug dump",
5871 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5872 };
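/*
 * Both keys are registered in init_sched_ext_class() below and are reachable
 * through the standard SysRq channels; e.g. "echo S > /proc/sysrq-trigger"
 * reverts all tasks to CFS and "echo D > /proc/sysrq-trigger" emits the debug
 * dump via the handlers above.
 */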
5873 
5874 static bool can_skip_idle_kick(struct rq *rq)
5875 {
5876 	lockdep_assert_rq_held(rq);
5877 
5878 	/*
5879 	 * We can skip idle kicking if @rq is going to go through at least one
5880 	 * full SCX scheduling cycle before going idle. Just checking whether
5881 	 * curr is not idle is insufficient because we could be racing
5882 	 * balance_one() trying to pull the next task from a remote rq, which
5883 	 * may fail, and @rq may become idle afterwards.
5884 	 *
5885 	 * The race window is small and we don't and can't guarantee that @rq is
5886 	 * only kicked while idle anyway. Skip only when sure.
5887 	 */
5888 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
5889 }
5890 
5891 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
5892 {
5893 	struct rq *rq = cpu_rq(cpu);
5894 	struct scx_rq *this_scx = &this_rq->scx;
5895 	bool should_wait = false;
5896 	unsigned long flags;
5897 
5898 	raw_spin_rq_lock_irqsave(rq, flags);
5899 
5900 	/*
5901 	 * During CPU hotplug, a CPU may depend on kicking itself to make
5902 	 * forward progress. Allow kicking self regardless of online state.
5903 	 */
5904 	if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
5905 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5906 			if (rq->curr->sched_class == &ext_sched_class)
5907 				rq->curr->scx.slice = 0;
5908 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5909 		}
5910 
5911 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5912 			pseqs[cpu] = rq->scx.pnt_seq;
5913 			should_wait = true;
5914 		}
5915 
5916 		resched_curr(rq);
5917 	} else {
5918 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5919 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5920 	}
5921 
5922 	raw_spin_rq_unlock_irqrestore(rq, flags);
5923 
5924 	return should_wait;
5925 }
5926 
5927 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5928 {
5929 	struct rq *rq = cpu_rq(cpu);
5930 	unsigned long flags;
5931 
5932 	raw_spin_rq_lock_irqsave(rq, flags);
5933 
5934 	if (!can_skip_idle_kick(rq) &&
5935 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5936 		resched_curr(rq);
5937 
5938 	raw_spin_rq_unlock_irqrestore(rq, flags);
5939 }
5940 
5941 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5942 {
5943 	struct rq *this_rq = this_rq();
5944 	struct scx_rq *this_scx = &this_rq->scx;
5945 	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
5946 	bool should_wait = false;
5947 	s32 cpu;
5948 
5949 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
5950 		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
5951 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5952 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5953 	}
5954 
5955 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5956 		kick_one_cpu_if_idle(cpu, this_rq);
5957 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5958 	}
5959 
5960 	if (!should_wait)
5961 		return;
5962 
5963 	for_each_cpu(cpu, this_scx->cpus_to_wait) {
5964 		unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
5965 
5966 		if (cpu != cpu_of(this_rq)) {
5967 			/*
5968 			 * Pairs with smp_store_release() issued by this CPU in
5969 			 * switch_class() on the resched path.
5970 			 *
5971 			 * We busy-wait here to guarantee that no other task can
5972 			 * be scheduled on our core before the target CPU has
5973 			 * entered the resched path.
5974 			 */
5975 			while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
5976 				cpu_relax();
5977 		}
5978 
5979 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5980 	}
5981 }
5982 
5983 /**
5984  * print_scx_info - print out sched_ext scheduler state
5985  * @log_lvl: the log level to use when printing
5986  * @p: target task
5987  *
5988  * If a sched_ext scheduler is enabled, print the name and state of the
5989  * scheduler. If @p is on sched_ext, print further information about the task.
5990  *
5991  * This function can be safely called on any task as long as the task_struct
5992  * itself is accessible. While safe, this function isn't synchronized and may
5993  * print out mixed-up or garbage values of limited length.
5994  */
5995 void print_scx_info(const char *log_lvl, struct task_struct *p)
5996 {
5997 	enum scx_ops_enable_state state = scx_ops_enable_state();
5998 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5999 	char runnable_at_buf[22] = "?";
6000 	struct sched_class *class;
6001 	unsigned long runnable_at;
6002 
6003 	if (state == SCX_OPS_DISABLED)
6004 		return;
6005 
6006 	/*
6007 	 * Carefully check if the task was running on sched_ext, and then
6008 	 * carefully copy the time it's been runnable, and its state.
6009 	 */
6010 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6011 	    class != &ext_sched_class) {
6012 		printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
6013 		       scx_ops_enable_state_str[state], all);
6014 		return;
6015 	}
6016 
6017 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6018 				      sizeof(runnable_at)))
6019 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6020 			  jiffies_delta_msecs(runnable_at, jiffies));
6021 
6022 	/* print everything onto one line to conserve console space */
6023 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6024 	       log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
6025 	       runnable_at_buf);
6026 }
6027 
6028 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6029 {
6030 	/*
6031 	 * SCX schedulers often have userspace components which are sometimes
6032 	 * involved in critical scheduling paths. PM operations involve freezing
6033 	 * userspace which can lead to scheduling misbehaviors including stalls.
6034 	 * Let's bypass while PM operations are in progress.
6035 	 */
6036 	switch (event) {
6037 	case PM_HIBERNATION_PREPARE:
6038 	case PM_SUSPEND_PREPARE:
6039 	case PM_RESTORE_PREPARE:
6040 		scx_ops_bypass(true);
6041 		break;
6042 	case PM_POST_HIBERNATION:
6043 	case PM_POST_SUSPEND:
6044 	case PM_POST_RESTORE:
6045 		scx_ops_bypass(false);
6046 		break;
6047 	}
6048 
6049 	return NOTIFY_OK;
6050 }
6051 
6052 static struct notifier_block scx_pm_notifier = {
6053 	.notifier_call = scx_pm_handler,
6054 };
6055 
6056 void __init init_sched_ext_class(void)
6057 {
6058 	s32 cpu, v;
6059 
6060 	/*
6061 	 * The following is to prevent the compiler from optimizing out the enum
6062 	 * definitions so that BPF scheduler implementations can use them
6063 	 * through the generated vmlinux.h.
6064 	 */
6065 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6066 		   SCX_TG_ONLINE);
6067 
6068 	BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
6069 	scx_idle_init_masks();
6070 
6071 	scx_kick_cpus_pnt_seqs =
6072 		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6073 			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
6074 	BUG_ON(!scx_kick_cpus_pnt_seqs);
6075 
6076 	for_each_possible_cpu(cpu) {
6077 		struct rq *rq = cpu_rq(cpu);
6078 		int  n = cpu_to_node(cpu);
6079 
6080 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6081 		INIT_LIST_HEAD(&rq->scx.runnable_list);
6082 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6083 
6084 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
6085 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
6086 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
6087 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
6088 		init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6089 		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6090 
6091 		if (cpu_online(cpu))
6092 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6093 	}
6094 
6095 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6096 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6097 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6098 }
6099 
6100 
6101 /********************************************************************************
6102  * Helpers that can be called from the BPF scheduler.
6103  */
6104 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6105 {
6106 	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6107 		return false;
6108 
6109 	lockdep_assert_irqs_disabled();
6110 
6111 	if (unlikely(!p)) {
6112 		scx_ops_error("called with NULL task");
6113 		return false;
6114 	}
6115 
6116 	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6117 		scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
6118 		return false;
6119 	}
6120 
6121 	return true;
6122 }
6123 
6124 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6125 				  u64 enq_flags)
6126 {
6127 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6128 	struct task_struct *ddsp_task;
6129 
6130 	ddsp_task = __this_cpu_read(direct_dispatch_task);
6131 	if (ddsp_task) {
6132 		mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6133 		return;
6134 	}
6135 
6136 	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6137 		scx_ops_error("dispatch buffer overflow");
6138 		return;
6139 	}
6140 
6141 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6142 		.task = p,
6143 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6144 		.dsq_id = dsq_id,
6145 		.enq_flags = enq_flags,
6146 	};
6147 }
6148 
6149 __bpf_kfunc_start_defs();
6150 
6151 /**
6152  * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6153  * @p: task_struct to insert
6154  * @dsq_id: DSQ to insert into
6155  * @slice: duration @p can run for in nsecs, 0 to keep the current value
6156  * @enq_flags: SCX_ENQ_*
6157  *
6158  * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6159  * call this function spuriously. Can be called from ops.enqueue(),
6160  * ops.select_cpu(), and ops.dispatch().
6161  *
6162  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6163  * and @p must match the task being enqueued.
6164  *
6165  * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6166  * will be directly inserted into the corresponding dispatch queue after
6167  * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6168  * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6169  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6170  * task is inserted.
6171  *
6172  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6173  * and this function can be called up to ops.dispatch_max_batch times to insert
6174  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of
6175  * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
6176  *
6177  * This function doesn't have any locking restrictions and may be called under
6178  * BPF locks (in the future when BPF introduces more flexible locking).
6179  *
6180  * @p is allowed to run for @slice. The scheduling path is triggered on slice
6181  * exhaustion. If zero, the current residual slice is maintained. If
6182  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6183  * scx_bpf_kick_cpu() to trigger scheduling.
6184  */
6185 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6186 				    u64 enq_flags)
6187 {
6188 	if (!scx_dsq_insert_preamble(p, enq_flags))
6189 		return;
6190 
6191 	if (slice)
6192 		p->scx.slice = slice;
6193 	else
6194 		p->scx.slice = p->scx.slice ?: 1;
6195 
6196 	scx_dsq_insert_commit(p, dsq_id, enq_flags);
6197 }
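/*
 * A minimal usage sketch from a hypothetical BPF scheduler, not part of this
 * file and assuming the scx tooling's BPF_STRUCT_OPS() helper: a global-FIFO
 * ops.enqueue() that queues every task on the global DSQ with the default
 * slice.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */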
6198 
6199 /* for backward compatibility, will be removed in v6.15 */
6200 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6201 				  u64 enq_flags)
6202 {
6203 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6204 	scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6205 }
6206 
6207 /**
6208  * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6209  * @p: task_struct to insert
6210  * @dsq_id: DSQ to insert into
6211  * @slice: duration @p can run for in nsecs, 0 to keep the current value
6212  * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6213  * @enq_flags: SCX_ENQ_*
6214  *
6215  * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6216  * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6217  * are identical to scx_bpf_dsq_insert().
6218  *
6219  * @vtime ordering is according to time_before64() which considers wrapping. A
6220  * numerically larger vtime may indicate an earlier position in the ordering and
6221  * vice-versa.
6222  *
6223  * A DSQ can only be used as a FIFO or priority queue at any given time and this
6224  * function must not be called on a DSQ which already has one or more FIFO tasks
6225  * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6226  * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6227  */
6228 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6229 					  u64 slice, u64 vtime, u64 enq_flags)
6230 {
6231 	if (!scx_dsq_insert_preamble(p, enq_flags))
6232 		return;
6233 
6234 	if (slice)
6235 		p->scx.slice = slice;
6236 	else
6237 		p->scx.slice = p->scx.slice ?: 1;
6238 
6239 	p->scx.dsq_vtime = vtime;
6240 
6241 	scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6242 }
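/*
 * A minimal sketch of weighted vtime ordering from a hypothetical BPF
 * scheduler, not part of this file. SHARED_DSQ is assumed to be a user DSQ
 * created in ops.init(), and p->scx.dsq_vtime is assumed to be advanced
 * elsewhere (e.g. in ops.stopping()) in proportion to execution time divided
 * by the task's weight.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 */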
6243 
6244 /* for backward compatibility, will be removed in v6.15 */
6245 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6246 					u64 slice, u64 vtime, u64 enq_flags)
6247 {
6248 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6249 	scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6250 }
6251 
6252 __bpf_kfunc_end_defs();
6253 
6254 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6255 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6256 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6257 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6258 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6259 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6260 
6261 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6262 	.owner			= THIS_MODULE,
6263 	.set			= &scx_kfunc_ids_enqueue_dispatch,
6264 };
6265 
6266 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6267 			 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6268 {
6269 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6270 	struct rq *this_rq, *src_rq, *locked_rq;
6271 	bool dispatched = false;
6272 	bool in_balance;
6273 	unsigned long flags;
6274 
6275 	if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6276 		return false;
6277 
6278 	/*
6279 	 * Can be called from either ops.dispatch() locking this_rq() or any
6280 	 * context where no rq lock is held. If the latter, lock @p's task_rq which
6281 	 * we'll likely need anyway.
6282 	 */
6283 	src_rq = task_rq(p);
6284 
6285 	local_irq_save(flags);
6286 	this_rq = this_rq();
6287 	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6288 
6289 	if (in_balance) {
6290 		if (this_rq != src_rq) {
6291 			raw_spin_rq_unlock(this_rq);
6292 			raw_spin_rq_lock(src_rq);
6293 		}
6294 	} else {
6295 		raw_spin_rq_lock(src_rq);
6296 	}
6297 
6298 	/*
6299 	 * If the BPF scheduler keeps calling this function repeatedly, it can
6300 	 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
6301 	 * breather if necessary.
6302 	 */
6303 	scx_ops_breather(src_rq);
6304 
6305 	locked_rq = src_rq;
6306 	raw_spin_lock(&src_dsq->lock);
6307 
6308 	/*
6309 	 * Did someone else get to it? @p could have already left $src_dsq, got
6310 	 * re-enqueud, or be in the process of being consumed by someone else.
6311 	 */
6312 	if (unlikely(p->scx.dsq != src_dsq ||
6313 		     u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6314 		     p->scx.holding_cpu >= 0) ||
6315 	    WARN_ON_ONCE(src_rq != task_rq(p))) {
6316 		raw_spin_unlock(&src_dsq->lock);
6317 		goto out;
6318 	}
6319 
6320 	/* @p is still on $src_dsq and stable, determine the destination */
6321 	dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6322 
6323 	/*
6324 	 * Apply vtime and slice updates before moving so that the new time is
6325 	 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6326 	 * this is safe as we're locking it.
6327 	 */
6328 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6329 		p->scx.dsq_vtime = kit->vtime;
6330 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6331 		p->scx.slice = kit->slice;
6332 
6333 	/* execute move */
6334 	locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
6335 	dispatched = true;
6336 out:
6337 	if (in_balance) {
6338 		if (this_rq != locked_rq) {
6339 			raw_spin_rq_unlock(locked_rq);
6340 			raw_spin_rq_lock(this_rq);
6341 		}
6342 	} else {
6343 		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6344 	}
6345 
6346 	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6347 			       __SCX_DSQ_ITER_HAS_VTIME);
6348 	return dispatched;
6349 }
6350 
6351 __bpf_kfunc_start_defs();
6352 
6353 /**
6354  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6355  *
6356  * Can only be called from ops.dispatch().
6357  */
6358 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6359 {
6360 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6361 		return 0;
6362 
6363 	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6364 }
6365 
6366 /**
6367  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6368  *
6369  * Cancel the latest dispatch. Can be called multiple times to cancel further
6370  * dispatches. Can only be called from ops.dispatch().
6371  */
6372 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6373 {
6374 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6375 
6376 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6377 		return;
6378 
6379 	if (dspc->cursor > 0)
6380 		dspc->cursor--;
6381 	else
6382 		scx_ops_error("dispatch buffer underflow");
6383 }
6384 
6385 /**
6386  * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6387  * @dsq_id: DSQ to move task from
6388  *
6389  * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6390  * local DSQ for execution. Can only be called from ops.dispatch().
6391  *
6392  * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6393  * before trying to move from the specified DSQ. It may also grab rq locks and
6394  * thus can't be called under any BPF locks.
6395  *
6396  * Returns %true if a task has been moved, %false if there isn't any task to
6397  * move.
6398  */
6399 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6400 {
6401 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6402 	struct scx_dispatch_q *dsq;
6403 
6404 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6405 		return false;
6406 
6407 	flush_dispatch_buf(dspc->rq);
6408 
6409 	dsq = find_user_dsq(dsq_id);
6410 	if (unlikely(!dsq)) {
6411 		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6412 		return false;
6413 	}
6414 
6415 	if (consume_dispatch_q(dspc->rq, dsq)) {
6416 		/*
6417 		 * A successfully consumed task can be dequeued before it starts
6418 		 * running while the CPU is trying to migrate other dispatched
6419 		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6420 		 * local DSQ.
6421 		 */
6422 		dspc->nr_tasks++;
6423 		return true;
6424 	} else {
6425 		return false;
6426 	}
6427 }
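
/*
 * Example (illustrative sketch): a minimal ops.dispatch() that pulls the next
 * task from a shared user DSQ into the local DSQ. MY_DSQ is hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		/* returns false when MY_DSQ is empty; the CPU then idles */
 *		scx_bpf_dsq_move_to_local(MY_DSQ);
 *	}
 */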
6428 
6429 /* for backward compatibility, will be removed in v6.15 */
6430 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6431 {
6432 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6433 	return scx_bpf_dsq_move_to_local(dsq_id);
6434 }
6435 
6436 /**
6437  * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6438  * @it__iter: DSQ iterator in progress
6439  * @slice: duration the moved task can run for in nsecs
6440  *
6441  * Override the slice of the next task that will be moved from @it__iter using
6442  * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6443  * slice duration is kept.
6444  */
6445 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6446 					    u64 slice)
6447 {
6448 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6449 
6450 	kit->slice = slice;
6451 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6452 }
6453 
6454 /* for backward compatibility, will be removed in v6.15 */
6455 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6456 			struct bpf_iter_scx_dsq *it__iter, u64 slice)
6457 {
6458 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6459 	scx_bpf_dsq_move_set_slice(it__iter, slice);
6460 }
6461 
6462 /**
6463  * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6464  * @it__iter: DSQ iterator in progress
6465  * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6466  *
6467  * Override the vtime of the next task that will be moved from @it__iter using
6468  * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
6469  * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6470  * override is ignored and cleared.
6471  */
6472 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6473 					    u64 vtime)
6474 {
6475 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6476 
6477 	kit->vtime = vtime;
6478 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6479 }
6480 
6481 /* for backward compatibility, will be removed in v6.15 */
6482 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6483 			struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6484 {
6485 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6486 	scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6487 }
6488 
6489 /**
6490  * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6491  * @it__iter: DSQ iterator in progress
6492  * @p: task to transfer
6493  * @dsq_id: DSQ to move @p to
6494  * @enq_flags: SCX_ENQ_*
6495  *
6496  * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6497  * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6498  * be the destination.
6499  *
6500  * For the transfer to be successful, @p must still be on the DSQ and have been
6501  * queued before the DSQ iteration started. This function doesn't care whether
6502  * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6503  * been queued before the iteration started.
6504  *
6505  * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6506  *
6507  * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6508  * lock (e.g. BPF timers or SYSCALL programs).
6509  *
6510  * Returns %true if @p has been consumed, %false if @p had already been consumed
6511  * or dequeued.
6512  */
6513 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6514 				  struct task_struct *p, u64 dsq_id,
6515 				  u64 enq_flags)
6516 {
6517 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6518 			    p, dsq_id, enq_flags);
6519 }
6520 
6521 /* for backward compatibility, will be removed in v6.15 */
6522 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6523 					   struct task_struct *p, u64 dsq_id,
6524 					   u64 enq_flags)
6525 {
6526 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6527 	return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6528 }
6529 
6530 /**
6531  * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6532  * @it__iter: DSQ iterator in progress
6533  * @p: task to transfer
6534  * @dsq_id: DSQ to move @p to
6535  * @enq_flags: SCX_ENQ_*
6536  *
6537  * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6538  * priority queue of the DSQ specified by @dsq_id. The destination must be a
6539  * user DSQ, as only user DSQs support priority queues.
6540  *
6541  * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6542  * and scx_bpf_dsq_move_set_vtime() to update.
6543  *
6544  * All other aspects are identical to scx_bpf_dsq_move(). See
6545  * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6546  */
6547 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6548 					struct task_struct *p, u64 dsq_id,
6549 					u64 enq_flags)
6550 {
6551 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6552 			    p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6553 }
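
/*
 * Example (illustrative sketch): walking a user DSQ and migrating a matching
 * task into another DSQ while overriding its slice and vtime. The
 * bpf_for_each(scx_dsq, ...) and BPF_FOR_EACH_ITER forms follow the scx
 * BPF-side helpers; SRC_DSQ, dst_dsq_for() and should_move() are hypothetical.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SRC_DSQ, 0) {
 *		if (!should_move(p))
 *			continue;
 *		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SCX_SLICE_DFL);
 *		scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER, p->scx.dsq_vtime);
 *		if (scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p,
 *					   dst_dsq_for(p), 0))
 *			break;
 *	}
 */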
6554 
6555 /* for backward compatibility, will be removed in v6.15 */
6556 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6557 						 struct task_struct *p, u64 dsq_id,
6558 						 u64 enq_flags)
6559 {
6560 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6561 	return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6562 }
6563 
6564 __bpf_kfunc_end_defs();
6565 
6566 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6567 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6568 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6569 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6570 BTF_ID_FLAGS(func, scx_bpf_consume)
6571 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6572 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6573 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6574 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6575 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6576 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6577 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6578 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6579 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6580 
6581 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6582 	.owner			= THIS_MODULE,
6583 	.set			= &scx_kfunc_ids_dispatch,
6584 };
6585 
6586 __bpf_kfunc_start_defs();
6587 
6588 /**
6589  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6590  *
6591  * Iterate over all of the tasks currently enqueued on the local DSQ of the
6592  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6593  * processed tasks. Can only be called from ops.cpu_release().
6594  */
6595 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6596 {
6597 	LIST_HEAD(tasks);
6598 	u32 nr_enqueued = 0;
6599 	struct rq *rq;
6600 	struct task_struct *p, *n;
6601 
6602 	if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6603 		return 0;
6604 
6605 	rq = cpu_rq(smp_processor_id());
6606 	lockdep_assert_rq_held(rq);
6607 
6608 	/*
6609 	 * The BPF scheduler may choose to dispatch tasks back to
6610 	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6611 	 * first to avoid processing the same tasks repeatedly.
6612 	 */
6613 	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6614 				 scx.dsq_list.node) {
6615 		/*
6616 		 * If @p is being migrated, @p's current CPU may not agree with
6617 		 * its allowed CPUs and the migration_cpu_stop is about to
6618 		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6619 		 *
6620 		 * While racing sched property changes may also dequeue and
6621 		 * re-enqueue a migrating task while its current CPU and allowed
6622 		 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6623 		 * the current local DSQ for running tasks and thus are not
6624 		 * visible to the BPF scheduler.
6625 		 *
6626 		 * Also skip re-enqueueing tasks that can only run on this
6627 		 * CPU, as they would just be re-added to the same local
6628 		 * DSQ without any benefit.
6629 		 */
6630 		if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1)
6631 			continue;
6632 
6633 		dispatch_dequeue(rq, p);
6634 		list_add_tail(&p->scx.dsq_list.node, &tasks);
6635 	}
6636 
6637 	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6638 		list_del_init(&p->scx.dsq_list.node);
6639 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6640 		nr_enqueued++;
6641 	}
6642 
6643 	return nr_enqueued;
6644 }
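
/*
 * Example (illustrative sketch): when a CPU is taken over by a higher priority
 * sched class, hand the tasks queued on its local DSQ back to the BPF
 * scheduler so they can be placed elsewhere.
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */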
6645 
6646 __bpf_kfunc_end_defs();
6647 
6648 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6649 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6650 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6651 
6652 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6653 	.owner			= THIS_MODULE,
6654 	.set			= &scx_kfunc_ids_cpu_release,
6655 };
6656 
6657 __bpf_kfunc_start_defs();
6658 
6659 /**
6660  * scx_bpf_create_dsq - Create a custom DSQ
6661  * @dsq_id: DSQ to create
6662  * @node: NUMA node to allocate from
6663  *
6664  * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6665  * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6666  */
6667 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6668 {
6669 	if (unlikely(node >= (int)nr_node_ids ||
6670 		     (node < 0 && node != NUMA_NO_NODE)))
6671 		return -EINVAL;
6672 	return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
6673 }
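
/*
 * Example (illustrative sketch): DSQs are typically created from the sleepable
 * ops.init() callback. MY_DSQ is hypothetical.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		/* allocate the DSQ on any NUMA node */
 *		return scx_bpf_create_dsq(MY_DSQ, -1);
 *	}
 */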
6674 
6675 __bpf_kfunc_end_defs();
6676 
6677 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6678 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6679 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6680 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6681 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6682 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6683 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6684 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6685 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6686 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6687 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6688 
6689 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6690 	.owner			= THIS_MODULE,
6691 	.set			= &scx_kfunc_ids_unlocked,
6692 };
6693 
6694 __bpf_kfunc_start_defs();
6695 
6696 /**
6697  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6698  * @cpu: cpu to kick
6699  * @flags: %SCX_KICK_* flags
6700  *
6701  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6702  * trigger rescheduling on a busy CPU. This can be called from any online
6703  * scx_ops operation and the actual kicking is performed asynchronously through
6704  * an irq work.
6705  */
6706 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6707 {
6708 	struct rq *this_rq;
6709 	unsigned long irq_flags;
6710 
6711 	if (!ops_cpu_valid(cpu, NULL))
6712 		return;
6713 
6714 	local_irq_save(irq_flags);
6715 
6716 	this_rq = this_rq();
6717 
6718 	/*
6719 	 * While bypassing for PM ops, IRQ handling may not be online which can
6720 	 * lead to irq_work_queue() malfunction such as infinite busy wait for
6721 	 * IRQ status update. Suppress kicking.
6722 	 */
6723 	if (scx_rq_bypassing(this_rq))
6724 		goto out;
6725 
6726 	/*
6727 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6728 	 * rq locks. We can probably be smarter and avoid bouncing if called
6729 	 * from ops which don't hold a rq lock.
6730 	 */
6731 	if (flags & SCX_KICK_IDLE) {
6732 		struct rq *target_rq = cpu_rq(cpu);
6733 
6734 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6735 			scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6736 
6737 		if (raw_spin_rq_trylock(target_rq)) {
6738 			if (can_skip_idle_kick(target_rq)) {
6739 				raw_spin_rq_unlock(target_rq);
6740 				goto out;
6741 			}
6742 			raw_spin_rq_unlock(target_rq);
6743 		}
6744 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6745 	} else {
6746 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6747 
6748 		if (flags & SCX_KICK_PREEMPT)
6749 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6750 		if (flags & SCX_KICK_WAIT)
6751 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6752 	}
6753 
6754 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6755 out:
6756 	local_irq_restore(irq_flags);
6757 }
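
/*
 * Example (illustrative sketch): after inserting into a shared DSQ from
 * ops.enqueue(), wake an idle CPU so it can pick the task up. MY_DSQ and
 * pick_idle_cpu_for() are hypothetical.
 *
 *	scx_bpf_dsq_insert(p, MY_DSQ, SCX_SLICE_DFL, enq_flags);
 *
 *	cpu = pick_idle_cpu_for(p);
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */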
6758 
6759 /**
6760  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6761  * @dsq_id: id of the DSQ
6762  *
6763  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6764  * -%ENOENT is returned.
6765  */
6766 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6767 {
6768 	struct scx_dispatch_q *dsq;
6769 	s32 ret;
6770 
6771 	preempt_disable();
6772 
6773 	if (dsq_id == SCX_DSQ_LOCAL) {
6774 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6775 		goto out;
6776 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6777 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6778 
6779 		if (ops_cpu_valid(cpu, NULL)) {
6780 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6781 			goto out;
6782 		}
6783 	} else {
6784 		dsq = find_user_dsq(dsq_id);
6785 		if (dsq) {
6786 			ret = READ_ONCE(dsq->nr);
6787 			goto out;
6788 		}
6789 	}
6790 	ret = -ENOENT;
6791 out:
6792 	preempt_enable();
6793 	return ret;
6794 }
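
/*
 * Example (illustrative sketch): queue depth can serve as a cheap load signal,
 * e.g. for deciding whether to wake another CPU. MY_DSQ, QUEUE_THRESHOLD and
 * some_idle_cpu are hypothetical.
 *
 *	s32 nr = scx_bpf_dsq_nr_queued(MY_DSQ);
 *
 *	if (nr > QUEUE_THRESHOLD)
 *		scx_bpf_kick_cpu(some_idle_cpu, SCX_KICK_IDLE);
 */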
6795 
6796 /**
6797  * scx_bpf_destroy_dsq - Destroy a custom DSQ
6798  * @dsq_id: DSQ to destroy
6799  *
6800  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
6801  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
6802  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
6803  * which doesn't exist. Can be called from any online scx_ops operations.
6804  */
6805 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
6806 {
6807 	destroy_dsq(dsq_id);
6808 }
6809 
6810 /**
6811  * bpf_iter_scx_dsq_new - Create a DSQ iterator
6812  * @it: iterator to initialize
6813  * @dsq_id: DSQ to iterate
6814  * @flags: %SCX_DSQ_ITER_*
6815  *
6816  * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
6817  * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
6818  * tasks which are already queued when this function is invoked.
6819  */
6820 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
6821 				     u64 flags)
6822 {
6823 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6824 
6825 	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
6826 		     sizeof(struct bpf_iter_scx_dsq));
6827 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
6828 		     __alignof__(struct bpf_iter_scx_dsq));
6829 
6830 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
6831 		return -EINVAL;
6832 
6833 	kit->dsq = find_user_dsq(dsq_id);
6834 	if (!kit->dsq)
6835 		return -ENOENT;
6836 
6837 	INIT_LIST_HEAD(&kit->cursor.node);
6838 	kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
6839 	kit->cursor.priv = READ_ONCE(kit->dsq->seq);
6840 
6841 	return 0;
6842 }
6843 
6844 /**
6845  * bpf_iter_scx_dsq_next - Progress a DSQ iterator
6846  * @it: iterator to progress
6847  *
6848  * Return the next task. See bpf_iter_scx_dsq_new().
6849  */
6850 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6851 {
6852 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6853 	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6854 	struct task_struct *p;
6855 	unsigned long flags;
6856 
6857 	if (!kit->dsq)
6858 		return NULL;
6859 
6860 	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6861 
6862 	if (list_empty(&kit->cursor.node))
6863 		p = NULL;
6864 	else
6865 		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6866 
6867 	/*
6868 	 * Only tasks which were queued before the iteration started are
6869 	 * visible. This bounds BPF iterations and guarantees that vtime never
6870 	 * jumps in the other direction while iterating.
6871 	 */
6872 	do {
6873 		p = nldsq_next_task(kit->dsq, p, rev);
6874 	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6875 
6876 	if (p) {
6877 		if (rev)
6878 			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6879 		else
6880 			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6881 	} else {
6882 		list_del_init(&kit->cursor.node);
6883 	}
6884 
6885 	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6886 
6887 	return p;
6888 }
6889 
6890 /**
6891  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6892  * @it: iterator to destroy
6893  *
6894  * Undo bpf_iter_scx_dsq_new().
6895  */
6896 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6897 {
6898 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6899 
6900 	if (!kit->dsq)
6901 		return;
6902 
6903 	if (!list_empty(&kit->cursor.node)) {
6904 		unsigned long flags;
6905 
6906 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6907 		list_del_init(&kit->cursor.node);
6908 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6909 	}
6910 	kit->dsq = NULL;
6911 }
6912 
6913 __bpf_kfunc_end_defs();
6914 
6915 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
6916 			 char *fmt, unsigned long long *data, u32 data__sz)
6917 {
6918 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
6919 	s32 ret;
6920 
6921 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
6922 	    (data__sz && !data)) {
6923 		scx_ops_error("invalid data=%p and data__sz=%u",
6924 			      (void *)data, data__sz);
6925 		return -EINVAL;
6926 	}
6927 
6928 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
6929 	if (ret < 0) {
6930 		scx_ops_error("failed to read data fields (%d)", ret);
6931 		return ret;
6932 	}
6933 
6934 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
6935 				  &bprintf_data);
6936 	if (ret < 0) {
6937 		scx_ops_error("format preparation failed (%d)", ret);
6938 		return ret;
6939 	}
6940 
6941 	ret = bstr_printf(line_buf, line_size, fmt,
6942 			  bprintf_data.bin_args);
6943 	bpf_bprintf_cleanup(&bprintf_data);
6944 	if (ret < 0) {
6945 		scx_ops_error("(\"%s\", %p, %u) failed to format",
6946 			      fmt, data, data__sz);
6947 		return ret;
6948 	}
6949 
6950 	return ret;
6951 }
6952 
6953 static s32 bstr_format(struct scx_bstr_buf *buf,
6954 		       char *fmt, unsigned long long *data, u32 data__sz)
6955 {
6956 	return __bstr_format(buf->data, buf->line, sizeof(buf->line),
6957 			     fmt, data, data__sz);
6958 }
6959 
6960 __bpf_kfunc_start_defs();
6961 
6962 /**
6963  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
6964  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
6965  * @fmt: error message format string
6966  * @data: format string parameters packaged using ___bpf_fill() macro
6967  * @data__sz: @data len, must end in '__sz' for the verifier
6968  *
6969  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
6970  * disabling.
6971  */
6972 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
6973 				   unsigned long long *data, u32 data__sz)
6974 {
6975 	unsigned long flags;
6976 
6977 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6978 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6979 		scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
6980 				  scx_exit_bstr_buf.line);
6981 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6982 }
6983 
6984 /**
6985  * scx_bpf_error_bstr - Indicate fatal error
6986  * @fmt: error message format string
6987  * @data: format string parameters packaged using ___bpf_fill() macro
6988  * @data__sz: @data len, must end in '__sz' for the verifier
6989  *
6990  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
6991  * disabling.
6992  */
6993 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
6994 				    u32 data__sz)
6995 {
6996 	unsigned long flags;
6997 
6998 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6999 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7000 		scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
7001 				  scx_exit_bstr_buf.line);
7002 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7003 }
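
/*
 * Example (illustrative sketch): BPF schedulers normally reach these kfuncs
 * through the scx_bpf_exit() and scx_bpf_error() wrappers in the scx BPF-side
 * headers, which pack the varargs into the data/data__sz pair. MY_DSQ and
 * MY_EXIT_CODE are hypothetical.
 *
 *	if (scx_bpf_create_dsq(MY_DSQ, -1))
 *		scx_bpf_error("failed to create DSQ 0x%llx", MY_DSQ);
 *
 *	/* graceful, scheduler-initiated shutdown with an exit code */
 *	scx_bpf_exit(MY_EXIT_CODE, "unloading on request");
 */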
7004 
7005 /**
7006  * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7007  * @fmt: format string
7008  * @data: format string parameters packaged using ___bpf_fill() macro
7009  * @data__sz: @data len, must end in '__sz' for the verifier
7010  *
7011  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7012  * dump_task() to generate extra debug dump specific to the BPF scheduler.
7013  *
7014  * The extra dump may be multiple lines. A single line may be split over
7015  * multiple calls. The last line is automatically terminated.
7016  */
7017 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7018 				   u32 data__sz)
7019 {
7020 	struct scx_dump_data *dd = &scx_dump_data;
7021 	struct scx_bstr_buf *buf = &dd->buf;
7022 	s32 ret;
7023 
7024 	if (raw_smp_processor_id() != dd->cpu) {
7025 		scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7026 		return;
7027 	}
7028 
7029 	/* append the formatted string to the line buf */
7030 	ret = __bstr_format(buf->data, buf->line + dd->cursor,
7031 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7032 	if (ret < 0) {
7033 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7034 			  dd->prefix, fmt, data, data__sz, ret);
7035 		return;
7036 	}
7037 
7038 	dd->cursor += ret;
7039 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7040 
7041 	if (!dd->cursor)
7042 		return;
7043 
7044 	/*
7045 	 * If the line buf overflowed or ends in a newline, flush it into the
7046 	 * dump. This is to allow the caller to generate a single line over
7047 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7048  * the line buf, the only case which can lead to an unexpected
7049  * truncation is when the caller repeatedly generates newlines in the
7050  * middle of a line instead of at the end. Don't do that.
7051 	 */
7052 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7053 		ops_dump_flush();
7054 }
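
/*
 * Example (illustrative sketch): ops.dump_task() emitting scheduler-specific
 * state through the scx_bpf_dump() wrapper from the scx BPF-side headers.
 *
 *	void BPF_STRUCT_OPS(example_dump_task, struct scx_dump_ctx *dctx,
 *			    struct task_struct *p)
 *	{
 *		scx_bpf_dump("dsq_vtime=%llu slice=%llu",
 *			     p->scx.dsq_vtime, p->scx.slice);
 *	}
 */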
7055 
7056 /**
7057  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7058  * @cpu: CPU of interest
7059  *
7060  * Return the maximum relative capacity of @cpu in relation to the most
7061  * performant CPU in the system. The return value is in the range [1,
7062  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7063  */
7064 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7065 {
7066 	if (ops_cpu_valid(cpu, NULL))
7067 		return arch_scale_cpu_capacity(cpu);
7068 	else
7069 		return SCX_CPUPERF_ONE;
7070 }
7071 
7072 /**
7073  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7074  * @cpu: CPU of interest
7075  *
7076  * Return the current relative performance of @cpu in relation to its maximum.
7077  * The return value is in the range [1, %SCX_CPUPERF_ONE].
7078  *
7079  * The current performance level of a CPU in relation to the maximum performance
7080  * available in the system can be calculated as follows:
7081  *
7082  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7083  *
7084  * The result is in the range [1, %SCX_CPUPERF_ONE].
7085  */
7086 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7087 {
7088 	if (ops_cpu_valid(cpu, NULL))
7089 		return arch_scale_freq_capacity(cpu);
7090 	else
7091 		return SCX_CPUPERF_ONE;
7092 }
7093 
7094 /**
7095  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7096  * @cpu: CPU of interest
7097  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7098  *
7099  * Set the target performance level of @cpu to @perf. @perf is in linear
7100  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7101  * schedutil cpufreq governor chooses the target frequency.
7102  *
7103  * The actual performance level chosen, CPU grouping, and the overhead and
7104  * latency of the operations are dependent on the hardware and cpufreq driver in
7105  * use. Consult hardware and cpufreq documentation for more information. The
7106  * current performance level can be monitored using scx_bpf_cpuperf_cur().
7107  */
7108 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7109 {
7110 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
7111 		scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7112 		return;
7113 	}
7114 
7115 	if (ops_cpu_valid(cpu, NULL)) {
7116 		struct rq *rq = cpu_rq(cpu);
7117 
7118 		rq->scx.cpuperf_target = perf;
7119 
7120 		rcu_read_lock_sched_notrace();
7121 		cpufreq_update_util(cpu_rq(cpu), 0);
7122 		rcu_read_unlock_sched_notrace();
7123 	}
7124 }
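
/*
 * Example (illustrative sketch): ops.tick() requesting full performance on a
 * CPU that is running a latency-critical task. is_latency_critical() is
 * hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_tick, struct task_struct *p)
 *	{
 *		if (is_latency_critical(p))
 *			scx_bpf_cpuperf_set(scx_bpf_task_cpu(p), SCX_CPUPERF_ONE);
 *	}
 */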
7125 
7126 /**
7127  * scx_bpf_nr_node_ids - Return the number of possible node IDs
7128  *
7129  * All valid node IDs in the system are smaller than the returned value.
7130  */
7131 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
7132 {
7133 	return nr_node_ids;
7134 }
7135 
7136 /**
7137  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7138  *
7139  * All valid CPU IDs in the system are smaller than the returned value.
7140  */
7141 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7142 {
7143 	return nr_cpu_ids;
7144 }
7145 
7146 /**
7147  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7148  */
7149 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7150 {
7151 	return cpu_possible_mask;
7152 }
7153 
7154 /**
7155  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7156  */
7157 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7158 {
7159 	return cpu_online_mask;
7160 }
7161 
7162 /**
7163  * scx_bpf_put_cpumask - Release a possible/online cpumask
7164  * @cpumask: cpumask to release
7165  */
7166 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7167 {
7168 	/*
7169 	 * Empty function body because we aren't actually acquiring or releasing
7170 	 * a reference to a global cpumask, which is read-only in the caller and
7171 	 * is never released. The acquire / release semantics here are just used
7172 	 * to make the cpumask a trusted pointer in the caller.
7173 	 */
7174 }
7175 
7176 /**
7177  * scx_bpf_task_running - Is task currently running?
7178  * @p: task of interest
7179  */
7180 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7181 {
7182 	return task_rq(p)->curr == p;
7183 }
7184 
7185 /**
7186  * scx_bpf_task_cpu - CPU a task is currently associated with
7187  * @p: task of interest
7188  */
7189 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7190 {
7191 	return task_cpu(p);
7192 }
7193 
7194 /**
7195  * scx_bpf_cpu_rq - Fetch the rq of a CPU
7196  * @cpu: CPU of the rq
7197  */
7198 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7199 {
7200 	if (!ops_cpu_valid(cpu, NULL))
7201 		return NULL;
7202 
7203 	return cpu_rq(cpu);
7204 }
7205 
7206 /**
7207  * scx_bpf_task_cgroup - Return the sched cgroup of a task
7208  * @p: task of interest
7209  *
7210  * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7211  * from the scheduler's POV. SCX operations should use this function to
7212  * determine @p's current cgroup as, unlike following @p->cgroups,
7213  * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7214  * rq-locked operations. Can be called on the parameter tasks of rq-locked
7215  * operations. The restriction guarantees that @p's rq is locked by the caller.
7216  */
7217 #ifdef CONFIG_CGROUP_SCHED
7218 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7219 {
7220 	struct task_group *tg = p->sched_task_group;
7221 	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7222 
7223 	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7224 		goto out;
7225 
7226 	cgrp = tg_cgrp(tg);
7227 
7228 out:
7229 	cgroup_get(cgrp);
7230 	return cgrp;
7231 }
7232 #endif
7233 
7234 /**
7235  * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7236  * clock for the current CPU. The clock returned is in nanoseconds.
7237  *
7238  * It provides the following properties:
7239  *
7240  * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7241  *  to account for execution time and track tasks' runtime properties.
7242  *  Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
7243  *  eventually reads a hardware timestamp counter -- is neither performant nor
7244  *  scalable. scx_bpf_now() aims to provide a high-performance clock by
7245  *  using the rq clock in the scheduler core whenever possible.
7246  *
7247  * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7248  *  scheduler use cases, the required clock resolution is lower than the most
7249  *  accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7250  *  uses the rq clock in the scheduler core whenever it is valid. It considers
7251  *  that the rq clock is valid from the time the rq clock is updated
7252  *  (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7253  *
7254  * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7255  *  guarantees the clock never goes backward when comparing values read on
7256  *  the same CPU. On the other hand, when comparing clocks across different
7257  *  CPUs, there is no such guarantee -- the clock can appear to go backward.
7258  *  The clock is monotonically *non-decreasing*, so two scx_bpf_now() calls
7259  *  on the same CPU during the period in which the rq clock is valid may
7260  *  return the same value.
7261  */
7262 __bpf_kfunc u64 scx_bpf_now(void)
7263 {
7264 	struct rq *rq;
7265 	u64 clock;
7266 
7267 	preempt_disable();
7268 
7269 	rq = this_rq();
7270 	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7271 		/*
7272 		 * If the rq clock is valid, use the cached rq clock.
7273 		 *
7274 		 * Note that scx_bpf_now() is re-entrant between a process
7275 		 * context and an interrupt context (e.g., timer interrupt).
7276 		 * However, we don't need to consider the race between them
7277 		 * because such race is not observable from a caller.
7278 		 */
7279 		clock = READ_ONCE(rq->scx.clock);
7280 	} else {
7281 		/*
7282 		 * Otherwise, return a fresh rq clock.
7283 		 *
7284 		 * The rq clock is updated outside of the rq lock.
7285 		 * In this case, keep the updated rq clock invalid so the next
7286 		 * kfunc call outside the rq lock gets a fresh rq clock.
7287 		 */
7288 		clock = sched_clock_cpu(cpu_of(rq));
7289 	}
7290 
7291 	preempt_enable();
7292 
7293 	return clock;
7294 }
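
/*
 * Example (illustrative sketch): using scx_bpf_now() instead of
 * bpf_ktime_get_ns() to cheaply account how long a task ran on a CPU.
 * taskc->last_ran and taskc->runtime are hypothetical per-task fields.
 *
 *	/* ops.running(): remember when the task got on the CPU */
 *	taskc->last_ran = scx_bpf_now();
 *
 *	/* ops.stopping(): charge the elapsed time */
 *	taskc->runtime += scx_bpf_now() - taskc->last_ran;
 */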
7295 
7296 /*
7297  * scx_bpf_events - Copy the aggregated system-wide event counters to @events
7298  * @events: output buffer from a BPF program
7299  * @events__sz: @events len, must end in '__sz' for the verifier
7300  */
7301 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
7302 				size_t events__sz)
7303 {
7304 	struct scx_event_stats e_sys, *e_cpu;
7305 	int cpu;
7306 
7307 	/* Aggregate per-CPU event counters into the system-wide counters. */
7308 	memset(&e_sys, 0, sizeof(e_sys));
7309 	for_each_possible_cpu(cpu) {
7310 		e_cpu = per_cpu_ptr(&event_stats_cpu, cpu);
7311 		scx_agg_event(&e_sys, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
7312 		scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
7313 		scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
7314 		scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
7315 		scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
7316 		scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SLICE_DFL);
7317 		scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DURATION);
7318 		scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DISPATCH);
7319 		scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_ACTIVATE);
7320 	}
7321 
7322 	/*
7323 	 * We cannot entirely trust a BPF-provided size since a BPF program
7324 	 * might be compiled against a different vmlinux.h, of which
7325 	 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
7326 	 * (an older vmlinux.h). Hence, we use the smaller size to avoid
7327 	 * memory corruption.
7328 	 */
7329 	events__sz = min(events__sz, sizeof(*events));
7330 	memcpy(events, &e_sys, events__sz);
7331 }
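
/*
 * Example (illustrative sketch): a BPF_PROG_TYPE_SYSCALL program through which
 * userspace can sample the aggregated counters. Field names mirror the event
 * enum as laid out in struct scx_event_stats.
 *
 *	SEC("syscall")
 *	int dump_events(void *ctx)
 *	{
 *		struct scx_event_stats events;
 *
 *		scx_bpf_events(&events, sizeof(events));
 *		bpf_printk("select_cpu_fallback=%llu",
 *			   events.SCX_EV_SELECT_CPU_FALLBACK);
 *		return 0;
 *	}
 */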
7332 
7333 __bpf_kfunc_end_defs();
7334 
7335 BTF_KFUNCS_START(scx_kfunc_ids_any)
7336 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7337 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7338 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7339 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7340 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7341 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7342 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7343 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7344 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7345 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7346 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7347 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7348 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
7349 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7350 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7351 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7352 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7353 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7354 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7355 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7356 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7357 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7358 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7359 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7360 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7361 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7362 #ifdef CONFIG_CGROUP_SCHED
7363 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7364 #endif
7365 BTF_ID_FLAGS(func, scx_bpf_now)
7366 BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS)
7367 BTF_KFUNCS_END(scx_kfunc_ids_any)
7368 
7369 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7370 	.owner			= THIS_MODULE,
7371 	.set			= &scx_kfunc_ids_any,
7372 };
7373 
7374 static int __init scx_init(void)
7375 {
7376 	int ret;
7377 
7378 	/*
7379 	 * kfunc registration can't be done from init_sched_ext_class() as
7380 	 * register_btf_kfunc_id_set() needs most of the system to be up.
7381 	 *
7382 	 * Some kfuncs are context-sensitive and can only be called from
7383 	 * specific SCX ops. They are grouped into BTF sets accordingly.
7384 	 * Unfortunately, BPF currently doesn't have a way of enforcing such
7385 	 * restrictions. Eventually, the verifier should be able to enforce
7386 	 * them. For now, register them the same and make each kfunc explicitly
7387 	 * check using scx_kf_allowed().
7388 	 */
7389 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7390 					     &scx_kfunc_set_enqueue_dispatch)) ||
7391 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7392 					     &scx_kfunc_set_dispatch)) ||
7393 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7394 					     &scx_kfunc_set_cpu_release)) ||
7395 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7396 					     &scx_kfunc_set_unlocked)) ||
7397 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7398 					     &scx_kfunc_set_unlocked)) ||
7399 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7400 					     &scx_kfunc_set_any)) ||
7401 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7402 					     &scx_kfunc_set_any)) ||
7403 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7404 					     &scx_kfunc_set_any))) {
7405 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7406 		return ret;
7407 	}
7408 
7409 	ret = scx_idle_init();
7410 	if (ret) {
7411 		pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
7412 		return ret;
7413 	}
7414 
7415 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7416 	if (ret) {
7417 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7418 		return ret;
7419 	}
7420 
7421 	ret = register_pm_notifier(&scx_pm_notifier);
7422 	if (ret) {
7423 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7424 		return ret;
7425 	}
7426 
7427 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7428 	if (!scx_kset) {
7429 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7430 		return -ENOMEM;
7431 	}
7432 
7433 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7434 	if (ret < 0) {
7435 		pr_err("sched_ext: Failed to add global attributes\n");
7436 		return ret;
7437 	}
7438 
7439 	return 0;
7440 }
7441 __initcall(scx_init);
7442