xref: /linux/kernel/sched/ext.c (revision 6203ef73fa5c0358f7960b038628259be1448724)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
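
/*
 * As a worked example (based on the struct sched_ext_ops layout below):
 * SCX_OP_IDX(select_cpu) evaluates to 0 and SCX_OP_IDX(enqueue) to 1, i.e. the
 * index of the callback within the ops table. See SCX_HAS_OP() and scx_has_op[]
 * for how these indices are used.
 */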
10 
11 enum scx_consts {
12 	SCX_DSP_DFL_MAX_BATCH		= 32,
13 	SCX_DSP_MAX_LOOPS		= 32,
14 	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,
15 
16 	SCX_EXIT_BT_LEN			= 64,
17 	SCX_EXIT_MSG_LEN		= 1024,
18 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
19 
20 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
21 };
22 
23 enum scx_exit_kind {
24 	SCX_EXIT_NONE,
25 	SCX_EXIT_DONE,
26 
27 	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
28 	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
29 	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
30 	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
31 
32 	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
33 	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
34 	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
35 };
36 
37 /*
38  * An exit code can be specified when exiting with scx_bpf_exit() or
39  * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
40  * respectively. The codes are 64-bit values of the following format:
41  *
42  *   Bits: [63  ..  48 47   ..  32 31 .. 0]
43  *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
44  *
45  *   SYS ACT: System-defined exit actions
46  *   SYS RSN: System-defined exit reasons
47  *   USR    : User-defined exit codes and reasons
48  *
49  * Using the above, users may communicate intention and context by ORing system
50  * actions and/or system reasons with a user-defined exit code.
51  */
52 enum scx_exit_code {
53 	/* Reasons */
54 	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,
55 
56 	/* Actions */
57 	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
58 };
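
/*
 * For example, a BPF scheduler that wants to be restarted after a hotplug
 * event could exit with (a hedged BPF-side sketch, assuming the scx_bpf_exit()
 * helper macro from the example schedulers which takes the exit code followed
 * by a format string):
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 1,
 *		     "CPU hotplug event detected, restarting");
 *
 * which combines a system action, a system reason and a user-defined code of 1.
 */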
59 
60 /*
61  * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
62  * being disabled.
63  */
64 struct scx_exit_info {
65 	/* %SCX_EXIT_* - broad category of the exit reason */
66 	enum scx_exit_kind	kind;
67 
68 	/* exit code if gracefully exiting */
69 	s64			exit_code;
70 
71 	/* textual representation of the above */
72 	const char		*reason;
73 
74 	/* backtrace if exiting due to an error */
75 	unsigned long		*bt;
76 	u32			bt_len;
77 
78 	/* informational message */
79 	char			*msg;
80 
81 	/* debug dump */
82 	char			*dump;
83 };
84 
85 /* sched_ext_ops.flags */
86 enum scx_ops_flags {
87 	/*
88 	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
89 	 */
90 	SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
91 
92 	/*
93 	 * By default, if there are no other tasks to run on the CPU, the ext core
94 	 * keeps running the current task even after its slice expires. If this
95 	 * flag is specified, such tasks are passed to ops.enqueue() with
96 	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
97 	 */
98 	SCX_OPS_ENQ_LAST	= 1LLU << 1,
99 
100 	/*
101 	 * An exiting task may schedule after PF_EXITING is set. In such cases,
102 	 * bpf_task_from_pid() may not be able to find the task and if the BPF
103 	 * scheduler depends on pid lookup for dispatching, the task will be
104 	 * lost leading to various issues including RCU grace period stalls.
105 	 *
106 	 * To mask this problem, by default, unhashed tasks are automatically
107 	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
108 	 * depend on pid lookups and wants to handle these tasks directly, the
109 	 * following flag can be used.
110 	 */
111 	SCX_OPS_ENQ_EXITING	= 1LLU << 2,
112 
113 	/*
114 	 * If set, only tasks with policy set to SCHED_EXT are attached to
115 	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
116 	 */
117 	SCX_OPS_SWITCH_PARTIAL	= 1LLU << 3,
118 
119 	SCX_OPS_ALL_FLAGS	= SCX_OPS_KEEP_BUILTIN_IDLE |
120 				  SCX_OPS_ENQ_LAST |
121 				  SCX_OPS_ENQ_EXITING |
122 				  SCX_OPS_SWITCH_PARTIAL,
123 };
124 
125 /* argument container for ops.init_task() */
126 struct scx_init_task_args {
127 	/*
128 	 * Set if ops.init_task() is being invoked on the fork path, as opposed
129 	 * to the scheduler transition path.
130 	 */
131 	bool			fork;
132 };
133 
134 /* argument container for ops.exit_task() */
135 struct scx_exit_task_args {
136 	/* Whether the task exited before running on sched_ext. */
137 	bool cancelled;
138 };
139 
140 enum scx_cpu_preempt_reason {
141 	/* next task is being scheduled by &sched_class_rt */
142 	SCX_CPU_PREEMPT_RT,
143 	/* next task is being scheduled by &sched_class_dl */
144 	SCX_CPU_PREEMPT_DL,
145 	/* next task is being scheduled by &sched_class_stop */
146 	SCX_CPU_PREEMPT_STOP,
147 	/* unknown reason for SCX being preempted */
148 	SCX_CPU_PREEMPT_UNKNOWN,
149 };
150 
151 /*
152  * Argument container for ops->cpu_acquire(). Currently empty, but may be
153  * expanded in the future.
154  */
155 struct scx_cpu_acquire_args {};
156 
157 /* argument container for ops->cpu_release() */
158 struct scx_cpu_release_args {
159 	/* the reason the CPU was preempted */
160 	enum scx_cpu_preempt_reason reason;
161 
162 	/* the task that's going to be scheduled on the CPU */
163 	struct task_struct	*task;
164 };
165 
166 /*
167  * Informational context provided to dump operations.
168  */
169 struct scx_dump_ctx {
170 	enum scx_exit_kind	kind;
171 	s64			exit_code;
172 	const char		*reason;
173 	u64			at_ns;
174 	u64			at_jiffies;
175 };
176 
177 /**
178  * struct sched_ext_ops - Operation table for BPF scheduler implementation
179  *
180  * Userland can implement an arbitrary scheduling policy by implementing and
181  * loading operations in this table.
182  */
183 struct sched_ext_ops {
184 	/**
185 	 * select_cpu - Pick the target CPU for a task which is being woken up
186 	 * @p: task being woken up
187 	 * @prev_cpu: the cpu @p was on before sleeping
188 	 * @wake_flags: SCX_WAKE_*
189 	 *
190 	 * The decision made here isn't final. @p may be moved to any CPU while it
191 	 * is getting dispatched for execution later. However, as @p is not on
192 	 * the rq at this point, getting the eventual execution CPU right here
193 	 * saves a small bit of overhead down the line.
194 	 *
195 	 * If an idle CPU is returned, the CPU is kicked and will try to
196 	 * dispatch. While an explicit custom mechanism can be added,
197 	 * select_cpu() serves as the default way to wake up idle CPUs.
198 	 *
199 	 * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
200 	 * is dispatched, the ops.enqueue() callback will be skipped. Finally,
201 	 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
202 	 * local DSQ of whatever CPU is returned by this callback.
203 	 */
204 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
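
	/*
	 * A minimal BPF-side sketch (not part of this file; the kfunc
	 * signatures are assumed to match the in-tree example schedulers):
	 * use the built-in idle CPU selection and direct-dispatch when an
	 * idle CPU is found so that ops.enqueue() is skipped.
	 *
	 *	s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *					     &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL,
	 *					 SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */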
205 
206 	/**
207 	 * enqueue - Enqueue a task on the BPF scheduler
208 	 * @p: task being enqueued
209 	 * @enq_flags: %SCX_ENQ_*
210 	 *
211 	 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
212 	 * or enqueue on the BPF scheduler. If not directly dispatched, the BPF
213 	 * scheduler owns @p, and if it fails to dispatch @p, the task will
214 	 * stall.
215 	 *
216 	 * If @p was dispatched from ops.select_cpu(), this callback is
217 	 * skipped.
218 	 */
219 	void (*enqueue)(struct task_struct *p, u64 enq_flags);
220 
221 	/**
222 	 * dequeue - Remove a task from the BPF scheduler
223 	 * @p: task being dequeued
224 	 * @deq_flags: %SCX_DEQ_*
225 	 *
226 	 * Remove @p from the BPF scheduler. This is usually called to isolate
227 	 * the task while updating its scheduling properties (e.g. priority).
228 	 *
229 	 * The ext core keeps track of whether the BPF side owns a given task or
230 	 * not and can gracefully ignore spurious dispatches from BPF side,
231 	 * which makes it safe to not implement this method. However, depending
232 	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
233 	 * scheduling position not being updated across a priority change.
234 	 */
235 	void (*dequeue)(struct task_struct *p, u64 deq_flags);
236 
237 	/**
238 	 * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs
239 	 * @cpu: CPU to dispatch tasks for
240 	 * @prev: previous task being switched out
241 	 *
242 	 * Called when a CPU's local dsq is empty. The operation should dispatch
243 	 * one or more tasks from the BPF scheduler into the DSQs using
244 	 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
245 	 * scx_bpf_consume().
246 	 *
247 	 * The maximum number of times scx_bpf_dispatch() can be called without
248 	 * an intervening scx_bpf_consume() is specified by
249 	 * ops.dispatch_max_batch. See the comments on top of the two functions
250 	 * for more details.
251 	 *
252 	 * When not %NULL, @prev is an SCX task with its slice depleted. If
253 	 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
254 	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
255 	 * ops.dispatch() returns. To keep executing @prev, return without
256 	 * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST.
257 	 */
258 	void (*dispatch)(s32 cpu, struct task_struct *prev);
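
	/*
	 * A minimal BPF-side sketch (not part of this file): SKETCH_DSQ is a
	 * hypothetical user DSQ assumed to have been created with
	 * scx_bpf_create_dsq() in ops.init(). ops.enqueue() queues everything
	 * on it and ops.dispatch() refills the local DSQ from it.
	 *
	 *	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p,
	 *			    u64 enq_flags)
	 *	{
	 *		scx_bpf_dispatch(p, SKETCH_DSQ, SCX_SLICE_DFL, enq_flags);
	 *	}
	 *
	 *	void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		scx_bpf_consume(SKETCH_DSQ);
	 *	}
	 */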
259 
260 	/**
261 	 * tick - Periodic tick
262 	 * @p: task running currently
263 	 *
264 	 * This operation is called every 1/HZ seconds on CPUs which are
265 	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
266 	 * immediate dispatch cycle on the CPU.
267 	 */
268 	void (*tick)(struct task_struct *p);
269 
270 	/**
271 	 * runnable - A task is becoming runnable on its associated CPU
272 	 * @p: task becoming runnable
273 	 * @enq_flags: %SCX_ENQ_*
274 	 *
275 	 * This and the following three functions can be used to track a task's
276 	 * execution state transitions. A task becomes ->runnable() on a CPU,
277 	 * and then goes through one or more ->running() and ->stopping() pairs
278 	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
279 	 * done running on the CPU.
280 	 *
281 	 * @p is becoming runnable on the CPU because it's
282 	 *
283 	 * - waking up (%SCX_ENQ_WAKEUP)
284 	 * - being moved from another CPU
285 	 * - being restored after having been temporarily taken off the queue for an
286 	 *   attribute change.
287 	 *
288 	 * This and ->enqueue() are related but not coupled. This operation
289 	 * notifies @p's state transition and may not be followed by ->enqueue()
290 	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
291 	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
292 	 * task may be ->enqueue()'d without being preceded by this operation
293 	 * e.g. after exhausting its slice.
294 	 */
295 	void (*runnable)(struct task_struct *p, u64 enq_flags);
296 
297 	/**
298 	 * running - A task is starting to run on its associated CPU
299 	 * @p: task starting to run
300 	 *
301 	 * See ->runnable() for explanation on the task state notifiers.
302 	 */
303 	void (*running)(struct task_struct *p);
304 
305 	/**
306 	 * stopping - A task is stopping execution
307 	 * @p: task stopping execution
308 	 * @runnable: is task @p still runnable?
309 	 *
310 	 * See ->runnable() for explanation on the task state notifiers. If
311 	 * !@runnable, ->quiescent() will be invoked after this operation
312 	 * returns.
313 	 */
314 	void (*stopping)(struct task_struct *p, bool runnable);
315 
316 	/**
317 	 * quiescent - A task is becoming not runnable on its associated CPU
318 	 * @p: task becoming not runnable
319 	 * @deq_flags: %SCX_DEQ_*
320 	 *
321 	 * See ->runnable() for explanation on the task state notifiers.
322 	 *
323 	 * @p is becoming quiescent on the CPU because it's
324 	 *
325 	 * - sleeping (%SCX_DEQ_SLEEP)
326 	 * - being moved to another CPU
327 	 * - being temporarily taken off the queue for an attribute change
328 	 *   (%SCX_DEQ_SAVE)
329 	 *
330 	 * This and ->dequeue() are related but not coupled. This operation
331 	 * notifies @p's state transition and may not be preceded by ->dequeue()
332 	 * e.g. when @p is being dispatched to a remote CPU.
333 	 */
334 	void (*quiescent)(struct task_struct *p, u64 deq_flags);
335 
336 	/**
337 	 * yield - Yield CPU
338 	 * @from: yielding task
339 	 * @to: optional yield target task
340 	 *
341 	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
342 	 * The BPF scheduler should ensure that other available tasks are
343 	 * dispatched before the yielding task. Return value is ignored in this
344 	 * case.
345 	 *
346 	 * If @to is non-NULL, @from wants to yield the CPU to @to. If the BPF
347 	 * scheduler can implement the request, return %true; otherwise, %false.
348 	 */
349 	bool (*yield)(struct task_struct *from, struct task_struct *to);
350 
351 	/**
352 	 * core_sched_before - Task ordering for core-sched
353 	 * @a: task A
354 	 * @b: task B
355 	 *
356 	 * Used by core-sched to determine the ordering between two tasks. See
357 	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
358 	 * core-sched.
359 	 *
360 	 * Both @a and @b are runnable and may or may not currently be queued on
361 	 * the BPF scheduler. Should return %true if @a should run before @b and
362 	 * %false if there's no required ordering or @b should run before @a.
363 	 *
364 	 * If not specified, the default is ordering them according to when they
365 	 * became runnable.
366 	 */
367 	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
368 
369 	/**
370 	 * set_weight - Set task weight
371 	 * @p: task to set weight for
372 	 * @weight: new weight [1..10000]
373 	 *
374 	 * Update @p's weight to @weight.
375 	 */
376 	void (*set_weight)(struct task_struct *p, u32 weight);
377 
378 	/**
379 	 * set_cpumask - Set CPU affinity
380 	 * @p: task to set CPU affinity for
381 	 * @cpumask: cpumask of cpus that @p can run on
382 	 *
383 	 * Update @p's CPU affinity to @cpumask.
384 	 */
385 	void (*set_cpumask)(struct task_struct *p,
386 			    const struct cpumask *cpumask);
387 
388 	/**
389 	 * update_idle - Update the idle state of a CPU
390 	 * @cpu: CPU to update the idle state for
391 	 * @idle: whether entering or exiting the idle state
392 	 *
393 	 * This operation is called when @cpu enters or leaves the idle
394 	 * state. By default, implementing this operation disables the built-in
395 	 * idle CPU tracking and the following helpers become unavailable:
396 	 *
397 	 * - scx_bpf_select_cpu_dfl()
398 	 * - scx_bpf_test_and_clear_cpu_idle()
399 	 * - scx_bpf_pick_idle_cpu()
400 	 *
401 	 * The user must also implement ops.select_cpu() as the default
402 	 * implementation relies on scx_bpf_select_cpu_dfl().
403 	 *
404 	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
405 	 * tracking.
406 	 */
407 	void (*update_idle)(s32 cpu, bool idle);
408 
409 	/**
410 	 * cpu_acquire - A CPU is becoming available to the BPF scheduler
411 	 * @cpu: The CPU being acquired by the BPF scheduler.
412 	 * @args: Acquire arguments, see the struct definition.
413 	 *
414 	 * A CPU that was previously released from the BPF scheduler is now once
415 	 * again under its control.
416 	 */
417 	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
418 
419 	/**
420 	 * cpu_release - A CPU is taken away from the BPF scheduler
421 	 * @cpu: The CPU being released by the BPF scheduler.
422 	 * @args: Release arguments, see the struct definition.
423 	 *
424 	 * The specified CPU is no longer under the control of the BPF
425 	 * scheduler. This could be because it was preempted by a higher
426 	 * priority sched_class, though there may be other reasons as well. The
427 	 * caller should consult @args->reason to determine the cause.
428 	 */
429 	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
430 
431 	/**
432 	 * init_task - Initialize a task to run in a BPF scheduler
433 	 * @p: task to initialize for BPF scheduling
434 	 * @args: init arguments, see the struct definition
435 	 *
436 	 * Either we're loading a BPF scheduler or a new task is being forked.
437 	 * Initialize @p for BPF scheduling. This operation may block and can
438 	 * be used for allocations, and is called exactly once for a task.
439 	 *
440 	 * Return 0 for success, -errno for failure. An error return while
441 	 * loading will abort loading of the BPF scheduler. During a fork, it
442 	 * will abort that specific fork.
443 	 */
444 	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
445 
446 	/**
447 	 * exit_task - Exit a previously-running task from the system
448 	 * @p: task to exit
449 	 *
450 	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
451 	 * necessary cleanup for @p.
452 	 */
453 	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
454 
455 	/**
456 	 * enable - Enable BPF scheduling for a task
457 	 * @p: task to enable BPF scheduling for
458 	 *
459 	 * Enable @p for BPF scheduling. enable() is called on @p any time it
460 	 * enters SCX, and is always paired with a matching disable().
461 	 */
462 	void (*enable)(struct task_struct *p);
463 
464 	/**
465 	 * disable - Disable BPF scheduling for a task
466 	 * @p: task to disable BPF scheduling for
467 	 *
468 	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
469 	 * Disable BPF scheduling for @p. A disable() call is always matched
470 	 * with a prior enable() call.
471 	 */
472 	void (*disable)(struct task_struct *p);
473 
474 	/**
475 	 * dump - Dump BPF scheduler state on error
476 	 * @ctx: debug dump context
477 	 *
478 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
479 	 */
480 	void (*dump)(struct scx_dump_ctx *ctx);
481 
482 	/**
483 	 * dump_cpu - Dump BPF scheduler state for a CPU on error
484 	 * @ctx: debug dump context
485 	 * @cpu: CPU to generate debug dump for
486 	 * @idle: @cpu is currently idle without any runnable tasks
487 	 *
488 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
489 	 * @cpu. If @idle is %true and this operation doesn't produce any
490 	 * output, @cpu is skipped for dump.
491 	 */
492 	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
493 
494 	/**
495 	 * dump_task - Dump BPF scheduler state for a runnable task on error
496 	 * @ctx: debug dump context
497 	 * @p: runnable task to generate debug dump for
498 	 *
499 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
500 	 * @p.
501 	 */
502 	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
503 
504 	/*
505 	 * All online ops must come before ops.cpu_online().
506 	 */
507 
508 	/**
509 	 * cpu_online - A CPU became online
510 	 * @cpu: CPU which just came up
511 	 *
512 	 * @cpu just came online. @cpu will not call ops.enqueue() or
513 	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
514 	 */
515 	void (*cpu_online)(s32 cpu);
516 
517 	/**
518 	 * cpu_offline - A CPU is going offline
519 	 * @cpu: CPU which is going offline
520 	 *
521 	 * @cpu is going offline. @cpu will not call ops.enqueue() or
522 	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
523 	 */
524 	void (*cpu_offline)(s32 cpu);
525 
526 	/*
527 	 * All CPU hotplug ops must come before ops.init().
528 	 */
529 
530 	/**
531 	 * init - Initialize the BPF scheduler
532 	 */
533 	s32 (*init)(void);
534 
535 	/**
536 	 * exit - Clean up after the BPF scheduler
537 	 * @info: Exit info
538 	 */
539 	void (*exit)(struct scx_exit_info *info);
540 
541 	/**
542 	 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
543 	 */
544 	u32 dispatch_max_batch;
545 
546 	/**
547 	 * flags - %SCX_OPS_* flags
548 	 */
549 	u64 flags;
550 
551 	/**
552 	 * timeout_ms - The maximum amount of time, in milliseconds, that a
553 	 * runnable task should be able to wait before being scheduled. The
554 	 * maximum timeout may not exceed the default timeout of 30 seconds.
555 	 *
556 	 * Defaults to the maximum allowed timeout value of 30 seconds.
557 	 */
558 	u32 timeout_ms;
559 
560 	/**
561 	 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
562 	 * value of 32768 is used.
563 	 */
564 	u32 exit_dump_len;
565 
566 	/**
567 	 * hotplug_seq - A sequence number that may be set by the scheduler to
568 	 * detect when a hotplug event has occurred during the loading process.
569 	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
570 	 * load if the sequence number does not match @scx_hotplug_seq on the
571 	 * enable path.
572 	 */
573 	u64 hotplug_seq;
574 
575 	/**
576 	 * name - BPF scheduler's name
577 	 *
578 	 * Must be a non-zero valid BPF object name including only isalnum(),
579 	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
580 	 * BPF scheduler is enabled.
581 	 */
582 	char name[SCX_OPS_NAME_LEN];
583 };
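
/*
 * A hedged BPF-side sketch (not part of this file) of how such an ops table is
 * typically declared, assuming libbpf's SEC(".struct_ops.link") convention used
 * by the example schedulers; the callback names and values other than the ops
 * members themselves are hypothetical:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops sketch_ops = {
 *		.select_cpu	= (void *)sketch_select_cpu,
 *		.enqueue	= (void *)sketch_enqueue,
 *		.dispatch	= (void *)sketch_dispatch,
 *		.flags		= SCX_OPS_ENQ_LAST,
 *		.timeout_ms	= 5000,
 *		.name		= "sketch",
 *	};
 */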
584 
585 enum scx_opi {
586 	SCX_OPI_BEGIN			= 0,
587 	SCX_OPI_NORMAL_BEGIN		= 0,
588 	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
589 	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
590 	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
591 	SCX_OPI_END			= SCX_OP_IDX(init),
592 };
593 
594 enum scx_wake_flags {
595 	/* expose select WF_* flags as enums */
596 	SCX_WAKE_FORK		= WF_FORK,
597 	SCX_WAKE_TTWU		= WF_TTWU,
598 	SCX_WAKE_SYNC		= WF_SYNC,
599 };
600 
601 enum scx_enq_flags {
602 	/* expose select ENQUEUE_* flags as enums */
603 	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
604 	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
605 
606 	/* high 32bits are SCX specific */
607 
608 	/*
609 	 * Set the following to trigger preemption when calling
610 	 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
611 	 * current task is cleared to zero and the CPU is kicked into the
612 	 * scheduling path. Implies %SCX_ENQ_HEAD.
613 	 */
614 	SCX_ENQ_PREEMPT		= 1LLU << 32,
615 
616 	/*
617 	 * The task being enqueued was previously enqueued on the current CPU's
618 	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
619 	 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
620 	 * invoked in a ->cpu_release() callback, and the task is again
621 	 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
622 	 * task will not be scheduled on the CPU until at least the next invocation
623 	 * of the ->cpu_acquire() callback.
624 	 */
625 	SCX_ENQ_REENQ		= 1LLU << 40,
626 
627 	/*
628 	 * The task being enqueued is the only task available for the CPU. By
629 	 * default, the ext core keeps executing such tasks but when
630 	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
631 	 * %SCX_ENQ_LAST flag set.
632 	 *
633 	 * If the BPF scheduler wants to continue executing the task,
634 	 * ops.enqueue() should dispatch the task to %SCX_DSQ_LOCAL immediately.
635 	 * If the task gets queued on a different dsq or the BPF side, the BPF
636 	 * scheduler is responsible for triggering a follow-up scheduling event.
637 	 * Otherwise, execution may stall.
638 	 */
639 	SCX_ENQ_LAST		= 1LLU << 41,
640 
641 	/* high 8 bits are internal */
642 	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,
643 
644 	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
645 	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
646 };
647 
648 enum scx_deq_flags {
649 	/* expose select DEQUEUE_* flags as enums */
650 	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,
651 
652 	/* high 32bits are SCX specific */
653 
654 	/*
655 	 * The generic core-sched layer decided to execute the task even though
656 	 * it hasn't been dispatched yet. Dequeue from the BPF side.
657 	 */
658 	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
659 };
660 
661 enum scx_pick_idle_cpu_flags {
662 	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
663 };
664 
665 enum scx_kick_flags {
666 	/*
667 	 * Kick the target CPU if idle. Guarantees that the target CPU goes
668 	 * through at least one full scheduling cycle before going idle. If the
669 	 * target CPU can be determined to be currently not idle and about to go
670 	 * through a scheduling cycle before going idle, this is a noop.
671 	 */
672 	SCX_KICK_IDLE		= 1LLU << 0,
673 
674 	/*
675 	 * Preempt the current task and execute the dispatch path. If the
676 	 * current task of the target CPU is an SCX task, its ->scx.slice is
677 	 * cleared to zero before the scheduling path is invoked so that the
678 	 * task expires and the dispatch path is invoked.
679 	 */
680 	SCX_KICK_PREEMPT	= 1LLU << 1,
681 
682 	/*
683 	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
684 	 * return after the target CPU finishes picking the next task.
685 	 */
686 	SCX_KICK_WAIT		= 1LLU << 2,
687 };
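
/*
 * For example (a BPF-side usage sketch), after queueing work for a CPU it
 * believes to be idle, a BPF scheduler can nudge it with
 * scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE), while
 * scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT) forces the target CPU through the
 * dispatch path even if it's currently running an SCX task. See the
 * scx_bpf_kick_cpu() declaration below for the (cpu, flags) signature.
 */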
688 
689 enum scx_ops_enable_state {
690 	SCX_OPS_PREPPING,
691 	SCX_OPS_ENABLING,
692 	SCX_OPS_ENABLED,
693 	SCX_OPS_DISABLING,
694 	SCX_OPS_DISABLED,
695 };
696 
697 static const char *scx_ops_enable_state_str[] = {
698 	[SCX_OPS_PREPPING]	= "prepping",
699 	[SCX_OPS_ENABLING]	= "enabling",
700 	[SCX_OPS_ENABLED]	= "enabled",
701 	[SCX_OPS_DISABLING]	= "disabling",
702 	[SCX_OPS_DISABLED]	= "disabled",
703 };
704 
705 /*
706  * sched_ext_entity->ops_state
707  *
708  * Used to track the task ownership between the SCX core and the BPF scheduler.
709  * State transitions look as follows:
710  *
711  * NONE -> QUEUEING -> QUEUED -> DISPATCHING
712  *   ^              |                 |
713  *   |              v                 v
714  *   \-------------------------------/
715  *
716  * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
717  * sites for explanations on the conditions being waited upon and why they are
718  * safe. Transitions out of them into NONE or QUEUED must store_release and the
719  * waiters should load_acquire.
720  *
721  * Tracking scx_ops_state enables sched_ext core to reliably determine whether
722  * any given task can be dispatched by the BPF scheduler at all times and thus
723  * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
724  * to try to dispatch any task anytime regardless of its state as the SCX core
725  * can safely reject invalid dispatches.
726  */
727 enum scx_ops_state {
728 	SCX_OPSS_NONE,		/* owned by the SCX core */
729 	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
730 	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
731 	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */
732 
733 	/*
734 	 * QSEQ brands each QUEUED instance so that, when dispatch races
735 	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
736 	 * on the task being dispatched.
737 	 *
738 	 * As some 32bit archs can't do 64bit store_release/load_acquire,
739 	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
740 	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
741 	 * and runs with IRQ disabled. 30 bits should be sufficient.
742 	 */
743 	SCX_OPSS_QSEQ_SHIFT	= 2,
744 };
745 
746 /* Use macros to ensure that the type is unsigned long for the masks */
747 #define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
748 #define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
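
/*
 * For example, given an ops_state value read from p->scx.ops_state, the state
 * and queue-sequence components can be separated with the masks above:
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	unsigned long state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 *
 * matching how do_enqueue_task() composes SCX_OPSS_QUEUEING | qseq below.
 */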
749 
750 /*
751  * During exit, a task may schedule after losing its PIDs. When disabling the
752  * BPF scheduler, we need to be able to iterate tasks in every state to
753  * guarantee system safety. Maintain a dedicated task list which contains every
754  * task between its fork and eventual free.
755  */
756 static DEFINE_SPINLOCK(scx_tasks_lock);
757 static LIST_HEAD(scx_tasks);
758 
759 /* ops enable/disable */
760 static struct kthread_worker *scx_ops_helper;
761 static DEFINE_MUTEX(scx_ops_enable_mutex);
762 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
763 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
764 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
765 static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
766 static bool scx_switching_all;
767 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
768 
769 static struct sched_ext_ops scx_ops;
770 static bool scx_warned_zero_slice;
771 
772 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
773 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
774 DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
775 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
776 
777 struct static_key_false scx_has_op[SCX_OPI_END] =
778 	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
779 
780 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
781 static struct scx_exit_info *scx_exit_info;
782 
783 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
784 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
785 
786 /*
787  * The maximum amount of time in jiffies that a task may be runnable without
788  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
789  * scx_ops_error().
790  */
791 static unsigned long scx_watchdog_timeout;
792 
793 /*
794  * The last time the delayed work was run. This delayed work relies on
795  * ksoftirqd being able to run to service timer interrupts, so it's possible
796  * that this work itself could get wedged. To account for this, we check that
797  * it's not stalled in the timer tick, and trigger an error if it is.
798  */
799 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
800 
801 static struct delayed_work scx_watchdog_work;
802 
803 /* idle tracking */
804 #ifdef CONFIG_SMP
805 #ifdef CONFIG_CPUMASK_OFFSTACK
806 #define CL_ALIGNED_IF_ONSTACK
807 #else
808 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
809 #endif
810 
811 static struct {
812 	cpumask_var_t cpu;
813 	cpumask_var_t smt;
814 } idle_masks CL_ALIGNED_IF_ONSTACK;
815 
816 #endif	/* CONFIG_SMP */
817 
818 /* for %SCX_KICK_WAIT */
819 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
820 
821 /*
822  * Direct dispatch marker.
823  *
824  * Non-NULL values are used for direct dispatch from the enqueue path. A valid
825  * pointer points to the task currently being enqueued. An ERR_PTR value is used
826  * to indicate that direct dispatch has already happened.
827  */
828 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
829 
830 /* dispatch queues */
831 static struct scx_dispatch_q __cacheline_aligned_in_smp scx_dsq_global;
832 
833 static const struct rhashtable_params dsq_hash_params = {
834 	.key_len		= 8,
835 	.key_offset		= offsetof(struct scx_dispatch_q, id),
836 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
837 };
838 
839 static struct rhashtable dsq_hash;
840 static LLIST_HEAD(dsqs_to_free);
841 
842 /* dispatch buf */
843 struct scx_dsp_buf_ent {
844 	struct task_struct	*task;
845 	unsigned long		qseq;
846 	u64			dsq_id;
847 	u64			enq_flags;
848 };
849 
850 static u32 scx_dsp_max_batch;
851 
852 struct scx_dsp_ctx {
853 	struct rq		*rq;
854 	struct rq_flags		*rf;
855 	u32			cursor;
856 	u32			nr_tasks;
857 	struct scx_dsp_buf_ent	buf[];
858 };
859 
860 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
861 
862 /* string formatting from BPF */
863 struct scx_bstr_buf {
864 	u64			data[MAX_BPRINTF_VARARGS];
865 	char			line[SCX_EXIT_MSG_LEN];
866 };
867 
868 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
869 static struct scx_bstr_buf scx_exit_bstr_buf;
870 
871 /* ops debug dump */
872 struct scx_dump_data {
873 	s32			cpu;
874 	bool			first;
875 	s32			cursor;
876 	struct seq_buf		*s;
877 	const char		*prefix;
878 	struct scx_bstr_buf	buf;
879 };
880 
881 struct scx_dump_data scx_dump_data = {
882 	.cpu			= -1,
883 };
884 
885 /* /sys/kernel/sched_ext interface */
886 static struct kset *scx_kset;
887 static struct kobject *scx_root_kobj;
888 
889 #define CREATE_TRACE_POINTS
890 #include <trace/events/sched_ext.h>
891 
892 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
893 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
894 					     s64 exit_code,
895 					     const char *fmt, ...);
896 
897 #define scx_ops_error_kind(err, fmt, args...)					\
898 	scx_ops_exit_kind((err), 0, fmt, ##args)
899 
900 #define scx_ops_exit(code, fmt, args...)					\
901 	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
902 
903 #define scx_ops_error(fmt, args...)						\
904 	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
905 
906 #define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
907 
908 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
909 {
910 	if (time_after(at, now))
911 		return jiffies_to_msecs(at - now);
912 	else
913 		return -(long)jiffies_to_msecs(now - at);
914 }
915 
916 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
917 static u32 higher_bits(u32 flags)
918 {
919 	return ~((1 << fls(flags)) - 1);
920 }
921 
922 /* return the mask with only the highest bit set */
923 static u32 highest_bit(u32 flags)
924 {
925 	int bit = fls(flags);
926 	return ((u64)1 << bit) >> 1;
927 }
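
/*
 * For example, higher_bits(0x30) is 0xffffffc0, highest_bit(0x30) is 0x20 and
 * highest_bit(0) is 0.
 */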
928 
929 /*
930  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
931  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
932  * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
933  * whether it's running from an allowed context.
934  *
935  * @mask is constant, always inline to cull the mask calculations.
936  */
937 static __always_inline void scx_kf_allow(u32 mask)
938 {
939 	/* nesting is allowed only in increasing scx_kf_mask order */
940 	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
941 		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
942 		  current->scx.kf_mask, mask);
943 	current->scx.kf_mask |= mask;
944 	barrier();
945 }
946 
947 static void scx_kf_disallow(u32 mask)
948 {
949 	barrier();
950 	current->scx.kf_mask &= ~mask;
951 }
952 
953 #define SCX_CALL_OP(mask, op, args...)						\
954 do {										\
955 	if (mask) {								\
956 		scx_kf_allow(mask);						\
957 		scx_ops.op(args);						\
958 		scx_kf_disallow(mask);						\
959 	} else {								\
960 		scx_ops.op(args);						\
961 	}									\
962 } while (0)
963 
964 #define SCX_CALL_OP_RET(mask, op, args...)					\
965 ({										\
966 	__typeof__(scx_ops.op(args)) __ret;					\
967 	if (mask) {								\
968 		scx_kf_allow(mask);						\
969 		__ret = scx_ops.op(args);					\
970 		scx_kf_disallow(mask);						\
971 	} else {								\
972 		__ret = scx_ops.op(args);					\
973 	}									\
974 	__ret;									\
975 })
976 
977 /*
978  * Some kfuncs are allowed only on the tasks that are subjects of the
979  * in-progress scx_ops operation, e.g. for locking guarantees. To enforce such
980  * restrictions, the following SCX_CALL_OP_*() variants should be used when
981  * invoking scx_ops operations that take task arguments. These can only be used
982  * for non-nesting operations due to the way the tasks are tracked.
983  *
984  * kfuncs which can only operate on such tasks can in turn use
985  * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
986  * the specific task.
987  */
988 #define SCX_CALL_OP_TASK(mask, op, task, args...)				\
989 do {										\
990 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
991 	current->scx.kf_tasks[0] = task;					\
992 	SCX_CALL_OP(mask, op, task, ##args);					\
993 	current->scx.kf_tasks[0] = NULL;					\
994 } while (0)
995 
996 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...)				\
997 ({										\
998 	__typeof__(scx_ops.op(task, ##args)) __ret;				\
999 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1000 	current->scx.kf_tasks[0] = task;					\
1001 	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);			\
1002 	current->scx.kf_tasks[0] = NULL;					\
1003 	__ret;									\
1004 })
1005 
1006 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)			\
1007 ({										\
1008 	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
1009 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1010 	current->scx.kf_tasks[0] = task0;					\
1011 	current->scx.kf_tasks[1] = task1;					\
1012 	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);		\
1013 	current->scx.kf_tasks[0] = NULL;					\
1014 	current->scx.kf_tasks[1] = NULL;					\
1015 	__ret;									\
1016 })
1017 
1018 /* @mask is constant, always inline to cull unnecessary branches */
1019 static __always_inline bool scx_kf_allowed(u32 mask)
1020 {
1021 	if (unlikely(!(current->scx.kf_mask & mask))) {
1022 		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1023 			      mask, current->scx.kf_mask);
1024 		return false;
1025 	}
1026 
1027 	if (unlikely((mask & SCX_KF_SLEEPABLE) && in_interrupt())) {
1028 		scx_ops_error("sleepable kfunc called from non-sleepable context");
1029 		return false;
1030 	}
1031 
1032 	/*
1033 	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1034 	 * DISPATCH must not be called if we're running DEQUEUE which is nested
1035 	 * inside ops.dispatch(). We don't need to check the SCX_KF_SLEEPABLE
1036 	 * boundary thanks to the above in_interrupt() check.
1037 	 */
1038 	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1039 		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1040 		scx_ops_error("cpu_release kfunc called from a nested operation");
1041 		return false;
1042 	}
1043 
1044 	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1045 		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1046 		scx_ops_error("dispatch kfunc called from a nested operation");
1047 		return false;
1048 	}
1049 
1050 	return true;
1051 }
1052 
1053 /* see SCX_CALL_OP_TASK() */
1054 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1055 							struct task_struct *p)
1056 {
1057 	if (!scx_kf_allowed(mask))
1058 		return false;
1059 
1060 	if (unlikely((p != current->scx.kf_tasks[0] &&
1061 		      p != current->scx.kf_tasks[1]))) {
1062 		scx_ops_error("called on a task not being operated on");
1063 		return false;
1064 	}
1065 
1066 	return true;
1067 }
1068 
1069 
1070 /*
1071  * SCX task iterator.
1072  */
1073 struct scx_task_iter {
1074 	struct sched_ext_entity		cursor;
1075 	struct task_struct		*locked;
1076 	struct rq			*rq;
1077 	struct rq_flags			rf;
1078 };
1079 
1080 /**
1081  * scx_task_iter_init - Initialize a task iterator
1082  * @iter: iterator to init
1083  *
1084  * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
1085  * @iter must eventually be exited with scx_task_iter_exit().
1086  *
1087  * scx_tasks_lock may be released between this and the first next() call or
1088  * between any two next() calls. If scx_tasks_lock is released between two
1089  * next() calls, the caller is responsible for ensuring that the task being
1090  * iterated remains accessible either through RCU read lock or obtaining a
1091  * reference count.
1092  *
1093  * All tasks which existed when the iteration started are guaranteed to be
1094  * visited as long as they still exist.
1095  */
1096 static void scx_task_iter_init(struct scx_task_iter *iter)
1097 {
1098 	lockdep_assert_held(&scx_tasks_lock);
1099 
1100 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1101 	list_add(&iter->cursor.tasks_node, &scx_tasks);
1102 	iter->locked = NULL;
1103 }
1104 
1105 /**
1106  * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
1107  * @iter: iterator to unlock rq for
1108  *
1109  * If @iter is in the middle of a locked iteration, it may be locking the rq of
1110  * the task currently being visited. Unlock the rq if so. This function can be
1111  * safely called anytime during an iteration.
1112  *
1113  * Returns %true if the rq @iter was locking is unlocked. %false if @iter was
1114  * not locking an rq.
1115  */
1116 static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1117 {
1118 	if (iter->locked) {
1119 		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1120 		iter->locked = NULL;
1121 		return true;
1122 	} else {
1123 		return false;
1124 	}
1125 }
1126 
1127 /**
1128  * scx_task_iter_exit - Exit a task iterator
1129  * @iter: iterator to exit
1130  *
1131  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
1132  * If the iterator holds a task's rq lock, that rq lock is released. See
1133  * scx_task_iter_init() for details.
1134  */
1135 static void scx_task_iter_exit(struct scx_task_iter *iter)
1136 {
1137 	lockdep_assert_held(&scx_tasks_lock);
1138 
1139 	scx_task_iter_rq_unlock(iter);
1140 	list_del_init(&iter->cursor.tasks_node);
1141 }
1142 
1143 /**
1144  * scx_task_iter_next - Next task
1145  * @iter: iterator to walk
1146  *
1147  * Visit the next task. See scx_task_iter_init() for details.
1148  */
1149 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1150 {
1151 	struct list_head *cursor = &iter->cursor.tasks_node;
1152 	struct sched_ext_entity *pos;
1153 
1154 	lockdep_assert_held(&scx_tasks_lock);
1155 
1156 	list_for_each_entry(pos, cursor, tasks_node) {
1157 		if (&pos->tasks_node == &scx_tasks)
1158 			return NULL;
1159 		if (!(pos->flags & SCX_TASK_CURSOR)) {
1160 			list_move(cursor, &pos->tasks_node);
1161 			return container_of(pos, struct task_struct, scx);
1162 		}
1163 	}
1164 
1165 	/* can't happen, should always terminate at scx_tasks above */
1166 	BUG();
1167 }
1168 
1169 /**
1170  * scx_task_iter_next_locked - Next non-idle task with its rq locked
1171  * @iter: iterator to walk
1172  * @include_dead: Whether we should include dead tasks in the iteration
1173  *
1174  * Visit the next non-idle task with its rq lock held. Allows callers to specify
1175  * whether they would like to filter out dead tasks. See scx_task_iter_init()
1176  * for details.
1177  */
1178 static struct task_struct *
1179 scx_task_iter_next_locked(struct scx_task_iter *iter, bool include_dead)
1180 {
1181 	struct task_struct *p;
1182 retry:
1183 	scx_task_iter_rq_unlock(iter);
1184 
1185 	while ((p = scx_task_iter_next(iter))) {
1186 		/*
1187 		 * is_idle_task() tests %PF_IDLE which may not be set for CPUs
1188 		 * which haven't yet been onlined. Test sched_class directly.
1189 		 */
1190 		if (p->sched_class != &idle_sched_class)
1191 			break;
1192 	}
1193 	if (!p)
1194 		return NULL;
1195 
1196 	iter->rq = task_rq_lock(p, &iter->rf);
1197 	iter->locked = p;
1198 
1199 	/*
1200 	 * If we see %TASK_DEAD, @p already disabled preemption, is about to do
1201 	 * the final __schedule(), won't ever need to be scheduled again and can
1202 	 * thus be safely ignored. If we don't see %TASK_DEAD, @p can't enter
1203 	 * the final __schedule() while we're locking its rq and thus will stay
1204 	 * alive until the rq is unlocked.
1205 	 */
1206 	if (!include_dead && READ_ONCE(p->__state) == TASK_DEAD)
1207 		goto retry;
1208 
1209 	return p;
1210 }
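
/*
 * A sketch of typical usage (assuming the caller takes scx_tasks_lock as
 * required by scx_task_iter_init(); the loop body is hypothetical):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	spin_lock_irq(&scx_tasks_lock);
 *	scx_task_iter_init(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti, false))) {
 *		... operate on @p with its rq locked ...
 *	}
 *	scx_task_iter_exit(&sti);
 *	spin_unlock_irq(&scx_tasks_lock);
 */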
1211 
1212 static enum scx_ops_enable_state scx_ops_enable_state(void)
1213 {
1214 	return atomic_read(&scx_ops_enable_state_var);
1215 }
1216 
1217 static enum scx_ops_enable_state
1218 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1219 {
1220 	return atomic_xchg(&scx_ops_enable_state_var, to);
1221 }
1222 
1223 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1224 					enum scx_ops_enable_state from)
1225 {
1226 	int from_v = from;
1227 
1228 	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1229 }
1230 
1231 static bool scx_ops_bypassing(void)
1232 {
1233 	return unlikely(atomic_read(&scx_ops_bypass_depth));
1234 }
1235 
1236 /**
1237  * wait_ops_state - Busy-wait the specified ops state to end
1238  * @p: target task
1239  * @opss: state to wait the end of
1240  *
1241  * Busy-wait for @p to transition out of @opss. This can only be used when the
1242  * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This
1242  * function also
1243  * has load_acquire semantics to ensure that the caller can see the updates made
1244  * in the enqueueing and dispatching paths.
1245  */
1246 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1247 {
1248 	do {
1249 		cpu_relax();
1250 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1251 }
1252 
1253 /**
1254  * ops_cpu_valid - Verify a cpu number
1255  * @cpu: cpu number which came from a BPF ops
1256  * @where: extra information reported on error
1257  *
1258  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1259  * Verify that it is in range and one of the possible cpus. If invalid, trigger
1260  * an ops error.
1261  */
1262 static bool ops_cpu_valid(s32 cpu, const char *where)
1263 {
1264 	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1265 		return true;
1266 	} else {
1267 		scx_ops_error("invalid CPU %d%s%s", cpu,
1268 			      where ? " " : "", where ?: "");
1269 		return false;
1270 	}
1271 }
1272 
1273 /**
1274  * ops_sanitize_err - Sanitize a -errno value
1275  * @ops_name: operation to blame on failure
1276  * @err: -errno value to sanitize
1277  *
1278  * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1279  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1280  * cause misbehaviors. For example, a large negative return from
1281  * ops.init_task() triggers an oops when passed up the call chain because the
1282  * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1283  * handled as a pointer.
1284  */
1285 static int ops_sanitize_err(const char *ops_name, s32 err)
1286 {
1287 	if (err < 0 && err >= -MAX_ERRNO)
1288 		return err;
1289 
1290 	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1291 	return -EPROTO;
1292 }
1293 
1294 /**
1295  * touch_core_sched - Update timestamp used for core-sched task ordering
1296  * @rq: rq to read clock from, must be locked
1297  * @p: task to update the timestamp for
1298  *
1299  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1300  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1301  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1302  * exhaustion).
1303  */
1304 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1305 {
1306 #ifdef CONFIG_SCHED_CORE
1307 	/*
1308 	 * It's okay to update the timestamp spuriously. Use
1309 	 * sched_core_disabled() which is cheaper than enabled().
1310 	 */
1311 	if (!sched_core_disabled())
1312 		p->scx.core_sched_at = rq_clock_task(rq);
1313 #endif
1314 }
1315 
1316 /**
1317  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1318  * @rq: rq to read clock from, must be locked
1319  * @p: task being dispatched
1320  *
1321  * If the BPF scheduler implements custom core-sched ordering via
1322  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1323  * ordering within each local DSQ. This function is called from dispatch paths
1324  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1325  */
1326 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1327 {
1328 	lockdep_assert_rq_held(rq);
1329 	assert_clock_updated(rq);
1330 
1331 #ifdef CONFIG_SCHED_CORE
1332 	if (SCX_HAS_OP(core_sched_before))
1333 		touch_core_sched(rq, p);
1334 #endif
1335 }
1336 
1337 static void update_curr_scx(struct rq *rq)
1338 {
1339 	struct task_struct *curr = rq->curr;
1340 	u64 now = rq_clock_task(rq);
1341 	u64 delta_exec;
1342 
1343 	if (time_before_eq64(now, curr->se.exec_start))
1344 		return;
1345 
1346 	delta_exec = now - curr->se.exec_start;
1347 	curr->se.exec_start = now;
1348 	curr->se.sum_exec_runtime += delta_exec;
1349 	account_group_exec_runtime(curr, delta_exec);
1350 	cgroup_account_cputime(curr, delta_exec);
1351 
1352 	if (curr->scx.slice != SCX_SLICE_INF) {
1353 		curr->scx.slice -= min(curr->scx.slice, delta_exec);
1354 		if (!curr->scx.slice)
1355 			touch_core_sched(rq, curr);
1356 	}
1357 }
1358 
1359 static bool scx_dsq_priq_less(struct rb_node *node_a,
1360 			      const struct rb_node *node_b)
1361 {
1362 	const struct task_struct *a =
1363 		container_of(node_a, struct task_struct, scx.dsq_node.priq);
1364 	const struct task_struct *b =
1365 		container_of(node_b, struct task_struct, scx.dsq_node.priq);
1366 
1367 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1368 }
1369 
1370 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1371 {
1372 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1373 	WRITE_ONCE(dsq->nr, dsq->nr + delta);
1374 }
1375 
1376 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1377 			     u64 enq_flags)
1378 {
1379 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
1380 
1381 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_node.list));
1382 	WARN_ON_ONCE((p->scx.dsq_node.flags & SCX_TASK_DSQ_ON_PRIQ) ||
1383 		     !RB_EMPTY_NODE(&p->scx.dsq_node.priq));
1384 
1385 	if (!is_local) {
1386 		raw_spin_lock(&dsq->lock);
1387 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1388 			scx_ops_error("attempting to dispatch to a destroyed dsq");
1389 			/* fall back to the global dsq */
1390 			raw_spin_unlock(&dsq->lock);
1391 			dsq = &scx_dsq_global;
1392 			raw_spin_lock(&dsq->lock);
1393 		}
1394 	}
1395 
1396 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1397 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1398 		/*
1399 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1400 		 * their FIFO queues. To avoid confusion and accidentally
1401 		 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1402 		 * disallow any internal DSQ from doing vtime ordering of
1403 		 * tasks.
1404 		 */
1405 		scx_ops_error("cannot use vtime ordering for built-in DSQs");
1406 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1407 	}
1408 
1409 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1410 		struct rb_node *rbp;
1411 
1412 		/*
1413 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1414 		 * linked to both the rbtree and list on PRIQs, this can only be
1415 		 * tested easily when adding the first task.
1416 		 */
1417 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1418 			     !list_empty(&dsq->list)))
1419 			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1420 				      dsq->id);
1421 
1422 		p->scx.dsq_node.flags |= SCX_TASK_DSQ_ON_PRIQ;
1423 		rb_add(&p->scx.dsq_node.priq, &dsq->priq, scx_dsq_priq_less);
1424 
1425 		/*
1426 		 * Find the previous task and insert after it on the list so
1427 		 * that @dsq->list is vtime ordered.
1428 		 */
1429 		rbp = rb_prev(&p->scx.dsq_node.priq);
1430 		if (rbp) {
1431 			struct task_struct *prev =
1432 				container_of(rbp, struct task_struct,
1433 					     scx.dsq_node.priq);
1434 			list_add(&p->scx.dsq_node.list, &prev->scx.dsq_node.list);
1435 		} else {
1436 			list_add(&p->scx.dsq_node.list, &dsq->list);
1437 		}
1438 	} else {
1439 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1440 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1441 			scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1442 				      dsq->id);
1443 
1444 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1445 			list_add(&p->scx.dsq_node.list, &dsq->list);
1446 		else
1447 			list_add_tail(&p->scx.dsq_node.list, &dsq->list);
1448 	}
1449 
1450 	dsq_mod_nr(dsq, 1);
1451 	p->scx.dsq = dsq;
1452 
1453 	/*
1454 	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1455 	 * direct dispatch path, but we clear them here because the direct
1456 	 * dispatch verdict may be overridden on the enqueue path during e.g.
1457 	 * bypass.
1458 	 */
1459 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1460 	p->scx.ddsp_enq_flags = 0;
1461 
1462 	/*
1463 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1464 	 * match waiters' load_acquire.
1465 	 */
1466 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1467 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1468 
1469 	if (is_local) {
1470 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1471 		bool preempt = false;
1472 
1473 		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1474 		    rq->curr->sched_class == &ext_sched_class) {
1475 			rq->curr->scx.slice = 0;
1476 			preempt = true;
1477 		}
1478 
1479 		if (preempt || sched_class_above(&ext_sched_class,
1480 						 rq->curr->sched_class))
1481 			resched_curr(rq);
1482 	} else {
1483 		raw_spin_unlock(&dsq->lock);
1484 	}
1485 }
1486 
1487 static void task_unlink_from_dsq(struct task_struct *p,
1488 				 struct scx_dispatch_q *dsq)
1489 {
1490 	if (p->scx.dsq_node.flags & SCX_TASK_DSQ_ON_PRIQ) {
1491 		rb_erase(&p->scx.dsq_node.priq, &dsq->priq);
1492 		RB_CLEAR_NODE(&p->scx.dsq_node.priq);
1493 		p->scx.dsq_node.flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1494 	}
1495 
1496 	list_del_init(&p->scx.dsq_node.list);
1497 }
1498 
1499 static bool task_linked_on_dsq(struct task_struct *p)
1500 {
1501 	return !list_empty(&p->scx.dsq_node.list);
1502 }
1503 
1504 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1505 {
1506 	struct scx_dispatch_q *dsq = p->scx.dsq;
1507 	bool is_local = dsq == &rq->scx.local_dsq;
1508 
1509 	if (!dsq) {
1510 		WARN_ON_ONCE(task_linked_on_dsq(p));
1511 		/*
1512 		 * When dispatching directly from the BPF scheduler to a local
1513 		 * DSQ, the task isn't associated with any DSQ but
1514 		 * @p->scx.holding_cpu may be set under the protection of
1515 		 * %SCX_OPSS_DISPATCHING.
1516 		 */
1517 		if (p->scx.holding_cpu >= 0)
1518 			p->scx.holding_cpu = -1;
1519 		return;
1520 	}
1521 
1522 	if (!is_local)
1523 		raw_spin_lock(&dsq->lock);
1524 
1525 	/*
1526 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_node
1527 	 * can't change underneath us.
1528 	 */
1529 	if (p->scx.holding_cpu < 0) {
1530 		/* @p must still be on @dsq, dequeue */
1531 		WARN_ON_ONCE(!task_linked_on_dsq(p));
1532 		task_unlink_from_dsq(p, dsq);
1533 		dsq_mod_nr(dsq, -1);
1534 	} else {
1535 		/*
1536 		 * We're racing against dispatch_to_local_dsq() which already
1537 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1538 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1539 		 * the race.
1540 		 */
1541 		WARN_ON_ONCE(task_linked_on_dsq(p));
1542 		p->scx.holding_cpu = -1;
1543 	}
1544 	p->scx.dsq = NULL;
1545 
1546 	if (!is_local)
1547 		raw_spin_unlock(&dsq->lock);
1548 }
1549 
1550 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1551 {
1552 	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1553 }
1554 
1555 static struct scx_dispatch_q *find_non_local_dsq(u64 dsq_id)
1556 {
1557 	lockdep_assert(rcu_read_lock_any_held());
1558 
1559 	if (dsq_id == SCX_DSQ_GLOBAL)
1560 		return &scx_dsq_global;
1561 	else
1562 		return find_user_dsq(dsq_id);
1563 }
1564 
1565 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1566 						    struct task_struct *p)
1567 {
1568 	struct scx_dispatch_q *dsq;
1569 
1570 	if (dsq_id == SCX_DSQ_LOCAL)
1571 		return &rq->scx.local_dsq;
1572 
1573 	dsq = find_non_local_dsq(dsq_id);
1574 	if (unlikely(!dsq)) {
1575 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1576 			      dsq_id, p->comm, p->pid);
1577 		return &scx_dsq_global;
1578 	}
1579 
1580 	return dsq;
1581 }
1582 
1583 static void mark_direct_dispatch(struct task_struct *ddsp_task,
1584 				 struct task_struct *p, u64 dsq_id,
1585 				 u64 enq_flags)
1586 {
1587 	/*
1588 	 * Mark that dispatch already happened from ops.select_cpu() or
1589 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1590 	 * which can never match a valid task pointer.
1591 	 */
1592 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1593 
1594 	/* @p must match the task on the enqueue path */
1595 	if (unlikely(p != ddsp_task)) {
1596 		if (IS_ERR(ddsp_task))
1597 			scx_ops_error("%s[%d] already direct-dispatched",
1598 				      p->comm, p->pid);
1599 		else
1600 			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1601 				      ddsp_task->comm, ddsp_task->pid,
1602 				      p->comm, p->pid);
1603 		return;
1604 	}
1605 
1606 	/*
1607 	 * %SCX_DSQ_LOCAL_ON is not supported during direct dispatch because
1608 	 * dispatching to the local DSQ of a different CPU requires unlocking
1609 	 * the current rq which isn't allowed in the enqueue path. Use
1610 	 * ops.select_cpu() to be on the target CPU and then %SCX_DSQ_LOCAL.
1611 	 */
1612 	if (unlikely((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON)) {
1613 		scx_ops_error("SCX_DSQ_LOCAL_ON can't be used for direct-dispatch");
1614 		return;
1615 	}
1616 
1617 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1618 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1619 
1620 	p->scx.ddsp_dsq_id = dsq_id;
1621 	p->scx.ddsp_enq_flags = enq_flags;
1622 }
1623 
1624 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1625 {
1626 	struct scx_dispatch_q *dsq;
1627 
1628 	touch_core_sched_dispatch(task_rq(p), p);
1629 
1630 	enq_flags |= (p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1631 	dsq = find_dsq_for_dispatch(task_rq(p), p->scx.ddsp_dsq_id, p);
1632 	dispatch_enqueue(dsq, p, enq_flags);
1633 }
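
/*
 * For illustration: the direct-dispatch path above is what a BPF scheduler
 * exercises when it calls scx_bpf_dispatch() from inside ops.select_cpu() or
 * ops.enqueue(). A minimal sketch of the BPF side, assuming the usual
 * BPF_STRUCT_OPS() wrapper and kfunc declarations from the scheduler's own
 * headers:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		// Queue @p straight into the local DSQ of the CPU being
 *		// returned; do_enqueue_task() then takes the "direct" branch.
 *		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return prev_cpu;
 *	}
 */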
1634 
1635 static bool scx_rq_online(struct rq *rq)
1636 {
1637 	return likely(rq->scx.flags & SCX_RQ_ONLINE);
1638 }
1639 
1640 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1641 			    int sticky_cpu)
1642 {
1643 	struct task_struct **ddsp_taskp;
1644 	unsigned long qseq;
1645 
1646 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1647 
1648 	/* rq migration */
1649 	if (sticky_cpu == cpu_of(rq))
1650 		goto local_norefill;
1651 
1652 	/*
1653 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1654 	 * is offline and are just running the hotplug path. Don't bother the
1655 	 * BPF scheduler.
1656 	 */
1657 	if (!scx_rq_online(rq))
1658 		goto local;
1659 
1660 	if (scx_ops_bypassing()) {
1661 		if (enq_flags & SCX_ENQ_LAST)
1662 			goto local;
1663 		else
1664 			goto global;
1665 	}
1666 
1667 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1668 		goto direct;
1669 
1670 	/* see %SCX_OPS_ENQ_EXITING */
1671 	if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
1672 	    unlikely(p->flags & PF_EXITING))
1673 		goto local;
1674 
1675 	/* see %SCX_OPS_ENQ_LAST */
1676 	if (!static_branch_unlikely(&scx_ops_enq_last) &&
1677 	    (enq_flags & SCX_ENQ_LAST))
1678 		goto local;
1679 
1680 	if (!SCX_HAS_OP(enqueue))
1681 		goto global;
1682 
1683 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
1684 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1685 
1686 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1687 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1688 
1689 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1690 	WARN_ON_ONCE(*ddsp_taskp);
1691 	*ddsp_taskp = p;
1692 
1693 	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
1694 
1695 	*ddsp_taskp = NULL;
1696 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1697 		goto direct;
1698 
1699 	/*
1700 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
1701 	 * dequeue may be waiting. The store_release matches their load_acquire.
1702 	 */
1703 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
1704 	return;
1705 
1706 direct:
1707 	direct_dispatch(p, enq_flags);
1708 	return;
1709 
1710 local:
1711 	/*
1712 	 * For task-ordering, slice refill must be treated as implying the end
1713 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
1714 	 * higher priority it becomes from scx_prio_less()'s POV.
1715 	 */
1716 	touch_core_sched(rq, p);
1717 	p->scx.slice = SCX_SLICE_DFL;
1718 local_norefill:
1719 	dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
1720 	return;
1721 
1722 global:
1723 	touch_core_sched(rq, p);	/* see the comment in local: */
1724 	p->scx.slice = SCX_SLICE_DFL;
1725 	dispatch_enqueue(&scx_dsq_global, p, enq_flags);
1726 }
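
/*
 * For reference, the simplest possible BPF-side ops.enqueue() mirrors the
 * "global" fallback above and pushes every task onto the shared global DSQ,
 * which balance_one() consumes later. A hedged sketch (helper macros and
 * kfunc declarations assumed to come from the scheduler's headers):
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */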
1727 
1728 static bool task_runnable(const struct task_struct *p)
1729 {
1730 	return !list_empty(&p->scx.runnable_node);
1731 }
1732 
1733 static void set_task_runnable(struct rq *rq, struct task_struct *p)
1734 {
1735 	lockdep_assert_rq_held(rq);
1736 
1737 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
1738 		p->scx.runnable_at = jiffies;
1739 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
1740 	}
1741 
1742 	/*
1743 	 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
1744 	 * appended to the runnable_list.
1745 	 */
1746 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
1747 }
1748 
1749 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
1750 {
1751 	list_del_init(&p->scx.runnable_node);
1752 	if (reset_runnable_at)
1753 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
1754 }
1755 
1756 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
1757 {
1758 	int sticky_cpu = p->scx.sticky_cpu;
1759 
1760 	enq_flags |= rq->scx.extra_enq_flags;
1761 
1762 	if (sticky_cpu >= 0)
1763 		p->scx.sticky_cpu = -1;
1764 
1765 	/*
1766 	 * Restoring a running task will be immediately followed by
1767 	 * set_next_task_scx() which expects the task to not be on the BPF
1768 	 * scheduler as tasks can only start running through local DSQs. Force
1769 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
1770 	 */
1771 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
1772 		sticky_cpu = cpu_of(rq);
1773 
1774 	if (p->scx.flags & SCX_TASK_QUEUED) {
1775 		WARN_ON_ONCE(!task_runnable(p));
1776 		return;
1777 	}
1778 
1779 	set_task_runnable(rq, p);
1780 	p->scx.flags |= SCX_TASK_QUEUED;
1781 	rq->scx.nr_running++;
1782 	add_nr_running(rq, 1);
1783 
1784 	if (SCX_HAS_OP(runnable))
1785 		SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
1786 
1787 	if (enq_flags & SCX_ENQ_WAKEUP)
1788 		touch_core_sched(rq, p);
1789 
1790 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
1791 }
1792 
1793 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
1794 {
1795 	unsigned long opss;
1796 
1797 	/* dequeue is always temporary, don't reset runnable_at */
1798 	clr_task_runnable(p, false);
1799 
1800 	/* acquire ensures that we see the preceding updates on QUEUED */
1801 	opss = atomic_long_read_acquire(&p->scx.ops_state);
1802 
1803 	switch (opss & SCX_OPSS_STATE_MASK) {
1804 	case SCX_OPSS_NONE:
1805 		break;
1806 	case SCX_OPSS_QUEUEING:
1807 		/*
1808 		 * QUEUEING is started and finished while holding @p's rq lock.
1809 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
1810 		 */
1811 		BUG();
1812 	case SCX_OPSS_QUEUED:
1813 		if (SCX_HAS_OP(dequeue))
1814 			SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
1815 
1816 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
1817 					    SCX_OPSS_NONE))
1818 			break;
1819 		fallthrough;
1820 	case SCX_OPSS_DISPATCHING:
1821 		/*
1822 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
1823 		 * wait for the transfer to complete so that @p doesn't get
1824 		 * added to its DSQ after dequeueing is complete.
1825 		 *
1826 		 * As we're waiting on DISPATCHING with the rq locked, the
1827 		 * dispatching side shouldn't try to lock the rq while
1828 		 * DISPATCHING is set. See dispatch_to_local_dsq().
1829 		 *
1830 		 * DISPATCHING shouldn't have qseq set and control can reach
1831 		 * here with NONE @opss from the above QUEUED case block.
1832 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
1833 		 */
1834 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
1835 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1836 		break;
1837 	}
1838 }
1839 
1840 static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
1841 {
1842 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
1843 		WARN_ON_ONCE(task_runnable(p));
1844 		return;
1845 	}
1846 
1847 	ops_dequeue(p, deq_flags);
1848 
1849 	/*
1850 	 * A currently running task which is going off @rq first gets dequeued
1851 	 * and then stops running. As we want running <-> stopping transitions
1852 	 * to be contained within runnable <-> quiescent transitions, trigger
1853 	 * ->stopping() early here instead of in put_prev_task_scx().
1854 	 *
1855 	 * @p may go through multiple stopping <-> running transitions between
1856 	 * here and put_prev_task_scx() if task attribute changes occur while
1857 	 * balance_scx() leaves @rq unlocked. However, they don't contain any
1858 	 * information meaningful to the BPF scheduler and can be suppressed by
1859 	 * skipping the callbacks if the task is !QUEUED.
1860 	 */
1861 	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
1862 		update_curr_scx(rq);
1863 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
1864 	}
1865 
1866 	if (SCX_HAS_OP(quiescent))
1867 		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
1868 
1869 	if (deq_flags & SCX_DEQ_SLEEP)
1870 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
1871 	else
1872 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
1873 
1874 	p->scx.flags &= ~SCX_TASK_QUEUED;
1875 	rq->scx.nr_running--;
1876 	sub_nr_running(rq, 1);
1877 
1878 	dispatch_dequeue(rq, p);
1879 }
1880 
1881 static void yield_task_scx(struct rq *rq)
1882 {
1883 	struct task_struct *p = rq->curr;
1884 
1885 	if (SCX_HAS_OP(yield))
1886 		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
1887 	else
1888 		p->scx.slice = 0;
1889 }
1890 
1891 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
1892 {
1893 	struct task_struct *from = rq->curr;
1894 
1895 	if (SCX_HAS_OP(yield))
1896 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
1897 	else
1898 		return false;
1899 }
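
/*
 * When ops.yield() is implemented, it's expected to provide the equivalent of
 * the defaults above: expire @from's remaining slice and, for the directed
 * yield_to case, report whether the request was honored. A hypothetical
 * sketch of such an implementation:
 *
 *	bool BPF_STRUCT_OPS(example_yield, struct task_struct *from,
 *			    struct task_struct *to)
 *	{
 *		from->scx.slice = 0;
 *		return false;	// directed yield_to not handled
 *	}
 */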
1900 
1901 #ifdef CONFIG_SMP
1902 /**
1903  * move_task_to_local_dsq - Move a task from a different rq to a local DSQ
1904  * @rq: rq to move the task into, currently locked
1905  * @p: task to move
1906  * @enq_flags: %SCX_ENQ_*
1907  *
1908  * Move @p which is currently on a different rq to @rq's local DSQ. The caller
1909  * must:
1910  *
1911  * 1. Start with exclusive access to @p either through its DSQ lock or
1912  *    %SCX_OPSS_DISPATCHING flag.
1913  *
1914  * 2. Set @p->scx.holding_cpu to raw_smp_processor_id().
1915  *
1916  * 3. Remember task_rq(@p). Release the exclusive access so that we don't
1917  *    deadlock with dequeue.
1918  *
1919  * 4. Lock @rq and the task_rq from #3.
1920  *
1921  * 5. Call this function.
1922  *
1923  * Returns %true if @p was successfully moved. %false after racing dequeue and
1924  * losing.
1925  */
1926 static bool move_task_to_local_dsq(struct rq *rq, struct task_struct *p,
1927 				   u64 enq_flags)
1928 {
1929 	struct rq *task_rq;
1930 
1931 	lockdep_assert_rq_held(rq);
1932 
1933 	/*
1934 	 * If dequeue got to @p while we were trying to lock both rq's, it'd
1935 	 * have cleared @p->scx.holding_cpu to -1. While other CPUs may have
1936 	 * updated it to different values afterwards, this operation can neither
1937 	 * be preempted nor recurse, so @p->scx.holding_cpu can never become
1938 	 * raw_smp_processor_id() again before we're done. Thus, we can tell
1939 	 * whether we lost to dequeue by testing whether @p->scx.holding_cpu is
1940 	 * still raw_smp_processor_id().
1941 	 *
1942 	 * See dispatch_dequeue() for the counterpart.
1943 	 */
1944 	if (unlikely(p->scx.holding_cpu != raw_smp_processor_id()))
1945 		return false;
1946 
1947 	/* @p->rq couldn't have changed if we're still the holding cpu */
1948 	task_rq = task_rq(p);
1949 	lockdep_assert_rq_held(task_rq);
1950 
1951 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(rq), p->cpus_ptr));
1952 	deactivate_task(task_rq, p, 0);
1953 	set_task_cpu(p, cpu_of(rq));
1954 	p->scx.sticky_cpu = cpu_of(rq);
1955 
1956 	/*
1957 	 * We want to pass scx-specific enq_flags but activate_task() will
1958 	 * truncate the upper 32 bits. As we own @rq, we can pass them through
1959 	 * @rq->scx.extra_enq_flags instead.
1960 	 */
1961 	WARN_ON_ONCE(rq->scx.extra_enq_flags);
1962 	rq->scx.extra_enq_flags = enq_flags;
1963 	activate_task(rq, p, 0);
1964 	rq->scx.extra_enq_flags = 0;
1965 
1966 	return true;
1967 }
1968 
1969 /**
1970  * dispatch_to_local_dsq_lock - Ensure source and destination rq's are locked
1971  * @rq: current rq which is locked
1972  * @rf: rq_flags to use when unlocking @rq
1973  * @src_rq: rq to move task from
1974  * @dst_rq: rq to move task to
1975  *
1976  * We're holding @rq lock and trying to dispatch a task from @src_rq to
1977  * @dst_rq's local DSQ and thus need to lock both @src_rq and @dst_rq. Whether
1978  * @rq stays locked isn't important as long as the state is restored after
1979  * dispatch_to_local_dsq_unlock().
1980  */
1981 static void dispatch_to_local_dsq_lock(struct rq *rq, struct rq_flags *rf,
1982 				       struct rq *src_rq, struct rq *dst_rq)
1983 {
1984 	rq_unpin_lock(rq, rf);
1985 
1986 	if (src_rq == dst_rq) {
1987 		raw_spin_rq_unlock(rq);
1988 		raw_spin_rq_lock(dst_rq);
1989 	} else if (rq == src_rq) {
1990 		double_lock_balance(rq, dst_rq);
1991 		rq_repin_lock(rq, rf);
1992 	} else if (rq == dst_rq) {
1993 		double_lock_balance(rq, src_rq);
1994 		rq_repin_lock(rq, rf);
1995 	} else {
1996 		raw_spin_rq_unlock(rq);
1997 		double_rq_lock(src_rq, dst_rq);
1998 	}
1999 }
2000 
2001 /**
2002  * dispatch_to_local_dsq_unlock - Undo dispatch_to_local_dsq_lock()
2003  * @rq: current rq which is locked
2004  * @rf: rq_flags to use when unlocking @rq
2005  * @src_rq: rq to move task from
2006  * @dst_rq: rq to move task to
2007  *
2008  * Unlock @src_rq and @dst_rq and ensure that @rq is locked on return.
2009  */
2010 static void dispatch_to_local_dsq_unlock(struct rq *rq, struct rq_flags *rf,
2011 					 struct rq *src_rq, struct rq *dst_rq)
2012 {
2013 	if (src_rq == dst_rq) {
2014 		raw_spin_rq_unlock(dst_rq);
2015 		raw_spin_rq_lock(rq);
2016 		rq_repin_lock(rq, rf);
2017 	} else if (rq == src_rq) {
2018 		double_unlock_balance(rq, dst_rq);
2019 	} else if (rq == dst_rq) {
2020 		double_unlock_balance(rq, src_rq);
2021 	} else {
2022 		double_rq_unlock(src_rq, dst_rq);
2023 		raw_spin_rq_lock(rq);
2024 		rq_repin_lock(rq, rf);
2025 	}
2026 }
2027 #endif	/* CONFIG_SMP */
2028 
2029 static void consume_local_task(struct rq *rq, struct scx_dispatch_q *dsq,
2030 			       struct task_struct *p)
2031 {
2032 	lockdep_assert_held(&dsq->lock);	/* released on return */
2033 
2034 	/* @dsq is locked and @p is on this rq */
2035 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2036 	task_unlink_from_dsq(p, dsq);
2037 	list_add_tail(&p->scx.dsq_node.list, &rq->scx.local_dsq.list);
2038 	dsq_mod_nr(dsq, -1);
2039 	dsq_mod_nr(&rq->scx.local_dsq, 1);
2040 	p->scx.dsq = &rq->scx.local_dsq;
2041 	raw_spin_unlock(&dsq->lock);
2042 }
2043 
2044 #ifdef CONFIG_SMP
2045 /*
2046  * Similar to kernel/sched/core.c::is_cpu_allowed() but we're testing whether @p
2047  * can be pulled to @rq.
2048  */
2049 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq)
2050 {
2051 	int cpu = cpu_of(rq);
2052 
2053 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2054 		return false;
2055 	if (unlikely(is_migration_disabled(p)))
2056 		return false;
2057 	if (!(p->flags & PF_KTHREAD) && unlikely(!task_cpu_possible(cpu, p)))
2058 		return false;
2059 	if (!scx_rq_online(rq))
2060 		return false;
2061 	return true;
2062 }
2063 
2064 static bool consume_remote_task(struct rq *rq, struct rq_flags *rf,
2065 				struct scx_dispatch_q *dsq,
2066 				struct task_struct *p, struct rq *task_rq)
2067 {
2068 	bool moved = false;
2069 
2070 	lockdep_assert_held(&dsq->lock);	/* released on return */
2071 
2072 	/*
2073 	 * @dsq is locked and @p is on a remote rq. @p is currently protected by
2074 	 * @dsq->lock. We want to pull @p to @rq but may deadlock if we grab
2075 	 * @task_rq while holding @dsq and @rq locks. As dequeue can't drop the
2076 	 * rq lock or fail, do a little dancing from our side. See
2077 	 * move_task_to_local_dsq().
2078 	 */
2079 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2080 	task_unlink_from_dsq(p, dsq);
2081 	dsq_mod_nr(dsq, -1);
2082 	p->scx.holding_cpu = raw_smp_processor_id();
2083 	raw_spin_unlock(&dsq->lock);
2084 
2085 	rq_unpin_lock(rq, rf);
2086 	double_lock_balance(rq, task_rq);
2087 	rq_repin_lock(rq, rf);
2088 
2089 	moved = move_task_to_local_dsq(rq, p, 0);
2090 
2091 	double_unlock_balance(rq, task_rq);
2092 
2093 	return moved;
2094 }
2095 #else	/* CONFIG_SMP */
2096 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq) { return false; }
2097 static bool consume_remote_task(struct rq *rq, struct rq_flags *rf,
2098 				struct scx_dispatch_q *dsq,
2099 				struct task_struct *p, struct rq *task_rq) { return false; }
2100 #endif	/* CONFIG_SMP */
2101 
2102 static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf,
2103 			       struct scx_dispatch_q *dsq)
2104 {
2105 	struct task_struct *p;
2106 retry:
2107 	if (list_empty(&dsq->list))
2108 		return false;
2109 
2110 	raw_spin_lock(&dsq->lock);
2111 
2112 	list_for_each_entry(p, &dsq->list, scx.dsq_node.list) {
2113 		struct rq *task_rq = task_rq(p);
2114 
2115 		if (rq == task_rq) {
2116 			consume_local_task(rq, dsq, p);
2117 			return true;
2118 		}
2119 
2120 		if (task_can_run_on_remote_rq(p, rq)) {
2121 			if (likely(consume_remote_task(rq, rf, dsq, p, task_rq)))
2122 				return true;
2123 			goto retry;
2124 		}
2125 	}
2126 
2127 	raw_spin_unlock(&dsq->lock);
2128 	return false;
2129 }
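
/*
 * consume_dispatch_q() is what ultimately backs scx_bpf_consume() calls made
 * from ops.dispatch(). As a sketch (not part of this file), a scheduler that
 * queues everything on a single custom DSQ created with scx_bpf_create_dsq()
 * could drain it one task at a time per dispatch, with MY_DSQ_ID being a
 * scheduler-defined constant:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		// Move one task from the custom DSQ to this CPU's local DSQ.
 *		scx_bpf_consume(MY_DSQ_ID);
 *	}
 */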
2130 
2131 enum dispatch_to_local_dsq_ret {
2132 	DTL_DISPATCHED,		/* successfully dispatched */
2133 	DTL_LOST,		/* lost race to dequeue */
2134 	DTL_NOT_LOCAL,		/* destination is not a local DSQ */
2135 	DTL_INVALID,		/* invalid local dsq_id */
2136 };
2137 
2138 /**
2139  * dispatch_to_local_dsq - Dispatch a task to a local dsq
2140  * @rq: current rq which is locked
2141  * @rf: rq_flags to use when unlocking @rq
2142  * @dsq_id: destination dsq ID
2143  * @p: task to dispatch
2144  * @enq_flags: %SCX_ENQ_*
2145  *
2146  * We're holding @rq lock and want to dispatch @p to the local DSQ identified by
2147  * @dsq_id. This function performs all the synchronization dancing needed
2148  * because local DSQs are protected with rq locks.
2149  *
2150  * The caller must have exclusive ownership of @p (e.g. through
2151  * %SCX_OPSS_DISPATCHING).
2152  */
2153 static enum dispatch_to_local_dsq_ret
2154 dispatch_to_local_dsq(struct rq *rq, struct rq_flags *rf, u64 dsq_id,
2155 		      struct task_struct *p, u64 enq_flags)
2156 {
2157 	struct rq *src_rq = task_rq(p);
2158 	struct rq *dst_rq;
2159 
2160 	/*
2161 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
2162 	 * be dequeued, its task_rq and cpus_allowed are stable too.
2163 	 */
2164 	if (dsq_id == SCX_DSQ_LOCAL) {
2165 		dst_rq = rq;
2166 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
2167 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
2168 
2169 		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
2170 			return DTL_INVALID;
2171 		dst_rq = cpu_rq(cpu);
2172 	} else {
2173 		return DTL_NOT_LOCAL;
2174 	}
2175 
2176 	/* if dispatching to @rq that @p is already on, no lock dancing needed */
2177 	if (rq == src_rq && rq == dst_rq) {
2178 		dispatch_enqueue(&dst_rq->scx.local_dsq, p,
2179 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
2180 		return DTL_DISPATCHED;
2181 	}
2182 
2183 #ifdef CONFIG_SMP
2184 	if (cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)) {
2185 		struct rq *locked_dst_rq = dst_rq;
2186 		bool dsp;
2187 
2188 		/*
2189 		 * @p is on a possibly remote @src_rq which we need to lock to
2190 		 * move the task. If dequeue is in progress, it'd be locking
2191 		 * @src_rq and waiting on DISPATCHING, so we can't grab @src_rq
2192 		 * lock while holding DISPATCHING.
2193 		 *
2194 		 * As DISPATCHING guarantees that @p is wholly ours, we can
2195 		 * pretend that we're moving from a DSQ and use the same
2196 		 * mechanism - mark the task under transfer with holding_cpu,
2197 		 * release DISPATCHING and then follow the same protocol.
2198 		 */
2199 		p->scx.holding_cpu = raw_smp_processor_id();
2200 
2201 		/* store_release ensures that dequeue sees the above */
2202 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2203 
2204 		dispatch_to_local_dsq_lock(rq, rf, src_rq, locked_dst_rq);
2205 
2206 		/*
2207 		 * We don't require the BPF scheduler to avoid dispatching to
2208 		 * offline CPUs mostly for convenience but also because CPUs can
2209 		 * go offline between scx_bpf_dispatch() calls and here. If @p
2210 		 * is destined to an offline CPU, queue it on its current CPU
2211 		 * instead, which should always be safe. As this is an allowed
2212 		 * behavior, don't trigger an ops error.
2213 		 */
2214 		if (!scx_rq_online(dst_rq))
2215 			dst_rq = src_rq;
2216 
2217 		if (src_rq == dst_rq) {
2218 			/*
2219 			 * As @p is staying on the same rq, there's no need to
2220 			 * go through the full deactivate/activate cycle.
2221 			 * Optimize by abbreviating the operations in
2222 			 * move_task_to_local_dsq().
2223 			 */
2224 			dsp = p->scx.holding_cpu == raw_smp_processor_id();
2225 			if (likely(dsp)) {
2226 				p->scx.holding_cpu = -1;
2227 				dispatch_enqueue(&dst_rq->scx.local_dsq, p,
2228 						 enq_flags);
2229 			}
2230 		} else {
2231 			dsp = move_task_to_local_dsq(dst_rq, p, enq_flags);
2232 		}
2233 
2234 		/* if the destination CPU is idle, wake it up */
2235 		if (dsp && sched_class_above(p->sched_class,
2236 					     dst_rq->curr->sched_class))
2237 			resched_curr(dst_rq);
2238 
2239 		dispatch_to_local_dsq_unlock(rq, rf, src_rq, locked_dst_rq);
2240 
2241 		return dsp ? DTL_DISPATCHED : DTL_LOST;
2242 	}
2243 #endif	/* CONFIG_SMP */
2244 
2245 	scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
2246 		      cpu_of(dst_rq), p->comm, p->pid);
2247 	return DTL_INVALID;
2248 }
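
/*
 * Note that %SCX_DSQ_LOCAL_ON verdicts are rejected on the direct-dispatch
 * path (see mark_direct_dispatch()) and thus only reach here via
 * ops.dispatch(). A hedged example of how a BPF scheduler might target a
 * specific CPU's local DSQ from ops.dispatch(), assuming @target_cpu was
 * chosen earlier by the scheduler:
 *
 *	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target_cpu, SCX_SLICE_DFL, 0);
 */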
2249 
2250 /**
2251  * finish_dispatch - Asynchronously finish dispatching a task
2252  * @rq: current rq which is locked
2253  * @rf: rq_flags to use when unlocking @rq
2254  * @p: task to finish dispatching
2255  * @qseq_at_dispatch: qseq when @p started getting dispatched
2256  * @dsq_id: destination DSQ ID
2257  * @enq_flags: %SCX_ENQ_*
2258  *
2259  * Dispatching to local DSQs may need to wait for queueing to complete or
2260  * require rq lock dancing. As we don't want to do either while inside
2261  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2262  * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
2263  * task and its qseq. Once ops.dispatch() returns, this function is called to
2264  * finish up.
2265  *
2266  * There is no guarantee that @p is still valid for dispatching or even that it
2267  * was valid in the first place. Make sure that the task is still owned by the
2268  * BPF scheduler and claim the ownership before dispatching.
2269  */
2270 static void finish_dispatch(struct rq *rq, struct rq_flags *rf,
2271 			    struct task_struct *p,
2272 			    unsigned long qseq_at_dispatch,
2273 			    u64 dsq_id, u64 enq_flags)
2274 {
2275 	struct scx_dispatch_q *dsq;
2276 	unsigned long opss;
2277 
2278 	touch_core_sched_dispatch(rq, p);
2279 retry:
2280 	/*
2281 	 * No need for _acquire here. @p is accessed only after a successful
2282 	 * try_cmpxchg to DISPATCHING.
2283 	 */
2284 	opss = atomic_long_read(&p->scx.ops_state);
2285 
2286 	switch (opss & SCX_OPSS_STATE_MASK) {
2287 	case SCX_OPSS_DISPATCHING:
2288 	case SCX_OPSS_NONE:
2289 		/* someone else already got to it */
2290 		return;
2291 	case SCX_OPSS_QUEUED:
2292 		/*
2293 		 * If qseq doesn't match, @p has gone through at least one
2294 		 * dispatch/dequeue and re-enqueue cycle between
2295 		 * scx_bpf_dispatch() and here and we have no claim on it.
2296 		 */
2297 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2298 			return;
2299 
2300 		/*
2301 		 * While we know @p is accessible, we don't yet have a claim on
2302 		 * it - the BPF scheduler is allowed to dispatch tasks
2303 		 * spuriously and there can be a racing dequeue attempt. Let's
2304 		 * claim @p by atomically transitioning it from QUEUED to
2305 		 * DISPATCHING.
2306 		 */
2307 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2308 						   SCX_OPSS_DISPATCHING)))
2309 			break;
2310 		goto retry;
2311 	case SCX_OPSS_QUEUEING:
2312 		/*
2313 		 * do_enqueue_task() is in the process of transferring the task
2314 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
2315 		 * holding any kernel or BPF resource that the enqueue path may
2316 		 * depend upon, it's safe to wait.
2317 		 */
2318 		wait_ops_state(p, opss);
2319 		goto retry;
2320 	}
2321 
2322 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2323 
2324 	switch (dispatch_to_local_dsq(rq, rf, dsq_id, p, enq_flags)) {
2325 	case DTL_DISPATCHED:
2326 		break;
2327 	case DTL_LOST:
2328 		break;
2329 	case DTL_INVALID:
2330 		dsq_id = SCX_DSQ_GLOBAL;
2331 		fallthrough;
2332 	case DTL_NOT_LOCAL:
2333 		dsq = find_dsq_for_dispatch(cpu_rq(raw_smp_processor_id()),
2334 					    dsq_id, p);
2335 		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2336 		break;
2337 	}
2338 }
2339 
2340 static void flush_dispatch_buf(struct rq *rq, struct rq_flags *rf)
2341 {
2342 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2343 	u32 u;
2344 
2345 	for (u = 0; u < dspc->cursor; u++) {
2346 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2347 
2348 		finish_dispatch(rq, rf, ent->task, ent->qseq, ent->dsq_id,
2349 				ent->enq_flags);
2350 	}
2351 
2352 	dspc->nr_tasks += dspc->cursor;
2353 	dspc->cursor = 0;
2354 }
2355 
2356 static int balance_one(struct rq *rq, struct task_struct *prev,
2357 		       struct rq_flags *rf, bool local)
2358 {
2359 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2360 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
2361 	int nr_loops = SCX_DSP_MAX_LOOPS;
2362 	bool has_tasks = false;
2363 
2364 	lockdep_assert_rq_held(rq);
2365 	rq->scx.flags |= SCX_RQ_BALANCING;
2366 
2367 	if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2368 	    unlikely(rq->scx.cpu_released)) {
2369 		/*
2370 		 * If the previous sched_class for the current CPU was not SCX,
2371 		 * notify the BPF scheduler that it again has control of the
2372 		 * core. This callback complements ->cpu_release(), which is
2373 		 * emitted in scx_next_task_picked().
2374 		 */
2375 		if (SCX_HAS_OP(cpu_acquire))
2376 			SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
2377 		rq->scx.cpu_released = false;
2378 	}
2379 
2380 	if (prev_on_scx) {
2381 		WARN_ON_ONCE(local && (prev->scx.flags & SCX_TASK_BAL_KEEP));
2382 		update_curr_scx(rq);
2383 
2384 		/*
2385 		 * If @prev is runnable & has slice left, it has priority and
2386 		 * fetching more just increases latency for the fetched tasks.
2387 		 * Tell put_prev_task_scx() to put @prev on local_dsq. If the
2388 		 * BPF scheduler wants to handle this explicitly, it should
2389 		 * implement ->cpu_released().
2390 		 * implement ->cpu_release().
2391 		 * See scx_ops_disable_workfn() for the explanation on the
2392 		 * bypassing test.
2393 		 *
2394 		 * When balancing a remote CPU for core-sched, there won't be a
2395 		 * following put_prev_task_scx() call and we don't own
2396 		 * %SCX_TASK_BAL_KEEP. Instead, pick_task_scx() will test the
2397 		 * same conditions later and pick @rq->curr accordingly.
2398 		 */
2399 		if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2400 		    prev->scx.slice && !scx_ops_bypassing()) {
2401 			if (local)
2402 				prev->scx.flags |= SCX_TASK_BAL_KEEP;
2403 			goto has_tasks;
2404 		}
2405 	}
2406 
2407 	/* if there already are tasks to run, nothing to do */
2408 	if (rq->scx.local_dsq.nr)
2409 		goto has_tasks;
2410 
2411 	if (consume_dispatch_q(rq, rf, &scx_dsq_global))
2412 		goto has_tasks;
2413 
2414 	if (!SCX_HAS_OP(dispatch) || scx_ops_bypassing() || !scx_rq_online(rq))
2415 		goto out;
2416 
2417 	dspc->rq = rq;
2418 	dspc->rf = rf;
2419 
2420 	/*
2421 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2422 	 * the local DSQ might still end up empty after a successful
2423 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2424 	 * produced some tasks, retry. The BPF scheduler may depend on this
2425 	 * looping behavior to simplify its implementation.
2426 	 */
2427 	do {
2428 		dspc->nr_tasks = 0;
2429 
2430 		SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2431 			    prev_on_scx ? prev : NULL);
2432 
2433 		flush_dispatch_buf(rq, rf);
2434 
2435 		if (rq->scx.local_dsq.nr)
2436 			goto has_tasks;
2437 		if (consume_dispatch_q(rq, rf, &scx_dsq_global))
2438 			goto has_tasks;
2439 
2440 		/*
2441 		 * ops.dispatch() can trap us in this loop by repeatedly
2442 		 * dispatching ineligible tasks. Break out once in a while to
2443 		 * allow the watchdog to run. As IRQs can't be enabled in
2444 		 * balance(), we want to complete this scheduling cycle and then
2445 		 * start a new one. IOW, we want to call resched_curr() on the
2446 		 * next, most likely idle, task, not the current one. Use
2447 		 * scx_bpf_kick_cpu() for deferred kicking.
2448 		 */
2449 		if (unlikely(!--nr_loops)) {
2450 			scx_bpf_kick_cpu(cpu_of(rq), 0);
2451 			break;
2452 		}
2453 	} while (dspc->nr_tasks);
2454 
2455 	goto out;
2456 
2457 has_tasks:
2458 	has_tasks = true;
2459 out:
2460 	rq->scx.flags &= ~SCX_RQ_BALANCING;
2461 	return has_tasks;
2462 }
2463 
2464 static int balance_scx(struct rq *rq, struct task_struct *prev,
2465 		       struct rq_flags *rf)
2466 {
2467 	int ret;
2468 
2469 	ret = balance_one(rq, prev, rf, true);
2470 
2471 #ifdef CONFIG_SCHED_SMT
2472 	/*
2473 	 * When core-sched is enabled, this ops.balance() call will be followed
2474 	 * by put_prev_task_scx() and pick_task_scx() on this CPU and pick_task_scx()
2475 	 * on the SMT siblings. Balance the siblings too.
2476 	 */
2477 	if (sched_core_enabled(rq)) {
2478 		const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2479 		int scpu;
2480 
2481 		for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2482 			struct rq *srq = cpu_rq(scpu);
2483 			struct rq_flags srf;
2484 			struct task_struct *sprev = srq->curr;
2485 
2486 			/*
2487 			 * While core-scheduling, rq lock is shared among
2488 			 * siblings but the debug annotations and rq clock
2489 			 * aren't. Do the pinning dance to transfer the ownership.
2490 			 */
2491 			WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2492 			rq_unpin_lock(rq, rf);
2493 			rq_pin_lock(srq, &srf);
2494 
2495 			update_rq_clock(srq);
2496 			balance_one(srq, sprev, &srf, false);
2497 
2498 			rq_unpin_lock(srq, &srf);
2499 			rq_repin_lock(rq, rf);
2500 		}
2501 	}
2502 #endif
2503 	return ret;
2504 }
2505 
2506 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2507 {
2508 	if (p->scx.flags & SCX_TASK_QUEUED) {
2509 		/*
2510 		 * Core-sched might decide to execute @p before it is
2511 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2512 		 */
2513 		ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2514 		dispatch_dequeue(rq, p);
2515 	}
2516 
2517 	p->se.exec_start = rq_clock_task(rq);
2518 
2519 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2520 	if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2521 		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2522 
2523 	clr_task_runnable(p, true);
2524 
2525 	/*
2526 	 * @p is getting newly scheduled or got kicked after someone updated its
2527 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2528 	 */
2529 	if ((p->scx.slice == SCX_SLICE_INF) !=
2530 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2531 		if (p->scx.slice == SCX_SLICE_INF)
2532 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2533 		else
2534 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2535 
2536 		sched_update_tick_dependency(rq);
2537 
2538 		/*
2539 		 * For now, let's refresh the load_avgs just when transitioning
2540 		 * in and out of nohz. In the future, we might want to add a
2541 		 * mechanism which calls the following periodically on
2542 		 * tick-stopped CPUs.
2543 		 */
2544 		update_other_load_avgs(rq);
2545 	}
2546 }
2547 
2548 static void put_prev_task_scx(struct rq *rq, struct task_struct *p)
2549 {
2550 #ifndef CONFIG_SMP
2551 	/*
2552 	 * UP workaround.
2553 	 *
2554 	 * Because SCX may transfer tasks across CPUs during dispatch, dispatch
2555 	 * is performed from its balance operation which isn't called in UP.
2556 	 * Let's work around by calling it from the operations which come right
2557 	 * after.
2558 	 *
2559 	 * 1. If the prev task is on SCX, pick_next_task() calls
2560 	 *    .put_prev_task() right after. As .put_prev_task() is also called
2561 	 *    from other places, we need to distinguish the calls which can be
2562 	 *    done by looking at the previous task's state - if still queued or
2563 	 *    dequeued with %SCX_DEQ_SLEEP, the caller must be pick_next_task().
2564 	 *    This case is handled here.
2565 	 *
2566 	 * 2. If the prev task is not on SCX, the first following call into SCX
2567 	 *    will be .pick_next_task(), which is covered by calling
2568 	 *    balance_scx() from pick_next_task_scx().
2569 	 *
2570 	 * Note that we can't merge the first case into the second as
2571 	 * balance_scx() must be called before the previous SCX task goes
2572 	 * through put_prev_task_scx().
2573 	 *
2574 	 * As UP doesn't transfer tasks around, balance_scx() doesn't need @rf.
2575 	 * Pass in %NULL.
2576 	 */
2577 	if (p->scx.flags & (SCX_TASK_QUEUED | SCX_TASK_DEQD_FOR_SLEEP))
2578 		balance_scx(rq, p, NULL);
2579 #endif
2580 
2581 	update_curr_scx(rq);
2582 
2583 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2584 	if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
2585 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
2586 
2587 	/*
2588 	 * If we're being called from put_prev_task_balance(), balance_scx() may
2589 	 * have decided that @p should keep running.
2590 	 */
2591 	if (p->scx.flags & SCX_TASK_BAL_KEEP) {
2592 		p->scx.flags &= ~SCX_TASK_BAL_KEEP;
2593 		set_task_runnable(rq, p);
2594 		dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
2595 		return;
2596 	}
2597 
2598 	if (p->scx.flags & SCX_TASK_QUEUED) {
2599 		set_task_runnable(rq, p);
2600 
2601 		/*
2602 		 * If @p has slice left and balance_scx() didn't tag it for
2603 		 * keeping, @p is getting preempted by a higher priority
2604 		 * scheduler class or core-sched forcing a different task. Leave
2605 		 * it at the head of the local DSQ.
2606 		 */
2607 		if (p->scx.slice && !scx_ops_bypassing()) {
2608 			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
2609 			return;
2610 		}
2611 
2612 		/*
2613 		 * If we're in the pick_next_task path, balance_scx() should
2614 		 * have already populated the local DSQ if there are any other
2615 		 * available tasks. If empty, tell ops.enqueue() that @p is the
2616 		 * only one available for this cpu. ops.enqueue() should put it
2617 		 * on the local DSQ so that the subsequent pick_next_task_scx()
2618 		 * can find the task unless it wants to trigger a separate
2619 		 * follow-up scheduling event.
2620 		 */
2621 		if (list_empty(&rq->scx.local_dsq.list))
2622 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
2623 		else
2624 			do_enqueue_task(rq, p, 0, -1);
2625 	}
2626 }
2627 
2628 static struct task_struct *first_local_task(struct rq *rq)
2629 {
2630 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
2631 					struct task_struct, scx.dsq_node.list);
2632 }
2633 
2634 static struct task_struct *pick_next_task_scx(struct rq *rq)
2635 {
2636 	struct task_struct *p;
2637 
2638 #ifndef CONFIG_SMP
2639 	/* UP workaround - see the comment at the head of put_prev_task_scx() */
2640 	if (unlikely(rq->curr->sched_class != &ext_sched_class))
2641 		balance_scx(rq, rq->curr, NULL);
2642 #endif
2643 
2644 	p = first_local_task(rq);
2645 	if (!p)
2646 		return NULL;
2647 
2648 	set_next_task_scx(rq, p, true);
2649 
2650 	if (unlikely(!p->scx.slice)) {
2651 		if (!scx_ops_bypassing() && !scx_warned_zero_slice) {
2652 			printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_next_task_scx()\n",
2653 					p->comm, p->pid);
2654 			scx_warned_zero_slice = true;
2655 		}
2656 		p->scx.slice = SCX_SLICE_DFL;
2657 	}
2658 
2659 	return p;
2660 }
2661 
2662 #ifdef CONFIG_SCHED_CORE
2663 /**
2664  * scx_prio_less - Task ordering for core-sched
2665  * @a: task A
2666  * @b: task B
2667  *
2668  * Core-sched is implemented as an additional scheduling layer on top of the
2669  * usual sched_class'es and needs to find out the expected task ordering. For
2670  * SCX, core-sched calls this function to interrogate the task ordering.
2671  *
2672  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
2673  * to implement the default task ordering. The older the timestamp, the higher
2674  * priority the task - the global FIFO ordering matching the default scheduling
2675  * behavior.
2676  *
2677  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
2678  * implement FIFO ordering within each local DSQ. See pick_task_scx().
2679  */
2680 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
2681 		   bool in_fi)
2682 {
2683 	/*
2684 	 * The const qualifiers are dropped from task_struct pointers when
2685 	 * calling ops.core_sched_before(). Accesses are controlled by the
2686 	 * verifier.
2687 	 */
2688 	if (SCX_HAS_OP(core_sched_before) && !scx_ops_bypassing())
2689 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
2690 					      (struct task_struct *)a,
2691 					      (struct task_struct *)b);
2692 	else
2693 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
2694 }
2695 
2696 /**
2697  * pick_task_scx - Pick a candidate task for core-sched
2698  * @rq: rq to pick the candidate task from
2699  *
2700  * Core-sched calls this function on each SMT sibling to determine the next
2701  * tasks to run on the SMT siblings. balance_one() has been called on all
2702  * siblings and put_prev_task_scx() has been called only for the current CPU.
2703  *
2704  * As put_prev_task_scx() hasn't been called on remote CPUs, we can't just look
2705  * at the first task in the local DSQ. @rq->curr has to be considered explicitly
2706  * to mimic %SCX_TASK_BAL_KEEP.
2707  */
2708 static struct task_struct *pick_task_scx(struct rq *rq)
2709 {
2710 	struct task_struct *curr = rq->curr;
2711 	struct task_struct *first = first_local_task(rq);
2712 
2713 	if (curr->scx.flags & SCX_TASK_QUEUED) {
2714 		/* is curr the only runnable task? */
2715 		if (!first)
2716 			return curr;
2717 
2718 		/*
2719 		 * Does curr trump first? We can always go by core_sched_at for
2720 		 * this comparison as it represents global FIFO ordering when
2721 		 * the default core-sched ordering is used and local-DSQ FIFO
2722 		 * ordering otherwise.
2723 		 *
2724 		 * We can have a task with an earlier timestamp on the DSQ. For
2725 		 * example, when the current task is preempted by a sibling
2726 		 * picking a different cookie, the task would be requeued at the
2727 		 * head of the local DSQ with an earlier timestamp than the
2728 		 * core-sched picked next task. Besides, the BPF scheduler may
2729 		 * dispatch any tasks to the local DSQ anytime.
2730 		 */
2731 		if (curr->scx.slice && time_before64(curr->scx.core_sched_at,
2732 						     first->scx.core_sched_at))
2733 			return curr;
2734 	}
2735 
2736 	return first;	/* this may be %NULL */
2737 }
2738 #endif	/* CONFIG_SCHED_CORE */
2739 
2740 static enum scx_cpu_preempt_reason
2741 preempt_reason_from_class(const struct sched_class *class)
2742 {
2743 #ifdef CONFIG_SMP
2744 	if (class == &stop_sched_class)
2745 		return SCX_CPU_PREEMPT_STOP;
2746 #endif
2747 	if (class == &dl_sched_class)
2748 		return SCX_CPU_PREEMPT_DL;
2749 	if (class == &rt_sched_class)
2750 		return SCX_CPU_PREEMPT_RT;
2751 	return SCX_CPU_PREEMPT_UNKNOWN;
2752 }
2753 
2754 static void switch_class_scx(struct rq *rq, struct task_struct *next)
2755 {
2756 	const struct sched_class *next_class = next->sched_class;
2757 
2758 	if (!scx_enabled())
2759 		return;
2760 #ifdef CONFIG_SMP
2761 	/*
2762 	 * Pairs with the smp_load_acquire() issued by a CPU in
2763 	 * kick_cpus_irq_workfn() which is waiting for this CPU to perform a
2764 	 * resched.
2765 	 */
2766 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
2767 #endif
2768 	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
2769 		return;
2770 
2771 	/*
2772 	 * The callback is conceptually meant to convey that the CPU is no
2773 	 * longer under the control of SCX. Therefore, don't invoke the callback
2774 	 * if the next class is below SCX (in which case the BPF scheduler has
2775 	 * actively decided not to schedule any tasks on the CPU).
2776 	 */
2777 	if (sched_class_above(&ext_sched_class, next_class))
2778 		return;
2779 
2780 	/*
2781 	 * At this point we know that SCX was preempted by a higher priority
2782 	 * sched_class, so invoke the ->cpu_release() callback if we have not
2783 	 * done so already. We only send the callback once between SCX being
2784 	 * preempted, and it regaining control of the CPU.
2785 	 *
2786 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2787 	 * next time that balance_scx() is invoked.
2788 	 */
2789 	if (!rq->scx.cpu_released) {
2790 		if (SCX_HAS_OP(cpu_release)) {
2791 			struct scx_cpu_release_args args = {
2792 				.reason = preempt_reason_from_class(next_class),
2793 				.task = next,
2794 			};
2795 
2796 			SCX_CALL_OP(SCX_KF_CPU_RELEASE,
2797 				    cpu_release, cpu_of(rq), &args);
2798 		}
2799 		rq->scx.cpu_released = true;
2800 	}
2801 }
2802 
2803 #ifdef CONFIG_SMP
2804 
2805 static bool test_and_clear_cpu_idle(int cpu)
2806 {
2807 #ifdef CONFIG_SCHED_SMT
2808 	/*
2809 	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
2810 	 * cluster is not wholly idle either way. This also prevents
2811 	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
2812 	 */
2813 	if (sched_smt_active()) {
2814 		const struct cpumask *smt = cpu_smt_mask(cpu);
2815 
2816 		/*
2817 		 * If offline, @cpu is not its own sibling and
2818 		 * scx_pick_idle_cpu() can get caught in an infinite loop as
2819 		 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
2820 		 * is eventually cleared.
2821 		 */
2822 		if (cpumask_intersects(smt, idle_masks.smt))
2823 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
2824 		else if (cpumask_test_cpu(cpu, idle_masks.smt))
2825 			__cpumask_clear_cpu(cpu, idle_masks.smt);
2826 	}
2827 #endif
2828 	return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
2829 }
2830 
2831 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
2832 {
2833 	int cpu;
2834 
2835 retry:
2836 	if (sched_smt_active()) {
2837 		cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
2838 		if (cpu < nr_cpu_ids)
2839 			goto found;
2840 
2841 		if (flags & SCX_PICK_IDLE_CORE)
2842 			return -EBUSY;
2843 	}
2844 
2845 	cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
2846 	if (cpu >= nr_cpu_ids)
2847 		return -EBUSY;
2848 
2849 found:
2850 	if (test_and_clear_cpu_idle(cpu))
2851 		return cpu;
2852 	else
2853 		goto retry;
2854 }
2855 
2856 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
2857 			      u64 wake_flags, bool *found)
2858 {
2859 	s32 cpu;
2860 
2861 	*found = false;
2862 
2863 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
2864 		scx_ops_error("built-in idle tracking is disabled");
2865 		return prev_cpu;
2866 	}
2867 
2868 	/*
2869 	 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
2870 	 * underutilized, wake up @p to the local DSQ of the waker. Checking
2871 	 * only for an empty local DSQ is insufficient as it could give the
2872 	 * wakee an unfair advantage when the system is oversaturated.
2873 	 * Checking only for the presence of idle CPUs is also insufficient as
2874 	 * the local DSQ of the waker could have tasks piled up on it even if
2875 	 * there is an idle core elsewhere on the system.
2876 	 */
2877 	cpu = smp_processor_id();
2878 	if ((wake_flags & SCX_WAKE_SYNC) && p->nr_cpus_allowed > 1 &&
2879 	    !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
2880 	    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
2881 		if (cpumask_test_cpu(cpu, p->cpus_ptr))
2882 			goto cpu_found;
2883 	}
2884 
2885 	if (p->nr_cpus_allowed == 1) {
2886 		if (test_and_clear_cpu_idle(prev_cpu)) {
2887 			cpu = prev_cpu;
2888 			goto cpu_found;
2889 		} else {
2890 			return prev_cpu;
2891 		}
2892 	}
2893 
2894 	/*
2895 	 * If CPU has SMT, any wholly idle CPU is likely a better pick than
2896 	 * partially idle @prev_cpu.
2897 	 */
2898 	if (sched_smt_active()) {
2899 		if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
2900 		    test_and_clear_cpu_idle(prev_cpu)) {
2901 			cpu = prev_cpu;
2902 			goto cpu_found;
2903 		}
2904 
2905 		cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
2906 		if (cpu >= 0)
2907 			goto cpu_found;
2908 	}
2909 
2910 	if (test_and_clear_cpu_idle(prev_cpu)) {
2911 		cpu = prev_cpu;
2912 		goto cpu_found;
2913 	}
2914 
2915 	cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
2916 	if (cpu >= 0)
2917 		goto cpu_found;
2918 
2919 	return prev_cpu;
2920 
2921 cpu_found:
2922 	*found = true;
2923 	return cpu;
2924 }
2925 
2926 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
2927 {
2928 	/*
2929 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
2930 	 * can be a good migration opportunity with low cache and memory
2931 	 * footprint. Returning a CPU different than @prev_cpu triggers
2932 	 * immediate rq migration. However, for SCX, as the current rq
2933 	 * association doesn't dictate where the task is going to run, this
2934 	 * doesn't fit well. If necessary, we can later add a dedicated method
2935 	 * which can decide to preempt self to force it through the regular
2936 	 * scheduling path.
2937 	 */
2938 	if (unlikely(wake_flags & WF_EXEC))
2939 		return prev_cpu;
2940 
2941 	if (SCX_HAS_OP(select_cpu)) {
2942 		s32 cpu;
2943 		struct task_struct **ddsp_taskp;
2944 
2945 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2946 		WARN_ON_ONCE(*ddsp_taskp);
2947 		*ddsp_taskp = p;
2948 
2949 		cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
2950 					   select_cpu, p, prev_cpu, wake_flags);
2951 		*ddsp_taskp = NULL;
2952 		if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
2953 			return cpu;
2954 		else
2955 			return prev_cpu;
2956 	} else {
2957 		bool found;
2958 		s32 cpu;
2959 
2960 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
2961 		if (found) {
2962 			p->scx.slice = SCX_SLICE_DFL;
2963 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
2964 		}
2965 		return cpu;
2966 	}
2967 }
2968 
2969 static void set_cpus_allowed_scx(struct task_struct *p,
2970 				 struct affinity_context *ac)
2971 {
2972 	set_cpus_allowed_common(p, ac);
2973 
2974 	/*
2975 	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
2976 	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
2977 	 * scheduler the effective one.
2978 	 *
2979 	 * Fine-grained memory write control is enforced by BPF making the const
2980 	 * designation pointless. Cast it away when calling the operation.
2981 	 */
2982 	if (SCX_HAS_OP(set_cpumask))
2983 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
2984 				 (struct cpumask *)p->cpus_ptr);
2985 }
2986 
2987 static void reset_idle_masks(void)
2988 {
2989 	/*
2990 	 * Consider all online cpus idle. Should converge to the actual state
2991 	 * quickly.
2992 	 */
2993 	cpumask_copy(idle_masks.cpu, cpu_online_mask);
2994 	cpumask_copy(idle_masks.smt, cpu_online_mask);
2995 }
2996 
2997 void __scx_update_idle(struct rq *rq, bool idle)
2998 {
2999 	int cpu = cpu_of(rq);
3000 
3001 	if (SCX_HAS_OP(update_idle)) {
3002 		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3003 		if (!static_branch_unlikely(&scx_builtin_idle_enabled))
3004 			return;
3005 	}
3006 
3007 	if (idle)
3008 		cpumask_set_cpu(cpu, idle_masks.cpu);
3009 	else
3010 		cpumask_clear_cpu(cpu, idle_masks.cpu);
3011 
3012 #ifdef CONFIG_SCHED_SMT
3013 	if (sched_smt_active()) {
3014 		const struct cpumask *smt = cpu_smt_mask(cpu);
3015 
3016 		if (idle) {
3017 			/*
3018 			 * idle_masks.smt handling is racy but that's fine as
3019 			 * it's only for optimization and self-correcting.
3020 			 */
3021 			for_each_cpu(cpu, smt) {
3022 				if (!cpumask_test_cpu(cpu, idle_masks.cpu))
3023 					return;
3024 			}
3025 			cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3026 		} else {
3027 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3028 		}
3029 	}
3030 #endif
3031 }
3032 
3033 static void handle_hotplug(struct rq *rq, bool online)
3034 {
3035 	int cpu = cpu_of(rq);
3036 
3037 	atomic_long_inc(&scx_hotplug_seq);
3038 
3039 	if (online && SCX_HAS_OP(cpu_online))
3040 		SCX_CALL_OP(SCX_KF_SLEEPABLE, cpu_online, cpu);
3041 	else if (!online && SCX_HAS_OP(cpu_offline))
3042 		SCX_CALL_OP(SCX_KF_SLEEPABLE, cpu_offline, cpu);
3043 	else
3044 		scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3045 			     "cpu %d going %s, exiting scheduler", cpu,
3046 			     online ? "online" : "offline");
3047 }
3048 
3049 void scx_rq_activate(struct rq *rq)
3050 {
3051 	handle_hotplug(rq, true);
3052 }
3053 
3054 void scx_rq_deactivate(struct rq *rq)
3055 {
3056 	handle_hotplug(rq, false);
3057 }
3058 
3059 static void rq_online_scx(struct rq *rq)
3060 {
3061 	rq->scx.flags |= SCX_RQ_ONLINE;
3062 }
3063 
3064 static void rq_offline_scx(struct rq *rq)
3065 {
3066 	rq->scx.flags &= ~SCX_RQ_ONLINE;
3067 }
3068 
3069 #else	/* CONFIG_SMP */
3070 
3071 static bool test_and_clear_cpu_idle(int cpu) { return false; }
3072 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3073 static void reset_idle_masks(void) {}
3074 
3075 #endif	/* CONFIG_SMP */
3076 
3077 static bool check_rq_for_timeouts(struct rq *rq)
3078 {
3079 	struct task_struct *p;
3080 	struct rq_flags rf;
3081 	bool timed_out = false;
3082 
3083 	rq_lock_irqsave(rq, &rf);
3084 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3085 		unsigned long last_runnable = p->scx.runnable_at;
3086 
3087 		if (unlikely(time_after(jiffies,
3088 					last_runnable + scx_watchdog_timeout))) {
3089 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3090 
3091 			scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3092 					   "%s[%d] failed to run for %u.%03us",
3093 					   p->comm, p->pid,
3094 					   dur_ms / 1000, dur_ms % 1000);
3095 			timed_out = true;
3096 			break;
3097 		}
3098 	}
3099 	rq_unlock_irqrestore(rq, &rf);
3100 
3101 	return timed_out;
3102 }
3103 
3104 static void scx_watchdog_workfn(struct work_struct *work)
3105 {
3106 	int cpu;
3107 
3108 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3109 
3110 	for_each_online_cpu(cpu) {
3111 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3112 			break;
3113 
3114 		cond_resched();
3115 	}
3116 	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3117 			   scx_watchdog_timeout / 2);
3118 }
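
/*
 * The watchdog interval above derives from sched_ext_ops.timeout_ms, which is
 * bounded by %SCX_WATCHDOG_MAX_TIMEOUT. As an illustration only (the exact
 * libbpf struct_ops conventions depend on the scheduler's build setup), a BPF
 * scheduler opts into a tighter stall limit by setting the field in its ops
 * definition:
 *
 *	SEC(".struct_ops")
 *	struct sched_ext_ops example_ops = {
 *		.enqueue	= (void *)example_enqueue,
 *		.timeout_ms	= 1000,
 *		.name		= "example",
 *	};
 */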
3119 
3120 void scx_tick(struct rq *rq)
3121 {
3122 	unsigned long last_check;
3123 
3124 	if (!scx_enabled())
3125 		return;
3126 
3127 	last_check = READ_ONCE(scx_watchdog_timestamp);
3128 	if (unlikely(time_after(jiffies,
3129 				last_check + READ_ONCE(scx_watchdog_timeout)))) {
3130 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3131 
3132 		scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3133 				   "watchdog failed to check in for %u.%03us",
3134 				   dur_ms / 1000, dur_ms % 1000);
3135 	}
3136 
3137 	update_other_load_avgs(rq);
3138 }
3139 
3140 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3141 {
3142 	update_curr_scx(rq);
3143 
3144 	/*
3145 	 * While disabling, always resched and refresh core-sched timestamp as
3146 	 * we can't trust the slice management or ops.core_sched_before().
3147 	 */
3148 	if (scx_ops_bypassing()) {
3149 		curr->scx.slice = 0;
3150 		touch_core_sched(rq, curr);
3151 	} else if (SCX_HAS_OP(tick)) {
3152 		SCX_CALL_OP(SCX_KF_REST, tick, curr);
3153 	}
3154 
3155 	if (!curr->scx.slice)
3156 		resched_curr(rq);
3157 }
3158 
3159 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3160 {
3161 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3162 }
3163 
3164 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3165 {
3166 	enum scx_task_state prev_state = scx_get_task_state(p);
3167 	bool warn = false;
3168 
3169 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3170 
3171 	switch (state) {
3172 	case SCX_TASK_NONE:
3173 		break;
3174 	case SCX_TASK_INIT:
3175 		warn = prev_state != SCX_TASK_NONE;
3176 		break;
3177 	case SCX_TASK_READY:
3178 		warn = prev_state == SCX_TASK_NONE;
3179 		break;
3180 	case SCX_TASK_ENABLED:
3181 		warn = prev_state != SCX_TASK_READY;
3182 		break;
3183 	default:
3184 		warn = true;
3185 		return;
3186 	}
3187 
3188 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3189 		  prev_state, state, p->comm, p->pid);
3190 
3191 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
3192 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3193 }
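
/*
 * For reference, the transitions accepted above form the following per-task
 * state machine:
 *
 *	NONE --ops.init_task()--> INIT --> READY <--> ENABLED
 *
 * READY <-> ENABLED tracks the task switching to and from the SCHED_EXT
 * class, and every state returns to NONE through scx_ops_exit_task().
 */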
3194 
3195 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3196 {
3197 	int ret;
3198 
3199 	p->scx.disallow = false;
3200 
3201 	if (SCX_HAS_OP(init_task)) {
3202 		struct scx_init_task_args args = {
3203 			.fork = fork,
3204 		};
3205 
3206 		ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, init_task, p, &args);
3207 		if (unlikely(ret)) {
3208 			ret = ops_sanitize_err("init_task", ret);
3209 			return ret;
3210 		}
3211 	}
3212 
3213 	scx_set_task_state(p, SCX_TASK_INIT);
3214 
3215 	if (p->scx.disallow) {
3216 		struct rq *rq;
3217 		struct rq_flags rf;
3218 
3219 		rq = task_rq_lock(p, &rf);
3220 
3221 		/*
3222 		 * We're either in fork or load path and @p->policy will be
3223 		 * applied right after. Reverting @p->policy here and rejecting
3224 		 * %SCHED_EXT transitions from scx_check_setscheduler()
3225 		 * guarantees that if ops.init_task() sets @p->disallow, @p can
3226 		 * never be in SCX.
3227 		 */
3228 		if (p->policy == SCHED_EXT) {
3229 			p->policy = SCHED_NORMAL;
3230 			atomic_long_inc(&scx_nr_rejected);
3231 		}
3232 
3233 		task_rq_unlock(rq, p, &rf);
3234 	}
3235 
3236 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3237 	return 0;
3238 }
3239 
3240 static void set_task_scx_weight(struct task_struct *p)
3241 {
3242 	u32 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3243 
3244 	p->scx.weight = sched_weight_to_cgroup(weight);
3245 }
3246 
3247 static void scx_ops_enable_task(struct task_struct *p)
3248 {
3249 	lockdep_assert_rq_held(task_rq(p));
3250 
3251 	/*
3252 	 * Set the weight before calling ops.enable() so that the scheduler
3253 	 * doesn't see a stale value if they inspect the task struct.
3254 	 */
3255 	set_task_scx_weight(p);
3256 	if (SCX_HAS_OP(enable))
3257 		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
3258 	scx_set_task_state(p, SCX_TASK_ENABLED);
3259 
3260 	if (SCX_HAS_OP(set_weight))
3261 		SCX_CALL_OP(SCX_KF_REST, set_weight, p, p->scx.weight);
3262 }
3263 
3264 static void scx_ops_disable_task(struct task_struct *p)
3265 {
3266 	lockdep_assert_rq_held(task_rq(p));
3267 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3268 
3269 	if (SCX_HAS_OP(disable))
3270 		SCX_CALL_OP(SCX_KF_REST, disable, p);
3271 	scx_set_task_state(p, SCX_TASK_READY);
3272 }
3273 
3274 static void scx_ops_exit_task(struct task_struct *p)
3275 {
3276 	struct scx_exit_task_args args = {
3277 		.cancelled = false,
3278 	};
3279 
3280 	lockdep_assert_rq_held(task_rq(p));
3281 
3282 	switch (scx_get_task_state(p)) {
3283 	case SCX_TASK_NONE:
3284 		return;
3285 	case SCX_TASK_INIT:
3286 		args.cancelled = true;
3287 		break;
3288 	case SCX_TASK_READY:
3289 		break;
3290 	case SCX_TASK_ENABLED:
3291 		scx_ops_disable_task(p);
3292 		break;
3293 	default:
3294 		WARN_ON_ONCE(true);
3295 		return;
3296 	}
3297 
3298 	if (SCX_HAS_OP(exit_task))
3299 		SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
3300 	scx_set_task_state(p, SCX_TASK_NONE);
3301 }
3302 
3303 void init_scx_entity(struct sched_ext_entity *scx)
3304 {
3305 	/*
3306 	 * init_idle() calls this function again after the fork sequence is
3307 	 * complete. Don't touch ->tasks_node as it's already linked.
3308 	 */
3309 	memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
3310 
3311 	INIT_LIST_HEAD(&scx->dsq_node.list);
3312 	RB_CLEAR_NODE(&scx->dsq_node.priq);
3313 	scx->sticky_cpu = -1;
3314 	scx->holding_cpu = -1;
3315 	INIT_LIST_HEAD(&scx->runnable_node);
3316 	scx->runnable_at = jiffies;
3317 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3318 	scx->slice = SCX_SLICE_DFL;
3319 }
3320 
3321 void scx_pre_fork(struct task_struct *p)
3322 {
3323 	/*
3324 	 * BPF scheduler enable/disable paths want to be able to iterate and
3325 	 * update all tasks which can become complex when racing forks. As
3326 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
3327 	 * exclude forks.
3328 	 */
3329 	percpu_down_read(&scx_fork_rwsem);
3330 }
3331 
3332 int scx_fork(struct task_struct *p)
3333 {
3334 	percpu_rwsem_assert_held(&scx_fork_rwsem);
3335 
3336 	if (scx_enabled())
3337 		return scx_ops_init_task(p, task_group(p), true);
3338 	else
3339 		return 0;
3340 }
3341 
3342 void scx_post_fork(struct task_struct *p)
3343 {
3344 	if (scx_enabled()) {
3345 		scx_set_task_state(p, SCX_TASK_READY);
3346 
3347 		/*
3348 		 * Enable the task immediately if it's running on sched_ext.
3349 		 * Otherwise, it'll be enabled in switching_to_scx() if and
3350 		 * when it's ever configured to run with a SCHED_EXT policy.
3351 		 */
3352 		if (p->sched_class == &ext_sched_class) {
3353 			struct rq_flags rf;
3354 			struct rq *rq;
3355 
3356 			rq = task_rq_lock(p, &rf);
3357 			scx_ops_enable_task(p);
3358 			task_rq_unlock(rq, p, &rf);
3359 		}
3360 	}
3361 
3362 	spin_lock_irq(&scx_tasks_lock);
3363 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
3364 	spin_unlock_irq(&scx_tasks_lock);
3365 
3366 	percpu_up_read(&scx_fork_rwsem);
3367 }
3368 
3369 void scx_cancel_fork(struct task_struct *p)
3370 {
3371 	if (scx_enabled()) {
3372 		struct rq *rq;
3373 		struct rq_flags rf;
3374 
3375 		rq = task_rq_lock(p, &rf);
3376 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3377 		scx_ops_exit_task(p);
3378 		task_rq_unlock(rq, p, &rf);
3379 	}
3380 
3381 	percpu_up_read(&scx_fork_rwsem);
3382 }
3383 
3384 void sched_ext_free(struct task_struct *p)
3385 {
3386 	unsigned long flags;
3387 
3388 	spin_lock_irqsave(&scx_tasks_lock, flags);
3389 	list_del_init(&p->scx.tasks_node);
3390 	spin_unlock_irqrestore(&scx_tasks_lock, flags);
3391 
3392 	/*
3393 	 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
3394 	 * ENABLED transitions can't race us. Disable ops for @p.
3395 	 */
3396 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
3397 		struct rq_flags rf;
3398 		struct rq *rq;
3399 
3400 		rq = task_rq_lock(p, &rf);
3401 		scx_ops_exit_task(p);
3402 		task_rq_unlock(rq, p, &rf);
3403 	}
3404 }
3405 
3406 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3407 			      const struct load_weight *lw)
3408 {
3409 	lockdep_assert_rq_held(task_rq(p));
3410 
3411 	set_task_scx_weight(p);
3412 	if (SCX_HAS_OP(set_weight))
3413 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3414 }
3415 
3416 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
3417 {
3418 }
3419 
3420 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3421 {
3422 	scx_ops_enable_task(p);
3423 
3424 	/*
3425 	 * set_cpus_allowed_scx() is not called while @p is associated with a
3426 	 * different scheduler class. Keep the BPF scheduler up-to-date.
3427 	 */
3428 	if (SCX_HAS_OP(set_cpumask))
3429 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3430 				 (struct cpumask *)p->cpus_ptr);
3431 }
3432 
3433 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3434 {
3435 	scx_ops_disable_task(p);
3436 }
3437 
3438 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
3439 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3440 
3441 int scx_check_setscheduler(struct task_struct *p, int policy)
3442 {
3443 	lockdep_assert_rq_held(task_rq(p));
3444 
3445 	/* if disallow, reject transitioning into SCX */
3446 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3447 	    p->policy != policy && policy == SCHED_EXT)
3448 		return -EACCES;
3449 
3450 	return 0;
3451 }
3452 
3453 #ifdef CONFIG_NO_HZ_FULL
3454 bool scx_can_stop_tick(struct rq *rq)
3455 {
3456 	struct task_struct *p = rq->curr;
3457 
3458 	if (scx_ops_bypassing())
3459 		return false;
3460 
3461 	if (p->sched_class != &ext_sched_class)
3462 		return true;
3463 
3464 	/*
3465 	 * @rq can dispatch from different DSQs, so we can't tell whether it
3466 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
3467 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
3468 	 */
3469 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
3470 }
3471 #endif
3472 
3473 /*
3474  * Omitted operations:
3475  *
3476  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
3477  *   isn't tied to the CPU at that point. Preemption is implemented by resetting
3478  *   the victim task's slice to 0 and triggering reschedule on the target CPU.
3479  *
3480  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
3481  *
3482  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
3483  *   their current sched_class. Call them directly from sched core instead.
3484  *
3485  * - task_woken: Unnecessary.
3486  */
3487 DEFINE_SCHED_CLASS(ext) = {
3488 	.enqueue_task		= enqueue_task_scx,
3489 	.dequeue_task		= dequeue_task_scx,
3490 	.yield_task		= yield_task_scx,
3491 	.yield_to_task		= yield_to_task_scx,
3492 
3493 	.wakeup_preempt		= wakeup_preempt_scx,
3494 
3495 	.pick_next_task		= pick_next_task_scx,
3496 
3497 	.put_prev_task		= put_prev_task_scx,
3498 	.set_next_task		= set_next_task_scx,
3499 
3500 	.switch_class		= switch_class_scx,
3501 
3502 #ifdef CONFIG_SMP
3503 	.balance		= balance_scx,
3504 	.select_task_rq		= select_task_rq_scx,
3505 	.set_cpus_allowed	= set_cpus_allowed_scx,
3506 
3507 	.rq_online		= rq_online_scx,
3508 	.rq_offline		= rq_offline_scx,
3509 #endif
3510 
3511 #ifdef CONFIG_SCHED_CORE
3512 	.pick_task		= pick_task_scx,
3513 #endif
3514 
3515 	.task_tick		= task_tick_scx,
3516 
3517 	.switching_to		= switching_to_scx,
3518 	.switched_from		= switched_from_scx,
3519 	.switched_to		= switched_to_scx,
3520 	.reweight_task		= reweight_task_scx,
3521 	.prio_changed		= prio_changed_scx,
3522 
3523 	.update_curr		= update_curr_scx,
3524 
3525 #ifdef CONFIG_UCLAMP_TASK
3526 	.uclamp_enabled		= 1,
3527 #endif
3528 };
3529 
3530 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
3531 {
3532 	memset(dsq, 0, sizeof(*dsq));
3533 
3534 	raw_spin_lock_init(&dsq->lock);
3535 	INIT_LIST_HEAD(&dsq->list);
3536 	dsq->id = dsq_id;
3537 }
3538 
3539 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
3540 {
3541 	struct scx_dispatch_q *dsq;
3542 	int ret;
3543 
3544 	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
3545 		return ERR_PTR(-EINVAL);
3546 
3547 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
3548 	if (!dsq)
3549 		return ERR_PTR(-ENOMEM);
3550 
3551 	init_dsq(dsq, dsq_id);
3552 
3553 	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
3554 				     dsq_hash_params);
3555 	if (ret) {
3556 		kfree(dsq);
3557 		return ERR_PTR(ret);
3558 	}
3559 	return dsq;
3560 }
3561 
3562 static void free_dsq_irq_workfn(struct irq_work *irq_work)
3563 {
3564 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
3565 	struct scx_dispatch_q *dsq, *tmp_dsq;
3566 
3567 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
3568 		kfree_rcu(dsq, rcu);
3569 }
3570 
3571 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
3572 
3573 static void destroy_dsq(u64 dsq_id)
3574 {
3575 	struct scx_dispatch_q *dsq;
3576 	unsigned long flags;
3577 
3578 	rcu_read_lock();
3579 
3580 	dsq = find_user_dsq(dsq_id);
3581 	if (!dsq)
3582 		goto out_unlock_rcu;
3583 
3584 	raw_spin_lock_irqsave(&dsq->lock, flags);
3585 
3586 	if (dsq->nr) {
3587 		scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
3588 			      dsq->id, dsq->nr);
3589 		goto out_unlock_dsq;
3590 	}
3591 
3592 	if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
3593 		goto out_unlock_dsq;
3594 
3595 	/*
3596 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
3597 	 * queueing more tasks. As this function can be called from anywhere,
3598 	 * freeing is bounced through an irq work to avoid nesting RCU
3599 	 * operations inside scheduler locks.
3600 	 */
3601 	dsq->id = SCX_DSQ_INVALID;
3602 	llist_add(&dsq->free_node, &dsqs_to_free);
3603 	irq_work_queue(&free_dsq_irq_work);
3604 
3605 out_unlock_dsq:
3606 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
3607 out_unlock_rcu:
3608 	rcu_read_unlock();
3609 }
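
/*
 * Illustrative sketch (not part of this file): user DSQs are normally created
 * from the BPF scheduler, e.g. in ops.init(), and torn down either explicitly
 * or by the rhashtable walk in scx_ops_disable_workfn() when the scheduler
 * unloads. Assuming the scx_bpf_create_dsq() kfunc exported later in this
 * file and a hypothetical MY_DSQ_ID:
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(MY_DSQ_ID, -1);	// any NUMA node
 *	}
 */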
3610 
3611 
3612 /********************************************************************************
3613  * Sysfs interface and ops enable/disable.
3614  */
3615 
3616 #define SCX_ATTR(_name)								\
3617 	static struct kobj_attribute scx_attr_##_name = {			\
3618 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
3619 		.show = scx_attr_##_name##_show,				\
3620 	}
3621 
3622 static ssize_t scx_attr_state_show(struct kobject *kobj,
3623 				   struct kobj_attribute *ka, char *buf)
3624 {
3625 	return sysfs_emit(buf, "%s\n",
3626 			  scx_ops_enable_state_str[scx_ops_enable_state()]);
3627 }
3628 SCX_ATTR(state);
3629 
3630 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
3631 					struct kobj_attribute *ka, char *buf)
3632 {
3633 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
3634 }
3635 SCX_ATTR(switch_all);
3636 
3637 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
3638 					 struct kobj_attribute *ka, char *buf)
3639 {
3640 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
3641 }
3642 SCX_ATTR(nr_rejected);
3643 
3644 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
3645 					 struct kobj_attribute *ka, char *buf)
3646 {
3647 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
3648 }
3649 SCX_ATTR(hotplug_seq);
3650 
3651 static struct attribute *scx_global_attrs[] = {
3652 	&scx_attr_state.attr,
3653 	&scx_attr_switch_all.attr,
3654 	&scx_attr_nr_rejected.attr,
3655 	&scx_attr_hotplug_seq.attr,
3656 	NULL,
3657 };
3658 
3659 static const struct attribute_group scx_global_attr_group = {
3660 	.attrs = scx_global_attrs,
3661 };
3662 
3663 static void scx_kobj_release(struct kobject *kobj)
3664 {
3665 	kfree(kobj);
3666 }
3667 
3668 static ssize_t scx_attr_ops_show(struct kobject *kobj,
3669 				 struct kobj_attribute *ka, char *buf)
3670 {
3671 	return sysfs_emit(buf, "%s\n", scx_ops.name);
3672 }
3673 SCX_ATTR(ops);
3674 
3675 static struct attribute *scx_sched_attrs[] = {
3676 	&scx_attr_ops.attr,
3677 	NULL,
3678 };
3679 ATTRIBUTE_GROUPS(scx_sched);
3680 
3681 static const struct kobj_type scx_ktype = {
3682 	.release = scx_kobj_release,
3683 	.sysfs_ops = &kobj_sysfs_ops,
3684 	.default_groups = scx_sched_groups,
3685 };
3686 
3687 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
3688 {
3689 	return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
3690 }
3691 
3692 static const struct kset_uevent_ops scx_uevent_ops = {
3693 	.uevent = scx_uevent,
3694 };
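
/*
 * Taken together, the above exposes scheduler state through sysfs: the global
 * attributes (state, switch_all, nr_rejected, hotplug_seq) hang off the scx
 * kset, the per-scheduler "root" kobject created in scx_ops_enable() carries
 * the "ops" attribute with the loaded scheduler's name, and the SCXOPS uevent
 * variable lets user space react to scheduler load/unload. Assuming scx_kset
 * is registered under kernel_kobj, these appear under /sys/kernel/sched_ext/.
 */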
3695 
3696 /*
3697  * Used by sched_fork() and __setscheduler_prio() to pick the matching
3698  * sched_class. dl/rt are already handled.
3699  */
3700 bool task_should_scx(struct task_struct *p)
3701 {
3702 	if (!scx_enabled() ||
3703 	    unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
3704 		return false;
3705 	if (READ_ONCE(scx_switching_all))
3706 		return true;
3707 	return p->policy == SCHED_EXT;
3708 }
3709 
3710 /**
3711  * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
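 * @bypass: %true to enter bypass mode, %false to leave it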
3712  *
3713  * Bypassing guarantees that all runnable tasks make forward progress without
3714  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
3715  * be held by tasks that the BPF scheduler is forgetting to run, which
3716  * unfortunately also excludes toggling the static branches.
3717  *
3718  * Let's work around it by overriding a couple of ops and modifying behaviors
3719  * based on the DISABLING state, then cycling the queued tasks through
3720  * dequeue/enqueue to force global FIFO scheduling.
3721  *
3722  * a. ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
3723  *
3724  * b. ops.dispatch() is ignored.
3725  *
3726  * c. balance_scx() never sets %SCX_TASK_BAL_KEEP as the slice value can't be
3727  *    trusted. Whenever a tick triggers, the running task is rotated to the tail
3728  *    of the queue with core_sched_at touched.
3729  *
3730  * d. pick_next_task() suppresses zero slice warning.
3731  *
3732  * e. scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
3733  *    operations.
3734  *
3735  * f. scx_prio_less() reverts to the default core_sched_at order.
3736  */
3737 static void scx_ops_bypass(bool bypass)
3738 {
3739 	int depth, cpu;
3740 
3741 	if (bypass) {
3742 		depth = atomic_inc_return(&scx_ops_bypass_depth);
3743 		WARN_ON_ONCE(depth <= 0);
3744 		if (depth != 1)
3745 			return;
3746 	} else {
3747 		depth = atomic_dec_return(&scx_ops_bypass_depth);
3748 		WARN_ON_ONCE(depth < 0);
3749 		if (depth != 0)
3750 			return;
3751 	}
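
	/*
	 * Only the outermost transition (bypass depth 0 -> 1 or 1 -> 0) falls
	 * through to the requeueing below; nested calls just adjust the depth
	 * and return above.
	 */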
3752 
3753 	/*
3754 	 * We need to guarantee that no tasks are on the BPF scheduler while
3755 	 * bypassing. Either we see enabled or the enable path sees the
3756 	 * increased bypass_depth before moving tasks to SCX.
3757 	 */
3758 	if (!scx_enabled())
3759 		return;
3760 
3761 	/*
3762 	 * No task property is changing. We just need to make sure all currently
3763 	 * queued tasks are re-queued according to the new scx_ops_bypassing()
3764 	 * state. As an optimization, walk each rq's runnable_list instead of
3765 	 * the scx_tasks list.
3766 	 *
3767 	 * This function can't trust the scheduler and thus can't use
3768 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
3769 	 */
3770 	for_each_possible_cpu(cpu) {
3771 		struct rq *rq = cpu_rq(cpu);
3772 		struct rq_flags rf;
3773 		struct task_struct *p, *n;
3774 
3775 		rq_lock_irqsave(rq, &rf);
3776 
3777 		/*
3778 		 * The use of list_for_each_entry_safe_reverse() is required
3779 		 * because each task is going to be removed from and added back
3780 		 * to the runnable_list during iteration. Because they're added
3781 		 * to the tail of the list, safe reverse iteration can still
3782 		 * visit all nodes.
3783 		 */
3784 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
3785 						 scx.runnable_node) {
3786 			struct sched_enq_and_set_ctx ctx;
3787 
3788 			/* cycling deq/enq is enough, see the function comment */
3789 			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
3790 			sched_enq_and_set_task(&ctx);
3791 		}
3792 
3793 		rq_unlock_irqrestore(rq, &rf);
3794 
3795 		/* kick to restore ticks */
3796 		resched_cpu(cpu);
3797 	}
3798 }
3799 
3800 static void free_exit_info(struct scx_exit_info *ei)
3801 {
3802 	kfree(ei->dump);
3803 	kfree(ei->msg);
3804 	kfree(ei->bt);
3805 	kfree(ei);
3806 }
3807 
3808 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
3809 {
3810 	struct scx_exit_info *ei;
3811 
3812 	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
3813 	if (!ei)
3814 		return NULL;
3815 
3816 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
3817 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
3818 	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
3819 
3820 	if (!ei->bt || !ei->msg || !ei->dump) {
3821 		free_exit_info(ei);
3822 		return NULL;
3823 	}
3824 
3825 	return ei;
3826 }
3827 
3828 static const char *scx_exit_reason(enum scx_exit_kind kind)
3829 {
3830 	switch (kind) {
3831 	case SCX_EXIT_UNREG:
3832 		return "Scheduler unregistered from user space";
3833 	case SCX_EXIT_UNREG_BPF:
3834 		return "Scheduler unregistered from BPF";
3835 	case SCX_EXIT_UNREG_KERN:
3836 		return "Scheduler unregistered from the main kernel";
3837 	case SCX_EXIT_SYSRQ:
3838 		return "disabled by sysrq-S";
3839 	case SCX_EXIT_ERROR:
3840 		return "runtime error";
3841 	case SCX_EXIT_ERROR_BPF:
3842 		return "scx_bpf_error";
3843 	case SCX_EXIT_ERROR_STALL:
3844 		return "runnable task stall";
3845 	default:
3846 		return "<UNKNOWN>";
3847 	}
3848 }
3849 
3850 static void scx_ops_disable_workfn(struct kthread_work *work)
3851 {
3852 	struct scx_exit_info *ei = scx_exit_info;
3853 	struct scx_task_iter sti;
3854 	struct task_struct *p;
3855 	struct rhashtable_iter rht_iter;
3856 	struct scx_dispatch_q *dsq;
3857 	int i, kind;
3858 
3859 	kind = atomic_read(&scx_exit_kind);
3860 	while (true) {
3861 		/*
3862 		 * NONE indicates that a new scx_ops has been registered since
3863 		 * disable was scheduled - don't kill the new ops. DONE
3864 		 * indicates that the ops has already been disabled.
3865 		 */
3866 		if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
3867 			return;
3868 		if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
3869 			break;
3870 	}
3871 	ei->kind = kind;
3872 	ei->reason = scx_exit_reason(ei->kind);
3873 
3874 	/* guarantee forward progress by bypassing scx_ops */
3875 	scx_ops_bypass(true);
3876 
3877 	switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
3878 	case SCX_OPS_DISABLING:
3879 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
3880 		break;
3881 	case SCX_OPS_DISABLED:
3882 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
3883 			scx_exit_info->msg);
3884 		WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
3885 			     SCX_OPS_DISABLING);
3886 		goto done;
3887 	default:
3888 		break;
3889 	}
3890 
3891 	/*
3892 	 * Here, every runnable task is guaranteed to make forward progress and
3893 	 * we can safely use blocking synchronization constructs. Actually
3894 	 * disable ops.
3895 	 */
3896 	mutex_lock(&scx_ops_enable_mutex);
3897 
3898 	static_branch_disable(&__scx_switched_all);
3899 	WRITE_ONCE(scx_switching_all, false);
3900 
3901 	/*
3902 	 * Avoid racing against fork. See scx_ops_enable() for explanation on
3903 	 * the locking order.
3904 	 */
3905 	percpu_down_write(&scx_fork_rwsem);
3906 	cpus_read_lock();
3907 
3908 	spin_lock_irq(&scx_tasks_lock);
3909 	scx_task_iter_init(&sti);
3910 	/*
3911 	 * Invoke scx_ops_exit_task() on all non-idle tasks, including
3912 	 * TASK_DEAD tasks. Because dead tasks may have a nonzero refcount,
3913 	 * we may not have invoked sched_ext_free() on them by the time a
3914 	 * scheduler is disabled. We must therefore exit the task here, or we'd
3915 	 * fail to invoke ops.exit_task(), as the scheduler will have been
3916 	 * unloaded by the time the task is subsequently exited on the
3917 	 * sched_ext_free() path.
3918 	 */
3919 	while ((p = scx_task_iter_next_locked(&sti, true))) {
3920 		const struct sched_class *old_class = p->sched_class;
3921 		struct sched_enq_and_set_ctx ctx;
3922 
3923 		if (READ_ONCE(p->__state) != TASK_DEAD) {
3924 			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE,
3925 					       &ctx);
3926 
3927 			p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL);
3928 			__setscheduler_prio(p, p->prio);
3929 			check_class_changing(task_rq(p), p, old_class);
3930 
3931 			sched_enq_and_set_task(&ctx);
3932 
3933 			check_class_changed(task_rq(p), p, old_class, p->prio);
3934 		}
3935 		scx_ops_exit_task(p);
3936 	}
3937 	scx_task_iter_exit(&sti);
3938 	spin_unlock_irq(&scx_tasks_lock);
3939 
3940 	/* no task is on scx, turn off all the switches and flush in-progress calls */
3941 	static_branch_disable_cpuslocked(&__scx_ops_enabled);
3942 	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
3943 		static_branch_disable_cpuslocked(&scx_has_op[i]);
3944 	static_branch_disable_cpuslocked(&scx_ops_enq_last);
3945 	static_branch_disable_cpuslocked(&scx_ops_enq_exiting);
3946 	static_branch_disable_cpuslocked(&scx_ops_cpu_preempt);
3947 	static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
3948 	synchronize_rcu();
3949 
3950 	cpus_read_unlock();
3951 	percpu_up_write(&scx_fork_rwsem);
3952 
3953 	if (ei->kind >= SCX_EXIT_ERROR) {
3954 		printk(KERN_ERR "sched_ext: BPF scheduler \"%s\" errored, disabling\n", scx_ops.name);
3955 
3956 		if (ei->msg[0] == '\0')
3957 			printk(KERN_ERR "sched_ext: %s\n", ei->reason);
3958 		else
3959 			printk(KERN_ERR "sched_ext: %s (%s)\n", ei->reason, ei->msg);
3960 
3961 		stack_trace_print(ei->bt, ei->bt_len, 2);
3962 	}
3963 
3964 	if (scx_ops.exit)
3965 		SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
3966 
3967 	cancel_delayed_work_sync(&scx_watchdog_work);
3968 
3969 	/*
3970 	 * Delete the kobject from the hierarchy eagerly in addition to just
3971 	 * dropping a reference. Otherwise, if the object is deleted
3972 	 * asynchronously, sysfs could observe an object of the same name still
3973 	 * in the hierarchy when another scheduler is loaded.
3974 	 */
3975 	kobject_del(scx_root_kobj);
3976 	kobject_put(scx_root_kobj);
3977 	scx_root_kobj = NULL;
3978 
3979 	memset(&scx_ops, 0, sizeof(scx_ops));
3980 
3981 	rhashtable_walk_enter(&dsq_hash, &rht_iter);
3982 	do {
3983 		rhashtable_walk_start(&rht_iter);
3984 
3985 		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
3986 			destroy_dsq(dsq->id);
3987 
3988 		rhashtable_walk_stop(&rht_iter);
3989 	} while (dsq == ERR_PTR(-EAGAIN));
3990 	rhashtable_walk_exit(&rht_iter);
3991 
3992 	free_percpu(scx_dsp_ctx);
3993 	scx_dsp_ctx = NULL;
3994 	scx_dsp_max_batch = 0;
3995 
3996 	free_exit_info(scx_exit_info);
3997 	scx_exit_info = NULL;
3998 
3999 	mutex_unlock(&scx_ops_enable_mutex);
4000 
4001 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4002 		     SCX_OPS_DISABLING);
4003 done:
4004 	scx_ops_bypass(false);
4005 }
4006 
4007 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
4008 
4009 static void schedule_scx_ops_disable_work(void)
4010 {
4011 	struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
4012 
4013 	/*
4014 	 * We may be called spuriously before the first bpf_sched_ext_reg(). If
4015 	 * scx_ops_helper isn't set up yet, there's nothing to do.
4016 	 */
4017 	if (helper)
4018 		kthread_queue_work(helper, &scx_ops_disable_work);
4019 }
4020 
4021 static void scx_ops_disable(enum scx_exit_kind kind)
4022 {
4023 	int none = SCX_EXIT_NONE;
4024 
4025 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
4026 		kind = SCX_EXIT_ERROR;
4027 
4028 	atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
4029 
4030 	schedule_scx_ops_disable_work();
4031 }
4032 
4033 static void dump_newline(struct seq_buf *s)
4034 {
4035 	trace_sched_ext_dump("");
4036 
4037 	/* @s may be zero sized and seq_buf triggers WARN if so */
4038 	if (s->size)
4039 		seq_buf_putc(s, '\n');
4040 }
4041 
4042 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
4043 {
4044 	va_list args;
4045 
4046 #ifdef CONFIG_TRACEPOINTS
4047 	if (trace_sched_ext_dump_enabled()) {
4048 		/* protected by scx_dump_state()::dump_lock */
4049 		static char line_buf[SCX_EXIT_MSG_LEN];
4050 
4051 		va_start(args, fmt);
4052 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
4053 		va_end(args);
4054 
4055 		trace_sched_ext_dump(line_buf);
4056 	}
4057 #endif
4058 	/* @s may be zero sized and seq_buf triggers WARN if so */
4059 	if (s->size) {
4060 		va_start(args, fmt);
4061 		seq_buf_vprintf(s, fmt, args);
4062 		va_end(args);
4063 
4064 		seq_buf_putc(s, '\n');
4065 	}
4066 }
4067 
4068 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
4069 			     const unsigned long *bt, unsigned int len)
4070 {
4071 	unsigned int i;
4072 
4073 	for (i = 0; i < len; i++)
4074 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
4075 }
4076 
4077 static void ops_dump_init(struct seq_buf *s, const char *prefix)
4078 {
4079 	struct scx_dump_data *dd = &scx_dump_data;
4080 
4081 	lockdep_assert_irqs_disabled();
4082 
4083 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
4084 	dd->first = true;
4085 	dd->cursor = 0;
4086 	dd->s = s;
4087 	dd->prefix = prefix;
4088 }
4089 
4090 static void ops_dump_flush(void)
4091 {
4092 	struct scx_dump_data *dd = &scx_dump_data;
4093 	char *line = dd->buf.line;
4094 
4095 	if (!dd->cursor)
4096 		return;
4097 
4098 	/*
4099 	 * There's something to flush and this is the first line. Insert a blank
4100 	 * line to distinguish the ops dump.
4101 	 */
4102 	if (dd->first) {
4103 		dump_newline(dd->s);
4104 		dd->first = false;
4105 	}
4106 
4107 	/*
4108 	 * There may be multiple lines in $line. Scan and emit each line
4109 	 * separately.
4110 	 */
4111 	while (true) {
4112 		char *end = line;
4113 		char c;
4114 
4115 		while (*end != '\n' && *end != '\0')
4116 			end++;
4117 
4118 		/*
4119 		 * If $line overflowed, it may not have a newline at the end.
4120 		 * Always emit with a newline.
4121 		 */
4122 		c = *end;
4123 		*end = '\0';
4124 		dump_line(dd->s, "%s%s", dd->prefix, line);
4125 		if (c == '\0')
4126 			break;
4127 
4128 		/* move to the next line */
4129 		end++;
4130 		if (*end == '\0')
4131 			break;
4132 		line = end;
4133 	}
4134 
4135 	dd->cursor = 0;
4136 }
4137 
4138 static void ops_dump_exit(void)
4139 {
4140 	ops_dump_flush();
4141 	scx_dump_data.cpu = -1;
4142 }
4143 
4144 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
4145 			  struct task_struct *p, char marker)
4146 {
4147 	static unsigned long bt[SCX_EXIT_BT_LEN];
4148 	char dsq_id_buf[19] = "(n/a)";
4149 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
4150 	unsigned int bt_len;
4151 
4152 	if (p->scx.dsq)
4153 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
4154 			  (unsigned long long)p->scx.dsq->id);
4155 
4156 	dump_newline(s);
4157 	dump_line(s, " %c%c %s[%d] %+ldms",
4158 		  marker, task_state_to_char(p), p->comm, p->pid,
4159 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
4160 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
4161 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
4162 		  p->scx.dsq_node.flags, ops_state & SCX_OPSS_STATE_MASK,
4163 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
4164 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
4165 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
4166 		  p->scx.dsq_vtime);
4167 	dump_line(s, "      cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
4168 
4169 	if (SCX_HAS_OP(dump_task)) {
4170 		ops_dump_init(s, "    ");
4171 		SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
4172 		ops_dump_exit();
4173 	}
4174 
4175 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
4176 	if (bt_len) {
4177 		dump_newline(s);
4178 		dump_stack_trace(s, "    ", bt, bt_len);
4179 	}
4180 }
4181 
4182 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
4183 {
4184 	static DEFINE_SPINLOCK(dump_lock);
4185 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
4186 	struct scx_dump_ctx dctx = {
4187 		.kind = ei->kind,
4188 		.exit_code = ei->exit_code,
4189 		.reason = ei->reason,
4190 		.at_ns = ktime_get_ns(),
4191 		.at_jiffies = jiffies,
4192 	};
4193 	struct seq_buf s;
4194 	unsigned long flags;
4195 	char *buf;
4196 	int cpu;
4197 
4198 	spin_lock_irqsave(&dump_lock, flags);
4199 
4200 	seq_buf_init(&s, ei->dump, dump_len);
4201 
4202 	if (ei->kind == SCX_EXIT_NONE) {
4203 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
4204 	} else {
4205 		dump_line(&s, "%s[%d] triggered exit kind %d:",
4206 			  current->comm, current->pid, ei->kind);
4207 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
4208 		dump_newline(&s);
4209 		dump_line(&s, "Backtrace:");
4210 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
4211 	}
4212 
4213 	if (SCX_HAS_OP(dump)) {
4214 		ops_dump_init(&s, "");
4215 		SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
4216 		ops_dump_exit();
4217 	}
4218 
4219 	dump_newline(&s);
4220 	dump_line(&s, "CPU states");
4221 	dump_line(&s, "----------");
4222 
4223 	for_each_possible_cpu(cpu) {
4224 		struct rq *rq = cpu_rq(cpu);
4225 		struct rq_flags rf;
4226 		struct task_struct *p;
4227 		struct seq_buf ns;
4228 		size_t avail, used;
4229 		bool idle;
4230 
4231 		rq_lock(rq, &rf);
4232 
4233 		idle = list_empty(&rq->scx.runnable_list) &&
4234 			rq->curr->sched_class == &idle_sched_class;
4235 
4236 		if (idle && !SCX_HAS_OP(dump_cpu))
4237 			goto next;
4238 
4239 		/*
4240 		 * We don't yet know whether ops.dump_cpu() will produce output
4241 		 * and we may want to skip the default CPU dump if it doesn't.
4242 		 * Use a nested seq_buf to generate the standard dump so that we
4243 		 * can decide whether to commit later.
4244 		 */
4245 		avail = seq_buf_get_buf(&s, &buf);
4246 		seq_buf_init(&ns, buf, avail);
4247 
4248 		dump_newline(&ns);
4249 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
4250 			  cpu, rq->scx.nr_running, rq->scx.flags,
4251 			  rq->scx.cpu_released, rq->scx.ops_qseq,
4252 			  rq->scx.pnt_seq);
4253 		dump_line(&ns, "          curr=%s[%d] class=%ps",
4254 			  rq->curr->comm, rq->curr->pid,
4255 			  rq->curr->sched_class);
4256 		if (!cpumask_empty(rq->scx.cpus_to_kick))
4257 			dump_line(&ns, "  cpus_to_kick   : %*pb",
4258 				  cpumask_pr_args(rq->scx.cpus_to_kick));
4259 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
4260 			dump_line(&ns, "  idle_to_kick   : %*pb",
4261 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
4262 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
4263 			dump_line(&ns, "  cpus_to_preempt: %*pb",
4264 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
4265 		if (!cpumask_empty(rq->scx.cpus_to_wait))
4266 			dump_line(&ns, "  cpus_to_wait   : %*pb",
4267 				  cpumask_pr_args(rq->scx.cpus_to_wait));
4268 
4269 		used = seq_buf_used(&ns);
4270 		if (SCX_HAS_OP(dump_cpu)) {
4271 			ops_dump_init(&ns, "  ");
4272 			SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
4273 			ops_dump_exit();
4274 		}
4275 
4276 		/*
4277 		 * If idle && nothing generated by ops.dump_cpu(), there's
4278 		 * nothing interesting. Skip.
4279 		 */
4280 		if (idle && used == seq_buf_used(&ns))
4281 			goto next;
4282 
4283 		/*
4284 		 * $s may already have overflowed when $ns was created. If so,
4285 		 * calling commit on it will trigger BUG.
4286 		 */
4287 		if (avail) {
4288 			seq_buf_commit(&s, seq_buf_used(&ns));
4289 			if (seq_buf_has_overflowed(&ns))
4290 				seq_buf_set_overflow(&s);
4291 		}
4292 
4293 		if (rq->curr->sched_class == &ext_sched_class)
4294 			scx_dump_task(&s, &dctx, rq->curr, '*');
4295 
4296 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
4297 			scx_dump_task(&s, &dctx, p, ' ');
4298 	next:
4299 		rq_unlock(rq, &rf);
4300 	}
4301 
4302 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
4303 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
4304 		       trunc_marker, sizeof(trunc_marker));
4305 
4306 	spin_unlock_irqrestore(&dump_lock, flags);
4307 }
4308 
4309 static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
4310 {
4311 	struct scx_exit_info *ei = scx_exit_info;
4312 
4313 	if (ei->kind >= SCX_EXIT_ERROR)
4314 		scx_dump_state(ei, scx_ops.exit_dump_len);
4315 
4316 	schedule_scx_ops_disable_work();
4317 }
4318 
4319 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
4320 
4321 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
4322 					     s64 exit_code,
4323 					     const char *fmt, ...)
4324 {
4325 	struct scx_exit_info *ei = scx_exit_info;
4326 	int none = SCX_EXIT_NONE;
4327 	va_list args;
4328 
4329 	if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
4330 		return;
4331 
4332 	ei->exit_code = exit_code;
4333 
4334 	if (kind >= SCX_EXIT_ERROR)
4335 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
4336 
4337 	va_start(args, fmt);
4338 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
4339 	va_end(args);
4340 
4341 	/*
4342 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
4343 	 * in scx_ops_disable_workfn().
4344 	 */
4345 	ei->kind = kind;
4346 	ei->reason = scx_exit_reason(ei->kind);
4347 
4348 	irq_work_queue(&scx_ops_error_irq_work);
4349 }
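
/*
 * scx_ops_error() and scx_ops_exit() used throughout this file are thin
 * wrappers around scx_ops_exit_kind(), as are the scx_bpf_error() and
 * scx_bpf_exit() helpers callable from the BPF scheduler. All of them record
 * the exit code and message here (plus a backtrace for errors) and punt the
 * actual disabling to the irq work and the disable kthread worker.
 */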
4350 
4351 static struct kthread_worker *scx_create_rt_helper(const char *name)
4352 {
4353 	struct kthread_worker *helper;
4354 
4355 	helper = kthread_create_worker(0, name);
4356 	if (helper)
4357 		sched_set_fifo(helper->task);
4358 	return helper;
4359 }
4360 
4361 static void check_hotplug_seq(const struct sched_ext_ops *ops)
4362 {
4363 	unsigned long long global_hotplug_seq;
4364 
4365 	/*
4366 	 * If a hotplug event has occurred between when the scheduler was
4367 	 * initialized and when we were able to attach, exit and notify user
4368 	 * space about it.
4369 	 */
4370 	if (ops->hotplug_seq) {
4371 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
4372 		if (ops->hotplug_seq != global_hotplug_seq) {
4373 			scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
4374 				     "expected hotplug seq %llu did not match actual %llu",
4375 				     ops->hotplug_seq, global_hotplug_seq);
4376 		}
4377 	}
4378 }
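
/*
 * Illustrative sketch (not part of this file): a user-space loader can close
 * this race by sampling the hotplug_seq sysfs attribute exported above just
 * before loading and storing it into the ops it is about to register. The
 * path, skeleton and helper names below are assumptions:
 *
 *	skel->struct_ops.example_ops->hotplug_seq =
 *		read_u64("/sys/kernel/sched_ext/hotplug_seq");
 *
 * If a CPU comes or goes between the read and the attach, scx_ops_exit()
 * above reports SCX_ECODE_ACT_RESTART so the loader can simply retry.
 */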
4379 
4380 static int validate_ops(const struct sched_ext_ops *ops)
4381 {
4382 	/*
4383 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
4384 	 * ops.enqueue() callback isn't implemented.
4385 	 */
4386 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
4387 		scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
4388 		return -EINVAL;
4389 	}
4390 
4391 	return 0;
4392 }
4393 
4394 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
4395 {
4396 	struct scx_task_iter sti;
4397 	struct task_struct *p;
4398 	unsigned long timeout;
4399 	int i, cpu, ret;
4400 
4401 	mutex_lock(&scx_ops_enable_mutex);
4402 
4403 	if (!scx_ops_helper) {
4404 		WRITE_ONCE(scx_ops_helper,
4405 			   scx_create_rt_helper("sched_ext_ops_helper"));
4406 		if (!scx_ops_helper) {
4407 			ret = -ENOMEM;
4408 			goto err_unlock;
4409 		}
4410 	}
4411 
4412 	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
4413 		ret = -EBUSY;
4414 		goto err_unlock;
4415 	}
4416 
4417 	scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
4418 	if (!scx_root_kobj) {
4419 		ret = -ENOMEM;
4420 		goto err_unlock;
4421 	}
4422 
4423 	scx_root_kobj->kset = scx_kset;
4424 	ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
4425 	if (ret < 0)
4426 		goto err;
4427 
4428 	scx_exit_info = alloc_exit_info(ops->exit_dump_len);
4429 	if (!scx_exit_info) {
4430 		ret = -ENOMEM;
4431 		goto err_del;
4432 	}
4433 
4434 	/*
4435 	 * Set scx_ops, transition to PREPPING and clear exit info to arm the
4436 	 * disable path. Failure triggers full disabling from here on.
4437 	 */
4438 	scx_ops = *ops;
4439 
4440 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_PREPPING) !=
4441 		     SCX_OPS_DISABLED);
4442 
4443 	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
4444 	scx_warned_zero_slice = false;
4445 
4446 	atomic_long_set(&scx_nr_rejected, 0);
4447 
4448 	for_each_possible_cpu(cpu)
4449 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
4450 
4451 	/*
4452 	 * Keep CPUs stable during enable so that the BPF scheduler can track
4453 	 * online CPUs by watching ->on/offline_cpu() after ->init().
4454 	 */
4455 	cpus_read_lock();
4456 
4457 	if (scx_ops.init) {
4458 		ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, init);
4459 		if (ret) {
4460 			ret = ops_sanitize_err("init", ret);
4461 			goto err_disable_unlock_cpus;
4462 		}
4463 	}
4464 
4465 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
4466 		if (((void (**)(void))ops)[i])
4467 			static_branch_enable_cpuslocked(&scx_has_op[i]);
4468 
4469 	cpus_read_unlock();
4470 
4471 	ret = validate_ops(ops);
4472 	if (ret)
4473 		goto err_disable;
4474 
4475 	WARN_ON_ONCE(scx_dsp_ctx);
4476 	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
4477 	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
4478 						   scx_dsp_max_batch),
4479 				     __alignof__(struct scx_dsp_ctx));
4480 	if (!scx_dsp_ctx) {
4481 		ret = -ENOMEM;
4482 		goto err_disable;
4483 	}
4484 
4485 	if (ops->timeout_ms)
4486 		timeout = msecs_to_jiffies(ops->timeout_ms);
4487 	else
4488 		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
4489 
4490 	WRITE_ONCE(scx_watchdog_timeout, timeout);
4491 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
4492 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
4493 			   scx_watchdog_timeout / 2);
4494 
4495 	/*
4496 	 * Lock out forks before opening the floodgate so that they don't wander
4497 	 * into the operations prematurely.
4498 	 *
4499 	 * We don't need to keep the CPUs stable but grab cpus_read_lock() to
4500 	 * ease future locking changes for cgroup support.
4501 	 *
4502 	 * Note that cpu_hotplug_lock must nest inside scx_fork_rwsem due to the
4503 	 * following dependency chain:
4504 	 *
4505 	 *   scx_fork_rwsem --> pernet_ops_rwsem --> cpu_hotplug_lock
4506 	 */
4507 	percpu_down_write(&scx_fork_rwsem);
4508 	cpus_read_lock();
4509 
4510 	check_hotplug_seq(ops);
4511 
4512 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
4513 		if (((void (**)(void))ops)[i])
4514 			static_branch_enable_cpuslocked(&scx_has_op[i]);
4515 
4516 	if (ops->flags & SCX_OPS_ENQ_LAST)
4517 		static_branch_enable_cpuslocked(&scx_ops_enq_last);
4518 
4519 	if (ops->flags & SCX_OPS_ENQ_EXITING)
4520 		static_branch_enable_cpuslocked(&scx_ops_enq_exiting);
4521 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
4522 		static_branch_enable_cpuslocked(&scx_ops_cpu_preempt);
4523 
4524 	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
4525 		reset_idle_masks();
4526 		static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
4527 	} else {
4528 		static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
4529 	}
4530 
4531 	static_branch_enable_cpuslocked(&__scx_ops_enabled);
4532 
4533 	/*
4534 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
4535 	 * preventing new tasks from being added. No need to exclude tasks
4536 	 * leaving as sched_ext_free() can handle both prepped and enabled
4537 	 * tasks. Prep all tasks first and then enable them with preemption
4538 	 * disabled.
4539 	 */
4540 	spin_lock_irq(&scx_tasks_lock);
4541 
4542 	scx_task_iter_init(&sti);
4543 	while ((p = scx_task_iter_next_locked(&sti, false))) {
4544 		get_task_struct(p);
4545 		scx_task_iter_rq_unlock(&sti);
4546 		spin_unlock_irq(&scx_tasks_lock);
4547 
4548 		ret = scx_ops_init_task(p, task_group(p), false);
4549 		if (ret) {
4550 			put_task_struct(p);
4551 			spin_lock_irq(&scx_tasks_lock);
4552 			scx_task_iter_exit(&sti);
4553 			spin_unlock_irq(&scx_tasks_lock);
4554 			pr_err("sched_ext: ops.init_task() failed (%d) for %s[%d] while loading\n",
4555 			       ret, p->comm, p->pid);
4556 			goto err_disable_unlock_all;
4557 		}
4558 
4559 		put_task_struct(p);
4560 		spin_lock_irq(&scx_tasks_lock);
4561 	}
4562 	scx_task_iter_exit(&sti);
4563 
4564 	/*
4565 	 * All tasks are prepped but are still ops-disabled. Ensure that
4566 	 * %current can't be scheduled out and switch everyone.
4567 	 * preempt_disable() is necessary because we can't guarantee that
4568 	 * %current won't be starved if scheduled out while switching.
4569 	 */
4570 	preempt_disable();
4571 
4572 	/*
4573 	 * From here on, the disable path must assume that tasks have ops
4574 	 * enabled and need to be recovered.
4575 	 *
4576 	 * Transition to ENABLING fails iff the BPF scheduler has already
4577 	 * triggered scx_bpf_error(). Returning an error code here would lose
4578 	 * the recorded error information. Exit indicating success so that the
4579 	 * error is notified through ops.exit() with all the details.
4580 	 */
4581 	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLING, SCX_OPS_PREPPING)) {
4582 		preempt_enable();
4583 		spin_unlock_irq(&scx_tasks_lock);
4584 		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
4585 		ret = 0;
4586 		goto err_disable_unlock_all;
4587 	}
4588 
4589 	/*
4590 	 * We're fully committed and can't fail. The READY -> ENABLED
4591 	 * transitions here are synchronized against sched_ext_free() through
4592 	 * scx_tasks_lock.
4593 	 */
4594 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
4595 
4596 	scx_task_iter_init(&sti);
4597 	while ((p = scx_task_iter_next_locked(&sti, false))) {
4598 		const struct sched_class *old_class = p->sched_class;
4599 		struct sched_enq_and_set_ctx ctx;
4600 
4601 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4602 
4603 		scx_set_task_state(p, SCX_TASK_READY);
4604 		__setscheduler_prio(p, p->prio);
4605 		check_class_changing(task_rq(p), p, old_class);
4606 
4607 		sched_enq_and_set_task(&ctx);
4608 
4609 		check_class_changed(task_rq(p), p, old_class, p->prio);
4610 	}
4611 	scx_task_iter_exit(&sti);
4612 
4613 	spin_unlock_irq(&scx_tasks_lock);
4614 	preempt_enable();
4615 	cpus_read_unlock();
4616 	percpu_up_write(&scx_fork_rwsem);
4617 
4618 	/* see above ENABLING transition for the explanation on exiting with 0 */
4619 	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
4620 		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
4621 		ret = 0;
4622 		goto err_disable;
4623 	}
4624 
4625 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
4626 		static_branch_enable(&__scx_switched_all);
4627 
4628 	kobject_uevent(scx_root_kobj, KOBJ_ADD);
4629 	mutex_unlock(&scx_ops_enable_mutex);
4630 
4631 	return 0;
4632 
4633 err_del:
4634 	kobject_del(scx_root_kobj);
4635 err:
4636 	kobject_put(scx_root_kobj);
4637 	scx_root_kobj = NULL;
4638 	if (scx_exit_info) {
4639 		free_exit_info(scx_exit_info);
4640 		scx_exit_info = NULL;
4641 	}
4642 err_unlock:
4643 	mutex_unlock(&scx_ops_enable_mutex);
4644 	return ret;
4645 
4646 err_disable_unlock_all:
4647 	percpu_up_write(&scx_fork_rwsem);
4648 err_disable_unlock_cpus:
4649 	cpus_read_unlock();
4650 err_disable:
4651 	mutex_unlock(&scx_ops_enable_mutex);
4652 	/* must be fully disabled before returning */
4653 	scx_ops_disable(SCX_EXIT_ERROR);
4654 	kthread_flush_work(&scx_ops_disable_work);
4655 	return ret;
4656 }
4657 
4658 
4659 /********************************************************************************
4660  * bpf_struct_ops plumbing.
4661  */
4662 #include <linux/bpf_verifier.h>
4663 #include <linux/bpf.h>
4664 #include <linux/btf.h>
4665 
4666 extern struct btf *btf_vmlinux;
4667 static const struct btf_type *task_struct_type;
4668 static u32 task_struct_type_id;
4669 
4670 static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size,
4671 			       enum bpf_access_type type,
4672 			       const struct bpf_prog *prog,
4673 			       struct bpf_insn_access_aux *info)
4674 {
4675 	struct btf *btf = bpf_get_btf_vmlinux();
4676 	const struct bpf_struct_ops_desc *st_ops_desc;
4677 	const struct btf_member *member;
4678 	const struct btf_type *t;
4679 	u32 btf_id, member_idx;
4680 	const char *mname;
4681 
4682 	/* struct_ops op args are all sequential, 64-bit numbers */
4683 	if (off != arg_n * sizeof(__u64))
4684 		return false;
4685 
4686 	/* btf_id should be the type id of struct sched_ext_ops */
4687 	btf_id = prog->aux->attach_btf_id;
4688 	st_ops_desc = bpf_struct_ops_find(btf, btf_id);
4689 	if (!st_ops_desc)
4690 		return false;
4691 
4692 	/* BTF type of struct sched_ext_ops */
4693 	t = st_ops_desc->type;
4694 
4695 	member_idx = prog->expected_attach_type;
4696 	if (member_idx >= btf_type_vlen(t))
4697 		return false;
4698 
4699 	/*
4700 	 * Get the member name of this struct_ops program, which corresponds to
4701 	 * a field in struct sched_ext_ops. For example, the member name of the
4702 	 * dispatch struct_ops program (callback) is "dispatch".
4703 	 */
4704 	member = &btf_type_member(t)[member_idx];
4705 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
4706 
4707 	if (!strcmp(mname, op)) {
4708 		/*
4709 		 * The value is a pointer to a type (struct task_struct) given
4710 		 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED);
4711 		 * however, it can be NULL (PTR_MAYBE_NULL). The BPF program
4712 		 * should check the pointer to make sure it is not NULL before
4713 		 * using it, or the verifier will reject the program.
4714 		 *
4715 		 * Longer term, this is something that should be addressed by
4716 		 * BTF and fully contained within the verifier.
4717 		 */
4718 		info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED;
4719 		info->btf = btf_vmlinux;
4720 		info->btf_id = task_struct_type_id;
4721 
4722 		return true;
4723 	}
4724 
4725 	return false;
4726 }
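
/*
 * Concretely, the above makes the second argument of ops.dispatch() (the
 * previous task) and ops.yield() (the yield target) PTR_MAYBE_NULL, so BPF
 * schedulers must NULL check them before use. Illustrative sketch with a
 * hypothetical callback name:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		if (!prev)
 *			return;
 *		// @prev may be dereferenced safely from here on
 *	}
 */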
4727 
4728 static bool bpf_scx_is_valid_access(int off, int size,
4729 				    enum bpf_access_type type,
4730 				    const struct bpf_prog *prog,
4731 				    struct bpf_insn_access_aux *info)
4732 {
4733 	if (type != BPF_READ)
4734 		return false;
4735 	if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) ||
4736 	    set_arg_maybe_null("yield", 1, off, size, type, prog, info))
4737 		return true;
4738 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
4739 		return false;
4740 	if (off % size != 0)
4741 		return false;
4742 
4743 	return btf_ctx_access(off, size, type, prog, info);
4744 }
4745 
4746 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
4747 				     const struct bpf_reg_state *reg, int off,
4748 				     int size)
4749 {
4750 	const struct btf_type *t;
4751 
4752 	t = btf_type_by_id(reg->btf, reg->btf_id);
4753 	if (t == task_struct_type) {
4754 		if (off >= offsetof(struct task_struct, scx.slice) &&
4755 		    off + size <= offsetofend(struct task_struct, scx.slice))
4756 			return SCALAR_VALUE;
4757 		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
4758 		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
4759 			return SCALAR_VALUE;
4760 		if (off >= offsetof(struct task_struct, scx.disallow) &&
4761 		    off + size <= offsetofend(struct task_struct, scx.disallow))
4762 			return SCALAR_VALUE;
4763 	}
4764 
4765 	return -EACCES;
4766 }
4767 
4768 static const struct bpf_func_proto *
4769 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4770 {
4771 	switch (func_id) {
4772 	case BPF_FUNC_task_storage_get:
4773 		return &bpf_task_storage_get_proto;
4774 	case BPF_FUNC_task_storage_delete:
4775 		return &bpf_task_storage_delete_proto;
4776 	default:
4777 		return bpf_base_func_proto(func_id, prog);
4778 	}
4779 }
4780 
4781 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
4782 	.get_func_proto = bpf_scx_get_func_proto,
4783 	.is_valid_access = bpf_scx_is_valid_access,
4784 	.btf_struct_access = bpf_scx_btf_struct_access,
4785 };
4786 
4787 static int bpf_scx_init_member(const struct btf_type *t,
4788 			       const struct btf_member *member,
4789 			       void *kdata, const void *udata)
4790 {
4791 	const struct sched_ext_ops *uops = udata;
4792 	struct sched_ext_ops *ops = kdata;
4793 	u32 moff = __btf_member_bit_offset(t, member) / 8;
4794 	int ret;
4795 
4796 	switch (moff) {
4797 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
4798 		if (*(u32 *)(udata + moff) > INT_MAX)
4799 			return -E2BIG;
4800 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
4801 		return 1;
4802 	case offsetof(struct sched_ext_ops, flags):
4803 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
4804 			return -EINVAL;
4805 		ops->flags = *(u64 *)(udata + moff);
4806 		return 1;
4807 	case offsetof(struct sched_ext_ops, name):
4808 		ret = bpf_obj_name_cpy(ops->name, uops->name,
4809 				       sizeof(ops->name));
4810 		if (ret < 0)
4811 			return ret;
4812 		if (ret == 0)
4813 			return -EINVAL;
4814 		return 1;
4815 	case offsetof(struct sched_ext_ops, timeout_ms):
4816 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
4817 		    SCX_WATCHDOG_MAX_TIMEOUT)
4818 			return -E2BIG;
4819 		ops->timeout_ms = *(u32 *)(udata + moff);
4820 		return 1;
4821 	case offsetof(struct sched_ext_ops, exit_dump_len):
4822 		ops->exit_dump_len =
4823 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
4824 		return 1;
4825 	case offsetof(struct sched_ext_ops, hotplug_seq):
4826 		ops->hotplug_seq = *(u64 *)(udata + moff);
4827 		return 1;
4828 	}
4829 
4830 	return 0;
4831 }
4832 
4833 static int bpf_scx_check_member(const struct btf_type *t,
4834 				const struct btf_member *member,
4835 				const struct bpf_prog *prog)
4836 {
4837 	u32 moff = __btf_member_bit_offset(t, member) / 8;
4838 
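	/*
	 * Only the callbacks listed below are invoked from contexts that may
	 * sleep; all other ops can be called under scheduler locks, so
	 * sleepable programs are rejected for them.
	 */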
4839 	switch (moff) {
4840 	case offsetof(struct sched_ext_ops, init_task):
4841 	case offsetof(struct sched_ext_ops, cpu_online):
4842 	case offsetof(struct sched_ext_ops, cpu_offline):
4843 	case offsetof(struct sched_ext_ops, init):
4844 	case offsetof(struct sched_ext_ops, exit):
4845 		break;
4846 	default:
4847 		if (prog->sleepable)
4848 			return -EINVAL;
4849 	}
4850 
4851 	return 0;
4852 }
4853 
4854 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
4855 {
4856 	return scx_ops_enable(kdata, link);
4857 }
4858 
4859 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
4860 {
4861 	scx_ops_disable(SCX_EXIT_UNREG);
4862 	kthread_flush_work(&scx_ops_disable_work);
4863 }
4864 
4865 static int bpf_scx_init(struct btf *btf)
4866 {
4867 	s32 type_id;
4868 
4869 	type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
4870 	if (type_id < 0)
4871 		return -EINVAL;
4872 	task_struct_type = btf_type_by_id(btf, type_id);
4873 	task_struct_type_id = type_id;
4874 
4875 	return 0;
4876 }
4877 
4878 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
4879 {
4880 	/*
4881 	 * sched_ext does not support updating the actively-loaded BPF
4882 	 * scheduler, as registering a BPF scheduler can always fail if the
4883 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
4884 	 * etc. Similarly, we can always race with unregistration happening
4885 	 * elsewhere, such as with sysrq.
4886 	 */
4887 	return -EOPNOTSUPP;
4888 }
4889 
4890 static int bpf_scx_validate(void *kdata)
4891 {
4892 	return 0;
4893 }
4894 
4895 static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
4896 static void enqueue_stub(struct task_struct *p, u64 enq_flags) {}
4897 static void dequeue_stub(struct task_struct *p, u64 enq_flags) {}
4898 static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {}
4899 static void runnable_stub(struct task_struct *p, u64 enq_flags) {}
4900 static void running_stub(struct task_struct *p) {}
4901 static void stopping_stub(struct task_struct *p, bool runnable) {}
4902 static void quiescent_stub(struct task_struct *p, u64 deq_flags) {}
4903 static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; }
4904 static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; }
4905 static void set_weight_stub(struct task_struct *p, u32 weight) {}
4906 static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {}
4907 static void update_idle_stub(s32 cpu, bool idle) {}
4908 static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {}
4909 static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {}
4910 static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
4911 static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
4912 static void enable_stub(struct task_struct *p) {}
4913 static void disable_stub(struct task_struct *p) {}
4914 static void cpu_online_stub(s32 cpu) {}
4915 static void cpu_offline_stub(s32 cpu) {}
4916 static s32 init_stub(void) { return -EINVAL; }
4917 static void exit_stub(struct scx_exit_info *info) {}
4918 
4919 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
4920 	.select_cpu = select_cpu_stub,
4921 	.enqueue = enqueue_stub,
4922 	.dequeue = dequeue_stub,
4923 	.dispatch = dispatch_stub,
4924 	.runnable = runnable_stub,
4925 	.running = running_stub,
4926 	.stopping = stopping_stub,
4927 	.quiescent = quiescent_stub,
4928 	.yield = yield_stub,
4929 	.core_sched_before = core_sched_before_stub,
4930 	.set_weight = set_weight_stub,
4931 	.set_cpumask = set_cpumask_stub,
4932 	.update_idle = update_idle_stub,
4933 	.cpu_acquire = cpu_acquire_stub,
4934 	.cpu_release = cpu_release_stub,
4935 	.init_task = init_task_stub,
4936 	.exit_task = exit_task_stub,
4937 	.enable = enable_stub,
4938 	.disable = disable_stub,
4939 	.cpu_online = cpu_online_stub,
4940 	.cpu_offline = cpu_offline_stub,
4941 	.init = init_stub,
4942 	.exit = exit_stub,
4943 };
4944 
4945 static struct bpf_struct_ops bpf_sched_ext_ops = {
4946 	.verifier_ops = &bpf_scx_verifier_ops,
4947 	.reg = bpf_scx_reg,
4948 	.unreg = bpf_scx_unreg,
4949 	.check_member = bpf_scx_check_member,
4950 	.init_member = bpf_scx_init_member,
4951 	.init = bpf_scx_init,
4952 	.update = bpf_scx_update,
4953 	.validate = bpf_scx_validate,
4954 	.name = "sched_ext_ops",
4955 	.owner = THIS_MODULE,
4956 	.cfi_stubs = &__bpf_ops_sched_ext_ops
4957 };
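
/*
 * Illustrative sketch (not part of this file) of what arrives through the
 * struct_ops plumbing above: a BPF scheduler declares a struct sched_ext_ops
 * instance in a ".struct_ops.link" section and user space attaches it with
 * libbpf, which ends up in bpf_scx_reg() -> scx_ops_enable(). Callback and
 * scheduler names are hypothetical:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.select_cpu	= (void *)example_select_cpu,
 *		.enqueue	= (void *)example_enqueue,
 *		.dispatch	= (void *)example_dispatch,
 *		.init		= (void *)example_init,
 *		.exit		= (void *)example_exit,
 *		.name		= "example",
 *	};
 */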
4958 
4959 
4960 /********************************************************************************
4961  * System integration and init.
4962  */
4963 
4964 static void sysrq_handle_sched_ext_reset(u8 key)
4965 {
4966 	if (scx_ops_helper)
4967 		scx_ops_disable(SCX_EXIT_SYSRQ);
4968 	else
4969 		pr_info("sched_ext: BPF scheduler not yet used\n");
4970 }
4971 
4972 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
4973 	.handler	= sysrq_handle_sched_ext_reset,
4974 	.help_msg	= "reset-sched-ext(S)",
4975 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
4976 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
4977 };
4978 
4979 static void sysrq_handle_sched_ext_dump(u8 key)
4980 {
4981 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
4982 
4983 	if (scx_enabled())
4984 		scx_dump_state(&ei, 0);
4985 }
4986 
4987 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
4988 	.handler	= sysrq_handle_sched_ext_dump,
4989 	.help_msg	= "dump-sched-ext(D)",
4990 	.action_msg	= "Trigger sched_ext debug dump",
4991 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
4992 };
4993 
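/*
 * CPU kicking: scx_bpf_kick_cpu(), defined later in this file, marks target
 * CPUs in the per-rq cpus_to_kick / cpus_to_kick_if_idle / cpus_to_preempt /
 * cpus_to_wait masks and queues an irq work whose handler,
 * kick_cpus_irq_workfn() below, delivers the actual reschedules.
 */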
4994 static bool can_skip_idle_kick(struct rq *rq)
4995 {
4996 	lockdep_assert_rq_held(rq);
4997 
4998 	/*
4999 	 * We can skip idle kicking if @rq is going to go through at least one
5000 	 * full SCX scheduling cycle before going idle. Just checking whether
5001 	 * curr is not idle is insufficient because we could be racing
5002 	 * balance_one() trying to pull the next task from a remote rq, which
5003 	 * may fail, and @rq may become idle afterwards.
5004 	 *
5005 	 * The race window is small and we can't guarantee that @rq is
5006 	 * only kicked while idle anyway. Skip only when sure.
5007 	 */
5008 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_BALANCING);
5009 }
5010 
5011 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
5012 {
5013 	struct rq *rq = cpu_rq(cpu);
5014 	struct scx_rq *this_scx = &this_rq->scx;
5015 	bool should_wait = false;
5016 	unsigned long flags;
5017 
5018 	raw_spin_rq_lock_irqsave(rq, flags);
5019 
5020 	/*
5021 	 * During CPU hotplug, a CPU may depend on kicking itself to make
5022 	 * forward progress. Allow kicking self regardless of online state.
5023 	 */
5024 	if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
5025 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5026 			if (rq->curr->sched_class == &ext_sched_class)
5027 				rq->curr->scx.slice = 0;
5028 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5029 		}
5030 
5031 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5032 			pseqs[cpu] = rq->scx.pnt_seq;
5033 			should_wait = true;
5034 		}
5035 
5036 		resched_curr(rq);
5037 	} else {
5038 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5039 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5040 	}
5041 
5042 	raw_spin_rq_unlock_irqrestore(rq, flags);
5043 
5044 	return should_wait;
5045 }
5046 
5047 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5048 {
5049 	struct rq *rq = cpu_rq(cpu);
5050 	unsigned long flags;
5051 
5052 	raw_spin_rq_lock_irqsave(rq, flags);
5053 
5054 	if (!can_skip_idle_kick(rq) &&
5055 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5056 		resched_curr(rq);
5057 
5058 	raw_spin_rq_unlock_irqrestore(rq, flags);
5059 }
5060 
5061 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5062 {
5063 	struct rq *this_rq = this_rq();
5064 	struct scx_rq *this_scx = &this_rq->scx;
5065 	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
5066 	bool should_wait = false;
5067 	s32 cpu;
5068 
5069 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
5070 		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
5071 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5072 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5073 	}
5074 
5075 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5076 		kick_one_cpu_if_idle(cpu, this_rq);
5077 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5078 	}
5079 
5080 	if (!should_wait)
5081 		return;
5082 
5083 	for_each_cpu(cpu, this_scx->cpus_to_wait) {
5084 		unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
5085 
5086 		if (cpu != cpu_of(this_rq)) {
5087 			/*
5088 			 * Pairs with smp_store_release() issued by this CPU in
5089 			 * scx_next_task_picked() on the resched path.
5090 			 *
5091 			 * We busy-wait here to guarantee that no other task can
5092 			 * be scheduled on our core before the target CPU has
5093 			 * entered the resched path.
5094 			 */
5095 			while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
5096 				cpu_relax();
5097 		}
5098 
5099 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5100 	}
5101 }
5102 
5103 /**
5104  * print_scx_info - print out sched_ext scheduler state
5105  * @log_lvl: the log level to use when printing
5106  * @p: target task
5107  *
5108  * If a sched_ext scheduler is enabled, print the name and state of the
5109  * scheduler. If @p is on sched_ext, print further information about the task.
5110  *
5111  * This function can be safely called on any task as long as the task_struct
5112  * itself is accessible. While safe, this function isn't synchronized and may
5113  * print out stale or inconsistent information of limited length.
5114  */
5115 void print_scx_info(const char *log_lvl, struct task_struct *p)
5116 {
5117 	enum scx_ops_enable_state state = scx_ops_enable_state();
5118 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5119 	char runnable_at_buf[22] = "?";
5120 	struct sched_class *class;
5121 	unsigned long runnable_at;
5122 
5123 	if (state == SCX_OPS_DISABLED)
5124 		return;
5125 
5126 	/*
5127 	 * Carefully check if the task was running on sched_ext, and then
5128 	 * carefully copy the time it's been runnable, and its state.
5129 	 */
5130 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5131 	    class != &ext_sched_class) {
5132 		printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
5133 		       scx_ops_enable_state_str[state], all);
5134 		return;
5135 	}
5136 
5137 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5138 				      sizeof(runnable_at)))
5139 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5140 			  jiffies_delta_msecs(runnable_at, jiffies));
5141 
5142 	/* print everything onto one line to conserve console space */
5143 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5144 	       log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
5145 	       runnable_at_buf);
5146 }
5147 
5148 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5149 {
5150 	/*
5151 	 * SCX schedulers often have userspace components which are sometimes
5152 	 * involved in critical scheduling paths. PM operations involve freezing
5153 	 * userspace which can lead to scheduling misbehaviors including stalls.
5154 	 * Let's bypass while PM operations are in progress.
5155 	 */
5156 	switch (event) {
5157 	case PM_HIBERNATION_PREPARE:
5158 	case PM_SUSPEND_PREPARE:
5159 	case PM_RESTORE_PREPARE:
5160 		scx_ops_bypass(true);
5161 		break;
5162 	case PM_POST_HIBERNATION:
5163 	case PM_POST_SUSPEND:
5164 	case PM_POST_RESTORE:
5165 		scx_ops_bypass(false);
5166 		break;
5167 	}
5168 
5169 	return NOTIFY_OK;
5170 }
5171 
5172 static struct notifier_block scx_pm_notifier = {
5173 	.notifier_call = scx_pm_handler,
5174 };
5175 
5176 void __init init_sched_ext_class(void)
5177 {
5178 	s32 cpu, v;
5179 
5180 	/*
5181 	 * The following is to prevent the compiler from optimizing out the enum
5182 	 * definitions so that BPF scheduler implementations can use them
5183 	 * through the generated vmlinux.h.
5184 	 */
5185 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT);
5186 
5187 	BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
5188 	init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL);
5189 #ifdef CONFIG_SMP
5190 	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
5191 	BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
5192 #endif
5193 	scx_kick_cpus_pnt_seqs =
5194 		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
5195 			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
5196 	BUG_ON(!scx_kick_cpus_pnt_seqs);
5197 
5198 	for_each_possible_cpu(cpu) {
5199 		struct rq *rq = cpu_rq(cpu);
5200 
5201 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5202 		INIT_LIST_HEAD(&rq->scx.runnable_list);
5203 
5204 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
5205 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
5206 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
5207 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
5208 		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
5209 
5210 		if (cpu_online(cpu))
5211 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5212 	}
5213 
5214 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5215 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5216 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5217 }
5218 
5219 
5220 /********************************************************************************
5221  * Helpers that can be called from the BPF scheduler.
5222  */
5223 #include <linux/btf_ids.h>
5224 
5225 __bpf_kfunc_start_defs();
5226 
5227 /**
5228  * scx_bpf_create_dsq - Create a custom DSQ
5229  * @dsq_id: DSQ to create
5230  * @node: NUMA node to allocate from
5231  *
5232  * Create a custom DSQ identified by @dsq_id. Can be called from ops.init() and
5233  * ops.init_task().
5234  */
5235 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
5236 {
5237 	if (!scx_kf_allowed(SCX_KF_SLEEPABLE))
5238 		return -EINVAL;
5239 
5240 	if (unlikely(node >= (int)nr_node_ids ||
5241 		     (node < 0 && node != NUMA_NO_NODE)))
5242 		return -EINVAL;
5243 	return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
5244 }
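
/*
 * Example usage (an illustrative sketch, not part of this file): creating a
 * shared DSQ from the BPF side. It assumes the conventions of the in-tree
 * example schedulers under tools/sched_ext (the scx/common.bpf.h header and
 * the BPF_STRUCT_OPS_SLEEPABLE() wrapper); the names sketch_init and
 * SHARED_DSQ are hypothetical and struct_ops registration is omitted.
 *
 *	#define SHARED_DSQ	0
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_init)
 *	{
 *		// allocate the shared queue on any NUMA node (-1 == NUMA_NO_NODE)
 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	}
 */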
5245 
5246 __bpf_kfunc_end_defs();
5247 
5248 BTF_KFUNCS_START(scx_kfunc_ids_sleepable)
5249 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
5250 BTF_KFUNCS_END(scx_kfunc_ids_sleepable)
5251 
5252 static const struct btf_kfunc_id_set scx_kfunc_set_sleepable = {
5253 	.owner			= THIS_MODULE,
5254 	.set			= &scx_kfunc_ids_sleepable,
5255 };
5256 
5257 __bpf_kfunc_start_defs();
5258 
5259 /**
5260  * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
5261  * @p: task_struct to select a CPU for
5262  * @prev_cpu: CPU @p was on previously
5263  * @wake_flags: %SCX_WAKE_* flags
5264  * @is_idle: out parameter indicating whether the returned CPU is idle
5265  *
5266  * Can only be called from ops.select_cpu() if the built-in CPU selection is
5267  * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
5268  * @p, @prev_cpu and @wake_flags match ops.select_cpu().
5269  *
5270  * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
5271  * currently idle and thus a good candidate for direct dispatching.
5272  */
5273 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
5274 				       u64 wake_flags, bool *is_idle)
5275 {
5276 	if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) {
5277 		*is_idle = false;
5278 		return prev_cpu;
5279 	}
5280 #ifdef CONFIG_SMP
5281 	return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
5282 #else
5283 	*is_idle = false;
5284 	return prev_cpu;
5285 #endif
5286 }
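
/*
 * Example usage (illustrative sketch, conventions as in the sketch above): an
 * ops.select_cpu() implementation that defers to the built-in CPU selection
 * and directly dispatches when an idle CPU was found. sketch_select_cpu is a
 * hypothetical name.
 *
 *	s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle) {
 *			// the picked CPU is idle; queue @p directly on that
 *			// CPU's local DSQ, skipping ops.enqueue()
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		}
 *		return cpu;
 *	}
 */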
5287 
5288 __bpf_kfunc_end_defs();
5289 
5290 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
5291 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
5292 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
5293 
5294 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
5295 	.owner			= THIS_MODULE,
5296 	.set			= &scx_kfunc_ids_select_cpu,
5297 };
5298 
5299 static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
5300 {
5301 	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
5302 		return false;
5303 
5304 	lockdep_assert_irqs_disabled();
5305 
5306 	if (unlikely(!p)) {
5307 		scx_ops_error("called with NULL task");
5308 		return false;
5309 	}
5310 
5311 	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
5312 		scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
5313 		return false;
5314 	}
5315 
5316 	return true;
5317 }
5318 
5319 static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
5320 {
5321 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5322 	struct task_struct *ddsp_task;
5323 
5324 	ddsp_task = __this_cpu_read(direct_dispatch_task);
5325 	if (ddsp_task) {
5326 		mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
5327 		return;
5328 	}
5329 
5330 	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
5331 		scx_ops_error("dispatch buffer overflow");
5332 		return;
5333 	}
5334 
5335 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
5336 		.task = p,
5337 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
5338 		.dsq_id = dsq_id,
5339 		.enq_flags = enq_flags,
5340 	};
5341 }
5342 
5343 __bpf_kfunc_start_defs();
5344 
5345 /**
5346  * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
5347  * @p: task_struct to dispatch
5348  * @dsq_id: DSQ to dispatch to
5349  * @slice: duration @p can run for in nsecs
5350  * @enq_flags: SCX_ENQ_*
5351  *
5352  * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
5353  * to call this function spuriously. Can be called from ops.enqueue(),
5354  * ops.select_cpu(), and ops.dispatch().
5355  *
5356  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
5357  * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
5358  * used to target the local DSQ of a CPU other than the enqueueing one. Use
5359  * ops.select_cpu() to be on the target CPU in the first place.
5360  *
5361  * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
5362  * will be directly dispatched to the corresponding dispatch queue after
5363  * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
5364  * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
5365  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
5366  * task is dispatched.
5367  *
5368  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
5369  * and this function can be called up to ops.dispatch_max_batch times to dispatch
5370  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
5371  * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
5372  *
5373  * This function doesn't have any locking restrictions and may be called under
5374  * BPF locks (in the future when BPF introduces more flexible locking).
5375  *
5376  * @p is allowed to run for @slice. The scheduling path is triggered on slice
5377  * exhaustion. If zero, the current residual slice is maintained. If
5378  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
5379  * scx_bpf_kick_cpu() to trigger scheduling.
5380  */
5381 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
5382 				  u64 enq_flags)
5383 {
5384 	if (!scx_dispatch_preamble(p, enq_flags))
5385 		return;
5386 
5387 	if (slice)
5388 		p->scx.slice = slice;
5389 	else
5390 		p->scx.slice = p->scx.slice ?: 1;
5391 
5392 	scx_dispatch_commit(p, dsq_id, enq_flags);
5393 }
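
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * the simplest possible ops.enqueue() which forwards every task to the
 * built-in global FIFO with the default slice. sketch_enqueue is a
 * hypothetical name.
 *
 *	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		// refresh the slice and queue on the global FIFO; CPUs pull
 *		// from SCX_DSQ_GLOBAL automatically when they run out of work
 *		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */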
5394 
5395 /**
5396  * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
5397  * @p: task_struct to dispatch
5398  * @dsq_id: DSQ to dispatch to
5399  * @slice: duration @p can run for in nsecs
5400  * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
5401  * @enq_flags: SCX_ENQ_*
5402  *
5403  * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
5404  * Tasks queued into the priority queue are ordered by @vtime and always
5405  * consumed after the tasks in the FIFO queue. All other aspects are identical
5406  * to scx_bpf_dispatch().
5407  *
5408  * @vtime ordering is according to time_before64() which considers wrapping. A
5409  * numerically larger vtime may indicate an earlier position in the ordering and
5410  * vice-versa.
5411  */
5412 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
5413 					u64 slice, u64 vtime, u64 enq_flags)
5414 {
5415 	if (!scx_dispatch_preamble(p, enq_flags))
5416 		return;
5417 
5418 	if (slice)
5419 		p->scx.slice = slice;
5420 	else
5421 		p->scx.slice = p->scx.slice ?: 1;
5422 
5423 	p->scx.dsq_vtime = vtime;
5424 
5425 	scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
5426 }
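
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * enqueueing into a vtime-ordered shared DSQ. SHARED_DSQ is the hypothetical
 * DSQ created in the scx_bpf_create_dsq() sketch above; maintaining
 * p->scx.dsq_vtime (e.g. advancing it from ops.running()/ops.stopping()) is
 * left out.
 *
 *	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		// order the shared queue by the task's accumulated vtime so
 *		// that tasks which have run the least are consumed first
 *		scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *				       p->scx.dsq_vtime, enq_flags);
 *	}
 */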
5427 
5428 __bpf_kfunc_end_defs();
5429 
5430 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
5431 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
5432 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
5433 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
5434 
5435 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
5436 	.owner			= THIS_MODULE,
5437 	.set			= &scx_kfunc_ids_enqueue_dispatch,
5438 };
5439 
5440 __bpf_kfunc_start_defs();
5441 
5442 /**
5443  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
5444  *
5445  * Can only be called from ops.dispatch().
5446  */
5447 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
5448 {
5449 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
5450 		return 0;
5451 
5452 	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
5453 }
5454 
5455 /**
5456  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
5457  *
5458  * Cancel the latest dispatch. Can be called multiple times to cancel further
5459  * dispatches. Can only be called from ops.dispatch().
5460  */
5461 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
5462 {
5463 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5464 
5465 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
5466 		return;
5467 
5468 	if (dspc->cursor > 0)
5469 		dspc->cursor--;
5470 	else
5471 		scx_ops_error("dispatch buffer underflow");
5472 }
5473 
5474 /**
5475  * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ
5476  * @dsq_id: DSQ to consume
5477  *
5478  * Consume a task from the non-local DSQ identified by @dsq_id and transfer it
5479  * to the current CPU's local DSQ for execution. Can only be called from
5480  * ops.dispatch().
5481  *
5482  * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
5483  * trying to consume the specified DSQ. It may also grab rq locks and thus can't
5484  * be called under any BPF locks.
5485  *
5486  * Returns %true if a task has been consumed, %false if there isn't any task to
5487  * consume.
5488  */
5489 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
5490 {
5491 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5492 	struct scx_dispatch_q *dsq;
5493 
5494 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
5495 		return false;
5496 
5497 	flush_dispatch_buf(dspc->rq, dspc->rf);
5498 
5499 	dsq = find_non_local_dsq(dsq_id);
5500 	if (unlikely(!dsq)) {
5501 		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
5502 		return false;
5503 	}
5504 
5505 	if (consume_dispatch_q(dspc->rq, dspc->rf, dsq)) {
5506 		/*
5507 		 * A successfully consumed task can be dequeued before it starts
5508 		 * running while the CPU is trying to migrate other dispatched
5509 		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
5510 		 * local DSQ.
5511 		 */
5512 		dspc->nr_tasks++;
5513 		return true;
5514 	} else {
5515 		return false;
5516 	}
5517 }
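
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * an ops.dispatch() implementation that refills the local DSQ from the
 * hypothetical SHARED_DSQ. A more elaborate scheduler could also push tasks
 * with scx_bpf_dispatch() here, using scx_bpf_dispatch_nr_slots() to stay
 * within the batch and scx_bpf_dispatch_cancel() to back out.
 *
 *	void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		// move one task from the shared DSQ onto this CPU's local DSQ;
 *		// if the shared DSQ is empty, the CPU will go (or stay) idle
 *		scx_bpf_consume(SHARED_DSQ);
 *	}
 */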
5518 
5519 __bpf_kfunc_end_defs();
5520 
5521 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
5522 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
5523 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
5524 BTF_ID_FLAGS(func, scx_bpf_consume)
5525 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
5526 
5527 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
5528 	.owner			= THIS_MODULE,
5529 	.set			= &scx_kfunc_ids_dispatch,
5530 };
5531 
5532 __bpf_kfunc_start_defs();
5533 
5534 /**
5535  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
5536  *
5537  * Iterate over all of the tasks currently enqueued on the local DSQ of the
5538  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
5539  * processed tasks. Can only be called from ops.cpu_release().
5540  */
5541 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
5542 {
5543 	u32 nr_enqueued, i;
5544 	struct rq *rq;
5545 
5546 	if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
5547 		return 0;
5548 
5549 	rq = cpu_rq(smp_processor_id());
5550 	lockdep_assert_rq_held(rq);
5551 
5552 	/*
5553 	 * Get the number of tasks on the local DSQ before iterating over it to
5554 	 * pull off tasks. The enqueue callback below can signal that it wants
5555 	 * the task to stay on the local DSQ, and we want to prevent the BPF
5556 	 * scheduler from causing us to loop indefinitely.
5557 	 */
5558 	nr_enqueued = rq->scx.local_dsq.nr;
5559 	for (i = 0; i < nr_enqueued; i++) {
5560 		struct task_struct *p;
5561 
5562 		p = first_local_task(rq);
5563 		WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) !=
5564 			     SCX_OPSS_NONE);
5565 		WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
5566 		WARN_ON_ONCE(p->scx.holding_cpu != -1);
5567 		dispatch_dequeue(rq, p);
5568 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
5569 	}
5570 
5571 	return nr_enqueued;
5572 }
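
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * when a higher priority sched class takes over the CPU, hand the locally
 * queued tasks back to the BPF scheduler so they can be placed elsewhere
 * instead of waiting behind the preempting class.
 *
 *	void BPF_STRUCT_OPS(sketch_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		// each re-enqueued task goes through ops.enqueue() again with
 *		// SCX_ENQ_REENQ set and can be dispatched to another CPU
 *		scx_bpf_reenqueue_local();
 *	}
 */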
5573 
5574 __bpf_kfunc_end_defs();
5575 
5576 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
5577 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
5578 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
5579 
5580 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
5581 	.owner			= THIS_MODULE,
5582 	.set			= &scx_kfunc_ids_cpu_release,
5583 };
5584 
5585 __bpf_kfunc_start_defs();
5586 
5587 /**
5588  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
5589  * @cpu: cpu to kick
5590  * @flags: %SCX_KICK_* flags
5591  *
5592  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
5593  * trigger rescheduling on a busy CPU. This can be called from any online
5594  * scx_ops operation and the actual kicking is performed asynchronously through
5595  * an irq work.
5596  */
5597 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
5598 {
5599 	struct rq *this_rq;
5600 	unsigned long irq_flags;
5601 
5602 	if (!ops_cpu_valid(cpu, NULL))
5603 		return;
5604 
5605 	/*
5606 	 * While bypassing for PM ops, IRQ handling may not be online which can
5607 	 * lead to irq_work_queue() malfunction such as infinite busy wait for
5608 	 * IRQ status update. Suppress kicking.
5609 	 */
5610 	if (scx_ops_bypassing())
5611 		return;
5612 
5613 	local_irq_save(irq_flags);
5614 
5615 	this_rq = this_rq();
5616 
5617 	/*
5618 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
5619 	 * rq locks. We can probably be smarter and avoid bouncing if called
5620 	 * from ops which don't hold a rq lock.
5621 	 */
5622 	if (flags & SCX_KICK_IDLE) {
5623 		struct rq *target_rq = cpu_rq(cpu);
5624 
5625 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
5626 			scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
5627 
5628 		if (raw_spin_rq_trylock(target_rq)) {
5629 			if (can_skip_idle_kick(target_rq)) {
5630 				raw_spin_rq_unlock(target_rq);
5631 				goto out;
5632 			}
5633 			raw_spin_rq_unlock(target_rq);
5634 		}
5635 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
5636 	} else {
5637 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
5638 
5639 		if (flags & SCX_KICK_PREEMPT)
5640 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
5641 		if (flags & SCX_KICK_WAIT)
5642 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
5643 	}
5644 
5645 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
5646 out:
5647 	local_irq_restore(irq_flags);
5648 }
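
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * after queueing a task on the shared DSQ, poke its last CPU in case it is
 * sitting idle so the task doesn't wait for the next naturally occurring
 * scheduling event.
 *
 *	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *
 *		// SCX_KICK_IDLE only reschedules the CPU if it is idle (or
 *		// about to become idle), making spurious kicks cheap
 *		scx_bpf_kick_cpu(scx_bpf_task_cpu(p), SCX_KICK_IDLE);
 *	}
 */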
5649 
5650 /**
5651  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
5652  * @dsq_id: id of the DSQ
5653  *
5654  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
5655  * -%ENOENT is returned.
5656  */
5657 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
5658 {
5659 	struct scx_dispatch_q *dsq;
5660 	s32 ret;
5661 
5662 	preempt_disable();
5663 
5664 	if (dsq_id == SCX_DSQ_LOCAL) {
5665 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
5666 		goto out;
5667 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
5668 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
5669 
5670 		if (ops_cpu_valid(cpu, NULL)) {
5671 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
5672 			goto out;
5673 		}
5674 	} else {
5675 		dsq = find_non_local_dsq(dsq_id);
5676 		if (dsq) {
5677 			ret = READ_ONCE(dsq->nr);
5678 			goto out;
5679 		}
5680 	}
5681 	ret = -ENOENT;
5682 out:
5683 	preempt_enable();
5684 	return ret;
5685 }
5686 
5687 /**
5688  * scx_bpf_destroy_dsq - Destroy a custom DSQ
5689  * @dsq_id: DSQ to destroy
5690  *
5691  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
5692  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
5693  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
5694  * which doesn't exist. Can be called from any online scx_ops operations.
5695  */
5696 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
5697 {
5698 	destroy_dsq(dsq_id);
5699 }
5700 
5701 __bpf_kfunc_end_defs();
5702 
5703 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
5704 			 char *fmt, unsigned long long *data, u32 data__sz)
5705 {
5706 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
5707 	s32 ret;
5708 
5709 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
5710 	    (data__sz && !data)) {
5711 		scx_ops_error("invalid data=%p and data__sz=%u",
5712 			      (void *)data, data__sz);
5713 		return -EINVAL;
5714 	}
5715 
5716 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
5717 	if (ret < 0) {
5718 		scx_ops_error("failed to read data fields (%d)", ret);
5719 		return ret;
5720 	}
5721 
5722 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
5723 				  &bprintf_data);
5724 	if (ret < 0) {
5725 		scx_ops_error("format preparation failed (%d)", ret);
5726 		return ret;
5727 	}
5728 
5729 	ret = bstr_printf(line_buf, line_size, fmt,
5730 			  bprintf_data.bin_args);
5731 	bpf_bprintf_cleanup(&bprintf_data);
5732 	if (ret < 0) {
5733 		scx_ops_error("(\"%s\", %p, %u) failed to format",
5734 			      fmt, data, data__sz);
5735 		return ret;
5736 	}
5737 
5738 	return ret;
5739 }
5740 
5741 static s32 bstr_format(struct scx_bstr_buf *buf,
5742 		       char *fmt, unsigned long long *data, u32 data__sz)
5743 {
5744 	return __bstr_format(buf->data, buf->line, sizeof(buf->line),
5745 			     fmt, data, data__sz);
5746 }
5747 
5748 __bpf_kfunc_start_defs();
5749 
5750 /**
5751  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
5752  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
5753  * @fmt: error message format string
5754  * @data: format string parameters packaged using ___bpf_fill() macro
5755  * @data__sz: @data len, must end in '__sz' for the verifier
5756  *
5757  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
5758  * disabling.
5759  */
5760 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
5761 				   unsigned long long *data, u32 data__sz)
5762 {
5763 	unsigned long flags;
5764 
5765 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
5766 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
5767 		scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
5768 				  scx_exit_bstr_buf.line);
5769 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
5770 }
5771 
5772 /**
5773  * scx_bpf_error_bstr - Indicate fatal error
5774  * @fmt: error message format string
5775  * @data: format string parameters packaged using ___bpf_fill() macro
5776  * @data__sz: @data len, must end in '__sz' for the verifier
5777  *
5778  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
5779  * disabling.
5780  */
5781 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
5782 				    u32 data__sz)
5783 {
5784 	unsigned long flags;
5785 
5786 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
5787 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
5788 		scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
5789 				  scx_exit_bstr_buf.line);
5790 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
5791 }
5792 
5793 /**
5794  * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
5795  * @fmt: format string
5796  * @data: format string parameters packaged using ___bpf_fill() macro
5797  * @data__sz: @data len, must end in '__sz' for the verifier
5798  *
5799  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
5800  * dump_task() to generate extra debug dump specific to the BPF scheduler.
5801  *
5802  * The extra dump may be multiple lines. A single line may be split over
5803  * multiple calls. The last line is automatically terminated.
5804  */
5805 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
5806 				   u32 data__sz)
5807 {
5808 	struct scx_dump_data *dd = &scx_dump_data;
5809 	struct scx_bstr_buf *buf = &dd->buf;
5810 	s32 ret;
5811 
5812 	if (raw_smp_processor_id() != dd->cpu) {
5813 		scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
5814 		return;
5815 	}
5816 
5817 	/* append the formatted string to the line buf */
5818 	ret = __bstr_format(buf->data, buf->line + dd->cursor,
5819 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
5820 	if (ret < 0) {
5821 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
5822 			  dd->prefix, fmt, data, data__sz, ret);
5823 		return;
5824 	}
5825 
5826 	dd->cursor += ret;
5827 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
5828 
5829 	if (!dd->cursor)
5830 		return;
5831 
5832 	/*
5833 	 * If the line buf overflowed or ends in a newline, flush it into the
5834 	 * dump. This is to allow the caller to generate a single line over
5835 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
5836 	 * the line buf, the only case which can lead to an unexpected
5837 	 * truncation is when the caller keeps generating newlines in the middle
5838 	 * of a line rather than at the end across consecutive calls. Don't do that.
5839 	 */
5840 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
5841 		ops_dump_flush();
5842 }
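
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * an ops.dump() callback adding one scheduler-specific line to the debug dump
 * through the scx_bpf_dump() format-string wrapper from the example headers.
 * nr_queued is a hypothetical global counter maintained elsewhere by the
 * scheduler.
 *
 *	u64 nr_queued;
 *
 *	void BPF_STRUCT_OPS(sketch_dump, struct scx_dump_ctx *dctx)
 *	{
 *		// a line may be built up over multiple calls; it is flushed
 *		// once it ends in a newline or the line buffer fills up
 *		scx_bpf_dump("SKETCH nr_queued=%llu", nr_queued);
 *		scx_bpf_dump("\n");
 *	}
 */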
5843 
5844 /**
5845  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
5846  * @cpu: CPU of interest
5847  *
5848  * Return the maximum relative capacity of @cpu in relation to the most
5849  * performant CPU in the system. The return value is in the range [1,
5850  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
5851  */
5852 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
5853 {
5854 	if (ops_cpu_valid(cpu, NULL))
5855 		return arch_scale_cpu_capacity(cpu);
5856 	else
5857 		return SCX_CPUPERF_ONE;
5858 }
5859 
5860 /**
5861  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
5862  * @cpu: CPU of interest
5863  *
5864  * Return the current relative performance of @cpu in relation to its maximum.
5865  * The return value is in the range [1, %SCX_CPUPERF_ONE].
5866  *
5867  * The current performance level of a CPU in relation to the maximum performance
5868  * available in the system can be calculated as follows:
5869  *
5870  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
5871  *
5872  * The result is in the range [1, %SCX_CPUPERF_ONE].
5873  */
5874 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
5875 {
5876 	if (ops_cpu_valid(cpu, NULL))
5877 		return arch_scale_freq_capacity(cpu);
5878 	else
5879 		return SCX_CPUPERF_ONE;
5880 }
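
/*
 * For example, on a hypothetical asymmetric system a small CPU may report
 * scx_bpf_cpuperf_cap() == 512 while currently running at 3/4 of its own
 * maximum, i.e. scx_bpf_cpuperf_cur() == 768. Its performance relative to the
 * fastest CPU in the system is then 512 * 768 / 1024 = 384, or 3/8 of
 * SCX_CPUPERF_ONE.
 */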
5881 
5882 /**
5883  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
5884  * @cpu: CPU of interest
5885  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
5887  *
5888  * Set the target performance level of @cpu to @perf. @perf is in linear
5889  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
5890  * schedutil cpufreq governor chooses the target frequency.
5891  *
5892  * The actual performance level chosen, CPU grouping, and the overhead and
5893  * latency of the operations are dependent on the hardware and cpufreq driver in
5894  * use. Consult hardware and cpufreq documentation for more information. The
5895  * current performance level can be monitored using scx_bpf_cpuperf_cur().
5896  */
5897 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
5898 {
5899 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
5900 		scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
5901 		return;
5902 	}
5903 
5904 	if (ops_cpu_valid(cpu, NULL)) {
5905 		struct rq *rq = cpu_rq(cpu);
5906 
5907 		rq->scx.cpuperf_target = perf;
5908 
5909 		rcu_read_lock_sched_notrace();
5910 		cpufreq_update_util(cpu_rq(cpu), 0);
5911 		rcu_read_unlock_sched_notrace();
5912 	}
5913 }
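
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * a trivial BPF-side policy that requests full performance from every tick on
 * a CPU running an SCX task. A real governor would also lower the target
 * again, e.g. from ops.stopping() or ops.update_idle().
 *
 *	void BPF_STRUCT_OPS(sketch_tick, struct task_struct *p)
 *	{
 *		// @p is currently running; drive its CPU at full speed
 *		scx_bpf_cpuperf_set(scx_bpf_task_cpu(p), SCX_CPUPERF_ONE);
 *	}
 */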
5914 
5915 /**
5916  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
5917  *
5918  * All valid CPU IDs in the system are smaller than the returned value.
5919  */
5920 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
5921 {
5922 	return nr_cpu_ids;
5923 }
5924 
5925 /**
5926  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
5927  */
5928 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
5929 {
5930 	return cpu_possible_mask;
5931 }
5932 
5933 /**
5934  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
5935  */
5936 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
5937 {
5938 	return cpu_online_mask;
5939 }
5940 
5941 /**
5942  * scx_bpf_put_cpumask - Release a possible/online cpumask
5943  * @cpumask: cpumask to release
5944  */
5945 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
5946 {
5947 	/*
5948 	 * Empty function body because we aren't actually acquiring or releasing
5949 	 * a reference to a global cpumask, which is read-only in the caller and
5950 	 * is never released. The acquire / release semantics here are just used
5951 	 * to make the cpumask a trusted pointer in the caller.
5952 	 */
5953 }
5954 
5955 /**
5956  * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
5957  * per-CPU cpumask.
5958  *
5959  * Returns an empty cpumask if idle tracking is not enabled, or on a UP kernel.
5960  */
5961 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
5962 {
5963 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
5964 		scx_ops_error("built-in idle tracking is disabled");
5965 		return cpu_none_mask;
5966 	}
5967 
5968 #ifdef CONFIG_SMP
5969 	return idle_masks.cpu;
5970 #else
5971 	return cpu_none_mask;
5972 #endif
5973 }
5974 
5975 /**
5976  * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
5977  * per-physical-core cpumask. Can be used to determine if an entire physical
5978  * core is free.
5979  *
5980  * Returns an empty cpumask if idle tracking is not enabled, or on a UP kernel.
5981  */
5982 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
5983 {
5984 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
5985 		scx_ops_error("built-in idle tracking is disabled");
5986 		return cpu_none_mask;
5987 	}
5988 
5989 #ifdef CONFIG_SMP
5990 	if (sched_smt_active())
5991 		return idle_masks.smt;
5992 	else
5993 		return idle_masks.cpu;
5994 #else
5995 	return cpu_none_mask;
5996 #endif
5997 }
5998 
5999 /**
6000  * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
6001  * either the per-CPU or SMT idle-tracking cpumask.
6002  */
6003 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
6004 {
6005 	/*
6006 	 * Empty function body because we aren't actually acquiring or releasing
6007 	 * a reference to a global idle cpumask, which is read-only in the
6008 	 * caller and is never released. The acquire / release semantics here
6009 	 * are just used to make the cpumask a trusted pointer in the caller.
6010 	 */
6011 }
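
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * peeking at the idle cpumask from ops.select_cpu() to keep a waking task on
 * its previous CPU whenever that CPU is idle. bpf_cpumask_test_cpu() is the
 * generic BPF cpumask kfunc; the acquired kptr must be released before
 * returning.
 *
 *	s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		const struct cpumask *idle = scx_bpf_get_idle_cpumask();
 *
 *		if (bpf_cpumask_test_cpu(prev_cpu, idle)) {
 *			// sticking to the previous CPU preserves cache warmth;
 *			// direct-dispatch to its local DSQ
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		}
 *
 *		scx_bpf_put_idle_cpumask(idle);
 *		return prev_cpu;
 *	}
 */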
6012 
6013 /**
6014  * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
6015  * @cpu: cpu to test and clear idle for
6016  *
6017  * Returns %true if @cpu was idle and its idle state was successfully cleared.
6018  * %false otherwise.
6019  *
6020  * Unavailable if ops.update_idle() is implemented and
6021  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
6022  */
6023 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
6024 {
6025 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6026 		scx_ops_error("built-in idle tracking is disabled");
6027 		return false;
6028 	}
6029 
6030 	if (ops_cpu_valid(cpu, NULL))
6031 		return test_and_clear_cpu_idle(cpu);
6032 	else
6033 		return false;
6034 }
6035 
6036 /**
6037  * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
6038  * @cpus_allowed: Allowed cpumask
6039  * @flags: %SCX_PICK_IDLE_CPU_* flags
6040  *
6041  * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
6042  * number on success. -%EBUSY if no matching cpu was found.
6043  *
6044  * Idle CPU tracking may race against CPU scheduling state transitions. For
6045  * example, this function may return -%EBUSY as CPUs are transitioning into the
6046  * idle state. If the caller then assumes that there will be dispatch events on
6047  * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
6048  * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
6049  * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
6050  * event in the near future.
6051  *
6052  * Unavailable if ops.update_idle() is implemented and
6053  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
6054  */
6055 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
6056 				      u64 flags)
6057 {
6058 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6059 		scx_ops_error("built-in idle tracking is disabled");
6060 		return -EBUSY;
6061 	}
6062 
6063 	return scx_pick_idle_cpu(cpus_allowed, flags);
6064 }
6065 
6066 /**
6067  * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
6068  * @cpus_allowed: Allowed cpumask
6069  * @flags: %SCX_PICK_IDLE_CPU_* flags
6070  *
6071  * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
6072  * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
6073  * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
6074  * empty.
6075  *
6076  * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
6077  * set, this function can't tell which CPUs are idle and will always pick any
6078  * CPU.
6079  */
6080 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
6081 				     u64 flags)
6082 {
6083 	s32 cpu;
6084 
6085 	if (static_branch_likely(&scx_builtin_idle_enabled)) {
6086 		cpu = scx_pick_idle_cpu(cpus_allowed, flags);
6087 		if (cpu >= 0)
6088 			return cpu;
6089 	}
6090 
6091 	cpu = cpumask_any_distribute(cpus_allowed);
6092 	if (cpu < nr_cpu_ids)
6093 		return cpu;
6094 	else
6095 		return -EBUSY;
6096 }
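
/*
 * Example usage (illustrative sketch, conventions as in the earlier sketches):
 * guaranteeing a dispatch event after queueing a task, per the note in
 * scx_bpf_pick_idle_cpu() above. p->cpus_ptr is the task's allowed cpumask.
 *
 *	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		s32 cpu;
 *
 *		scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *
 *		// prefer an idle CPU but, even if the idle test races CPUs
 *		// going to sleep, kick some allowed CPU so that at least one
 *		// ops.dispatch() follows and the task can't be stranded
 *		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 *		if (cpu >= 0)
 *			scx_bpf_kick_cpu(cpu, 0);
 *	}
 */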
6097 
6098 /**
6099  * scx_bpf_task_running - Is task currently running?
6100  * @p: task of interest
6101  */
6102 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
6103 {
6104 	return task_rq(p)->curr == p;
6105 }
6106 
6107 /**
6108  * scx_bpf_task_cpu - CPU a task is currently associated with
6109  * @p: task of interest
6110  */
6111 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
6112 {
6113 	return task_cpu(p);
6114 }
6115 
6116 /**
6117  * scx_bpf_cpu_rq - Fetch the rq of a CPU
6118  * @cpu: CPU of the rq
6119  */
6120 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
6121 {
6122 	if (!ops_cpu_valid(cpu, NULL))
6123 		return NULL;
6124 
6125 	return cpu_rq(cpu);
6126 }
6127 
6128 __bpf_kfunc_end_defs();
6129 
6130 BTF_KFUNCS_START(scx_kfunc_ids_any)
6131 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
6132 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
6133 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
6134 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
6135 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
6136 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
6137 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
6138 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
6139 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
6140 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
6141 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
6142 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
6143 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
6144 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
6145 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
6146 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
6147 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
6148 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
6149 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
6150 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
6151 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
6152 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
6153 BTF_KFUNCS_END(scx_kfunc_ids_any)
6154 
6155 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
6156 	.owner			= THIS_MODULE,
6157 	.set			= &scx_kfunc_ids_any,
6158 };
6159 
6160 static int __init scx_init(void)
6161 {
6162 	int ret;
6163 
6164 	/*
6165 	 * kfunc registration can't be done from init_sched_ext_class() as
6166 	 * register_btf_kfunc_id_set() needs most of the system to be up.
6167 	 *
6168 	 * Some kfuncs are context-sensitive and can only be called from
6169 	 * specific SCX ops. They are grouped into BTF sets accordingly.
6170 	 * Unfortunately, BPF currently doesn't have a way of enforcing such
6171 	 * restrictions. Eventually, the verifier should be able to enforce
6172 	 * them. For now, register them the same and make each kfunc explicitly
6173 	 * check using scx_kf_allowed().
6174 	 */
6175 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6176 					     &scx_kfunc_set_sleepable)) ||
6177 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6178 					     &scx_kfunc_set_select_cpu)) ||
6179 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6180 					     &scx_kfunc_set_enqueue_dispatch)) ||
6181 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6182 					     &scx_kfunc_set_dispatch)) ||
6183 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6184 					     &scx_kfunc_set_cpu_release)) ||
6185 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6186 					     &scx_kfunc_set_any)) ||
6187 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
6188 					     &scx_kfunc_set_any)) ||
6189 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
6190 					     &scx_kfunc_set_any))) {
6191 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
6192 		return ret;
6193 	}
6194 
6195 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
6196 	if (ret) {
6197 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
6198 		return ret;
6199 	}
6200 
6201 	ret = register_pm_notifier(&scx_pm_notifier);
6202 	if (ret) {
6203 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
6204 		return ret;
6205 	}
6206 
6207 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
6208 	if (!scx_kset) {
6209 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
6210 		return -ENOMEM;
6211 	}
6212 
6213 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
6214 	if (ret < 0) {
6215 		pr_err("sched_ext: Failed to add global attributes\n");
6216 		return ret;
6217 	}
6218 
6219 	return 0;
6220 }
6221 __initcall(scx_init);
6222