xref: /linux/kernel/sched/ext.c (revision 38c6104e0bc7c8af20ab4897cb0504e3339e4fe4)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
10 
11 enum scx_consts {
12 	SCX_DSP_DFL_MAX_BATCH		= 32,
13 	SCX_DSP_MAX_LOOPS		= 32,
14 	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,
15 
16 	SCX_EXIT_BT_LEN			= 64,
17 	SCX_EXIT_MSG_LEN		= 1024,
18 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
19 
20 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
21 
22 	/*
23 	 * Iterating all tasks may take a while. Periodically drop
24 	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
25 	 */
26 	SCX_OPS_TASK_ITER_BATCH		= 32,
27 };
28 
29 enum scx_exit_kind {
30 	SCX_EXIT_NONE,
31 	SCX_EXIT_DONE,
32 
33 	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
34 	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
35 	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
36 	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
37 
38 	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
39 	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
40 	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
41 };
42 
43 /*
44  * An exit code can be specified when exiting with scx_bpf_exit() or
45  * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
46  * respectively. The codes are 64bit values of the format:
47  *
48  *   Bits: [63  ..  48 47   ..  32 31 .. 0]
49  *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
50  *
51  *   SYS ACT: System-defined exit actions
52  *   SYS RSN: System-defined exit reasons
53  *   USR    : User-defined exit codes and reasons
54  *
55  * Using the above, users may communicate intention and context by ORing system
56  * actions and/or system reasons with a user-defined exit code.
57  */
58 enum scx_exit_code {
59 	/* Reasons */
60 	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,
61 
62 	/* Actions */
63 	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
64 };
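/*
 * For illustration, a BPF scheduler might combine the above with a
 * user-defined code when shutting itself down. A minimal sketch, assuming the
 * scx_bpf_exit() convenience macro from the BPF-side headers and a
 * hypothetical user-defined code of 1:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 1,
 *		     "restarting in response to a CPU hotplug event");
 */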
65 
66 /*
67  * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
68  * being disabled.
69  */
70 struct scx_exit_info {
71 	/* %SCX_EXIT_* - broad category of the exit reason */
72 	enum scx_exit_kind	kind;
73 
74 	/* exit code if gracefully exiting */
75 	s64			exit_code;
76 
77 	/* textual representation of the above */
78 	const char		*reason;
79 
80 	/* backtrace if exiting due to an error */
81 	unsigned long		*bt;
82 	u32			bt_len;
83 
84 	/* informational message */
85 	char			*msg;
86 
87 	/* debug dump */
88 	char			*dump;
89 };
90 
91 /* sched_ext_ops.flags */
92 enum scx_ops_flags {
93 	/*
94 	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
95 	 */
96 	SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
97 
98 	/*
99 	 * By default, if there are no other tasks to run on the CPU, the ext core
100 	 * keeps running the current task even after its slice expires. If this
101 	 * flag is specified, such tasks are passed to ops.enqueue() with
102 	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
103 	 */
104 	SCX_OPS_ENQ_LAST	= 1LLU << 1,
105 
106 	/*
107 	 * An exiting task may schedule after PF_EXITING is set. In such cases,
108 	 * bpf_task_from_pid() may not be able to find the task and if the BPF
109 	 * scheduler depends on pid lookup for dispatching, the task will be
110 	 * lost leading to various issues including RCU grace period stalls.
111 	 *
112 	 * To mask this problem, by default, unhashed tasks are automatically
113 	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
114 	 * depend on pid lookups and wants to handle these tasks directly, the
115 	 * following flag can be used.
116 	 */
117 	SCX_OPS_ENQ_EXITING	= 1LLU << 2,
118 
119 	/*
120 	 * If set, only tasks with policy set to SCHED_EXT are attached to
121 	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
122 	 */
123 	SCX_OPS_SWITCH_PARTIAL	= 1LLU << 3,
124 
125 	/*
126 	 * A migration disabled task can only execute on its current CPU. By
127 	 * default, such tasks are automatically put on the CPU's local DSQ with
128 	 * the default slice on enqueue. If this ops flag is set, they also go
129 	 * through ops.enqueue().
130 	 *
131 	 * A migration disabled task never invokes ops.select_cpu() as it can
132 	 * only select the current CPU. Also, p->cpus_ptr will only contain its
133 	 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
134 	 * and thus may disagree with cpumask_weight(p->cpus_ptr).
135 	 */
136 	SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,
137 
138 	/*
139 	 * CPU cgroup support flags
140 	 */
141 	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* cpu.weight */
142 
143 	SCX_OPS_ALL_FLAGS	= SCX_OPS_KEEP_BUILTIN_IDLE |
144 				  SCX_OPS_ENQ_LAST |
145 				  SCX_OPS_ENQ_EXITING |
146 				  SCX_OPS_ENQ_MIGRATION_DISABLED |
147 				  SCX_OPS_SWITCH_PARTIAL |
148 				  SCX_OPS_HAS_CGROUP_WEIGHT,
149 };
150 
151 /* argument container for ops.init_task() */
152 struct scx_init_task_args {
153 	/*
154 	 * Set if ops.init_task() is being invoked on the fork path, as opposed
155 	 * to the scheduler transition path.
156 	 */
157 	bool			fork;
158 #ifdef CONFIG_EXT_GROUP_SCHED
159 	/* the cgroup the task is joining */
160 	struct cgroup		*cgroup;
161 #endif
162 };
163 
164 /* argument container for ops.exit_task() */
165 struct scx_exit_task_args {
166 	/* Whether the task exited before running on sched_ext. */
167 	bool cancelled;
168 };
169 
170 /* argument container for ops->cgroup_init() */
171 struct scx_cgroup_init_args {
172 	/* the weight of the cgroup [1..10000] */
173 	u32			weight;
174 };
175 
176 enum scx_cpu_preempt_reason {
177 	/* next task is being scheduled by &sched_class_rt */
178 	SCX_CPU_PREEMPT_RT,
179 	/* next task is being scheduled by &sched_class_dl */
180 	SCX_CPU_PREEMPT_DL,
181 	/* next task is being scheduled by &sched_class_stop */
182 	SCX_CPU_PREEMPT_STOP,
183 	/* unknown reason for SCX being preempted */
184 	SCX_CPU_PREEMPT_UNKNOWN,
185 };
186 
187 /*
188  * Argument container for ops->cpu_acquire(). Currently empty, but may be
189  * expanded in the future.
190  */
191 struct scx_cpu_acquire_args {};
192 
193 /* argument container for ops->cpu_release() */
194 struct scx_cpu_release_args {
195 	/* the reason the CPU was preempted */
196 	enum scx_cpu_preempt_reason reason;
197 
198 	/* the task that's going to be scheduled on the CPU */
199 	struct task_struct	*task;
200 };
201 
202 /*
203  * Informational context provided to dump operations.
204  */
205 struct scx_dump_ctx {
206 	enum scx_exit_kind	kind;
207 	s64			exit_code;
208 	const char		*reason;
209 	u64			at_ns;
210 	u64			at_jiffies;
211 };
212 
213 /**
214  * struct sched_ext_ops - Operation table for BPF scheduler implementation
215  *
216  * A BPF scheduler can implement an arbitrary scheduling policy by
217  * implementing and loading operations in this table. Note that a userland
218  * scheduling policy can also be implemented using the BPF scheduler
219  * as a shim layer.
220  */
221 struct sched_ext_ops {
222 	/**
223 	 * @select_cpu: Pick the target CPU for a task which is being woken up
224 	 * @p: task being woken up
225 	 * @prev_cpu: the cpu @p was on before sleeping
226 	 * @wake_flags: SCX_WAKE_*
227 	 *
228 	 * Decision made here isn't final. @p may be moved to any CPU while it
229 	 * is getting dispatched for execution later. However, as @p is not on
230 	 * the rq at this point, getting the eventual execution CPU right here
231 	 * saves a small bit of overhead down the line.
232 	 *
233 	 * If an idle CPU is returned, the CPU is kicked and will try to
234 	 * dispatch. While an explicit custom mechanism can be added,
235 	 * select_cpu() serves as the default way to wake up idle CPUs.
236 	 *
237 	 * @p may be inserted into a DSQ directly by calling
238 	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
239 	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
240 	 * of the CPU returned by this operation.
241 	 *
242 	 * Note that select_cpu() is never called for tasks that can only run
243 	 * on a single CPU or tasks with migration disabled, as they don't have
244 	 * the option to select a different CPU. See select_task_rq() for
245 	 * details.
246 	 */
247 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
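	/*
	 * A minimal sketch of an implementation (BPF side, assuming
	 * scx/common.bpf.h is included; the name "sel_cpu" is illustrative):
	 *
	 *	s32 BPF_STRUCT_OPS(sel_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *					     &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL,
	 *					   SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */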
248 
249 	/**
250 	 * @enqueue: Enqueue a task on the BPF scheduler
251 	 * @p: task being enqueued
252 	 * @enq_flags: %SCX_ENQ_*
253 	 *
254 	 * @p is ready to run. Insert directly into a DSQ by calling
255 	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
256 	 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
257 	 * inserted, the BPF scheduler owns @p and if it fails to dispatch @p,
258 	 *
259 	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
260 	 * skipped.
261 	 */
262 	void (*enqueue)(struct task_struct *p, u64 enq_flags);
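	/*
	 * A minimal sketch (BPF side): insert every task into a shared DSQ.
	 * "SHARED_DSQ" is a hypothetical custom DSQ id created with
	 * scx_bpf_create_dsq() in ops.init():
	 *
	 *	void BPF_STRUCT_OPS(enq, struct task_struct *p, u64 enq_flags)
	 *	{
	 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL,
	 *				   enq_flags);
	 *	}
	 */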
263 
264 	/**
265 	 * @dequeue: Remove a task from the BPF scheduler
266 	 * @p: task being dequeued
267 	 * @deq_flags: %SCX_DEQ_*
268 	 *
269 	 * Remove @p from the BPF scheduler. This is usually called to isolate
270 	 * the task while updating its scheduling properties (e.g. priority).
271 	 *
272 	 * The ext core keeps track of whether the BPF side owns a given task or
273 	 * not and can gracefully ignore spurious dispatches from BPF side,
274 	 * which makes it safe to not implement this method. However, depending
275 	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
276 	 * scheduling position not being updated across a priority change.
277 	 */
278 	void (*dequeue)(struct task_struct *p, u64 deq_flags);
279 
280 	/**
281 	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
282 	 * @cpu: CPU to dispatch tasks for
283 	 * @prev: previous task being switched out
284 	 *
285 	 * Called when a CPU's local dsq is empty. The operation should dispatch
286 	 * one or more tasks from the BPF scheduler into the DSQs using
287 	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
288 	 * using scx_bpf_dsq_move_to_local().
289 	 *
290 	 * The maximum number of times scx_bpf_dsq_insert() can be called
291 	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
292 	 * ops.dispatch_max_batch. See the comments on top of the two functions
293 	 * for more details.
294 	 *
295 	 * When not %NULL, @prev is an SCX task with its slice depleted. If
296 	 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
297 	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
298 	 * ops.dispatch() returns. To keep executing @prev, return without
299 	 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
300 	 */
301 	void (*dispatch)(s32 cpu, struct task_struct *prev);
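	/*
	 * A minimal sketch (BPF side) that refills the local DSQ from the
	 * hypothetical SHARED_DSQ used in the enqueue example above:
	 *
	 *	void BPF_STRUCT_OPS(disp, s32 cpu, struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
	 *	}
	 */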
302 
303 	/**
304 	 * @tick: Periodic tick
305 	 * @p: task running currently
306 	 *
307 	 * This operation is called every 1/HZ seconds on CPUs which are
308 	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
309 	 * immediate dispatch cycle on the CPU.
310 	 */
311 	void (*tick)(struct task_struct *p);
312 
313 	/**
314 	 * @runnable: A task is becoming runnable on its associated CPU
315 	 * @p: task becoming runnable
316 	 * @enq_flags: %SCX_ENQ_*
317 	 *
318 	 * This and the following three functions can be used to track a task's
319 	 * execution state transitions. A task becomes ->runnable() on a CPU,
320 	 * and then goes through one or more ->running() and ->stopping() pairs
321 	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
322 	 * done running on the CPU.
323 	 *
324 	 * @p is becoming runnable on the CPU because it's
325 	 *
326 	 * - waking up (%SCX_ENQ_WAKEUP)
327 	 * - being moved from another CPU
328 	 * - being restored after temporarily taken off the queue for an
329 	 *   attribute change.
330 	 *
331 	 * This and ->enqueue() are related but not coupled. This operation
332 	 * notifies @p's state transition and may not be followed by ->enqueue()
333 	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
334 	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
335 	 * task may be ->enqueue()'d without being preceded by this operation
336 	 * e.g. after exhausting its slice.
337 	 */
338 	void (*runnable)(struct task_struct *p, u64 enq_flags);
339 
340 	/**
341 	 * @running: A task is starting to run on its associated CPU
342 	 * @p: task starting to run
343 	 *
344 	 * See ->runnable() for explanation on the task state notifiers.
345 	 */
346 	void (*running)(struct task_struct *p);
347 
348 	/**
349 	 * @stopping: A task is stopping execution
350 	 * @p: task stopping to run
351 	 * @runnable: is task @p still runnable?
352 	 *
353 	 * See ->runnable() for explanation on the task state notifiers. If
354 	 * !@runnable, ->quiescent() will be invoked after this operation
355 	 * returns.
356 	 */
357 	void (*stopping)(struct task_struct *p, bool runnable);
358 
359 	/**
360 	 * @quiescent: A task is becoming not runnable on its associated CPU
361 	 * @p: task becoming not runnable
362 	 * @deq_flags: %SCX_DEQ_*
363 	 *
364 	 * See ->runnable() for explanation on the task state notifiers.
365 	 *
366 	 * @p is becoming quiescent on the CPU because it's
367 	 *
368 	 * - sleeping (%SCX_DEQ_SLEEP)
369 	 * - being moved to another CPU
370 	 * - being temporarily taken off the queue for an attribute change
371 	 *   (%SCX_DEQ_SAVE)
372 	 *
373 	 * This and ->dequeue() are related but not coupled. This operation
374 	 * notifies @p's state transition and may not be preceded by ->dequeue()
375 	 * e.g. when @p is being dispatched to a remote CPU.
376 	 */
377 	void (*quiescent)(struct task_struct *p, u64 deq_flags);
378 
379 	/**
380 	 * @yield: Yield CPU
381 	 * @from: yielding task
382 	 * @to: optional yield target task
383 	 *
384 	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
385 	 * The BPF scheduler should ensure that other available tasks are
386 	 * dispatched before the yielding task. Return value is ignored in this
387 	 * case.
388 	 *
389 	 * If @to is not NULL, @from wants to yield the CPU to @to. If the BPF
390 	 * scheduler can implement the request, return %true; otherwise, %false.
391 	 */
392 	bool (*yield)(struct task_struct *from, struct task_struct *to);
393 
394 	/**
395 	 * @core_sched_before: Task ordering for core-sched
396 	 * @a: task A
397 	 * @b: task B
398 	 *
399 	 * Used by core-sched to determine the ordering between two tasks. See
400 	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
401 	 * core-sched.
402 	 *
403 	 * Both @a and @b are runnable and may or may not currently be queued on
404 	 * the BPF scheduler. Should return %true if @a should run before @b.
405 	 * %false if there's no required ordering or @b should run before @a.
406 	 *
407 	 * If not specified, the default is ordering them according to when they
408 	 * became runnable.
409 	 */
410 	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
411 
412 	/**
413 	 * @set_weight: Set task weight
414 	 * @p: task to set weight for
415 	 * @weight: new weight [1..10000]
416 	 *
417 	 * Update @p's weight to @weight.
418 	 */
419 	void (*set_weight)(struct task_struct *p, u32 weight);
420 
421 	/**
422 	 * @set_cpumask: Set CPU affinity
423 	 * @p: task to set CPU affinity for
424 	 * @cpumask: cpumask of cpus that @p can run on
425 	 *
426 	 * Update @p's CPU affinity to @cpumask.
427 	 */
428 	void (*set_cpumask)(struct task_struct *p,
429 			    const struct cpumask *cpumask);
430 
431 	/**
432 	 * @update_idle: Update the idle state of a CPU
433 	 * @cpu: CPU to update the idle state for
434 	 * @idle: whether entering or exiting the idle state
435 	 *
436 	 * This operation is called when @cpu enters or leaves the idle
437 	 * state. By default, implementing this operation disables the built-in
438 	 * idle CPU tracking and the following helpers become unavailable:
439 	 *
440 	 * - scx_bpf_select_cpu_dfl()
441 	 * - scx_bpf_test_and_clear_cpu_idle()
442 	 * - scx_bpf_pick_idle_cpu()
443 	 *
444 	 * The user also must implement ops.select_cpu() as the default
445 	 * implementation relies on scx_bpf_select_cpu_dfl().
446 	 *
447 	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
448 	 * tracking.
449 	 */
450 	void (*update_idle)(s32 cpu, bool idle);
451 
452 	/**
453 	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
454 	 * @cpu: The CPU being acquired by the BPF scheduler.
455 	 * @args: Acquire arguments, see the struct definition.
456 	 *
457 	 * A CPU that was previously released from the BPF scheduler is now once
458 	 * again under its control.
459 	 */
460 	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
461 
462 	/**
463 	 * @cpu_release: A CPU is taken away from the BPF scheduler
464 	 * @cpu: The CPU being released by the BPF scheduler.
465 	 * @args: Release arguments, see the struct definition.
466 	 *
467 	 * The specified CPU is no longer under the control of the BPF
468 	 * scheduler. This could be because it was preempted by a higher
469 	 * priority sched_class, though there may be other reasons as well. The
470 	 * caller should consult @args->reason to determine the cause.
471 	 */
472 	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
473 
474 	/**
475 	 * @init_task: Initialize a task to run in a BPF scheduler
476 	 * @p: task to initialize for BPF scheduling
477 	 * @args: init arguments, see the struct definition
478 	 *
479 	 * Either we're loading a BPF scheduler or a new task is being forked.
480 	 * Initialize @p for BPF scheduling. This operation may block and can
481 	 * be used for allocations, and is called exactly once for a task.
482 	 *
483 	 * Return 0 for success, -errno for failure. An error return while
484 	 * loading will abort loading of the BPF scheduler. During a fork, it
485 	 * will abort that specific fork.
486 	 */
487 	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
488 
489 	/**
490 	 * @exit_task: Exit a previously-running task from the system
491 	 * @p: task to exit
492 	 * @args: exit arguments, see the struct definition
493 	 *
494 	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
495 	 * necessary cleanup for @p.
496 	 */
497 	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
498 
499 	/**
500 	 * @enable: Enable BPF scheduling for a task
501 	 * @p: task to enable BPF scheduling for
502 	 *
503 	 * Enable @p for BPF scheduling. enable() is called on @p any time it
504 	 * enters SCX, and is always paired with a matching disable().
505 	 */
506 	void (*enable)(struct task_struct *p);
507 
508 	/**
509 	 * @disable: Disable BPF scheduling for a task
510 	 * @p: task to disable BPF scheduling for
511 	 *
512 	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
513 	 * Disable BPF scheduling for @p. A disable() call is always matched
514 	 * with a prior enable() call.
515 	 */
516 	void (*disable)(struct task_struct *p);
517 
518 	/**
519 	 * @dump: Dump BPF scheduler state on error
520 	 * @ctx: debug dump context
521 	 *
522 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
523 	 */
524 	void (*dump)(struct scx_dump_ctx *ctx);
525 
526 	/**
527 	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
528 	 * @ctx: debug dump context
529 	 * @cpu: CPU to generate debug dump for
530 	 * @idle: @cpu is currently idle without any runnable tasks
531 	 *
532 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
533 	 * @cpu. If @idle is %true and this operation doesn't produce any
534 	 * output, @cpu is skipped for dump.
535 	 */
536 	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
537 
538 	/**
539 	 * @dump_task: Dump BPF scheduler state for a runnable task on error
540 	 * @ctx: debug dump context
541 	 * @p: runnable task to generate debug dump for
542 	 *
543 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
544 	 * @p.
545 	 */
546 	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
547 
548 #ifdef CONFIG_EXT_GROUP_SCHED
549 	/**
550 	 * @cgroup_init: Initialize a cgroup
551 	 * @cgrp: cgroup being initialized
552 	 * @args: init arguments, see the struct definition
553 	 *
554 	 * Either the BPF scheduler is being loaded or @cgrp is being created.
555 	 * Initialize @cgrp for sched_ext. This operation may block.
556 	 *
557 	 * Return 0 for success, -errno for failure. An error return while
558 	 * loading will abort loading of the BPF scheduler. During cgroup
559 	 * creation, it will abort the specific cgroup creation.
560 	 */
561 	s32 (*cgroup_init)(struct cgroup *cgrp,
562 			   struct scx_cgroup_init_args *args);
563 
564 	/**
565 	 * @cgroup_exit: Exit a cgroup
566 	 * @cgrp: cgroup being exited
567 	 *
568 	 * Either the BPF scheduler is being unloaded or @cgrp is being destroyed.
569 	 * Exit @cgrp for sched_ext. This operation may block.
570 	 */
571 	void (*cgroup_exit)(struct cgroup *cgrp);
572 
573 	/**
574 	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
575 	 * @p: task being moved
576 	 * @from: cgroup @p is being moved from
577 	 * @to: cgroup @p is being moved to
578 	 *
579 	 * Prepare @p for move from cgroup @from to @to. This operation may
580 	 * block and can be used for allocations.
581 	 *
582 	 * Return 0 for success, -errno for failure. An error return aborts the
583 	 * migration.
584 	 */
585 	s32 (*cgroup_prep_move)(struct task_struct *p,
586 				struct cgroup *from, struct cgroup *to);
587 
588 	/**
589 	 * @cgroup_move: Commit cgroup move
590 	 * @p: task being moved
591 	 * @from: cgroup @p is being moved from
592 	 * @to: cgroup @p is being moved to
593 	 *
594 	 * Commit the move. @p is dequeued during this operation.
595 	 */
596 	void (*cgroup_move)(struct task_struct *p,
597 			    struct cgroup *from, struct cgroup *to);
598 
599 	/**
600 	 * @cgroup_cancel_move: Cancel cgroup move
601 	 * @p: task whose cgroup move is being canceled
602 	 * @from: cgroup @p was being moved from
603 	 * @to: cgroup @p was being moved to
604 	 *
605 	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
606 	 * Undo the preparation.
607 	 */
608 	void (*cgroup_cancel_move)(struct task_struct *p,
609 				   struct cgroup *from, struct cgroup *to);
610 
611 	/**
612 	 * @cgroup_set_weight: A cgroup's weight is being changed
613 	 * @cgrp: cgroup whose weight is being updated
614 	 * @weight: new weight [1..10000]
615 	 *
616 	 * Update @cgrp's weight to @weight.
617 	 */
618 	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
619 #endif	/* CONFIG_EXT_GROUP_SCHED */
620 
621 	/*
622 	 * All online ops must come before ops.cpu_online().
623 	 */
624 
625 	/**
626 	 * @cpu_online: A CPU became online
627 	 * @cpu: CPU which just came up
628 	 *
629 	 * @cpu just came online. @cpu will not call ops.enqueue() or
630 	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
631 	 */
632 	void (*cpu_online)(s32 cpu);
633 
634 	/**
635 	 * @cpu_offline: A CPU is going offline
636 	 * @cpu: CPU which is going offline
637 	 *
638 	 * @cpu is going offline. @cpu will not call ops.enqueue() or
639 	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
640 	 */
641 	void (*cpu_offline)(s32 cpu);
642 
643 	/*
644 	 * All CPU hotplug ops must come before ops.init().
645 	 */
646 
647 	/**
648 	 * @init: Initialize the BPF scheduler
649 	 */
650 	s32 (*init)(void);
651 
652 	/**
653 	 * @exit: Clean up after the BPF scheduler
654 	 * @info: Exit info
655 	 *
656 	 * ops.exit() is also called on ops.init() failure, which is a bit
657 	 * unusual. This is to allow rich reporting through @info on how
658 	 * ops.init() failed.
659 	 */
660 	void (*exit)(struct scx_exit_info *info);
661 
662 	/**
663 	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
664 	 */
665 	u32 dispatch_max_batch;
666 
667 	/**
668 	 * @flags: %SCX_OPS_* flags
669 	 */
670 	u64 flags;
671 
672 	/**
673 	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
674 	 * runnable task should be able to wait before being scheduled. The
675 	 * maximum timeout may not exceed the default timeout of 30 seconds.
676 	 *
677 	 * Defaults to the maximum allowed timeout value of 30 seconds.
678 	 */
679 	u32 timeout_ms;
680 
681 	/**
682 	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
683 	 * value of 32768 is used.
684 	 */
685 	u32 exit_dump_len;
686 
687 	/**
688 	 * @hotplug_seq: A sequence number that may be set by the scheduler to
689 	 * detect when a hotplug event has occurred during the loading process.
690 	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
691 	 * load if the sequence number does not match @scx_hotplug_seq on the
692 	 * enable path.
693 	 */
694 	u64 hotplug_seq;
695 
696 	/**
697 	 * @name: BPF scheduler's name
698 	 *
699 	 * Must be a non-zero valid BPF object name including only isalnum(),
700 	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
701 	 * BPF scheduler is enabled.
702 	 */
703 	char name[SCX_OPS_NAME_LEN];
704 };
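/*
 * For illustration, a BPF scheduler typically wires the above callbacks up
 * through a struct_ops map. A minimal sketch (BPF side, names hypothetical):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops simple_ops = {
 *		.select_cpu	= (void *)sel_cpu,
 *		.enqueue	= (void *)enq,
 *		.dispatch	= (void *)disp,
 *		.flags		= SCX_OPS_ENQ_LAST,
 *		.timeout_ms	= 5000,
 *		.name		= "simple",
 *	};
 */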
705 
706 enum scx_opi {
707 	SCX_OPI_BEGIN			= 0,
708 	SCX_OPI_NORMAL_BEGIN		= 0,
709 	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
710 	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
711 	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
712 	SCX_OPI_END			= SCX_OP_IDX(init),
713 };
714 
715 enum scx_wake_flags {
716 	/* expose select WF_* flags as enums */
717 	SCX_WAKE_FORK		= WF_FORK,
718 	SCX_WAKE_TTWU		= WF_TTWU,
719 	SCX_WAKE_SYNC		= WF_SYNC,
720 };
721 
722 enum scx_enq_flags {
723 	/* expose select ENQUEUE_* flags as enums */
724 	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
725 	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
726 	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,
727 
728 	/* high 32bits are SCX specific */
729 
730 	/*
731 	 * Set the following to trigger preemption when calling
732 	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
733 	 * current task is cleared to zero and the CPU is kicked into the
734 	 * scheduling path. Implies %SCX_ENQ_HEAD.
735 	 */
736 	SCX_ENQ_PREEMPT		= 1LLU << 32,
737 
738 	/*
739 	 * The task being enqueued was previously enqueued on the current CPU's
740 	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
741 	 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
742 	 * invoked in a ->cpu_release() callback, and the task is again
743 	 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
744 	 * task will not be scheduled on the CPU until at least the next invocation
745 	 * of the ->cpu_acquire() callback.
746 	 */
747 	SCX_ENQ_REENQ		= 1LLU << 40,
748 
749 	/*
750 	 * The task being enqueued is the only task available for the CPU. By
751 	 * default, ext core keeps executing such tasks but when
752 	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
753 	 * %SCX_ENQ_LAST flag set.
754 	 *
755 	 * The BPF scheduler is responsible for triggering a follow-up
756 	 * scheduling event. Otherwise, execution may stall.
757 	 */
758 	SCX_ENQ_LAST		= 1LLU << 41,
759 
760 	/* high 8 bits are internal */
761 	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,
762 
763 	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
764 	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
765 };
766 
767 enum scx_deq_flags {
768 	/* expose select DEQUEUE_* flags as enums */
769 	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,
770 
771 	/* high 32bits are SCX specific */
772 
773 	/*
774 	 * The generic core-sched layer decided to execute the task even though
775 	 * it hasn't been dispatched yet. Dequeue from the BPF side.
776 	 */
777 	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
778 };
779 
780 enum scx_pick_idle_cpu_flags {
781 	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
782 };
783 
784 enum scx_kick_flags {
785 	/*
786 	 * Kick the target CPU if idle. Guarantees that the target CPU goes
787 	 * through at least one full scheduling cycle before going idle. If the
788 	 * target CPU can be determined to be currently not idle and going to go
789 	 * through a scheduling cycle before going idle, noop.
790 	 */
791 	SCX_KICK_IDLE		= 1LLU << 0,
792 
793 	/*
794 	 * Preempt the current task and execute the dispatch path. If the
795 	 * current task of the target CPU is an SCX task, its ->scx.slice is
796 	 * cleared to zero before the scheduling path is invoked so that the
797 	 * task expires and the dispatch path is invoked.
798 	 */
799 	SCX_KICK_PREEMPT	= 1LLU << 1,
800 
801 	/*
802 	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
803 	 * return after the target CPU finishes picking the next task.
804 	 */
805 	SCX_KICK_WAIT		= 1LLU << 2,
806 };
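/*
 * e.g. a BPF scheduler can nudge an idle CPU to run its dispatch path with
 * (sketch):
 *
 *	scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */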
807 
808 enum scx_tg_flags {
809 	SCX_TG_ONLINE		= 1U << 0,
810 	SCX_TG_INITED		= 1U << 1,
811 };
812 
813 enum scx_ops_enable_state {
814 	SCX_OPS_ENABLING,
815 	SCX_OPS_ENABLED,
816 	SCX_OPS_DISABLING,
817 	SCX_OPS_DISABLED,
818 };
819 
820 static const char *scx_ops_enable_state_str[] = {
821 	[SCX_OPS_ENABLING]	= "enabling",
822 	[SCX_OPS_ENABLED]	= "enabled",
823 	[SCX_OPS_DISABLING]	= "disabling",
824 	[SCX_OPS_DISABLED]	= "disabled",
825 };
826 
827 /*
828  * sched_ext_entity->ops_state
829  *
830  * Used to track the task ownership between the SCX core and the BPF scheduler.
831  * State transitions look as follows:
832  *
833  * NONE -> QUEUEING -> QUEUED -> DISPATCHING
834  *   ^              |                 |
835  *   |              v                 v
836  *   \-------------------------------/
837  *
838  * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
839  * sites for explanations on the conditions being waited upon and why they are
840  * safe. Transitions out of them into NONE or QUEUED must store_release and the
841  * waiters should load_acquire.
842  *
843  * Tracking scx_ops_state enables sched_ext core to reliably determine whether
844  * any given task can be dispatched by the BPF scheduler at all times and thus
845  * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
846  * to try to dispatch any task anytime regardless of its state as the SCX core
847  * can safely reject invalid dispatches.
848  */
849 enum scx_ops_state {
850 	SCX_OPSS_NONE,		/* owned by the SCX core */
851 	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
852 	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
853 	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */
854 
855 	/*
856 	 * QSEQ brands each QUEUED instance so that, when dispatch races
857 	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
858 	 * on the task being dispatched.
859 	 *
860 	 * As some 32bit archs can't do 64bit store_release/load_acquire,
861 	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
862 	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
863 	 * and runs with IRQ disabled. 30 bits should be sufficient.
864 	 */
865 	SCX_OPSS_QSEQ_SHIFT	= 2,
866 };
867 
868 /* Use macros to ensure that the type is unsigned long for the masks */
869 #define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
870 #define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
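/*
 * A sketch of how ->ops_state is typically decomposed: the state lives in the
 * low SCX_OPSS_QSEQ_SHIFT bits and the QSEQ brand in the rest:
 *
 *	unsigned long opss = atomic_long_read_acquire(&p->scx.ops_state);
 *	enum scx_ops_state state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 */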
871 
872 /*
873  * During exit, a task may schedule after losing its PIDs. When disabling the
874  * BPF scheduler, we need to be able to iterate tasks in every state to
875  * guarantee system safety. Maintain a dedicated task list which contains every
876  * task between its fork and eventual free.
877  */
878 static DEFINE_SPINLOCK(scx_tasks_lock);
879 static LIST_HEAD(scx_tasks);
880 
881 /* ops enable/disable */
882 static struct kthread_worker *scx_ops_helper;
883 static DEFINE_MUTEX(scx_ops_enable_mutex);
884 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
885 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
886 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
887 static unsigned long scx_in_softlockup;
888 static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
889 static int scx_ops_bypass_depth;
890 static bool scx_ops_init_task_enabled;
891 static bool scx_switching_all;
892 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
893 
894 static struct sched_ext_ops scx_ops;
895 static bool scx_warned_zero_slice;
896 
897 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
898 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
899 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_migration_disabled);
900 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
901 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
902 
903 #ifdef CONFIG_SMP
904 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
905 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
906 #endif
907 
908 static struct static_key_false scx_has_op[SCX_OPI_END] =
909 	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
910 
911 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
912 static struct scx_exit_info *scx_exit_info;
913 
914 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
915 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
916 
917 /*
918  * A monotonically increasing sequence number that is incremented every time a
919  * scheduler is enabled. This can be used to check if any custom sched_ext
920  * scheduler has ever been used in the system.
921  */
922 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
923 
924 /*
925  * The maximum amount of time in jiffies that a task may be runnable without
926  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
927  * scx_ops_error().
928  */
929 static unsigned long scx_watchdog_timeout;
930 
931 /*
932  * The last time the delayed work was run. This delayed work relies on
933  * ksoftirqd being able to run to service timer interrupts, so it's possible
934  * that this work itself could get wedged. To account for this, we check that
935  * it's not stalled in the timer tick, and trigger an error if it is.
936  */
937 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
938 
939 static struct delayed_work scx_watchdog_work;
940 
941 /* idle tracking */
942 #ifdef CONFIG_SMP
943 #ifdef CONFIG_CPUMASK_OFFSTACK
944 #define CL_ALIGNED_IF_ONSTACK
945 #else
946 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
947 #endif
948 
949 static struct {
950 	cpumask_var_t cpu;
951 	cpumask_var_t smt;
952 } idle_masks CL_ALIGNED_IF_ONSTACK;
953 
954 #endif	/* CONFIG_SMP */
955 
956 /* for %SCX_KICK_WAIT */
957 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
958 
959 /*
960  * Direct dispatch marker.
961  *
962  * Non-NULL values are used for direct dispatch from enqueue path. A valid
963  * pointer points to the task currently being enqueued. An ERR_PTR value is used
964  * to indicate that direct dispatch has already happened.
965  */
966 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
967 
968 /*
969  * Dispatch queues.
970  *
971  * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
972  * to avoid live-locking in bypass mode where all tasks are dispatched to
973  * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
974  * sufficient, it can be further split.
975  */
976 static struct scx_dispatch_q **global_dsqs;
977 
978 static const struct rhashtable_params dsq_hash_params = {
979 	.key_len		= sizeof_field(struct scx_dispatch_q, id),
980 	.key_offset		= offsetof(struct scx_dispatch_q, id),
981 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
982 };
983 
984 static struct rhashtable dsq_hash;
985 static LLIST_HEAD(dsqs_to_free);
986 
987 /* dispatch buf */
988 struct scx_dsp_buf_ent {
989 	struct task_struct	*task;
990 	unsigned long		qseq;
991 	u64			dsq_id;
992 	u64			enq_flags;
993 };
994 
995 static u32 scx_dsp_max_batch;
996 
997 struct scx_dsp_ctx {
998 	struct rq		*rq;
999 	u32			cursor;
1000 	u32			nr_tasks;
1001 	struct scx_dsp_buf_ent	buf[];
1002 };
1003 
1004 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
1005 
1006 /* string formatting from BPF */
1007 struct scx_bstr_buf {
1008 	u64			data[MAX_BPRINTF_VARARGS];
1009 	char			line[SCX_EXIT_MSG_LEN];
1010 };
1011 
1012 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
1013 static struct scx_bstr_buf scx_exit_bstr_buf;
1014 
1015 /* ops debug dump */
1016 struct scx_dump_data {
1017 	s32			cpu;
1018 	bool			first;
1019 	s32			cursor;
1020 	struct seq_buf		*s;
1021 	const char		*prefix;
1022 	struct scx_bstr_buf	buf;
1023 };
1024 
1025 static struct scx_dump_data scx_dump_data = {
1026 	.cpu			= -1,
1027 };
1028 
1029 /* /sys/kernel/sched_ext interface */
1030 static struct kset *scx_kset;
1031 static struct kobject *scx_root_kobj;
1032 
1033 #define CREATE_TRACE_POINTS
1034 #include <trace/events/sched_ext.h>
1035 
1036 static void process_ddsp_deferred_locals(struct rq *rq);
1037 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1038 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1039 					     s64 exit_code,
1040 					     const char *fmt, ...);
1041 
1042 #define scx_ops_error_kind(err, fmt, args...)					\
1043 	scx_ops_exit_kind((err), 0, fmt, ##args)
1044 
1045 #define scx_ops_exit(code, fmt, args...)					\
1046 	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1047 
1048 #define scx_ops_error(fmt, args...)						\
1049 	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1050 
1051 #define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1052 
1053 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1054 {
1055 	if (time_after(at, now))
1056 		return jiffies_to_msecs(at - now);
1057 	else
1058 		return -(long)jiffies_to_msecs(now - at);
1059 }
1060 
1061 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1062 static u32 higher_bits(u32 flags)
1063 {
1064 	return ~((1 << fls(flags)) - 1);
1065 }
1066 
1067 /* return the mask with only the highest bit set */
1068 static u32 highest_bit(u32 flags)
1069 {
1070 	int bit = fls(flags);
1071 	return ((u64)1 << bit) >> 1;
1072 }
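/*
 * e.g. for flags == 0x05 (bits 0 and 2 set), higher_bits() returns 0xfffffff8
 * (bits 3..31) and highest_bit() returns 0x04.
 */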
1073 
1074 static bool u32_before(u32 a, u32 b)
1075 {
1076 	return (s32)(a - b) < 0;
1077 }
1078 
1079 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1080 {
1081 	return global_dsqs[cpu_to_node(task_cpu(p))];
1082 }
1083 
1084 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1085 {
1086 	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1087 }
1088 
1089 /*
1090  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1091  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1092  * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1093  * whether it's running from an allowed context.
1094  *
1095  * @mask is constant, always inline to cull the mask calculations.
1096  */
1097 static __always_inline void scx_kf_allow(u32 mask)
1098 {
1099 	/* nesting is allowed only in increasing scx_kf_mask order */
1100 	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1101 		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1102 		  current->scx.kf_mask, mask);
1103 	current->scx.kf_mask |= mask;
1104 	barrier();
1105 }
1106 
1107 static void scx_kf_disallow(u32 mask)
1108 {
1109 	barrier();
1110 	current->scx.kf_mask &= ~mask;
1111 }
1112 
1113 #define SCX_CALL_OP(mask, op, args...)						\
1114 do {										\
1115 	if (mask) {								\
1116 		scx_kf_allow(mask);						\
1117 		scx_ops.op(args);						\
1118 		scx_kf_disallow(mask);						\
1119 	} else {								\
1120 		scx_ops.op(args);						\
1121 	}									\
1122 } while (0)
1123 
1124 #define SCX_CALL_OP_RET(mask, op, args...)					\
1125 ({										\
1126 	__typeof__(scx_ops.op(args)) __ret;					\
1127 	if (mask) {								\
1128 		scx_kf_allow(mask);						\
1129 		__ret = scx_ops.op(args);					\
1130 		scx_kf_disallow(mask);						\
1131 	} else {								\
1132 		__ret = scx_ops.op(args);					\
1133 	}									\
1134 	__ret;									\
1135 })
1136 
1137 /*
1138  * Some kfuncs are allowed only on the tasks that are subjects of the
1139  * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1140  * restrictions, the following SCX_CALL_OP_*() variants should be used when
1141  * invoking scx_ops operations that take task arguments. These can only be used
1142  * for non-nesting operations due to the way the tasks are tracked.
1143  *
1144  * kfuncs which can only operate on such tasks can in turn use
1145  * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1146  * the specific task.
1147  */
1148 #define SCX_CALL_OP_TASK(mask, op, task, args...)				\
1149 do {										\
1150 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1151 	current->scx.kf_tasks[0] = task;					\
1152 	SCX_CALL_OP(mask, op, task, ##args);					\
1153 	current->scx.kf_tasks[0] = NULL;					\
1154 } while (0)
1155 
1156 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...)				\
1157 ({										\
1158 	__typeof__(scx_ops.op(task, ##args)) __ret;				\
1159 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1160 	current->scx.kf_tasks[0] = task;					\
1161 	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);			\
1162 	current->scx.kf_tasks[0] = NULL;					\
1163 	__ret;									\
1164 })
1165 
1166 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)			\
1167 ({										\
1168 	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
1169 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1170 	current->scx.kf_tasks[0] = task0;					\
1171 	current->scx.kf_tasks[1] = task1;					\
1172 	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);		\
1173 	current->scx.kf_tasks[0] = NULL;					\
1174 	current->scx.kf_tasks[1] = NULL;					\
1175 	__ret;									\
1176 })
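/*
 * For example, the enqueue path invokes the BPF scheduler roughly as follows
 * (sketch):
 *
 *	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
 *
 * which records @p as the kfunc-target task and permits only the kfuncs
 * allowed in the SCX_KF_ENQUEUE context while ops.enqueue() runs.
 */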
1177 
1178 /* @mask is constant, always inline to cull unnecessary branches */
1179 static __always_inline bool scx_kf_allowed(u32 mask)
1180 {
1181 	if (unlikely(!(current->scx.kf_mask & mask))) {
1182 		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1183 			      mask, current->scx.kf_mask);
1184 		return false;
1185 	}
1186 
1187 	/*
1188 	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1189 	 * DISPATCH must not be called if we're running DEQUEUE which is nested
1190 	 * inside ops.dispatch(). We don't need to check boundaries for any
1191 	 * blocking kfuncs as the verifier ensures they're only called from
1192 	 * sleepable progs.
1193 	 */
1194 	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1195 		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1196 		scx_ops_error("cpu_release kfunc called from a nested operation");
1197 		return false;
1198 	}
1199 
1200 	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1201 		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1202 		scx_ops_error("dispatch kfunc called from a nested operation");
1203 		return false;
1204 	}
1205 
1206 	return true;
1207 }
1208 
1209 /* see SCX_CALL_OP_TASK() */
1210 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1211 							struct task_struct *p)
1212 {
1213 	if (!scx_kf_allowed(mask))
1214 		return false;
1215 
1216 	if (unlikely((p != current->scx.kf_tasks[0] &&
1217 		      p != current->scx.kf_tasks[1]))) {
1218 		scx_ops_error("called on a task not being operated on");
1219 		return false;
1220 	}
1221 
1222 	return true;
1223 }
1224 
1225 static bool scx_kf_allowed_if_unlocked(void)
1226 {
1227 	return !current->scx.kf_mask;
1228 }
1229 
1230 /**
1231  * nldsq_next_task - Iterate to the next task in a non-local DSQ
1232  * @dsq: user dsq being iterated
1233  * @cur: current position, %NULL to start iteration
1234  * @rev: walk backwards
1235  *
1236  * Returns %NULL when iteration is finished.
1237  */
1238 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1239 					   struct task_struct *cur, bool rev)
1240 {
1241 	struct list_head *list_node;
1242 	struct scx_dsq_list_node *dsq_lnode;
1243 
1244 	lockdep_assert_held(&dsq->lock);
1245 
1246 	if (cur)
1247 		list_node = &cur->scx.dsq_list.node;
1248 	else
1249 		list_node = &dsq->list;
1250 
1251 	/* find the next task, need to skip BPF iteration cursors */
1252 	do {
1253 		if (rev)
1254 			list_node = list_node->prev;
1255 		else
1256 			list_node = list_node->next;
1257 
1258 		if (list_node == &dsq->list)
1259 			return NULL;
1260 
1261 		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1262 					 node);
1263 	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1264 
1265 	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1266 }
1267 
1268 #define nldsq_for_each_task(p, dsq)						\
1269 	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
1270 	     (p) = nldsq_next_task((dsq), (p), false))
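/*
 * Typical usage, with @dsq->lock held (sketch):
 *
 *	raw_spin_lock(&dsq->lock);
 *	nldsq_for_each_task(p, dsq)
 *		nr_queued++;
 *	raw_spin_unlock(&dsq->lock);
 */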
1271 
1272 
1273 /*
1274  * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1275  * dispatch order. BPF-visible iterator is opaque and larger to allow future
1276  * changes without breaking backward compatibility. Can be used with
1277  * bpf_for_each(). See bpf_iter_scx_dsq_*().
1278  */
1279 enum scx_dsq_iter_flags {
1280 	/* iterate in the reverse dispatch order */
1281 	SCX_DSQ_ITER_REV		= 1U << 16,
1282 
1283 	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
1284 	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
1285 
1286 	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
1287 	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
1288 					  __SCX_DSQ_ITER_HAS_SLICE |
1289 					  __SCX_DSQ_ITER_HAS_VTIME,
1290 };
1291 
1292 struct bpf_iter_scx_dsq_kern {
1293 	struct scx_dsq_list_node	cursor;
1294 	struct scx_dispatch_q		*dsq;
1295 	u64				slice;
1296 	u64				vtime;
1297 } __attribute__((aligned(8)));
1298 
1299 struct bpf_iter_scx_dsq {
1300 	u64				__opaque[6];
1301 } __attribute__((aligned(8)));
1302 
1303 
1304 /*
1305  * SCX task iterator.
1306  */
1307 struct scx_task_iter {
1308 	struct sched_ext_entity		cursor;
1309 	struct task_struct		*locked;
1310 	struct rq			*rq;
1311 	struct rq_flags			rf;
1312 	u32				cnt;
1313 };
1314 
1315 /**
1316  * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1317  * @iter: iterator to init
1318  *
1319  * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1320  * must eventually be stopped with scx_task_iter_stop().
1321  *
1322  * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1323  * between this and the first next() call or between any two next() calls. If
1324  * the locks are released between two next() calls, the caller is responsible
1325  * for ensuring that the task being iterated remains accessible either through
1326  * RCU read lock or obtaining a reference count.
1327  *
1328  * All tasks which existed when the iteration started are guaranteed to be
1329  * visited as long as they still exist.
1330  */
1331 static void scx_task_iter_start(struct scx_task_iter *iter)
1332 {
1333 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1334 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1335 
1336 	spin_lock_irq(&scx_tasks_lock);
1337 
1338 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1339 	list_add(&iter->cursor.tasks_node, &scx_tasks);
1340 	iter->locked = NULL;
1341 	iter->cnt = 0;
1342 }
1343 
1344 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1345 {
1346 	if (iter->locked) {
1347 		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1348 		iter->locked = NULL;
1349 	}
1350 }
1351 
1352 /**
1353  * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1354  * @iter: iterator to unlock
1355  *
1356  * If @iter is in the middle of a locked iteration, it may be locking the rq of
1357  * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1358  * This function can be safely called anytime during an iteration.
1359  */
1360 static void scx_task_iter_unlock(struct scx_task_iter *iter)
1361 {
1362 	__scx_task_iter_rq_unlock(iter);
1363 	spin_unlock_irq(&scx_tasks_lock);
1364 }
1365 
1366 /**
1367  * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1368  * @iter: iterator to re-lock
1369  *
1370  * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1371  * doesn't re-lock the rq lock. Must be called before other iterator operations.
1372  */
1373 static void scx_task_iter_relock(struct scx_task_iter *iter)
1374 {
1375 	spin_lock_irq(&scx_tasks_lock);
1376 }
1377 
1378 /**
1379  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1380  * @iter: iterator to exit
1381  *
1382  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1383  * which is released on return. If the iterator holds a task's rq lock, that rq
1384  * lock is also released. See scx_task_iter_start() for details.
1385  */
1386 static void scx_task_iter_stop(struct scx_task_iter *iter)
1387 {
1388 	list_del_init(&iter->cursor.tasks_node);
1389 	scx_task_iter_unlock(iter);
1390 }
1391 
1392 /**
1393  * scx_task_iter_next - Next task
1394  * @iter: iterator to walk
1395  *
1396  * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1397  * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
1398  * stalls by holding scx_tasks_lock for too long.
1399  */
1400 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1401 {
1402 	struct list_head *cursor = &iter->cursor.tasks_node;
1403 	struct sched_ext_entity *pos;
1404 
1405 	if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
1406 		scx_task_iter_unlock(iter);
1407 		cond_resched();
1408 		scx_task_iter_relock(iter);
1409 	}
1410 
1411 	list_for_each_entry(pos, cursor, tasks_node) {
1412 		if (&pos->tasks_node == &scx_tasks)
1413 			return NULL;
1414 		if (!(pos->flags & SCX_TASK_CURSOR)) {
1415 			list_move(cursor, &pos->tasks_node);
1416 			return container_of(pos, struct task_struct, scx);
1417 		}
1418 	}
1419 
1420 	/* can't happen, should always terminate at scx_tasks above */
1421 	BUG();
1422 }
1423 
1424 /**
1425  * scx_task_iter_next_locked - Next non-idle task with its rq locked
1426  * @iter: iterator to walk
1427  *
1428  * Visit the next non-idle task with its rq lock held. Idle tasks (the
1429  * init_tasks / swappers) are skipped as they are never scheduled through SCX.
1430  * See scx_task_iter_start() for details.
1431  */
1432 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1433 {
1434 	struct task_struct *p;
1435 
1436 	__scx_task_iter_rq_unlock(iter);
1437 
1438 	while ((p = scx_task_iter_next(iter))) {
1439 		/*
1440 		 * scx_task_iter is used to prepare and move tasks into SCX
1441 		 * while loading the BPF scheduler and vice-versa while
1442 		 * unloading. The init_tasks ("swappers") should be excluded
1443 		 * from the iteration because:
1444 		 *
1445 		 * - It's unsafe to use __setscheduler_prio() on an init_task to
1446 		 *   determine the sched_class to use as it won't preserve its
1447 		 *   idle_sched_class.
1448 		 *
1449 		 * - ops.init/exit_task() can easily be confused if called with
1450 		 *   init_tasks as they, e.g., share PID 0.
1451 		 *
1452 		 * As init_tasks are never scheduled through SCX, they can be
1453 		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1454 		 * doesn't work here:
1455 		 *
1456 		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1457 		 *   yet been onlined.
1458 		 *
1459 		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1460 		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
1461 		 *
1462 		 * Test for idle_sched_class as only init_tasks are on it.
1463 		 */
1464 		if (p->sched_class != &idle_sched_class)
1465 			break;
1466 	}
1467 	if (!p)
1468 		return NULL;
1469 
1470 	iter->rq = task_rq_lock(p, &iter->rf);
1471 	iter->locked = p;
1472 
1473 	return p;
1474 }
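/*
 * Typical iteration pattern (sketch):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti))) {
 *		... @p's rq is locked here ...
 *	}
 *	scx_task_iter_stop(&sti);
 */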
1475 
1476 static enum scx_ops_enable_state scx_ops_enable_state(void)
1477 {
1478 	return atomic_read(&scx_ops_enable_state_var);
1479 }
1480 
1481 static enum scx_ops_enable_state
1482 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1483 {
1484 	return atomic_xchg(&scx_ops_enable_state_var, to);
1485 }
1486 
1487 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1488 					enum scx_ops_enable_state from)
1489 {
1490 	int from_v = from;
1491 
1492 	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1493 }
1494 
1495 static bool scx_rq_bypassing(struct rq *rq)
1496 {
1497 	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1498 }
1499 
1500 /**
1501  * wait_ops_state - Busy-wait the specified ops state to end
1502  * @p: target task
1503  * @opss: state to wait the end of
1504  *
1505  * Busy-wait for @p to transition out of @opss. This can only be used when the
1506  * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This
1507  * function also has load_acquire semantics to ensure that the caller can see
1508  * the updates made in the enqueueing and dispatching paths.
1509  */
1510 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1511 {
1512 	do {
1513 		cpu_relax();
1514 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1515 }
1516 
1517 /**
1518  * ops_cpu_valid - Verify a cpu number
1519  * @cpu: cpu number which came from a BPF ops
1520  * @where: extra information reported on error
1521  *
1522  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1523  * Verify that it is in range and one of the possible cpus. If invalid, trigger
1524  * an ops error.
1525  */
1526 static bool ops_cpu_valid(s32 cpu, const char *where)
1527 {
1528 	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1529 		return true;
1530 	} else {
1531 		scx_ops_error("invalid CPU %d%s%s", cpu,
1532 			      where ? " " : "", where ?: "");
1533 		return false;
1534 	}
1535 }
1536 
1537 /**
1538  * ops_sanitize_err - Sanitize a -errno value
1539  * @ops_name: operation to blame on failure
1540  * @err: -errno value to sanitize
1541  *
1542  * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1543  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1544  * cause misbehaviors. For example, a large negative return from
1545  * ops.init_task() triggers an oops when passed up the call chain because the
1546  * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1547  * handled as a pointer.
1548  */
1549 static int ops_sanitize_err(const char *ops_name, s32 err)
1550 {
1551 	if (err < 0 && err >= -MAX_ERRNO)
1552 		return err;
1553 
1554 	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1555 	return -EPROTO;
1556 }
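/*
 * e.g. the return value of ops.select_cpu() is validated roughly as follows
 * (sketch):
 *
 *	cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
 *				   select_cpu, p, prev_cpu, wake_flags);
 *	if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
 *		... use @cpu ...
 *
 * Similarly, error returns from ops callbacks such as ops.init_task() are
 * passed through ops_sanitize_err() before being propagated further.
 */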
1557 
1558 static void run_deferred(struct rq *rq)
1559 {
1560 	process_ddsp_deferred_locals(rq);
1561 }
1562 
1563 #ifdef CONFIG_SMP
1564 static void deferred_bal_cb_workfn(struct rq *rq)
1565 {
1566 	run_deferred(rq);
1567 }
1568 #endif
1569 
1570 static void deferred_irq_workfn(struct irq_work *irq_work)
1571 {
1572 	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1573 
1574 	raw_spin_rq_lock(rq);
1575 	run_deferred(rq);
1576 	raw_spin_rq_unlock(rq);
1577 }
1578 
1579 /**
1580  * schedule_deferred - Schedule execution of deferred actions on an rq
1581  * @rq: target rq
1582  *
1583  * Schedule execution of deferred actions on @rq. Must be called with @rq
1584  * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1585  * can unlock @rq to e.g. migrate tasks to other rqs.
1586  */
1587 static void schedule_deferred(struct rq *rq)
1588 {
1589 	lockdep_assert_rq_held(rq);
1590 
1591 #ifdef CONFIG_SMP
1592 	/*
1593 	 * If in the middle of waking up a task, task_woken_scx() will be called
1594 	 * afterwards which will then run the deferred actions, no need to
1595 	 * schedule anything.
1596 	 */
1597 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1598 		return;
1599 
1600 	/*
1601 	 * If in balance, the balance callbacks will be called before rq lock is
1602 	 * released. Schedule one.
1603 	 */
1604 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1605 		queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1606 				       deferred_bal_cb_workfn);
1607 		return;
1608 	}
1609 #endif
1610 	/*
1611 	 * No scheduler hooks available. Queue an irq work. They are executed on
1612 	 * IRQ re-enable which may take a bit longer than the scheduler hooks.
1613 	 * The above WAKEUP and BALANCE paths should cover most of the cases and
1614 	 * the time to IRQ re-enable shouldn't be long.
1615 	 */
1616 	irq_work_queue(&rq->scx.deferred_irq_work);
1617 }
1618 
1619 /**
1620  * touch_core_sched - Update timestamp used for core-sched task ordering
1621  * @rq: rq to read clock from, must be locked
1622  * @p: task to update the timestamp for
1623  *
1624  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1625  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1626  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1627  * exhaustion).
1628  */
1629 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1630 {
1631 	lockdep_assert_rq_held(rq);
1632 
1633 #ifdef CONFIG_SCHED_CORE
1634 	/*
1635 	 * It's okay to update the timestamp spuriously. Use
1636 	 * sched_core_disabled() which is cheaper than enabled().
1637 	 *
1638 	 * As this is used to determine ordering between tasks of sibling CPUs,
1639 	 * it may be better to use a per-core dispatch sequence instead.
1640 	 */
1641 	if (!sched_core_disabled())
1642 		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1643 #endif
1644 }
1645 
1646 /**
1647  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1648  * @rq: rq to read clock from, must be locked
1649  * @p: task being dispatched
1650  *
1651  * If the BPF scheduler implements custom core-sched ordering via
1652  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1653  * ordering within each local DSQ. This function is called from dispatch paths
1654  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1655  */
1656 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1657 {
1658 	lockdep_assert_rq_held(rq);
1659 
1660 #ifdef CONFIG_SCHED_CORE
1661 	if (SCX_HAS_OP(core_sched_before))
1662 		touch_core_sched(rq, p);
1663 #endif
1664 }
1665 
1666 static void update_curr_scx(struct rq *rq)
1667 {
1668 	struct task_struct *curr = rq->curr;
1669 	s64 delta_exec;
1670 
1671 	delta_exec = update_curr_common(rq);
1672 	if (unlikely(delta_exec <= 0))
1673 		return;
1674 
1675 	if (curr->scx.slice != SCX_SLICE_INF) {
1676 		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1677 		if (!curr->scx.slice)
1678 			touch_core_sched(rq, curr);
1679 	}
1680 }
1681 
1682 static bool scx_dsq_priq_less(struct rb_node *node_a,
1683 			      const struct rb_node *node_b)
1684 {
1685 	const struct task_struct *a =
1686 		container_of(node_a, struct task_struct, scx.dsq_priq);
1687 	const struct task_struct *b =
1688 		container_of(node_b, struct task_struct, scx.dsq_priq);
1689 
1690 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1691 }
1692 
1693 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1694 {
1695 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1696 	WRITE_ONCE(dsq->nr, dsq->nr + delta);
1697 }
1698 
1699 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1700 			     u64 enq_flags)
1701 {
1702 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
1703 
1704 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1705 	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1706 		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
1707 
1708 	if (!is_local) {
1709 		raw_spin_lock(&dsq->lock);
1710 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1711 			scx_ops_error("attempting to dispatch to a destroyed dsq");
1712 			/* fall back to the global dsq */
1713 			raw_spin_unlock(&dsq->lock);
1714 			dsq = find_global_dsq(p);
1715 			raw_spin_lock(&dsq->lock);
1716 		}
1717 	}
1718 
1719 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1720 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1721 		/*
1722 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1723 		 * their FIFO queues. To avoid confusion and to keep
1724 		 * FIFO-dispatched tasks from accidentally starving
1725 		 * vtime-dispatched ones, we disallow any built-in DSQ from
1726 		 * doing vtime ordering of tasks.
1727 		 */
1728 		scx_ops_error("cannot use vtime ordering for built-in DSQs");
1729 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1730 	}
1731 
1732 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1733 		struct rb_node *rbp;
1734 
1735 		/*
1736 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1737 		 * linked to both the rbtree and list on PRIQs, this can only be
1738 		 * tested easily when adding the first task.
1739 		 */
1740 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1741 			     nldsq_next_task(dsq, NULL, false)))
1742 			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1743 				      dsq->id);
1744 
1745 		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1746 		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1747 
1748 		/*
1749 		 * Find the previous task and insert after it on the list so
1750 		 * that @dsq->list is vtime ordered.
1751 		 */
1752 		rbp = rb_prev(&p->scx.dsq_priq);
1753 		if (rbp) {
1754 			struct task_struct *prev =
1755 				container_of(rbp, struct task_struct,
1756 					     scx.dsq_priq);
1757 			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1758 		} else {
1759 			list_add(&p->scx.dsq_list.node, &dsq->list);
1760 		}
1761 	} else {
1762 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1763 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1764 			scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1765 				      dsq->id);
1766 
1767 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1768 			list_add(&p->scx.dsq_list.node, &dsq->list);
1769 		else
1770 			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1771 	}
1772 
1773 	/* seq records the order tasks are queued, used by BPF DSQ iterator */
1774 	dsq->seq++;
1775 	p->scx.dsq_seq = dsq->seq;
1776 
1777 	dsq_mod_nr(dsq, 1);
1778 	p->scx.dsq = dsq;
1779 
1780 	/*
1781 	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1782 	 * direct dispatch path, but we clear them here because the direct
1783 	 * dispatch verdict may be overridden on the enqueue path during e.g.
1784 	 * bypass.
1785 	 */
1786 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1787 	p->scx.ddsp_enq_flags = 0;
1788 
1789 	/*
1790 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1791 	 * match waiters' load_acquire.
1792 	 */
1793 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1794 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1795 
1796 	if (is_local) {
1797 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1798 		bool preempt = false;
1799 
1800 		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1801 		    rq->curr->sched_class == &ext_sched_class) {
1802 			rq->curr->scx.slice = 0;
1803 			preempt = true;
1804 		}
1805 
1806 		if (preempt || sched_class_above(&ext_sched_class,
1807 						 rq->curr->sched_class))
1808 			resched_curr(rq);
1809 	} else {
1810 		raw_spin_unlock(&dsq->lock);
1811 	}
1812 }
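
/*
 * A minimal BPF-side sketch of how the two queueing modes above are reached,
 * assuming the scx_bpf_dsq_insert*() kfuncs and a user DSQ created with
 * scx_bpf_create_dsq(); MY_DSQ and vtime are placeholders:
 *
 *	// FIFO: reaches dispatch_enqueue() without %SCX_ENQ_DSQ_PRIQ
 *	scx_bpf_dsq_insert(p, MY_DSQ, SCX_SLICE_DFL, enq_flags);
 *
 *	// PRIQ: reaches dispatch_enqueue() with %SCX_ENQ_DSQ_PRIQ set and is
 *	// ordered by p->scx.dsq_vtime via scx_dsq_priq_less()
 *	scx_bpf_dsq_insert_vtime(p, MY_DSQ, SCX_SLICE_DFL, vtime, enq_flags);
 *
 * Mixing the two modes on the same user DSQ trips the scx_ops_error()s above.
 */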
1813 
1814 static void task_unlink_from_dsq(struct task_struct *p,
1815 				 struct scx_dispatch_q *dsq)
1816 {
1817 	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1818 
1819 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1820 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
1821 		RB_CLEAR_NODE(&p->scx.dsq_priq);
1822 		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1823 	}
1824 
1825 	list_del_init(&p->scx.dsq_list.node);
1826 	dsq_mod_nr(dsq, -1);
1827 }
1828 
1829 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1830 {
1831 	struct scx_dispatch_q *dsq = p->scx.dsq;
1832 	bool is_local = dsq == &rq->scx.local_dsq;
1833 
1834 	if (!dsq) {
1835 		/*
1836 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1837 		 * Unlinking is all that's needed to cancel.
1838 		 */
1839 		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1840 			list_del_init(&p->scx.dsq_list.node);
1841 
1842 		/*
1843 		 * When dispatching directly from the BPF scheduler to a local
1844 		 * DSQ, the task isn't associated with any DSQ but
1845 		 * @p->scx.holding_cpu may be set under the protection of
1846 		 * %SCX_OPSS_DISPATCHING.
1847 		 */
1848 		if (p->scx.holding_cpu >= 0)
1849 			p->scx.holding_cpu = -1;
1850 
1851 		return;
1852 	}
1853 
1854 	if (!is_local)
1855 		raw_spin_lock(&dsq->lock);
1856 
1857 	/*
1858 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1859 	 * change underneath us.
1860 	 */
1861 	if (p->scx.holding_cpu < 0) {
1862 		/* @p must still be on @dsq, dequeue */
1863 		task_unlink_from_dsq(p, dsq);
1864 	} else {
1865 		/*
1866 		 * We're racing against dispatch_to_local_dsq() which already
1867 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1868 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1869 		 * the race.
1870 		 */
1871 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1872 		p->scx.holding_cpu = -1;
1873 	}
1874 	p->scx.dsq = NULL;
1875 
1876 	if (!is_local)
1877 		raw_spin_unlock(&dsq->lock);
1878 }
1879 
1880 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1881 						    struct task_struct *p)
1882 {
1883 	struct scx_dispatch_q *dsq;
1884 
1885 	if (dsq_id == SCX_DSQ_LOCAL)
1886 		return &rq->scx.local_dsq;
1887 
1888 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1889 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1890 
1891 		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1892 			return find_global_dsq(p);
1893 
1894 		return &cpu_rq(cpu)->scx.local_dsq;
1895 	}
1896 
1897 	if (dsq_id == SCX_DSQ_GLOBAL)
1898 		dsq = find_global_dsq(p);
1899 	else
1900 		dsq = find_user_dsq(dsq_id);
1901 
1902 	if (unlikely(!dsq)) {
1903 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1904 			      dsq_id, p->comm, p->pid);
1905 		return find_global_dsq(p);
1906 	}
1907 
1908 	return dsq;
1909 }
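
/*
 * For illustration, dsq_id values a BPF scheduler might pass down and how
 * they resolve here (the CPU number and user DSQ ID are placeholders):
 *
 *	SCX_DSQ_LOCAL		// local DSQ of @rq
 *	SCX_DSQ_LOCAL_ON | 3	// local DSQ of CPU 3, validated by
 *				// ops_cpu_valid()
 *	SCX_DSQ_GLOBAL		// per-node global DSQ via find_global_dsq()
 *	0x100			// user DSQ; if it was never created with
 *				// scx_bpf_create_dsq(), an ops error is raised
 *				// and the task falls back to the global DSQ
 */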
1910 
1911 static void mark_direct_dispatch(struct task_struct *ddsp_task,
1912 				 struct task_struct *p, u64 dsq_id,
1913 				 u64 enq_flags)
1914 {
1915 	/*
1916 	 * Mark that dispatch already happened from ops.select_cpu() or
1917 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1918 	 * which can never match a valid task pointer.
1919 	 */
1920 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1921 
1922 	/* @p must match the task on the enqueue path */
1923 	if (unlikely(p != ddsp_task)) {
1924 		if (IS_ERR(ddsp_task))
1925 			scx_ops_error("%s[%d] already direct-dispatched",
1926 				      p->comm, p->pid);
1927 		else
1928 			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1929 				      ddsp_task->comm, ddsp_task->pid,
1930 				      p->comm, p->pid);
1931 		return;
1932 	}
1933 
1934 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1935 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1936 
1937 	p->scx.ddsp_dsq_id = dsq_id;
1938 	p->scx.ddsp_enq_flags = enq_flags;
1939 }
1940 
1941 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1942 {
1943 	struct rq *rq = task_rq(p);
1944 	struct scx_dispatch_q *dsq =
1945 		find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
1946 
1947 	touch_core_sched_dispatch(rq, p);
1948 
1949 	p->scx.ddsp_enq_flags |= enq_flags;
1950 
1951 	/*
1952 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
1953 	 * double lock a remote rq and enqueue to its local DSQ. For
1954 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1955 	 * the enqueue so that it's executed when @rq can be unlocked.
1956 	 */
1957 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1958 		unsigned long opss;
1959 
1960 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1961 
1962 		switch (opss & SCX_OPSS_STATE_MASK) {
1963 		case SCX_OPSS_NONE:
1964 			break;
1965 		case SCX_OPSS_QUEUEING:
1966 			/*
1967 			 * As @p was never passed to the BPF side, _release is
1968 			 * not strictly necessary. Still do it for consistency.
1969 			 */
1970 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1971 			break;
1972 		default:
1973 			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1974 				  p->comm, p->pid, opss);
1975 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1976 			break;
1977 		}
1978 
1979 		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1980 		list_add_tail(&p->scx.dsq_list.node,
1981 			      &rq->scx.ddsp_deferred_locals);
1982 		schedule_deferred(rq);
1983 		return;
1984 	}
1985 
1986 	dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1987 }
1988 
1989 static bool scx_rq_online(struct rq *rq)
1990 {
1991 	/*
1992 	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1993 	 * the online state as seen from the BPF scheduler. cpu_active() test
1994 	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1995 	 * stay set until the current scheduling operation is complete even if
1996 	 * we aren't locking @rq.
1997 	 */
1998 	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1999 }
2000 
2001 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
2002 			    int sticky_cpu)
2003 {
2004 	struct task_struct **ddsp_taskp;
2005 	unsigned long qseq;
2006 
2007 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
2008 
2009 	/* rq migration */
2010 	if (sticky_cpu == cpu_of(rq))
2011 		goto local_norefill;
2012 
2013 	/*
2014 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2015 	 * is offline and are just running the hotplug path. Don't bother the
2016 	 * BPF scheduler.
2017 	 */
2018 	if (!scx_rq_online(rq))
2019 		goto local;
2020 
2021 	if (scx_rq_bypassing(rq))
2022 		goto global;
2023 
2024 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2025 		goto direct;
2026 
2027 	/* see %SCX_OPS_ENQ_EXITING */
2028 	if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
2029 	    unlikely(p->flags & PF_EXITING))
2030 		goto local;
2031 
2032 	/* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
2033 	if (!static_branch_unlikely(&scx_ops_enq_migration_disabled) &&
2034 	    is_migration_disabled(p))
2035 		goto local;
2036 
2037 	if (!SCX_HAS_OP(enqueue))
2038 		goto global;
2039 
2040 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2041 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2042 
2043 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2044 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2045 
2046 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2047 	WARN_ON_ONCE(*ddsp_taskp);
2048 	*ddsp_taskp = p;
2049 
2050 	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
2051 
2052 	*ddsp_taskp = NULL;
2053 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2054 		goto direct;
2055 
2056 	/*
2057 	 * If not directly dispatched, QUEUEING isn't cleared yet and dispatch or
2058 	 * dequeue may be waiting. The store_release matches their load_acquire.
2059 	 */
2060 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2061 	return;
2062 
2063 direct:
2064 	direct_dispatch(p, enq_flags);
2065 	return;
2066 
2067 local:
2068 	/*
2069 	 * For task-ordering, slice refill must be treated as implying the end
2070 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2071 	 * higher priority it becomes from scx_prio_less()'s POV.
2072 	 */
2073 	touch_core_sched(rq, p);
2074 	p->scx.slice = SCX_SLICE_DFL;
2075 local_norefill:
2076 	dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2077 	return;
2078 
2079 global:
2080 	touch_core_sched(rq, p);	/* see the comment in local: */
2081 	p->scx.slice = SCX_SLICE_DFL;
2082 	dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2083 }
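
/*
 * A minimal sketch of the BPF side this function calls into, assuming the
 * conventional scx BPF skeleton macros; SHARED_DSQ is a placeholder user DSQ:
 *
 *	void BPF_STRUCT_OPS(minimal_enqueue, struct task_struct *p,
 *			    u64 enq_flags)
 *	{
 *		// inserting from inside ops.enqueue() goes through
 *		// mark_direct_dispatch() and takes the direct: path above
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 * If ops.enqueue() instead only records @p (e.g. in a BPF map) and leaves the
 * insertion to ops.dispatch(), @p stays in the QUEUED state set above until
 * finish_dispatch() claims it.
 */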
2084 
2085 static bool task_runnable(const struct task_struct *p)
2086 {
2087 	return !list_empty(&p->scx.runnable_node);
2088 }
2089 
2090 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2091 {
2092 	lockdep_assert_rq_held(rq);
2093 
2094 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2095 		p->scx.runnable_at = jiffies;
2096 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2097 	}
2098 
2099 	/*
2100 	 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2101 	 * appended to the runnable_list.
2102 	 */
2103 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2104 }
2105 
2106 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2107 {
2108 	list_del_init(&p->scx.runnable_node);
2109 	if (reset_runnable_at)
2110 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2111 }
2112 
2113 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2114 {
2115 	int sticky_cpu = p->scx.sticky_cpu;
2116 
2117 	if (enq_flags & ENQUEUE_WAKEUP)
2118 		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2119 
2120 	enq_flags |= rq->scx.extra_enq_flags;
2121 
2122 	if (sticky_cpu >= 0)
2123 		p->scx.sticky_cpu = -1;
2124 
2125 	/*
2126 	 * Restoring a running task will be immediately followed by
2127 	 * set_next_task_scx() which expects the task to not be on the BPF
2128 	 * scheduler as tasks can only start running through local DSQs. Force
2129 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2130 	 */
2131 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2132 		sticky_cpu = cpu_of(rq);
2133 
2134 	if (p->scx.flags & SCX_TASK_QUEUED) {
2135 		WARN_ON_ONCE(!task_runnable(p));
2136 		goto out;
2137 	}
2138 
2139 	set_task_runnable(rq, p);
2140 	p->scx.flags |= SCX_TASK_QUEUED;
2141 	rq->scx.nr_running++;
2142 	add_nr_running(rq, 1);
2143 
2144 	if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2145 		SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2146 
2147 	if (enq_flags & SCX_ENQ_WAKEUP)
2148 		touch_core_sched(rq, p);
2149 
2150 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2151 out:
2152 	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2153 }
2154 
2155 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2156 {
2157 	unsigned long opss;
2158 
2159 	/* dequeue is always temporary, don't reset runnable_at */
2160 	clr_task_runnable(p, false);
2161 
2162 	/* acquire ensures that we see the preceding updates on QUEUED */
2163 	opss = atomic_long_read_acquire(&p->scx.ops_state);
2164 
2165 	switch (opss & SCX_OPSS_STATE_MASK) {
2166 	case SCX_OPSS_NONE:
2167 		break;
2168 	case SCX_OPSS_QUEUEING:
2169 		/*
2170 		 * QUEUEING is started and finished while holding @p's rq lock.
2171 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2172 		 */
2173 		BUG();
2174 	case SCX_OPSS_QUEUED:
2175 		if (SCX_HAS_OP(dequeue))
2176 			SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2177 
2178 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2179 					    SCX_OPSS_NONE))
2180 			break;
2181 		fallthrough;
2182 	case SCX_OPSS_DISPATCHING:
2183 		/*
2184 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
2185 		 * wait for the transfer to complete so that @p doesn't get
2186 		 * added to its DSQ after dequeueing is complete.
2187 		 *
2188 		 * As we're waiting on DISPATCHING with the rq locked, the
2189 		 * dispatching side shouldn't try to lock the rq while
2190 		 * DISPATCHING is set. See dispatch_to_local_dsq().
2191 		 *
2192 		 * DISPATCHING shouldn't have qseq set and control can reach
2193 		 * here with NONE @opss from the above QUEUED case block.
2194 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2195 		 */
2196 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
2197 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2198 		break;
2199 	}
2200 }
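
/*
 * For reference, the ops_state transitions handled here and in the
 * surrounding enqueue/dispatch paths (qseq bits omitted):
 *
 *	NONE -> QUEUEING	do_enqueue_task() before calling ops.enqueue()
 *	QUEUEING -> QUEUED	do_enqueue_task() when ops.enqueue() returns
 *				without a direct dispatch
 *	QUEUEING -> NONE	direct dispatch from ops.select_cpu()/enqueue()
 *	QUEUED -> DISPATCHING	finish_dispatch() claiming the task
 *	QUEUED -> NONE		ops_dequeue()
 *	DISPATCHING -> NONE	dispatch_enqueue() / dispatch_to_local_dsq()
 */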
2201 
2202 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2203 {
2204 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2205 		WARN_ON_ONCE(task_runnable(p));
2206 		return true;
2207 	}
2208 
2209 	ops_dequeue(p, deq_flags);
2210 
2211 	/*
2212 	 * A currently running task which is going off @rq first gets dequeued
2213 	 * and then stops running. As we want running <-> stopping transitions
2214 	 * to be contained within runnable <-> quiescent transitions, trigger
2215 	 * ->stopping() early here instead of in put_prev_task_scx().
2216 	 *
2217 	 * @p may go through multiple stopping <-> running transitions between
2218 	 * here and put_prev_task_scx() if task attribute changes occur while
2219 	 * balance_scx() leaves @rq unlocked. However, they don't contain any
2220 	 * information meaningful to the BPF scheduler and can be suppressed by
2221 	 * skipping the callbacks if the task is !QUEUED.
2222 	 */
2223 	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2224 		update_curr_scx(rq);
2225 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2226 	}
2227 
2228 	if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2229 		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2230 
2231 	if (deq_flags & SCX_DEQ_SLEEP)
2232 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2233 	else
2234 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2235 
2236 	p->scx.flags &= ~SCX_TASK_QUEUED;
2237 	rq->scx.nr_running--;
2238 	sub_nr_running(rq, 1);
2239 
2240 	dispatch_dequeue(rq, p);
2241 	return true;
2242 }
2243 
2244 static void yield_task_scx(struct rq *rq)
2245 {
2246 	struct task_struct *p = rq->curr;
2247 
2248 	if (SCX_HAS_OP(yield))
2249 		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2250 	else
2251 		p->scx.slice = 0;
2252 }
2253 
2254 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2255 {
2256 	struct task_struct *from = rq->curr;
2257 
2258 	if (SCX_HAS_OP(yield))
2259 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2260 	else
2261 		return false;
2262 }
2263 
2264 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2265 					 struct scx_dispatch_q *src_dsq,
2266 					 struct rq *dst_rq)
2267 {
2268 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2269 
2270 	/* @dsq is locked and @p is on @dst_rq */
2271 	lockdep_assert_held(&src_dsq->lock);
2272 	lockdep_assert_rq_held(dst_rq);
2273 
2274 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2275 
2276 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2277 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2278 	else
2279 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2280 
2281 	dsq_mod_nr(dst_dsq, 1);
2282 	p->scx.dsq = dst_dsq;
2283 }
2284 
2285 #ifdef CONFIG_SMP
2286 /**
2287  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2288  * @p: task to move
2289  * @enq_flags: %SCX_ENQ_*
2290  * @src_rq: rq to move the task from, locked on entry, released on return
2291  * @dst_rq: rq to move the task into, locked on return
2292  *
2293  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2294  */
2295 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2296 					  struct rq *src_rq, struct rq *dst_rq)
2297 {
2298 	lockdep_assert_rq_held(src_rq);
2299 
2300 	/* the following marks @p MIGRATING which excludes dequeue */
2301 	deactivate_task(src_rq, p, 0);
2302 	set_task_cpu(p, cpu_of(dst_rq));
2303 	p->scx.sticky_cpu = cpu_of(dst_rq);
2304 
2305 	raw_spin_rq_unlock(src_rq);
2306 	raw_spin_rq_lock(dst_rq);
2307 
2308 	/*
2309 	 * We want to pass scx-specific enq_flags but activate_task() will
2310 	 * truncate the upper 32 bits. As we own @dst_rq, we can pass them
2311 	 * through @dst_rq->scx.extra_enq_flags instead.
2312 	 */
2313 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2314 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2315 	dst_rq->scx.extra_enq_flags = enq_flags;
2316 	activate_task(dst_rq, p, 0);
2317 	dst_rq->scx.extra_enq_flags = 0;
2318 }
2319 
2320 /*
2321  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2322  * differences:
2323  *
2324  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2325  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2326  *   this CPU?".
2327  *
2328  *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2329  *   must be allowed to finish on the CPU that it's currently on regardless of
2330  *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2331  *   BPF scheduler shouldn't attempt to migrate a task which has migration
2332  *   disabled.
2333  *
2334  * - The BPF scheduler is bypassed while the rq is offline and we can always say
2335  *   no to the BPF scheduler initiated migrations while offline.
2336  *
2337  * The caller must ensure that @p and @rq are on different CPUs.
2338  */
2339 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2340 				      bool trigger_error)
2341 {
2342 	int cpu = cpu_of(rq);
2343 
2344 	SCHED_WARN_ON(task_cpu(p) == cpu);
2345 
2346 	/*
2347 	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2348 	 * the pinned CPU in migrate_disable_switch() while @p is being switched
2349 	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2350 	 * updated and thus another CPU may see @p on a DSQ in between, leading to
2351 	 * @p passing the below task_allowed_on_cpu() check while migration is
2352 	 * disabled.
2353 	 *
2354 	 * Test the migration disabled state first as the race window is narrow
2355 	 * and the BPF scheduler failing to check migration disabled state can
2356 	 * easily be masked if task_allowed_on_cpu() is done first.
2357 	 */
2358 	if (unlikely(is_migration_disabled(p))) {
2359 		if (trigger_error)
2360 			scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2361 				      p->comm, p->pid, task_cpu(p), cpu);
2362 		return false;
2363 	}
2364 
2365 	/*
2366 	 * We don't require the BPF scheduler to avoid dispatching to offline
2367 	 * CPUs mostly for convenience but also because CPUs can go offline
2368 	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2369 	 * picked CPU is outside the allowed mask.
2370 	 */
2371 	if (!task_allowed_on_cpu(p, cpu)) {
2372 		if (trigger_error)
2373 			scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2374 				      cpu, p->comm, p->pid);
2375 		return false;
2376 	}
2377 
2378 	if (!scx_rq_online(rq))
2379 		return false;
2380 
2381 	return true;
2382 }
2383 
2384 /**
2385  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2386  * @p: target task
2387  * @dsq: locked DSQ @p is currently on
2388  * @src_rq: rq @p is currently on, stable with @dsq locked
2389  *
2390  * Called with @dsq locked but no rq's locked. We want to move @p to a different
2391  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2392  * required when transferring into a local DSQ. Even when transferring into a
2393  * non-local DSQ, it's better to use the same mechanism to protect against
2394  * dequeues and maintain the invariant that @p->scx.dsq can only change while
2395  * @src_rq is locked, which e.g. scx_dump_task() depends on.
2396  *
2397  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2398  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2399  * this may race with dequeue, which can't drop the rq lock or fail, do a little
2400  * dancing from our side.
2401  *
2402  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2403  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2404  * would be cleared to -1. While other CPUs may have updated it to different
2405  * values afterwards, as this operation can't be preempted or recurse, the
2406  * holding_cpu can never become this CPU again before we're done. Thus, we can
2407  * tell whether we lost to dequeue by testing whether the holding_cpu still
2408  * points to this CPU. See dispatch_dequeue() for the counterpart.
2409  *
2410  * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2411  * still valid. %false if lost to dequeue.
2412  */
2413 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2414 				       struct scx_dispatch_q *dsq,
2415 				       struct rq *src_rq)
2416 {
2417 	s32 cpu = raw_smp_processor_id();
2418 
2419 	lockdep_assert_held(&dsq->lock);
2420 
2421 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2422 	task_unlink_from_dsq(p, dsq);
2423 	p->scx.holding_cpu = cpu;
2424 
2425 	raw_spin_unlock(&dsq->lock);
2426 	raw_spin_rq_lock(src_rq);
2427 
2428 	/* task_rq couldn't have changed if we're still the holding cpu */
2429 	return likely(p->scx.holding_cpu == cpu) &&
2430 		!WARN_ON_ONCE(src_rq != task_rq(p));
2431 }
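
/*
 * Example interleaving of the race described above, with this CPU being CPU0:
 *
 *	CPU0: sets p->scx.holding_cpu = 0, unlocks @dsq
 *	CPU1: dequeues @p while holding its rq lock, sees holding_cpu >= 0 in
 *	      dispatch_dequeue() and clears it to -1
 *	CPU0: locks @src_rq, sees holding_cpu != 0 and returns %false
 */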
2432 
2433 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2434 				struct scx_dispatch_q *dsq, struct rq *src_rq)
2435 {
2436 	raw_spin_rq_unlock(this_rq);
2437 
2438 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2439 		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2440 		return true;
2441 	} else {
2442 		raw_spin_rq_unlock(src_rq);
2443 		raw_spin_rq_lock(this_rq);
2444 		return false;
2445 	}
2446 }
2447 #else	/* CONFIG_SMP */
2448 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2449 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
2450 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2451 #endif	/* CONFIG_SMP */
2452 
2453 /**
2454  * move_task_between_dsqs() - Move a task from one DSQ to another
2455  * @p: target task
2456  * @enq_flags: %SCX_ENQ_*
2457  * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2458  * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2459  *
2460  * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2461  * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2462  * will change. As @p's task_rq is locked, this function doesn't need to use the
2463  * holding_cpu mechanism.
2464  *
2465  * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2466  * return value, is locked.
2467  */
2468 static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
2469 					 struct scx_dispatch_q *src_dsq,
2470 					 struct scx_dispatch_q *dst_dsq)
2471 {
2472 	struct rq *src_rq = task_rq(p), *dst_rq;
2473 
2474 	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2475 	lockdep_assert_held(&src_dsq->lock);
2476 	lockdep_assert_rq_held(src_rq);
2477 
2478 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2479 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2480 		if (src_rq != dst_rq &&
2481 		    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2482 			dst_dsq = find_global_dsq(p);
2483 			dst_rq = src_rq;
2484 		}
2485 	} else {
2486 		/* no need to migrate if destination is a non-local DSQ */
2487 		dst_rq = src_rq;
2488 	}
2489 
2490 	/*
2491 	 * Move @p into @dst_dsq. If @dst_dsq is the local DSQ of a different
2492 	 * CPU, @p will be migrated.
2493 	 */
2494 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2495 		/* @p is going from a non-local DSQ to a local DSQ */
2496 		if (src_rq == dst_rq) {
2497 			task_unlink_from_dsq(p, src_dsq);
2498 			move_local_task_to_local_dsq(p, enq_flags,
2499 						     src_dsq, dst_rq);
2500 			raw_spin_unlock(&src_dsq->lock);
2501 		} else {
2502 			raw_spin_unlock(&src_dsq->lock);
2503 			move_remote_task_to_local_dsq(p, enq_flags,
2504 						      src_rq, dst_rq);
2505 		}
2506 	} else {
2507 		/*
2508 		 * @p is going from a non-local DSQ to a non-local DSQ. As
2509 		 * @src_dsq is already locked, do an abbreviated dequeue.
2510 		 */
2511 		task_unlink_from_dsq(p, src_dsq);
2512 		p->scx.dsq = NULL;
2513 		raw_spin_unlock(&src_dsq->lock);
2514 
2515 		dispatch_enqueue(dst_dsq, p, enq_flags);
2516 	}
2517 
2518 	return dst_rq;
2519 }
2520 
2521 /*
2522  * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2523  * banging on the same DSQ on a large NUMA system to the point where switching
2524  * to the bypass mode can take a long time. Inject artificial delays while the
2525  * bypass mode is switching to guarantee timely completion.
2526  */
2527 static void scx_ops_breather(struct rq *rq)
2528 {
2529 	u64 until;
2530 
2531 	lockdep_assert_rq_held(rq);
2532 
2533 	if (likely(!atomic_read(&scx_ops_breather_depth)))
2534 		return;
2535 
2536 	raw_spin_rq_unlock(rq);
2537 
2538 	until = ktime_get_ns() + NSEC_PER_MSEC;
2539 
2540 	do {
2541 		int cnt = 1024;
2542 		while (atomic_read(&scx_ops_breather_depth) && --cnt)
2543 			cpu_relax();
2544 	} while (atomic_read(&scx_ops_breather_depth) &&
2545 		 time_before64(ktime_get_ns(), until));
2546 
2547 	raw_spin_rq_lock(rq);
2548 }
2549 
2550 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2551 {
2552 	struct task_struct *p;
2553 retry:
2554 	/*
2555 	 * This retry loop can repeatedly race against scx_ops_bypass()
2556 	 * dequeueing tasks from @dsq trying to put the system into the bypass
2557 	 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can
2558 	 * live-lock the machine into soft lockups. Give a breather.
2559 	 */
2560 	scx_ops_breather(rq);
2561 
2562 	/*
2563 	 * The caller can't expect to successfully consume a task if the task's
2564 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
2565 	 * @dsq->list without locking and skip if it seems empty.
2566 	 */
2567 	if (list_empty(&dsq->list))
2568 		return false;
2569 
2570 	raw_spin_lock(&dsq->lock);
2571 
2572 	nldsq_for_each_task(p, dsq) {
2573 		struct rq *task_rq = task_rq(p);
2574 
2575 		if (rq == task_rq) {
2576 			task_unlink_from_dsq(p, dsq);
2577 			move_local_task_to_local_dsq(p, 0, dsq, rq);
2578 			raw_spin_unlock(&dsq->lock);
2579 			return true;
2580 		}
2581 
2582 		if (task_can_run_on_remote_rq(p, rq, false)) {
2583 			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2584 				return true;
2585 			goto retry;
2586 		}
2587 	}
2588 
2589 	raw_spin_unlock(&dsq->lock);
2590 	return false;
2591 }
2592 
2593 static bool consume_global_dsq(struct rq *rq)
2594 {
2595 	int node = cpu_to_node(cpu_of(rq));
2596 
2597 	return consume_dispatch_q(rq, global_dsqs[node]);
2598 }
2599 
2600 /**
2601  * dispatch_to_local_dsq - Dispatch a task to a local dsq
2602  * @rq: current rq which is locked
2603  * @dst_dsq: destination DSQ
2604  * @p: task to dispatch
2605  * @enq_flags: %SCX_ENQ_*
2606  *
2607  * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2608  * DSQ. This function performs all the synchronization dancing needed because
2609  * local DSQs are protected with rq locks.
2610  *
2611  * The caller must have exclusive ownership of @p (e.g. through
2612  * %SCX_OPSS_DISPATCHING).
2613  */
2614 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2615 				  struct task_struct *p, u64 enq_flags)
2616 {
2617 	struct rq *src_rq = task_rq(p);
2618 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2619 #ifdef CONFIG_SMP
2620 	struct rq *locked_rq = rq;
2621 #endif
2622 
2623 	/*
2624 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
2625 	 * be dequeued, its task_rq and cpus_allowed are stable too.
2626 	 *
2627 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
2628 	 */
2629 	if (rq == src_rq && rq == dst_rq) {
2630 		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2631 		return;
2632 	}
2633 
2634 #ifdef CONFIG_SMP
2635 	if (src_rq != dst_rq &&
2636 	    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2637 		dispatch_enqueue(find_global_dsq(p), p,
2638 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
2639 		return;
2640 	}
2641 
2642 	/*
2643 	 * @p is on a possibly remote @src_rq which we need to lock to move the
2644 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2645 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
2646 	 * DISPATCHING.
2647 	 *
2648 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2649 	 * we're moving from a DSQ and use the same mechanism - mark the task
2650 	 * under transfer with holding_cpu, release DISPATCHING and then follow
2651 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
2652 	 */
2653 	p->scx.holding_cpu = raw_smp_processor_id();
2654 
2655 	/* store_release ensures that dequeue sees the above */
2656 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2657 
2658 	/* switch to @src_rq lock */
2659 	if (locked_rq != src_rq) {
2660 		raw_spin_rq_unlock(locked_rq);
2661 		locked_rq = src_rq;
2662 		raw_spin_rq_lock(src_rq);
2663 	}
2664 
2665 	/* task_rq couldn't have changed if we're still the holding cpu */
2666 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2667 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
2668 		/*
2669 		 * If @p is staying on the same rq, there's no need to go
2670 		 * through the full deactivate/activate cycle. Optimize by
2671 		 * abbreviating move_remote_task_to_local_dsq().
2672 		 */
2673 		if (src_rq == dst_rq) {
2674 			p->scx.holding_cpu = -1;
2675 			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2676 		} else {
2677 			move_remote_task_to_local_dsq(p, enq_flags,
2678 						      src_rq, dst_rq);
2679 			/* task has been moved to dst_rq, which is now locked */
2680 			locked_rq = dst_rq;
2681 		}
2682 
2683 		/* if the destination CPU is idle, wake it up */
2684 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2685 			resched_curr(dst_rq);
2686 	}
2687 
2688 	/* switch back to @rq lock */
2689 	if (locked_rq != rq) {
2690 		raw_spin_rq_unlock(locked_rq);
2691 		raw_spin_rq_lock(rq);
2692 	}
2693 #else	/* CONFIG_SMP */
2694 	BUG();	/* control can not reach here on UP */
2695 #endif	/* CONFIG_SMP */
2696 }
2697 
2698 /**
2699  * finish_dispatch - Asynchronously finish dispatching a task
2700  * @rq: current rq which is locked
2701  * @p: task to finish dispatching
2702  * @qseq_at_dispatch: qseq when @p started getting dispatched
2703  * @dsq_id: destination DSQ ID
2704  * @enq_flags: %SCX_ENQ_*
2705  *
2706  * Dispatching to local DSQs may need to wait for queueing to complete or
2707  * require rq lock dancing. As we don't wanna do either while inside
2708  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2709  * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2710  * task and its qseq. Once ops.dispatch() returns, this function is called to
2711  * finish up.
2712  *
2713  * There is no guarantee that @p is still valid for dispatching or even that it
2714  * was valid in the first place. Make sure that the task is still owned by the
2715  * BPF scheduler and claim the ownership before dispatching.
2716  */
2717 static void finish_dispatch(struct rq *rq, struct task_struct *p,
2718 			    unsigned long qseq_at_dispatch,
2719 			    u64 dsq_id, u64 enq_flags)
2720 {
2721 	struct scx_dispatch_q *dsq;
2722 	unsigned long opss;
2723 
2724 	touch_core_sched_dispatch(rq, p);
2725 retry:
2726 	/*
2727 	 * No need for _acquire here. @p is accessed only after a successful
2728 	 * try_cmpxchg to DISPATCHING.
2729 	 */
2730 	opss = atomic_long_read(&p->scx.ops_state);
2731 
2732 	switch (opss & SCX_OPSS_STATE_MASK) {
2733 	case SCX_OPSS_DISPATCHING:
2734 	case SCX_OPSS_NONE:
2735 		/* someone else already got to it */
2736 		return;
2737 	case SCX_OPSS_QUEUED:
2738 		/*
2739 		 * If qseq doesn't match, @p has gone through at least one
2740 		 * dispatch/dequeue and re-enqueue cycle between
2741 		 * scx_bpf_dsq_insert() and here and we have no claim on it.
2742 		 */
2743 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2744 			return;
2745 
2746 		/*
2747 		 * While we know @p is accessible, we don't yet have a claim on
2748 		 * it - the BPF scheduler is allowed to dispatch tasks
2749 		 * spuriously and there can be a racing dequeue attempt. Let's
2750 		 * claim @p by atomically transitioning it from QUEUED to
2751 		 * DISPATCHING.
2752 		 */
2753 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2754 						   SCX_OPSS_DISPATCHING)))
2755 			break;
2756 		goto retry;
2757 	case SCX_OPSS_QUEUEING:
2758 		/*
2759 		 * do_enqueue_task() is in the process of transferring the task
2760 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
2761 		 * holding any kernel or BPF resource that the enqueue path may
2762 		 * depend upon, it's safe to wait.
2763 		 */
2764 		wait_ops_state(p, opss);
2765 		goto retry;
2766 	}
2767 
2768 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2769 
2770 	dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2771 
2772 	if (dsq->id == SCX_DSQ_LOCAL)
2773 		dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2774 	else
2775 		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2776 }
2777 
2778 static void flush_dispatch_buf(struct rq *rq)
2779 {
2780 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2781 	u32 u;
2782 
2783 	for (u = 0; u < dspc->cursor; u++) {
2784 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2785 
2786 		finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2787 				ent->enq_flags);
2788 	}
2789 
2790 	dspc->nr_tasks += dspc->cursor;
2791 	dspc->cursor = 0;
2792 }
2793 
2794 static int balance_one(struct rq *rq, struct task_struct *prev)
2795 {
2796 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2797 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
2798 	bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2799 	int nr_loops = SCX_DSP_MAX_LOOPS;
2800 
2801 	lockdep_assert_rq_held(rq);
2802 	rq->scx.flags |= SCX_RQ_IN_BALANCE;
2803 	rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
2804 
2805 	if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2806 	    unlikely(rq->scx.cpu_released)) {
2807 		/*
2808 		 * If the previous sched_class for the current CPU was not SCX,
2809 		 * notify the BPF scheduler that it again has control of the
2810 		 * core. This callback complements ->cpu_release(), which is
2811 		 * emitted in switch_class().
2812 		 */
2813 		if (SCX_HAS_OP(cpu_acquire))
2814 			SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
2815 		rq->scx.cpu_released = false;
2816 	}
2817 
2818 	if (prev_on_scx) {
2819 		update_curr_scx(rq);
2820 
2821 		/*
2822 		 * If @prev is runnable & has slice left, it has priority and
2823 		 * fetching more just increases latency for the fetched tasks.
2824 		 * Tell pick_task_scx() to keep running @prev. If the BPF
2825 		 * scheduler wants to handle this explicitly, it should
2826 		 * implement ->cpu_release().
2827 		 *
2828 		 * See scx_ops_disable_workfn() for the explanation on the
2829 		 * bypassing test.
2830 		 */
2831 		if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2832 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2833 			goto has_tasks;
2834 		}
2835 	}
2836 
2837 	/* if there already are tasks to run, nothing to do */
2838 	if (rq->scx.local_dsq.nr)
2839 		goto has_tasks;
2840 
2841 	if (consume_global_dsq(rq))
2842 		goto has_tasks;
2843 
2844 	if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2845 		goto no_tasks;
2846 
2847 	dspc->rq = rq;
2848 
2849 	/*
2850 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2851 	 * the local DSQ might still end up empty after a successful
2852 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2853 	 * produced some tasks, retry. The BPF scheduler may depend on this
2854 	 * looping behavior to simplify its implementation.
2855 	 */
2856 	do {
2857 		dspc->nr_tasks = 0;
2858 
2859 		SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2860 			    prev_on_scx ? prev : NULL);
2861 
2862 		flush_dispatch_buf(rq);
2863 
2864 		if (prev_on_rq && prev->scx.slice) {
2865 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2866 			goto has_tasks;
2867 		}
2868 		if (rq->scx.local_dsq.nr)
2869 			goto has_tasks;
2870 		if (consume_global_dsq(rq))
2871 			goto has_tasks;
2872 
2873 		/*
2874 		 * ops.dispatch() can trap us in this loop by repeatedly
2875 		 * dispatching ineligible tasks. Break out once in a while to
2876 		 * allow the watchdog to run. As IRQ can't be enabled in
2877 		 * balance(), we want to complete this scheduling cycle and then
2878 		 * start a new one. IOW, we want to call resched_curr() on the
2879 		 * next, most likely idle, task, not the current one. Use
2880 		 * scx_bpf_kick_cpu() for deferred kicking.
2881 		 */
2882 		if (unlikely(!--nr_loops)) {
2883 			scx_bpf_kick_cpu(cpu_of(rq), 0);
2884 			break;
2885 		}
2886 	} while (dspc->nr_tasks);
2887 
2888 no_tasks:
2889 	/*
2890 	 * Didn't find another task to run. Keep running @prev unless
2891 	 * %SCX_OPS_ENQ_LAST is in effect.
2892 	 */
2893 	if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
2894 	     scx_rq_bypassing(rq))) {
2895 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
2896 		goto has_tasks;
2897 	}
2898 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2899 	return false;
2900 
2901 has_tasks:
2902 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2903 	return true;
2904 }
2905 
2906 static int balance_scx(struct rq *rq, struct task_struct *prev,
2907 		       struct rq_flags *rf)
2908 {
2909 	int ret;
2910 
2911 	rq_unpin_lock(rq, rf);
2912 
2913 	ret = balance_one(rq, prev);
2914 
2915 #ifdef CONFIG_SCHED_SMT
2916 	/*
2917 	 * When core-sched is enabled, this ops.balance() call will be followed
2918 	 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2919 	 * siblings too.
2920 	 */
2921 	if (sched_core_enabled(rq)) {
2922 		const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2923 		int scpu;
2924 
2925 		for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2926 			struct rq *srq = cpu_rq(scpu);
2927 			struct task_struct *sprev = srq->curr;
2928 
2929 			WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2930 			update_rq_clock(srq);
2931 			balance_one(srq, sprev);
2932 		}
2933 	}
2934 #endif
2935 	rq_repin_lock(rq, rf);
2936 
2937 	return ret;
2938 }
2939 
2940 static void process_ddsp_deferred_locals(struct rq *rq)
2941 {
2942 	struct task_struct *p;
2943 
2944 	lockdep_assert_rq_held(rq);
2945 
2946 	/*
2947 	 * Now that @rq can be unlocked, execute the deferred enqueueing of
2948 	 * tasks directly dispatched to the local DSQs of other CPUs. See
2949 	 * direct_dispatch(). Keep popping from the head instead of using
2950 	 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
2951 	 * temporarily.
2952 	 */
2953 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2954 				struct task_struct, scx.dsq_list.node))) {
2955 		struct scx_dispatch_q *dsq;
2956 
2957 		list_del_init(&p->scx.dsq_list.node);
2958 
2959 		dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2960 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2961 			dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2962 	}
2963 }
2964 
2965 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2966 {
2967 	if (p->scx.flags & SCX_TASK_QUEUED) {
2968 		/*
2969 		 * Core-sched might decide to execute @p before it is
2970 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2971 		 */
2972 		ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2973 		dispatch_dequeue(rq, p);
2974 	}
2975 
2976 	p->se.exec_start = rq_clock_task(rq);
2977 
2978 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2979 	if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2980 		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2981 
2982 	clr_task_runnable(p, true);
2983 
2984 	/*
2985 	 * @p is getting newly scheduled or got kicked after someone updated its
2986 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2987 	 */
2988 	if ((p->scx.slice == SCX_SLICE_INF) !=
2989 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2990 		if (p->scx.slice == SCX_SLICE_INF)
2991 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2992 		else
2993 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2994 
2995 		sched_update_tick_dependency(rq);
2996 
2997 		/*
2998 		 * For now, let's refresh the load_avgs just when transitioning
2999 		 * in and out of nohz. In the future, we might want to add a
3000 		 * mechanism which calls the following periodically on
3001 		 * tick-stopped CPUs.
3002 		 */
3003 		update_other_load_avgs(rq);
3004 	}
3005 }
3006 
3007 static enum scx_cpu_preempt_reason
3008 preempt_reason_from_class(const struct sched_class *class)
3009 {
3010 #ifdef CONFIG_SMP
3011 	if (class == &stop_sched_class)
3012 		return SCX_CPU_PREEMPT_STOP;
3013 #endif
3014 	if (class == &dl_sched_class)
3015 		return SCX_CPU_PREEMPT_DL;
3016 	if (class == &rt_sched_class)
3017 		return SCX_CPU_PREEMPT_RT;
3018 	return SCX_CPU_PREEMPT_UNKNOWN;
3019 }
3020 
3021 static void switch_class(struct rq *rq, struct task_struct *next)
3022 {
3023 	const struct sched_class *next_class = next->sched_class;
3024 
3025 #ifdef CONFIG_SMP
3026 	/*
3027 	 * Pairs with the smp_load_acquire() issued by a CPU in
3028 	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
3029 	 * resched.
3030 	 */
3031 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
3032 #endif
3033 	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
3034 		return;
3035 
3036 	/*
3037 	 * The callback is conceptually meant to convey that the CPU is no
3038 	 * longer under the control of SCX. Therefore, don't invoke the callback
3039 	 * if the next class is below SCX (in which case the BPF scheduler has
3040 	 * actively decided not to schedule any tasks on the CPU).
3041 	 */
3042 	if (sched_class_above(&ext_sched_class, next_class))
3043 		return;
3044 
3045 	/*
3046 	 * At this point we know that SCX was preempted by a higher priority
3047 	 * sched_class, so invoke the ->cpu_release() callback if we have not
3048 	 * done so already. We only send the callback once between SCX being
3049 	 * preempted, and it regaining control of the CPU.
3050 	 *
3051 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3052 	 * next time that balance_scx() is invoked.
3053 	 */
3054 	if (!rq->scx.cpu_released) {
3055 		if (SCX_HAS_OP(cpu_release)) {
3056 			struct scx_cpu_release_args args = {
3057 				.reason = preempt_reason_from_class(next_class),
3058 				.task = next,
3059 			};
3060 
3061 			SCX_CALL_OP(SCX_KF_CPU_RELEASE,
3062 				    cpu_release, cpu_of(rq), &args);
3063 		}
3064 		rq->scx.cpu_released = true;
3065 	}
3066 }
3067 
3068 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3069 			      struct task_struct *next)
3070 {
3071 	update_curr_scx(rq);
3072 
3073 	/* see dequeue_task_scx() on why we skip when !QUEUED */
3074 	if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3075 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
3076 
3077 	if (p->scx.flags & SCX_TASK_QUEUED) {
3078 		set_task_runnable(rq, p);
3079 
3080 		/*
3081 		 * If @p has slice left and is being put, @p is getting
3082 		 * preempted by a higher priority scheduler class or core-sched
3083 		 * forcing a different task. Leave it at the head of the local
3084 		 * DSQ.
3085 		 */
3086 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
3087 			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3088 			goto switch_class;
3089 		}
3090 
3091 		/*
3092 		 * If @p is runnable but we're about to enter a lower
3093 		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3094 		 * ops.enqueue() that @p is the only one available for this cpu,
3095 		 * which should trigger an explicit follow-up scheduling event.
3096 		 */
3097 		if (sched_class_above(&ext_sched_class, next->sched_class)) {
3098 			WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
3099 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3100 		} else {
3101 			do_enqueue_task(rq, p, 0, -1);
3102 		}
3103 	}
3104 
3105 switch_class:
3106 	if (next && next->sched_class != &ext_sched_class)
3107 		switch_class(rq, next);
3108 }
3109 
3110 static struct task_struct *first_local_task(struct rq *rq)
3111 {
3112 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
3113 					struct task_struct, scx.dsq_list.node);
3114 }
3115 
3116 static struct task_struct *pick_task_scx(struct rq *rq)
3117 {
3118 	struct task_struct *prev = rq->curr;
3119 	struct task_struct *p;
3120 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
3121 	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3122 	bool kick_idle = false;
3123 
3124 	/*
3125 	 * WORKAROUND:
3126 	 *
3127 	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
3128 	 * have gone through balance_scx(). Unfortunately, there currently is a
3129 	 * bug where fair could say yes on balance() but no on pick_task(),
3130 	 * which then ends up calling pick_task_scx() without preceding
3131 	 * balance_scx().
3132 	 *
3133 	 * Keep running @prev if possible and avoid stalling from entering idle
3134 	 * without balancing.
3135 	 *
3136 	 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3137 	 * if pick_task_scx() is called without preceding balance_scx().
3138 	 */
3139 	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3140 		if (prev_on_scx) {
3141 			keep_prev = true;
3142 		} else {
3143 			keep_prev = false;
3144 			kick_idle = true;
3145 		}
3146 	} else if (unlikely(keep_prev && !prev_on_scx)) {
3147 		/* only allowed during transitions */
3148 		WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
3149 		keep_prev = false;
3150 	}
3151 
3152 	/*
3153 	 * If balance_scx() is telling us to keep running @prev, replenish slice
3154 	 * if necessary and keep running @prev. Otherwise, pop the first one
3155 	 * from the local DSQ.
3156 	 */
3157 	if (keep_prev) {
3158 		p = prev;
3159 		if (!p->scx.slice)
3160 			p->scx.slice = SCX_SLICE_DFL;
3161 	} else {
3162 		p = first_local_task(rq);
3163 		if (!p) {
3164 			if (kick_idle)
3165 				scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3166 			return NULL;
3167 		}
3168 
3169 		if (unlikely(!p->scx.slice)) {
3170 			if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
3171 				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3172 						p->comm, p->pid, __func__);
3173 				scx_warned_zero_slice = true;
3174 			}
3175 			p->scx.slice = SCX_SLICE_DFL;
3176 		}
3177 	}
3178 
3179 	return p;
3180 }
3181 
3182 #ifdef CONFIG_SCHED_CORE
3183 /**
3184  * scx_prio_less - Task ordering for core-sched
3185  * @a: task A
3186  * @b: task B
3187  * @in_fi: in forced idle state
3188  *
3189  * Core-sched is implemented as an additional scheduling layer on top of the
3190  * usual sched_class'es and needs to find out the expected task ordering. For
3191  * SCX, core-sched calls this function to interrogate the task ordering.
3192  *
3193  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3194  * to implement the default task ordering. The older the timestamp, the higher
3195  * priority the task - the global FIFO ordering matching the default scheduling
3196  * behavior.
3197  *
3198  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3199  * implement FIFO ordering within each local DSQ. See pick_task_scx().
3200  */
3201 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3202 		   bool in_fi)
3203 {
3204 	/*
3205 	 * The const qualifiers are dropped from task_struct pointers when
3206 	 * calling ops.core_sched_before(). Accesses are controlled by the
3207 	 * verifier.
3208 	 */
3209 	if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3210 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3211 					      (struct task_struct *)a,
3212 					      (struct task_struct *)b);
3213 	else
3214 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3215 }
3216 #endif	/* CONFIG_SCHED_CORE */
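
/*
 * A minimal sketch of a custom ops.core_sched_before() that the branch above
 * would call, assuming the conventional scx BPF skeleton macros and that the
 * scheduler maintains p->scx.dsq_vtime itself:
 *
 *	bool BPF_STRUCT_OPS(my_core_sched_before, struct task_struct *a,
 *			    struct task_struct *b)
 *	{
 *		// lower vtime runs first; the subtraction keeps the
 *		// comparison correct across u64 wraparound
 *		return (s64)(a->scx.dsq_vtime - b->scx.dsq_vtime) < 0;
 *	}
 */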
3217 
3218 #ifdef CONFIG_SMP
3219 
3220 static bool test_and_clear_cpu_idle(int cpu)
3221 {
3222 #ifdef CONFIG_SCHED_SMT
3223 	/*
3224 	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3225 	 * cluster is not wholly idle either way. This also prevents
3226 	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3227 	 */
3228 	if (sched_smt_active()) {
3229 		const struct cpumask *smt = cpu_smt_mask(cpu);
3230 
3231 		/*
3232 		 * If offline, @cpu is not its own sibling and
3233 		 * scx_pick_idle_cpu() can get caught in an infinite loop as
3234 		 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3235 		 * is eventually cleared.
3236 		 *
3237 		 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
3238 		 * reduce memory writes, which may help alleviate cache
3239 		 * coherence pressure.
3240 		 */
3241 		if (cpumask_intersects(smt, idle_masks.smt))
3242 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3243 		else if (cpumask_test_cpu(cpu, idle_masks.smt))
3244 			__cpumask_clear_cpu(cpu, idle_masks.smt);
3245 	}
3246 #endif
3247 	return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3248 }
3249 
3250 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3251 {
3252 	int cpu;
3253 
3254 retry:
3255 	if (sched_smt_active()) {
3256 		cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3257 		if (cpu < nr_cpu_ids)
3258 			goto found;
3259 
3260 		if (flags & SCX_PICK_IDLE_CORE)
3261 			return -EBUSY;
3262 	}
3263 
3264 	cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3265 	if (cpu >= nr_cpu_ids)
3266 		return -EBUSY;
3267 
3268 found:
3269 	if (test_and_clear_cpu_idle(cpu))
3270 		return cpu;
3271 	else
3272 		goto retry;
3273 }
3274 
3275 /*
3276  * Return the number of CPUs in the same LLC domain as @cpu (or zero if the LLC
3277  * domain is not defined).
3278  */
3279 static unsigned int llc_weight(s32 cpu)
3280 {
3281 	struct sched_domain *sd;
3282 
3283 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
3284 	if (!sd)
3285 		return 0;
3286 
3287 	return sd->span_weight;
3288 }
3289 
3290 /*
3291  * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
3292  * domain is not defined).
3293  */
3294 static struct cpumask *llc_span(s32 cpu)
3295 {
3296 	struct sched_domain *sd;
3297 
3298 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
3299 	if (!sd)
3300 		return NULL;
3301 
3302 	return sched_domain_span(sd);
3303 }
3304 
3305 /*
3306  * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
3307  * NUMA domain is not defined).
3308  */
3309 static unsigned int numa_weight(s32 cpu)
3310 {
3311 	struct sched_domain *sd;
3312 	struct sched_group *sg;
3313 
3314 	sd = rcu_dereference(per_cpu(sd_numa, cpu));
3315 	if (!sd)
3316 		return 0;
3317 	sg = sd->groups;
3318 	if (!sg)
3319 		return 0;
3320 
3321 	return sg->group_weight;
3322 }
3323 
3324 /*
3325  * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
3326  * domain is not defined).
3327  */
3328 static struct cpumask *numa_span(s32 cpu)
3329 {
3330 	struct sched_domain *sd;
3331 	struct sched_group *sg;
3332 
3333 	sd = rcu_dereference(per_cpu(sd_numa, cpu));
3334 	if (!sd)
3335 		return NULL;
3336 	sg = sd->groups;
3337 	if (!sg)
3338 		return NULL;
3339 
3340 	return sched_group_span(sg);
3341 }
3342 
3343 /*
3344  * Return true if the LLC domains do not perfectly overlap with the NUMA
3345  * domains, false otherwise.
3346  */
3347 static bool llc_numa_mismatch(void)
3348 {
3349 	int cpu;
3350 
3351 	/*
3352 	 * We need to scan all online CPUs to verify whether their scheduling
3353 	 * domains overlap.
3354 	 *
3355 	 * While it is rare to encounter architectures with asymmetric NUMA
3356 	 * topologies, CPU hotplugging or virtualized environments can result
3357 	 * in asymmetric configurations.
3358 	 *
3359 	 * For example:
3360 	 *
3361 	 *  NUMA 0:
3362 	 *    - LLC 0: cpu0..cpu7
3363 	 *    - LLC 1: cpu8..cpu15 [offline]
3364 	 *
3365 	 *  NUMA 1:
3366 	 *    - LLC 0: cpu16..cpu23
3367 	 *    - LLC 1: cpu24..cpu31
3368 	 *
3369 	 * In this case, if we only check the first online CPU (cpu0), we might
3370 	 * conclude that the LLC and NUMA domains fully overlap, which would be
3371 	 * wrong, because NUMA 1 actually comprises two distinct LLC domains
3372 	 * (LLC 0 and LLC 1).
3373 	 */
3374 	for_each_online_cpu(cpu)
3375 		if (llc_weight(cpu) != numa_weight(cpu))
3376 			return true;
3377 
3378 	return false;
3379 }
3380 
3381 /*
3382  * Initialize topology-aware scheduling.
3383  *
3384  * Detect if the system has multiple LLC or multiple NUMA domains and enable
3385  * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
3386  * selection policy.
3387  *
3388  * Assumption: as in the kernel's internal topology representation, each CPU
3389  * belongs to a single LLC domain, and each LLC domain is entirely contained
3390  * within a single NUMA node.
3391  */
3392 static void update_selcpu_topology(void)
3393 {
3394 	bool enable_llc = false, enable_numa = false;
3395 	unsigned int nr_cpus;
3396 	s32 cpu = cpumask_first(cpu_online_mask);
3397 
3398 	/*
3399 	 * Enable LLC domain optimization only when there are multiple LLC
3400 	 * domains among the online CPUs. If all online CPUs are part of a
3401 	 * single LLC domain, the idle CPU selection logic can choose any
3402 	 * online CPU without bias.
3403 	 *
3404 	 * Note that it is sufficient to check the LLC domain of the first
3405 	 * online CPU to determine whether a single LLC domain includes all
3406 	 * CPUs.
3407 	 */
3408 	rcu_read_lock();
3409 	nr_cpus = llc_weight(cpu);
3410 	if (nr_cpus > 0) {
3411 		if (nr_cpus < num_online_cpus())
3412 			enable_llc = true;
3413 		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
3414 			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
3415 	}
3416 
3417 	/*
3418 	 * Enable NUMA optimization only when there are multiple NUMA domains
3419 	 * among the online CPUs and the NUMA domains don't perfectly overlap
3420 	 * with the LLC domains.
3421 	 *
3422 	 * If all CPUs belong to the same NUMA node and the same LLC domain,
3423 	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
3424 	 * for an idle CPU in the same domain twice is redundant.
3425 	 */
3426 	nr_cpus = numa_weight(cpu);
3427 	if (nr_cpus > 0) {
3428 		if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
3429 			enable_numa = true;
3430 		pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
3431 			 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
3432 	}
3433 	rcu_read_unlock();
3434 
3435 	pr_debug("sched_ext: LLC idle selection %s\n",
3436 		 str_enabled_disabled(enable_llc));
3437 	pr_debug("sched_ext: NUMA idle selection %s\n",
3438 		 str_enabled_disabled(enable_numa));
3439 
3440 	if (enable_llc)
3441 		static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
3442 	else
3443 		static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
3444 	if (enable_numa)
3445 		static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
3446 	else
3447 		static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
3448 }
3449 
3450 /*
3451  * Built-in CPU idle selection policy:
3452  *
3453  * 1. Prioritize full-idle cores:
3454  *   - always prioritize CPUs from fully idle cores (all SMT siblings are
3455  *     idle) to avoid interference caused by SMT.
3456  *
3457  * 2. Reuse the same CPU:
3458  *   - prefer the last used CPU to take advantage of cached data (L1, L2) and
3459  *     branch prediction optimizations.
3460  *
3461  * 3. Pick a CPU within the same LLC (Last-Level Cache):
3462  *   - if the above conditions aren't met, pick a CPU that shares the same LLC
3463  *     to maintain cache locality.
3464  *
3465  * 4. Pick a CPU within the same NUMA node, if enabled:
3466  *   - choose a CPU from the same NUMA node to reduce memory access latency.
3467  *
3468  * 5. Pick any idle CPU usable by the task.
3469  *
3470  * Steps 3 and 4 are performed only if the system has, respectively, multiple
3471  * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
3472  * scx_selcpu_topo_numa).
3473  *
3474  * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
3475  * we never call ops.select_cpu() for them, see select_task_rq().
3476  */
3477 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3478 			      u64 wake_flags, bool *found)
3479 {
3480 	const struct cpumask *llc_cpus = NULL;
3481 	const struct cpumask *numa_cpus = NULL;
3482 	s32 cpu;
3483 
3484 	*found = false;
3485 
3486 	/*
3487 	 * This is necessary to protect llc_cpus and numa_cpus.
3488 	 */
3489 	rcu_read_lock();
3490 
3491 	/*
3492 	 * Determine the scheduling domain only if the task is allowed to run
3493 	 * on all CPUs.
3494 	 *
3495 	 * This is done primarily for efficiency, as it avoids the overhead of
3496 	 * updating a cpumask every time we need to select an idle CPU (which
3497 	 * can be costly in large SMP systems), but it also aligns logically:
3498 	 * if a task's scheduling domain is restricted by user-space (through
3499 	 * CPU affinity), the task will simply use the flat scheduling domain
3500 	 * defined by user-space.
3501 	 */
3502 	if (p->nr_cpus_allowed >= num_possible_cpus()) {
3503 		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
3504 			numa_cpus = numa_span(prev_cpu);
3505 
3506 		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
3507 			llc_cpus = llc_span(prev_cpu);
3508 	}
3509 
3510 	/*
3511 	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
3512 	 */
3513 	if (wake_flags & SCX_WAKE_SYNC) {
3514 		cpu = smp_processor_id();
3515 
3516 		/*
3517 		 * If the waker's CPU shares cache with prev_cpu and prev_cpu
3518 		 * is idle, then avoid a migration.
3519 		 */
3520 		if (cpus_share_cache(cpu, prev_cpu) &&
3521 		    test_and_clear_cpu_idle(prev_cpu)) {
3522 			cpu = prev_cpu;
3523 			goto cpu_found;
3524 		}
3525 
3526 		/*
3527 		 * If the waker's local DSQ is empty and the system is
3528 		 * underutilized, try to wake up @p to the local DSQ of the waker.
3529 		 *
3530 		 * Checking only for an empty local DSQ is insufficient as it
3531 		 * could give the wakee an unfair advantage when the system is
3532 		 * oversaturated.
3533 		 *
3534 		 * Checking only for the presence of idle CPUs is also
3535 		 * insufficient as the local DSQ of the waker could have tasks
3536 		 * piled up on it even if there is an idle core elsewhere on
3537 		 * the system.
3538 		 */
3539 		if (!cpumask_empty(idle_masks.cpu) &&
3540 		    !(current->flags & PF_EXITING) &&
3541 		    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3542 			if (cpumask_test_cpu(cpu, p->cpus_ptr))
3543 				goto cpu_found;
3544 		}
3545 	}
3546 
3547 	/*
3548 	 * If the CPUs have SMT, a CPU in a fully idle core is likely a better
3549 	 * pick than a partially idle @prev_cpu.
3550 	 */
3551 	if (sched_smt_active()) {
3552 		/*
3553 		 * Keep using @prev_cpu if it's part of a fully idle core.
3554 		 */
3555 		if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3556 		    test_and_clear_cpu_idle(prev_cpu)) {
3557 			cpu = prev_cpu;
3558 			goto cpu_found;
3559 		}
3560 
3561 		/*
3562 		 * Search for any fully idle core in the same LLC domain.
3563 		 */
3564 		if (llc_cpus) {
3565 			cpu = scx_pick_idle_cpu(llc_cpus, SCX_PICK_IDLE_CORE);
3566 			if (cpu >= 0)
3567 				goto cpu_found;
3568 		}
3569 
3570 		/*
3571 		 * Search for any fully idle core in the same NUMA node.
3572 		 */
3573 		if (numa_cpus) {
3574 			cpu = scx_pick_idle_cpu(numa_cpus, SCX_PICK_IDLE_CORE);
3575 			if (cpu >= 0)
3576 				goto cpu_found;
3577 		}
3578 
3579 		/*
3580 		 * Search for any fully idle core usable by the task.
3581 		 */
3582 		cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3583 		if (cpu >= 0)
3584 			goto cpu_found;
3585 	}
3586 
3587 	/*
3588 	 * Use @prev_cpu if it's idle.
3589 	 */
3590 	if (test_and_clear_cpu_idle(prev_cpu)) {
3591 		cpu = prev_cpu;
3592 		goto cpu_found;
3593 	}
3594 
3595 	/*
3596 	 * Search for any idle CPU in the same LLC domain.
3597 	 */
3598 	if (llc_cpus) {
3599 		cpu = scx_pick_idle_cpu(llc_cpus, 0);
3600 		if (cpu >= 0)
3601 			goto cpu_found;
3602 	}
3603 
3604 	/*
3605 	 * Search for any idle CPU in the same NUMA node.
3606 	 */
3607 	if (numa_cpus) {
3608 		cpu = scx_pick_idle_cpu(numa_cpus, 0);
3609 		if (cpu >= 0)
3610 			goto cpu_found;
3611 	}
3612 
3613 	/*
3614 	 * Search for any idle CPU usable by the task.
3615 	 */
3616 	cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3617 	if (cpu >= 0)
3618 		goto cpu_found;
3619 
3620 	rcu_read_unlock();
3621 	return prev_cpu;
3622 
3623 cpu_found:
3624 	rcu_read_unlock();
3625 
3626 	*found = true;
3627 	return cpu;
3628 }
3629 
3630 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3631 {
3632 	/*
3633 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3634 	 * can be a good migration opportunity with low cache and memory
3635 	 * footprint. Returning a CPU different than @prev_cpu triggers
3636 	 * immediate rq migration. However, for SCX, as the current rq
3637 	 * association doesn't dictate where the task is going to run, this
3638 	 * doesn't fit well. If necessary, we can later add a dedicated method
3639 	 * which can decide to preempt self to force it through the regular
3640 	 * scheduling path.
3641 	 */
3642 	if (unlikely(wake_flags & WF_EXEC))
3643 		return prev_cpu;
3644 
3645 	if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {
3646 		s32 cpu;
3647 		struct task_struct **ddsp_taskp;
3648 
3649 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3650 		WARN_ON_ONCE(*ddsp_taskp);
3651 		*ddsp_taskp = p;
3652 
3653 		cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3654 					   select_cpu, p, prev_cpu, wake_flags);
3655 		*ddsp_taskp = NULL;
3656 		if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3657 			return cpu;
3658 		else
3659 			return prev_cpu;
3660 	} else {
3661 		bool found;
3662 		s32 cpu;
3663 
3664 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3665 		if (found) {
3666 			p->scx.slice = SCX_SLICE_DFL;
3667 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3668 		}
3669 		return cpu;
3670 	}
3671 }
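
/*
 * Illustrative only: a BPF scheduler that wants the built-in idle selection
 * policy described above can delegate to it from its own ops.select_cpu() via
 * the scx_bpf_select_cpu_dfl() kfunc. A minimal sketch (the scheduler name is
 * hypothetical; the scx BPF-side helpers are assumed):
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *
 *		return scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *	}
 *
 * When @is_idle comes back true, such a scheduler would typically also
 * dispatch @p directly to the local DSQ of the returned CPU.
 */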
3672 
3673 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3674 {
3675 	run_deferred(rq);
3676 }
3677 
3678 static void set_cpus_allowed_scx(struct task_struct *p,
3679 				 struct affinity_context *ac)
3680 {
3681 	set_cpus_allowed_common(p, ac);
3682 
3683 	/*
3684 	 * The effective cpumask is stored in @p->cpus_ptr, which may temporarily
3685 	 * differ from the configured one in @p->cpus_mask. Always tell the BPF
3686 	 * scheduler the effective one.
3687 	 *
3688 	 * Fine-grained memory write control is enforced by BPF, making the const
3689 	 * designation pointless. Cast it away when calling the operation.
3690 	 */
3691 	if (SCX_HAS_OP(set_cpumask))
3692 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3693 				 (struct cpumask *)p->cpus_ptr);
3694 }
3695 
3696 static void reset_idle_masks(void)
3697 {
3698 	/*
3699 	 * Consider all online cpus idle. Should converge to the actual state
3700 	 * quickly.
3701 	 */
3702 	cpumask_copy(idle_masks.cpu, cpu_online_mask);
3703 	cpumask_copy(idle_masks.smt, cpu_online_mask);
3704 }
3705 
3706 static void update_builtin_idle(int cpu, bool idle)
3707 {
3708 	assign_cpu(cpu, idle_masks.cpu, idle);
3709 
3710 #ifdef CONFIG_SCHED_SMT
3711 	if (sched_smt_active()) {
3712 		const struct cpumask *smt = cpu_smt_mask(cpu);
3713 
3714 		if (idle) {
3715 			/*
3716 			 * idle_masks.smt handling is racy but that's fine as
3717 			 * it's only for optimization and self-correcting.
3718 			 */
3719 			if (!cpumask_subset(smt, idle_masks.cpu))
3720 				return;
3721 			cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3722 		} else {
3723 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3724 		}
3725 	}
3726 #endif
3727 }
3728 
3729 /*
3730  * Update the idle state of a CPU to @idle.
3731  *
3732  * If @do_notify is true, ops.update_idle() is invoked to notify the scx
3733  * scheduler of an actual idle state transition (idle to busy or vice
3734  * versa). If @do_notify is false, only the idle state in the idle masks is
3735  * refreshed without invoking ops.update_idle().
3736  *
3737  * This distinction is necessary, because an idle CPU can be "reserved" and
3738  * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
3739  * busy even if no tasks are dispatched. In this case, the CPU may return
3740  * to idle without a true state transition. Refreshing the idle masks
3741  * without invoking ops.update_idle() ensures accurate idle state tracking
3742  * while avoiding unnecessary updates and maintaining balanced state
3743  * transitions.
3744  */
3745 void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
3746 {
3747 	int cpu = cpu_of(rq);
3748 
3749 	lockdep_assert_rq_held(rq);
3750 
3751 	/*
3752 	 * Trigger ops.update_idle() only when transitioning from a task to
3753 	 * the idle thread and vice versa.
3754 	 *
3755 	 * Idle transitions are indicated by do_notify being set to true,
3756 	 * managed by put_prev_task_idle()/set_next_task_idle().
3757 	 */
3758 	if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
3759 		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3760 
3761 	/*
3762 	 * Update the idle masks:
3763 	 * - for real idle transitions (do_notify == true)
3764 	 * - for idle-to-idle transitions (indicated by the previous task
3765 	 *   being the idle thread, managed by pick_task_idle())
3766 	 *
3767 	 * Skip updating idle masks if the previous task is not the idle
3768 	 * thread, since set_next_task_idle() has already handled it when
3769 	 * transitioning from a task to the idle thread (calling this
3770 	 * function with do_notify == true).
3771 	 *
3772 	 * In this way we can avoid updating the idle masks twice,
3773 	 * unnecessarily.
3774 	 */
3775 	if (static_branch_likely(&scx_builtin_idle_enabled))
3776 		if (do_notify || is_idle_task(rq->curr))
3777 			update_builtin_idle(cpu, idle);
3778 }
3779 
3780 static void handle_hotplug(struct rq *rq, bool online)
3781 {
3782 	int cpu = cpu_of(rq);
3783 
3784 	atomic_long_inc(&scx_hotplug_seq);
3785 
3786 	if (scx_enabled())
3787 		update_selcpu_topology();
3788 
3789 	if (online && SCX_HAS_OP(cpu_online))
3790 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3791 	else if (!online && SCX_HAS_OP(cpu_offline))
3792 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3793 	else
3794 		scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3795 			     "cpu %d going %s, exiting scheduler", cpu,
3796 			     online ? "online" : "offline");
3797 }
3798 
3799 void scx_rq_activate(struct rq *rq)
3800 {
3801 	handle_hotplug(rq, true);
3802 }
3803 
3804 void scx_rq_deactivate(struct rq *rq)
3805 {
3806 	handle_hotplug(rq, false);
3807 }
3808 
3809 static void rq_online_scx(struct rq *rq)
3810 {
3811 	rq->scx.flags |= SCX_RQ_ONLINE;
3812 }
3813 
3814 static void rq_offline_scx(struct rq *rq)
3815 {
3816 	rq->scx.flags &= ~SCX_RQ_ONLINE;
3817 }
3818 
3819 #else	/* CONFIG_SMP */
3820 
3821 static bool test_and_clear_cpu_idle(int cpu) { return false; }
3822 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3823 static void reset_idle_masks(void) {}
3824 
3825 #endif	/* CONFIG_SMP */
3826 
3827 static bool check_rq_for_timeouts(struct rq *rq)
3828 {
3829 	struct task_struct *p;
3830 	struct rq_flags rf;
3831 	bool timed_out = false;
3832 
3833 	rq_lock_irqsave(rq, &rf);
3834 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3835 		unsigned long last_runnable = p->scx.runnable_at;
3836 
3837 		if (unlikely(time_after(jiffies,
3838 					last_runnable + scx_watchdog_timeout))) {
3839 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3840 
3841 			scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3842 					   "%s[%d] failed to run for %u.%03us",
3843 					   p->comm, p->pid,
3844 					   dur_ms / 1000, dur_ms % 1000);
3845 			timed_out = true;
3846 			break;
3847 		}
3848 	}
3849 	rq_unlock_irqrestore(rq, &rf);
3850 
3851 	return timed_out;
3852 }
3853 
3854 static void scx_watchdog_workfn(struct work_struct *work)
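/*
 * The requeue period below is derived from scx_watchdog_timeout, which is set
 * while enabling the BPF scheduler (from ops.timeout_ms when specified, capped
 * at SCX_WATCHDOG_MAX_TIMEOUT) and is also checked from scx_tick().
 */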
3855 {
3856 	int cpu;
3857 
3858 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3859 
3860 	for_each_online_cpu(cpu) {
3861 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3862 			break;
3863 
3864 		cond_resched();
3865 	}
3866 	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3867 			   scx_watchdog_timeout / 2);
3868 }
3869 
3870 void scx_tick(struct rq *rq)
3871 {
3872 	unsigned long last_check;
3873 
3874 	if (!scx_enabled())
3875 		return;
3876 
3877 	last_check = READ_ONCE(scx_watchdog_timestamp);
3878 	if (unlikely(time_after(jiffies,
3879 				last_check + READ_ONCE(scx_watchdog_timeout)))) {
3880 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3881 
3882 		scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3883 				   "watchdog failed to check in for %u.%03us",
3884 				   dur_ms / 1000, dur_ms % 1000);
3885 	}
3886 
3887 	update_other_load_avgs(rq);
3888 }
3889 
3890 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3891 {
3892 	update_curr_scx(rq);
3893 
3894 	/*
3895 	 * While bypassing, always resched and refresh the core-sched timestamp as
3896 	 * we can't trust the slice management or ops.core_sched_before().
3897 	 */
3898 	if (scx_rq_bypassing(rq)) {
3899 		curr->scx.slice = 0;
3900 		touch_core_sched(rq, curr);
3901 	} else if (SCX_HAS_OP(tick)) {
3902 		SCX_CALL_OP_TASK(SCX_KF_REST, tick, curr);
3903 	}
3904 
3905 	if (!curr->scx.slice)
3906 		resched_curr(rq);
3907 }
3908 
3909 #ifdef CONFIG_EXT_GROUP_SCHED
3910 static struct cgroup *tg_cgrp(struct task_group *tg)
3911 {
3912 	/*
3913 	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3914 	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3915 	 * root cgroup.
3916 	 */
3917 	if (tg && tg->css.cgroup)
3918 		return tg->css.cgroup;
3919 	else
3920 		return &cgrp_dfl_root.cgrp;
3921 }
3922 
3923 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
3924 
3925 #else	/* CONFIG_EXT_GROUP_SCHED */
3926 
3927 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3928 
3929 #endif	/* CONFIG_EXT_GROUP_SCHED */
3930 
3931 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3932 {
3933 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3934 }
3935 
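/*
 * Task state transitions enforced below:
 *
 *   NONE -> INIT -> READY <-> ENABLED
 *
 * with any state dropping back to NONE when the task leaves the BPF scheduler
 * (see scx_ops_exit_task()).
 */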
3936 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3937 {
3938 	enum scx_task_state prev_state = scx_get_task_state(p);
3939 	bool warn = false;
3940 
3941 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3942 
3943 	switch (state) {
3944 	case SCX_TASK_NONE:
3945 		break;
3946 	case SCX_TASK_INIT:
3947 		warn = prev_state != SCX_TASK_NONE;
3948 		break;
3949 	case SCX_TASK_READY:
3950 		warn = prev_state == SCX_TASK_NONE;
3951 		break;
3952 	case SCX_TASK_ENABLED:
3953 		warn = prev_state != SCX_TASK_READY;
3954 		break;
3955 	default:
3956 		warn = true;
3957 		return;
3958 	}
3959 
3960 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3961 		  prev_state, state, p->comm, p->pid);
3962 
3963 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
3964 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3965 }
3966 
3967 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3968 {
3969 	int ret;
3970 
3971 	p->scx.disallow = false;
3972 
3973 	if (SCX_HAS_OP(init_task)) {
3974 		struct scx_init_task_args args = {
3975 			SCX_INIT_TASK_ARGS_CGROUP(tg)
3976 			.fork = fork,
3977 		};
3978 
3979 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3980 		if (unlikely(ret)) {
3981 			ret = ops_sanitize_err("init_task", ret);
3982 			return ret;
3983 		}
3984 	}
3985 
3986 	scx_set_task_state(p, SCX_TASK_INIT);
3987 
3988 	if (p->scx.disallow) {
3989 		if (!fork) {
3990 			struct rq *rq;
3991 			struct rq_flags rf;
3992 
3993 			rq = task_rq_lock(p, &rf);
3994 
3995 			/*
3996 			 * We're in the load path and @p->policy will be applied
3997 			 * right after. Reverting @p->policy here and rejecting
3998 			 * %SCHED_EXT transitions from scx_check_setscheduler()
3999 			 * guarantees that if ops.init_task() sets @p->scx.disallow,
4000 			 * @p can never be in SCX.
4001 			 */
4002 			if (p->policy == SCHED_EXT) {
4003 				p->policy = SCHED_NORMAL;
4004 				atomic_long_inc(&scx_nr_rejected);
4005 			}
4006 
4007 			task_rq_unlock(rq, p, &rf);
4008 		} else if (p->policy == SCHED_EXT) {
4009 			scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
4010 				      p->comm, p->pid);
4011 		}
4012 	}
4013 
4014 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
4015 	return 0;
4016 }
4017 
4018 static void scx_ops_enable_task(struct task_struct *p)
4019 {
4020 	u32 weight;
4021 
4022 	lockdep_assert_rq_held(task_rq(p));
4023 
4024 	/*
4025 	 * Set the weight before calling ops.enable() so that the scheduler
4026 	 * doesn't see a stale value if it inspects the task struct.
4027 	 */
4028 	if (task_has_idle_policy(p))
4029 		weight = WEIGHT_IDLEPRIO;
4030 	else
4031 		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
4032 
4033 	p->scx.weight = sched_weight_to_cgroup(weight);
4034 
4035 	if (SCX_HAS_OP(enable))
4036 		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
4037 	scx_set_task_state(p, SCX_TASK_ENABLED);
4038 
4039 	if (SCX_HAS_OP(set_weight))
4040 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4041 }
4042 
4043 static void scx_ops_disable_task(struct task_struct *p)
4044 {
4045 	lockdep_assert_rq_held(task_rq(p));
4046 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
4047 
4048 	if (SCX_HAS_OP(disable))
4049 		SCX_CALL_OP_TASK(SCX_KF_REST, disable, p);
4050 	scx_set_task_state(p, SCX_TASK_READY);
4051 }
4052 
4053 static void scx_ops_exit_task(struct task_struct *p)
4054 {
4055 	struct scx_exit_task_args args = {
4056 		.cancelled = false,
4057 	};
4058 
4059 	lockdep_assert_rq_held(task_rq(p));
4060 
4061 	switch (scx_get_task_state(p)) {
4062 	case SCX_TASK_NONE:
4063 		return;
4064 	case SCX_TASK_INIT:
4065 		args.cancelled = true;
4066 		break;
4067 	case SCX_TASK_READY:
4068 		break;
4069 	case SCX_TASK_ENABLED:
4070 		scx_ops_disable_task(p);
4071 		break;
4072 	default:
4073 		WARN_ON_ONCE(true);
4074 		return;
4075 	}
4076 
4077 	if (SCX_HAS_OP(exit_task))
4078 		SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args);
4079 	scx_set_task_state(p, SCX_TASK_NONE);
4080 }
4081 
4082 void init_scx_entity(struct sched_ext_entity *scx)
4083 {
4084 	memset(scx, 0, sizeof(*scx));
4085 	INIT_LIST_HEAD(&scx->dsq_list.node);
4086 	RB_CLEAR_NODE(&scx->dsq_priq);
4087 	scx->sticky_cpu = -1;
4088 	scx->holding_cpu = -1;
4089 	INIT_LIST_HEAD(&scx->runnable_node);
4090 	scx->runnable_at = jiffies;
4091 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
4092 	scx->slice = SCX_SLICE_DFL;
4093 }
4094 
4095 void scx_pre_fork(struct task_struct *p)
4096 {
4097 	/*
4098 	 * BPF scheduler enable/disable paths want to be able to iterate and
4099 	 * update all tasks, which can become complex when racing against forks. As
4100 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
4101 	 * exclude forks.
4102 	 */
4103 	percpu_down_read(&scx_fork_rwsem);
4104 }
4105 
4106 int scx_fork(struct task_struct *p)
4107 {
4108 	percpu_rwsem_assert_held(&scx_fork_rwsem);
4109 
4110 	if (scx_ops_init_task_enabled)
4111 		return scx_ops_init_task(p, task_group(p), true);
4112 	else
4113 		return 0;
4114 }
4115 
4116 void scx_post_fork(struct task_struct *p)
4117 {
4118 	if (scx_ops_init_task_enabled) {
4119 		scx_set_task_state(p, SCX_TASK_READY);
4120 
4121 		/*
4122 		 * Enable the task immediately if it's running on sched_ext.
4123 		 * Otherwise, it'll be enabled in switching_to_scx() if and
4124 		 * when it's ever configured to run with a SCHED_EXT policy.
4125 		 */
4126 		if (p->sched_class == &ext_sched_class) {
4127 			struct rq_flags rf;
4128 			struct rq *rq;
4129 
4130 			rq = task_rq_lock(p, &rf);
4131 			scx_ops_enable_task(p);
4132 			task_rq_unlock(rq, p, &rf);
4133 		}
4134 	}
4135 
4136 	spin_lock_irq(&scx_tasks_lock);
4137 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
4138 	spin_unlock_irq(&scx_tasks_lock);
4139 
4140 	percpu_up_read(&scx_fork_rwsem);
4141 }
4142 
4143 void scx_cancel_fork(struct task_struct *p)
4144 {
4145 	if (scx_enabled()) {
4146 		struct rq *rq;
4147 		struct rq_flags rf;
4148 
4149 		rq = task_rq_lock(p, &rf);
4150 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
4151 		scx_ops_exit_task(p);
4152 		task_rq_unlock(rq, p, &rf);
4153 	}
4154 
4155 	percpu_up_read(&scx_fork_rwsem);
4156 }
4157 
4158 void sched_ext_free(struct task_struct *p)
4159 {
4160 	unsigned long flags;
4161 
4162 	spin_lock_irqsave(&scx_tasks_lock, flags);
4163 	list_del_init(&p->scx.tasks_node);
4164 	spin_unlock_irqrestore(&scx_tasks_lock, flags);
4165 
4166 	/*
4167 	 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
4168 	 * ENABLED transitions can't race us. Disable ops for @p.
4169 	 */
4170 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
4171 		struct rq_flags rf;
4172 		struct rq *rq;
4173 
4174 		rq = task_rq_lock(p, &rf);
4175 		scx_ops_exit_task(p);
4176 		task_rq_unlock(rq, p, &rf);
4177 	}
4178 }
4179 
4180 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
4181 			      const struct load_weight *lw)
4182 {
4183 	lockdep_assert_rq_held(task_rq(p));
4184 
4185 	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
4186 	if (SCX_HAS_OP(set_weight))
4187 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4188 }
4189 
4190 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
4191 {
4192 }
4193 
4194 static void switching_to_scx(struct rq *rq, struct task_struct *p)
4195 {
4196 	scx_ops_enable_task(p);
4197 
4198 	/*
4199 	 * set_cpus_allowed_scx() is not called while @p is associated with a
4200 	 * different scheduler class. Keep the BPF scheduler up-to-date.
4201 	 */
4202 	if (SCX_HAS_OP(set_cpumask))
4203 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
4204 				 (struct cpumask *)p->cpus_ptr);
4205 }
4206 
4207 static void switched_from_scx(struct rq *rq, struct task_struct *p)
4208 {
4209 	scx_ops_disable_task(p);
4210 }
4211 
4212 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
4213 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
4214 
4215 int scx_check_setscheduler(struct task_struct *p, int policy)
4216 {
4217 	lockdep_assert_rq_held(task_rq(p));
4218 
4219 	/* if disallow, reject transitioning into SCX */
4220 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
4221 	    p->policy != policy && policy == SCHED_EXT)
4222 		return -EACCES;
4223 
4224 	return 0;
4225 }
4226 
4227 #ifdef CONFIG_NO_HZ_FULL
4228 bool scx_can_stop_tick(struct rq *rq)
4229 {
4230 	struct task_struct *p = rq->curr;
4231 
4232 	if (scx_rq_bypassing(rq))
4233 		return false;
4234 
4235 	if (p->sched_class != &ext_sched_class)
4236 		return true;
4237 
4238 	/*
4239 	 * @rq can dispatch from different DSQs, so we can't tell whether it
4240 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
4241 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
4242 	 */
4243 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4244 }
4245 #endif
4246 
4247 #ifdef CONFIG_EXT_GROUP_SCHED
4248 
4249 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
4250 static bool scx_cgroup_enabled;
4251 static bool cgroup_warned_missing_weight;
4252 static bool cgroup_warned_missing_idle;
4253 
4254 static void scx_cgroup_warn_missing_weight(struct task_group *tg)
4255 {
4256 	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
4257 	    cgroup_warned_missing_weight)
4258 		return;
4259 
4260 	if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
4261 		return;
4262 
4263 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
4264 		scx_ops.name);
4265 	cgroup_warned_missing_weight = true;
4266 }
4267 
4268 static void scx_cgroup_warn_missing_idle(struct task_group *tg)
4269 {
4270 	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
4271 		return;
4272 
4273 	if (!tg->idle)
4274 		return;
4275 
4276 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
4277 		scx_ops.name);
4278 	cgroup_warned_missing_idle = true;
4279 }
4280 
4281 int scx_tg_online(struct task_group *tg)
4282 {
4283 	int ret = 0;
4284 
4285 	WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4286 
4287 	percpu_down_read(&scx_cgroup_rwsem);
4288 
4289 	scx_cgroup_warn_missing_weight(tg);
4290 
4291 	if (scx_cgroup_enabled) {
4292 		if (SCX_HAS_OP(cgroup_init)) {
4293 			struct scx_cgroup_init_args args =
4294 				{ .weight = tg->scx_weight };
4295 
4296 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4297 					      tg->css.cgroup, &args);
4298 			if (ret)
4299 				ret = ops_sanitize_err("cgroup_init", ret);
4300 		}
4301 		if (ret == 0)
4302 			tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4303 	} else {
4304 		tg->scx_flags |= SCX_TG_ONLINE;
4305 	}
4306 
4307 	percpu_up_read(&scx_cgroup_rwsem);
4308 	return ret;
4309 }
4310 
4311 void scx_tg_offline(struct task_group *tg)
4312 {
4313 	WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
4314 
4315 	percpu_down_read(&scx_cgroup_rwsem);
4316 
4317 	if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
4318 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
4319 	tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4320 
4321 	percpu_up_read(&scx_cgroup_rwsem);
4322 }
4323 
4324 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4325 {
4326 	struct cgroup_subsys_state *css;
4327 	struct task_struct *p;
4328 	int ret;
4329 
4330 	/* released in scx_finish/cancel_attach() */
4331 	percpu_down_read(&scx_cgroup_rwsem);
4332 
4333 	if (!scx_cgroup_enabled)
4334 		return 0;
4335 
4336 	cgroup_taskset_for_each(p, css, tset) {
4337 		struct cgroup *from = tg_cgrp(task_group(p));
4338 		struct cgroup *to = tg_cgrp(css_tg(css));
4339 
4340 		WARN_ON_ONCE(p->scx.cgrp_moving_from);
4341 
4342 		/*
4343 		 * sched_move_task() omits identity migrations. Let's match the
4344 		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4345 		 * always match one-to-one.
4346 		 */
4347 		if (from == to)
4348 			continue;
4349 
4350 		if (SCX_HAS_OP(cgroup_prep_move)) {
4351 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
4352 					      p, from, css->cgroup);
4353 			if (ret)
4354 				goto err;
4355 		}
4356 
4357 		p->scx.cgrp_moving_from = from;
4358 	}
4359 
4360 	return 0;
4361 
4362 err:
4363 	cgroup_taskset_for_each(p, css, tset) {
4364 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4365 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4366 				    p->scx.cgrp_moving_from, css->cgroup);
4367 		p->scx.cgrp_moving_from = NULL;
4368 	}
4369 
4370 	percpu_up_read(&scx_cgroup_rwsem);
4371 	return ops_sanitize_err("cgroup_prep_move", ret);
4372 }
4373 
4374 void scx_cgroup_move_task(struct task_struct *p)
4375 {
4376 	if (!scx_cgroup_enabled)
4377 		return;
4378 
4379 	/*
4380 	 * @p must have ops.cgroup_prep_move() called on it and thus
4381 	 * cgrp_moving_from set.
4382 	 */
4383 	if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4384 		SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
4385 			p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
4386 	p->scx.cgrp_moving_from = NULL;
4387 }
4388 
4389 void scx_cgroup_finish_attach(void)
4390 {
4391 	percpu_up_read(&scx_cgroup_rwsem);
4392 }
4393 
4394 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4395 {
4396 	struct cgroup_subsys_state *css;
4397 	struct task_struct *p;
4398 
4399 	if (!scx_cgroup_enabled)
4400 		goto out_unlock;
4401 
4402 	cgroup_taskset_for_each(p, css, tset) {
4403 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4404 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4405 				    p->scx.cgrp_moving_from, css->cgroup);
4406 		p->scx.cgrp_moving_from = NULL;
4407 	}
4408 out_unlock:
4409 	percpu_up_read(&scx_cgroup_rwsem);
4410 }
4411 
4412 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4413 {
4414 	percpu_down_read(&scx_cgroup_rwsem);
4415 
4416 	if (scx_cgroup_enabled && tg->scx_weight != weight) {
4417 		if (SCX_HAS_OP(cgroup_set_weight))
4418 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
4419 				    tg_cgrp(tg), weight);
4420 		tg->scx_weight = weight;
4421 	}
4422 
4423 	percpu_up_read(&scx_cgroup_rwsem);
4424 }
4425 
4426 void scx_group_set_idle(struct task_group *tg, bool idle)
4427 {
4428 	percpu_down_read(&scx_cgroup_rwsem);
4429 	scx_cgroup_warn_missing_idle(tg);
4430 	percpu_up_read(&scx_cgroup_rwsem);
4431 }
4432 
4433 static void scx_cgroup_lock(void)
4434 {
4435 	percpu_down_write(&scx_cgroup_rwsem);
4436 }
4437 
4438 static void scx_cgroup_unlock(void)
4439 {
4440 	percpu_up_write(&scx_cgroup_rwsem);
4441 }
4442 
4443 #else	/* CONFIG_EXT_GROUP_SCHED */
4444 
4445 static inline void scx_cgroup_lock(void) {}
4446 static inline void scx_cgroup_unlock(void) {}
4447 
4448 #endif	/* CONFIG_EXT_GROUP_SCHED */
4449 
4450 /*
4451  * Omitted operations:
4452  *
4453  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4454  *   isn't tied to the CPU at that point. Preemption is implemented by resetting
4455  *   the victim task's slice to 0 and triggering reschedule on the target CPU.
4456  *
4457  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4458  *
4459  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4460  *   their current sched_class. Call them directly from sched core instead.
4461  */
4462 DEFINE_SCHED_CLASS(ext) = {
4463 	.enqueue_task		= enqueue_task_scx,
4464 	.dequeue_task		= dequeue_task_scx,
4465 	.yield_task		= yield_task_scx,
4466 	.yield_to_task		= yield_to_task_scx,
4467 
4468 	.wakeup_preempt		= wakeup_preempt_scx,
4469 
4470 	.balance		= balance_scx,
4471 	.pick_task		= pick_task_scx,
4472 
4473 	.put_prev_task		= put_prev_task_scx,
4474 	.set_next_task		= set_next_task_scx,
4475 
4476 #ifdef CONFIG_SMP
4477 	.select_task_rq		= select_task_rq_scx,
4478 	.task_woken		= task_woken_scx,
4479 	.set_cpus_allowed	= set_cpus_allowed_scx,
4480 
4481 	.rq_online		= rq_online_scx,
4482 	.rq_offline		= rq_offline_scx,
4483 #endif
4484 
4485 	.task_tick		= task_tick_scx,
4486 
4487 	.switching_to		= switching_to_scx,
4488 	.switched_from		= switched_from_scx,
4489 	.switched_to		= switched_to_scx,
4490 	.reweight_task		= reweight_task_scx,
4491 	.prio_changed		= prio_changed_scx,
4492 
4493 	.update_curr		= update_curr_scx,
4494 
4495 #ifdef CONFIG_UCLAMP_TASK
4496 	.uclamp_enabled		= 1,
4497 #endif
4498 };
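
/*
 * As noted in the wakeup_preempt entry above, preemption under sched_ext is
 * effected by zeroing the victim's slice and rescheduling its CPU. From the
 * BPF side, this is typically requested for a remote CPU with the kick kfunc,
 * e.g.:
 *
 *	scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
 *
 * which, when the target CPU is running a sched_ext task, clears that task's
 * slice and forces the CPU back into the scheduling path.
 */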
4499 
4500 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4501 {
4502 	memset(dsq, 0, sizeof(*dsq));
4503 
4504 	raw_spin_lock_init(&dsq->lock);
4505 	INIT_LIST_HEAD(&dsq->list);
4506 	dsq->id = dsq_id;
4507 }
4508 
4509 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
4510 {
4511 	struct scx_dispatch_q *dsq;
4512 	int ret;
4513 
4514 	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
4515 		return ERR_PTR(-EINVAL);
4516 
4517 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4518 	if (!dsq)
4519 		return ERR_PTR(-ENOMEM);
4520 
4521 	init_dsq(dsq, dsq_id);
4522 
4523 	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
4524 				     dsq_hash_params);
4525 	if (ret) {
4526 		kfree(dsq);
4527 		return ERR_PTR(ret);
4528 	}
4529 	return dsq;
4530 }
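
/*
 * Illustrative only: user DSQs are created from the BPF side through the
 * scx_bpf_create_dsq() kfunc which lands in create_dsq() above, typically from
 * a sleepable ops.init(). A minimal sketch (the scheduler name and DSQ id are
 * hypothetical):
 *
 *	#define MY_DSQ_ID	0
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(MY_DSQ_ID, -1);
 *	}
 *
 * Passing -1 as the node allocates the DSQ without a NUMA node preference.
 */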
4531 
4532 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4533 {
4534 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4535 	struct scx_dispatch_q *dsq, *tmp_dsq;
4536 
4537 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4538 		kfree_rcu(dsq, rcu);
4539 }
4540 
4541 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4542 
4543 static void destroy_dsq(u64 dsq_id)
4544 {
4545 	struct scx_dispatch_q *dsq;
4546 	unsigned long flags;
4547 
4548 	rcu_read_lock();
4549 
4550 	dsq = find_user_dsq(dsq_id);
4551 	if (!dsq)
4552 		goto out_unlock_rcu;
4553 
4554 	raw_spin_lock_irqsave(&dsq->lock, flags);
4555 
4556 	if (dsq->nr) {
4557 		scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4558 			      dsq->id, dsq->nr);
4559 		goto out_unlock_dsq;
4560 	}
4561 
4562 	if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4563 		goto out_unlock_dsq;
4564 
4565 	/*
4566 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4567 	 * queueing more tasks. As this function can be called from anywhere,
4568 	 * freeing is bounced through an irq work to avoid nesting RCU
4569 	 * operations inside scheduler locks.
4570 	 */
4571 	dsq->id = SCX_DSQ_INVALID;
4572 	llist_add(&dsq->free_node, &dsqs_to_free);
4573 	irq_work_queue(&free_dsq_irq_work);
4574 
4575 out_unlock_dsq:
4576 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
4577 out_unlock_rcu:
4578 	rcu_read_unlock();
4579 }
4580 
4581 #ifdef CONFIG_EXT_GROUP_SCHED
4582 static void scx_cgroup_exit(void)
4583 {
4584 	struct cgroup_subsys_state *css;
4585 
4586 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4587 
4588 	scx_cgroup_enabled = false;
4589 
4590 	/*
4591 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4592 	 * cgroups and exit all the inited ones, all online cgroups are exited.
4593 	 */
4594 	rcu_read_lock();
4595 	css_for_each_descendant_post(css, &root_task_group.css) {
4596 		struct task_group *tg = css_tg(css);
4597 
4598 		if (!(tg->scx_flags & SCX_TG_INITED))
4599 			continue;
4600 		tg->scx_flags &= ~SCX_TG_INITED;
4601 
4602 		if (!scx_ops.cgroup_exit)
4603 			continue;
4604 
4605 		if (WARN_ON_ONCE(!css_tryget(css)))
4606 			continue;
4607 		rcu_read_unlock();
4608 
4609 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4610 
4611 		rcu_read_lock();
4612 		css_put(css);
4613 	}
4614 	rcu_read_unlock();
4615 }
4616 
4617 static int scx_cgroup_init(void)
4618 {
4619 	struct cgroup_subsys_state *css;
4620 	int ret;
4621 
4622 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4623 
4624 	cgroup_warned_missing_weight = false;
4625 	cgroup_warned_missing_idle = false;
4626 
4627 	/*
4628 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4629 	 * cgroups and init each of them, all online cgroups are initialized.
4630 	 */
4631 	rcu_read_lock();
4632 	css_for_each_descendant_pre(css, &root_task_group.css) {
4633 		struct task_group *tg = css_tg(css);
4634 		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4635 
4636 		scx_cgroup_warn_missing_weight(tg);
4637 		scx_cgroup_warn_missing_idle(tg);
4638 
4639 		if ((tg->scx_flags &
4640 		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4641 			continue;
4642 
4643 		if (!scx_ops.cgroup_init) {
4644 			tg->scx_flags |= SCX_TG_INITED;
4645 			continue;
4646 		}
4647 
4648 		if (WARN_ON_ONCE(!css_tryget(css)))
4649 			continue;
4650 		rcu_read_unlock();
4651 
4652 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4653 				      css->cgroup, &args);
4654 		if (ret) {
4655 			css_put(css);
4656 			scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4657 			return ret;
4658 		}
4659 		tg->scx_flags |= SCX_TG_INITED;
4660 
4661 		rcu_read_lock();
4662 		css_put(css);
4663 	}
4664 	rcu_read_unlock();
4665 
4666 	WARN_ON_ONCE(scx_cgroup_enabled);
4667 	scx_cgroup_enabled = true;
4668 
4669 	return 0;
4670 }
4671 
4672 #else
4673 static void scx_cgroup_exit(void) {}
4674 static int scx_cgroup_init(void) { return 0; }
4675 #endif
4676 
4677 
4678 /********************************************************************************
4679  * Sysfs interface and ops enable/disable.
4680  */
4681 
4682 #define SCX_ATTR(_name)								\
4683 	static struct kobj_attribute scx_attr_##_name = {			\
4684 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
4685 		.show = scx_attr_##_name##_show,				\
4686 	}
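
/*
 * For example, SCX_ATTR(state) below expands to roughly:
 *
 *	static struct kobj_attribute scx_attr_state = {
 *		.attr = { .name = "state", .mode = 0444 },
 *		.show = scx_attr_state_show,
 *	};
 */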
4687 
4688 static ssize_t scx_attr_state_show(struct kobject *kobj,
4689 				   struct kobj_attribute *ka, char *buf)
4690 {
4691 	return sysfs_emit(buf, "%s\n",
4692 			  scx_ops_enable_state_str[scx_ops_enable_state()]);
4693 }
4694 SCX_ATTR(state);
4695 
4696 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4697 					struct kobj_attribute *ka, char *buf)
4698 {
4699 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4700 }
4701 SCX_ATTR(switch_all);
4702 
4703 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4704 					 struct kobj_attribute *ka, char *buf)
4705 {
4706 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4707 }
4708 SCX_ATTR(nr_rejected);
4709 
4710 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4711 					 struct kobj_attribute *ka, char *buf)
4712 {
4713 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4714 }
4715 SCX_ATTR(hotplug_seq);
4716 
4717 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4718 					struct kobj_attribute *ka, char *buf)
4719 {
4720 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4721 }
4722 SCX_ATTR(enable_seq);
4723 
4724 static struct attribute *scx_global_attrs[] = {
4725 	&scx_attr_state.attr,
4726 	&scx_attr_switch_all.attr,
4727 	&scx_attr_nr_rejected.attr,
4728 	&scx_attr_hotplug_seq.attr,
4729 	&scx_attr_enable_seq.attr,
4730 	NULL,
4731 };
4732 
4733 static const struct attribute_group scx_global_attr_group = {
4734 	.attrs = scx_global_attrs,
4735 };
4736 
4737 static void scx_kobj_release(struct kobject *kobj)
4738 {
4739 	kfree(kobj);
4740 }
4741 
4742 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4743 				 struct kobj_attribute *ka, char *buf)
4744 {
4745 	return sysfs_emit(buf, "%s\n", scx_ops.name);
4746 }
4747 SCX_ATTR(ops);
4748 
4749 static struct attribute *scx_sched_attrs[] = {
4750 	&scx_attr_ops.attr,
4751 	NULL,
4752 };
4753 ATTRIBUTE_GROUPS(scx_sched);
4754 
4755 static const struct kobj_type scx_ktype = {
4756 	.release = scx_kobj_release,
4757 	.sysfs_ops = &kobj_sysfs_ops,
4758 	.default_groups = scx_sched_groups,
4759 };
4760 
4761 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4762 {
4763 	return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4764 }
4765 
4766 static const struct kset_uevent_ops scx_uevent_ops = {
4767 	.uevent = scx_uevent,
4768 };
4769 
4770 /*
4771  * Used by sched_fork() and __setscheduler_prio() to pick the matching
4772  * sched_class. dl/rt are already handled.
4773  */
4774 bool task_should_scx(int policy)
4775 {
4776 	if (!scx_enabled() ||
4777 	    unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4778 		return false;
4779 	if (READ_ONCE(scx_switching_all))
4780 		return true;
4781 	return policy == SCHED_EXT;
4782 }
4783 
4784 /**
4785  * scx_softlockup - sched_ext softlockup handler
4786  * @dur_s: number of seconds of CPU stuck due to soft lockup
4787  *
4788  * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4789  * live-lock the system by making many CPUs target the same DSQ to the point
4790  * where soft-lockup detection triggers. This function is called from
4791  * the soft-lockup watchdog when the triggering point is close and tries to unjam
4792  * the system by enabling the breather and aborting the BPF scheduler.
4793  */
4794 void scx_softlockup(u32 dur_s)
4795 {
4796 	switch (scx_ops_enable_state()) {
4797 	case SCX_OPS_ENABLING:
4798 	case SCX_OPS_ENABLED:
4799 		break;
4800 	default:
4801 		return;
4802 	}
4803 
4804 	/* allow only one instance, cleared at the end of scx_ops_bypass() */
4805 	if (test_and_set_bit(0, &scx_in_softlockup))
4806 		return;
4807 
4808 	printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4809 			smp_processor_id(), dur_s, scx_ops.name);
4810 
4811 	/*
4812 	 * Some CPUs may be trapped in the dispatch paths. Enable breather
4813 	 * immediately; otherwise, we might not even be able to get to
4814 	 * scx_ops_bypass().
4815 	 */
4816 	atomic_inc(&scx_ops_breather_depth);
4817 
4818 	scx_ops_error("soft lockup - CPU#%d stuck for %us",
4819 		      smp_processor_id(), dur_s);
4820 }
4821 
4822 static void scx_clear_softlockup(void)
4823 {
4824 	if (test_and_clear_bit(0, &scx_in_softlockup))
4825 		atomic_dec(&scx_ops_breather_depth);
4826 }
4827 
4828 /**
4829  * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4830  * @bypass: true for bypass, false for unbypass
4831  *
4832  * Bypassing guarantees that all runnable tasks make forward progress without
4833  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4834  * be held by tasks that the BPF scheduler is forgetting to run, which
4835  * unfortunately also excludes toggling the static branches.
4836  *
4837  * Let's work around by overriding a couple ops and modifying behaviors based on
4838  * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4839  * to force global FIFO scheduling.
4840  *
4841  * - ops.select_cpu() is ignored and the default select_cpu() is used.
4842  *
4843  * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4844  *   %SCX_OPS_ENQ_LAST is also ignored.
4845  *
4846  * - ops.dispatch() is ignored.
4847  *
4848  * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4849  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
4850  *   the tail of the queue with core_sched_at touched.
4851  *
4852  * - pick_next_task() suppresses zero slice warning.
4853  *
4854  * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4855  *   operations.
4856  *
4857  * - scx_prio_less() reverts to the default core_sched_at order.
4858  */
4859 static void scx_ops_bypass(bool bypass)
4860 {
4861 	static DEFINE_RAW_SPINLOCK(bypass_lock);
4862 	int cpu;
4863 	unsigned long flags;
4864 
4865 	raw_spin_lock_irqsave(&bypass_lock, flags);
4866 	if (bypass) {
4867 		scx_ops_bypass_depth++;
4868 		WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
4869 		if (scx_ops_bypass_depth != 1)
4870 			goto unlock;
4871 	} else {
4872 		scx_ops_bypass_depth--;
4873 		WARN_ON_ONCE(scx_ops_bypass_depth < 0);
4874 		if (scx_ops_bypass_depth != 0)
4875 			goto unlock;
4876 	}
4877 
4878 	atomic_inc(&scx_ops_breather_depth);
4879 
4880 	/*
4881 	 * No task property is changing. We just need to make sure all currently
4882 	 * queued tasks are re-queued according to the new scx_rq_bypassing()
4883 	 * state. As an optimization, walk each rq's runnable_list instead of
4884 	 * the scx_tasks list.
4885 	 *
4886 	 * This function can't trust the scheduler and thus can't use
4887 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
4888 	 */
4889 	for_each_possible_cpu(cpu) {
4890 		struct rq *rq = cpu_rq(cpu);
4891 		struct task_struct *p, *n;
4892 
4893 		raw_spin_rq_lock(rq);
4894 
4895 		if (bypass) {
4896 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4897 			rq->scx.flags |= SCX_RQ_BYPASSING;
4898 		} else {
4899 			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4900 			rq->scx.flags &= ~SCX_RQ_BYPASSING;
4901 		}
4902 
4903 		/*
4904 		 * We need to guarantee that no tasks are on the BPF scheduler
4905 		 * while bypassing. Either we see enabled or the enable path
4906 		 * sees scx_rq_bypassing() before moving tasks to SCX.
4907 		 */
4908 		if (!scx_enabled()) {
4909 			raw_spin_rq_unlock(rq);
4910 			continue;
4911 		}
4912 
4913 		/*
4914 		 * The use of list_for_each_entry_safe_reverse() is required
4915 		 * because each task is going to be removed from and added back
4916 		 * to the runnable_list during iteration. Because they're added
4917 		 * to the tail of the list, safe reverse iteration can still
4918 		 * visit all nodes.
4919 		 */
4920 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4921 						 scx.runnable_node) {
4922 			struct sched_enq_and_set_ctx ctx;
4923 
4924 			/* cycling deq/enq is enough, see the function comment */
4925 			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4926 			sched_enq_and_set_task(&ctx);
4927 		}
4928 
4929 		/* resched to restore ticks and idle state */
4930 		if (cpu_online(cpu) || cpu == smp_processor_id())
4931 			resched_curr(rq);
4932 
4933 		raw_spin_rq_unlock(rq);
4934 	}
4935 
4936 	atomic_dec(&scx_ops_breather_depth);
4937 unlock:
4938 	raw_spin_unlock_irqrestore(&bypass_lock, flags);
4939 	scx_clear_softlockup();
4940 }
4941 
4942 static void free_exit_info(struct scx_exit_info *ei)
4943 {
4944 	kfree(ei->dump);
4945 	kfree(ei->msg);
4946 	kfree(ei->bt);
4947 	kfree(ei);
4948 }
4949 
4950 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4951 {
4952 	struct scx_exit_info *ei;
4953 
4954 	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4955 	if (!ei)
4956 		return NULL;
4957 
4958 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4959 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4960 	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4961 
4962 	if (!ei->bt || !ei->msg || !ei->dump) {
4963 		free_exit_info(ei);
4964 		return NULL;
4965 	}
4966 
4967 	return ei;
4968 }
4969 
4970 static const char *scx_exit_reason(enum scx_exit_kind kind)
4971 {
4972 	switch (kind) {
4973 	case SCX_EXIT_UNREG:
4974 		return "unregistered from user space";
4975 	case SCX_EXIT_UNREG_BPF:
4976 		return "unregistered from BPF";
4977 	case SCX_EXIT_UNREG_KERN:
4978 		return "unregistered from the main kernel";
4979 	case SCX_EXIT_SYSRQ:
4980 		return "disabled by sysrq-S";
4981 	case SCX_EXIT_ERROR:
4982 		return "runtime error";
4983 	case SCX_EXIT_ERROR_BPF:
4984 		return "scx_bpf_error";
4985 	case SCX_EXIT_ERROR_STALL:
4986 		return "runnable task stall";
4987 	default:
4988 		return "<UNKNOWN>";
4989 	}
4990 }
4991 
4992 static void scx_ops_disable_workfn(struct kthread_work *work)
4993 {
4994 	struct scx_exit_info *ei = scx_exit_info;
4995 	struct scx_task_iter sti;
4996 	struct task_struct *p;
4997 	struct rhashtable_iter rht_iter;
4998 	struct scx_dispatch_q *dsq;
4999 	int i, kind, cpu;
5000 
5001 	kind = atomic_read(&scx_exit_kind);
5002 	while (true) {
5003 		/*
5004 		 * NONE indicates that a new scx_ops has been registered since
5005 		 * disable was scheduled - don't kill the new ops. DONE
5006 		 * indicates that the ops has already been disabled.
5007 		 */
5008 		if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
5009 			return;
5010 		if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
5011 			break;
5012 	}
5013 	ei->kind = kind;
5014 	ei->reason = scx_exit_reason(ei->kind);
5015 
5016 	/* guarantee forward progress by bypassing scx_ops */
5017 	scx_ops_bypass(true);
5018 
5019 	switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
5020 	case SCX_OPS_DISABLING:
5021 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
5022 		break;
5023 	case SCX_OPS_DISABLED:
5024 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
5025 			scx_exit_info->msg);
5026 		WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5027 			     SCX_OPS_DISABLING);
5028 		goto done;
5029 	default:
5030 		break;
5031 	}
5032 
5033 	/*
5034 	 * Here, every runnable task is guaranteed to make forward progress and
5035 	 * we can safely use blocking synchronization constructs. Actually
5036 	 * disable ops.
5037 	 */
5038 	mutex_lock(&scx_ops_enable_mutex);
5039 
5040 	static_branch_disable(&__scx_switched_all);
5041 	WRITE_ONCE(scx_switching_all, false);
5042 
5043 	/*
5044 	 * Shut down cgroup support before tasks so that the cgroup attach path
5045 	 * doesn't race against scx_ops_exit_task().
5046 	 */
5047 	scx_cgroup_lock();
5048 	scx_cgroup_exit();
5049 	scx_cgroup_unlock();
5050 
5051 	/*
5052 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
5053 	 * must be switched out and exited synchronously.
5054 	 */
5055 	percpu_down_write(&scx_fork_rwsem);
5056 
5057 	scx_ops_init_task_enabled = false;
5058 
5059 	scx_task_iter_start(&sti);
5060 	while ((p = scx_task_iter_next_locked(&sti))) {
5061 		const struct sched_class *old_class = p->sched_class;
5062 		const struct sched_class *new_class =
5063 			__setscheduler_class(p->policy, p->prio);
5064 		struct sched_enq_and_set_ctx ctx;
5065 
5066 		if (old_class != new_class && p->se.sched_delayed)
5067 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5068 
5069 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5070 
5071 		p->sched_class = new_class;
5072 		check_class_changing(task_rq(p), p, old_class);
5073 
5074 		sched_enq_and_set_task(&ctx);
5075 
5076 		check_class_changed(task_rq(p), p, old_class, p->prio);
5077 		scx_ops_exit_task(p);
5078 	}
5079 	scx_task_iter_stop(&sti);
5080 	percpu_up_write(&scx_fork_rwsem);
5081 
5082 	/*
5083 	 * Invalidate all the rq clocks to prevent getting outdated
5084 	 * rq clocks from a previous scx scheduler.
5085 	 */
5086 	for_each_possible_cpu(cpu) {
5087 		struct rq *rq = cpu_rq(cpu);
5088 		scx_rq_clock_invalidate(rq);
5089 	}
5090 
5091 	/* no task is on scx, turn off all the switches and flush in-progress calls */
5092 	static_branch_disable(&__scx_ops_enabled);
5093 	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
5094 		static_branch_disable(&scx_has_op[i]);
5095 	static_branch_disable(&scx_ops_enq_last);
5096 	static_branch_disable(&scx_ops_enq_exiting);
5097 	static_branch_disable(&scx_ops_enq_migration_disabled);
5098 	static_branch_disable(&scx_ops_cpu_preempt);
5099 	static_branch_disable(&scx_builtin_idle_enabled);
5100 	synchronize_rcu();
5101 
5102 	if (ei->kind >= SCX_EXIT_ERROR) {
5103 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5104 		       scx_ops.name, ei->reason);
5105 
5106 		if (ei->msg[0] != '\0')
5107 			pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
5108 #ifdef CONFIG_STACKTRACE
5109 		stack_trace_print(ei->bt, ei->bt_len, 2);
5110 #endif
5111 	} else {
5112 		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5113 			scx_ops.name, ei->reason);
5114 	}
5115 
5116 	if (scx_ops.exit)
5117 		SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
5118 
5119 	cancel_delayed_work_sync(&scx_watchdog_work);
5120 
5121 	/*
5122 	 * Delete the kobject from the hierarchy eagerly in addition to just
5123 	 * dropping a reference. Otherwise, if the object is deleted
5124 	 * asynchronously, sysfs could observe an object of the same name still
5125 	 * in the hierarchy when another scheduler is loaded.
5126 	 */
5127 	kobject_del(scx_root_kobj);
5128 	kobject_put(scx_root_kobj);
5129 	scx_root_kobj = NULL;
5130 
5131 	memset(&scx_ops, 0, sizeof(scx_ops));
5132 
5133 	rhashtable_walk_enter(&dsq_hash, &rht_iter);
5134 	do {
5135 		rhashtable_walk_start(&rht_iter);
5136 
5137 		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
5138 			destroy_dsq(dsq->id);
5139 
5140 		rhashtable_walk_stop(&rht_iter);
5141 	} while (dsq == ERR_PTR(-EAGAIN));
5142 	rhashtable_walk_exit(&rht_iter);
5143 
5144 	free_percpu(scx_dsp_ctx);
5145 	scx_dsp_ctx = NULL;
5146 	scx_dsp_max_batch = 0;
5147 
5148 	free_exit_info(scx_exit_info);
5149 	scx_exit_info = NULL;
5150 
5151 	mutex_unlock(&scx_ops_enable_mutex);
5152 
5153 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5154 		     SCX_OPS_DISABLING);
5155 done:
5156 	scx_ops_bypass(false);
5157 }
5158 
5159 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
5160 
5161 static void schedule_scx_ops_disable_work(void)
5162 {
5163 	struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
5164 
5165 	/*
5166 	 * We may be called spuriously before the first bpf_sched_ext_reg(). If
5167 	 * scx_ops_helper isn't set up yet, there's nothing to do.
5168 	 */
5169 	if (helper)
5170 		kthread_queue_work(helper, &scx_ops_disable_work);
5171 }
5172 
5173 static void scx_ops_disable(enum scx_exit_kind kind)
5174 {
5175 	int none = SCX_EXIT_NONE;
5176 
5177 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5178 		kind = SCX_EXIT_ERROR;
5179 
5180 	atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
5181 
5182 	schedule_scx_ops_disable_work();
5183 }
5184 
5185 static void dump_newline(struct seq_buf *s)
5186 {
5187 	trace_sched_ext_dump("");
5188 
5189 	/* @s may be zero sized and seq_buf triggers WARN if so */
5190 	if (s->size)
5191 		seq_buf_putc(s, '\n');
5192 }
5193 
5194 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
5195 {
5196 	va_list args;
5197 
5198 #ifdef CONFIG_TRACEPOINTS
5199 	if (trace_sched_ext_dump_enabled()) {
5200 		/* protected by scx_dump_state()::dump_lock */
5201 		static char line_buf[SCX_EXIT_MSG_LEN];
5202 
5203 		va_start(args, fmt);
5204 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
5205 		va_end(args);
5206 
5207 		trace_sched_ext_dump(line_buf);
5208 	}
5209 #endif
5210 	/* @s may be zero sized and seq_buf triggers WARN if so */
5211 	if (s->size) {
5212 		va_start(args, fmt);
5213 		seq_buf_vprintf(s, fmt, args);
5214 		va_end(args);
5215 
5216 		seq_buf_putc(s, '\n');
5217 	}
5218 }
5219 
5220 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
5221 			     const unsigned long *bt, unsigned int len)
5222 {
5223 	unsigned int i;
5224 
5225 	for (i = 0; i < len; i++)
5226 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
5227 }
5228 
5229 static void ops_dump_init(struct seq_buf *s, const char *prefix)
5230 {
5231 	struct scx_dump_data *dd = &scx_dump_data;
5232 
5233 	lockdep_assert_irqs_disabled();
5234 
5235 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
5236 	dd->first = true;
5237 	dd->cursor = 0;
5238 	dd->s = s;
5239 	dd->prefix = prefix;
5240 }
5241 
5242 static void ops_dump_flush(void)
5243 {
5244 	struct scx_dump_data *dd = &scx_dump_data;
5245 	char *line = dd->buf.line;
5246 
5247 	if (!dd->cursor)
5248 		return;
5249 
5250 	/*
5251 	 * There's something to flush and this is the first line. Insert a blank
5252 	 * line to distinguish ops dump.
5253 	 */
5254 	if (dd->first) {
5255 		dump_newline(dd->s);
5256 		dd->first = false;
5257 	}
5258 
5259 	/*
5260 	 * There may be multiple lines in $line. Scan and emit each line
5261 	 * separately.
5262 	 */
5263 	while (true) {
5264 		char *end = line;
5265 		char c;
5266 
5267 		while (*end != '\n' && *end != '\0')
5268 			end++;
5269 
5270 		/*
5271 		 * If $line overflowed, it may not have a newline at the end.
5272 		 * Always emit with a newline.
5273 		 */
5274 		c = *end;
5275 		*end = '\0';
5276 		dump_line(dd->s, "%s%s", dd->prefix, line);
5277 		if (c == '\0')
5278 			break;
5279 
5280 		/* move to the next line */
5281 		end++;
5282 		if (*end == '\0')
5283 			break;
5284 		line = end;
5285 	}
5286 
5287 	dd->cursor = 0;
5288 }
5289 
5290 static void ops_dump_exit(void)
5291 {
5292 	ops_dump_flush();
5293 	scx_dump_data.cpu = -1;
5294 }
5295 
5296 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
5297 			  struct task_struct *p, char marker)
5298 {
5299 	static unsigned long bt[SCX_EXIT_BT_LEN];
5300 	char dsq_id_buf[19] = "(n/a)";
5301 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
5302 	unsigned int bt_len = 0;
5303 
5304 	if (p->scx.dsq)
5305 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
5306 			  (unsigned long long)p->scx.dsq->id);
5307 
5308 	dump_newline(s);
5309 	dump_line(s, " %c%c %s[%d] %+ldms",
5310 		  marker, task_state_to_char(p), p->comm, p->pid,
5311 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
5312 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
5313 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
5314 		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
5315 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
5316 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s",
5317 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
5318 	dump_line(s, "      dsq_vtime=%llu slice=%llu weight=%u",
5319 		  p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
5320 	dump_line(s, "      cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5321 
5322 	if (SCX_HAS_OP(dump_task)) {
5323 		ops_dump_init(s, "    ");
5324 		SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
5325 		ops_dump_exit();
5326 	}
5327 
5328 #ifdef CONFIG_STACKTRACE
5329 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5330 #endif
5331 	if (bt_len) {
5332 		dump_newline(s);
5333 		dump_stack_trace(s, "    ", bt, bt_len);
5334 	}
5335 }
5336 
5337 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5338 {
5339 	static DEFINE_SPINLOCK(dump_lock);
5340 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5341 	struct scx_dump_ctx dctx = {
5342 		.kind = ei->kind,
5343 		.exit_code = ei->exit_code,
5344 		.reason = ei->reason,
5345 		.at_ns = ktime_get_ns(),
5346 		.at_jiffies = jiffies,
5347 	};
5348 	struct seq_buf s;
5349 	unsigned long flags;
5350 	char *buf;
5351 	int cpu;
5352 
5353 	spin_lock_irqsave(&dump_lock, flags);
5354 
5355 	seq_buf_init(&s, ei->dump, dump_len);
5356 
5357 	if (ei->kind == SCX_EXIT_NONE) {
5358 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
5359 	} else {
5360 		dump_line(&s, "%s[%d] triggered exit kind %d:",
5361 			  current->comm, current->pid, ei->kind);
5362 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
5363 		dump_newline(&s);
5364 		dump_line(&s, "Backtrace:");
5365 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
5366 	}
5367 
5368 	if (SCX_HAS_OP(dump)) {
5369 		ops_dump_init(&s, "");
5370 		SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
5371 		ops_dump_exit();
5372 	}
5373 
5374 	dump_newline(&s);
5375 	dump_line(&s, "CPU states");
5376 	dump_line(&s, "----------");
5377 
5378 	for_each_possible_cpu(cpu) {
5379 		struct rq *rq = cpu_rq(cpu);
5380 		struct rq_flags rf;
5381 		struct task_struct *p;
5382 		struct seq_buf ns;
5383 		size_t avail, used;
5384 		bool idle;
5385 
5386 		rq_lock(rq, &rf);
5387 
5388 		idle = list_empty(&rq->scx.runnable_list) &&
5389 			rq->curr->sched_class == &idle_sched_class;
5390 
5391 		if (idle && !SCX_HAS_OP(dump_cpu))
5392 			goto next;
5393 
5394 		/*
5395 		 * We don't yet know whether ops.dump_cpu() will produce output
5396 		 * and we may want to skip the default CPU dump if it doesn't.
5397 		 * Use a nested seq_buf to generate the standard dump so that we
5398 		 * can decide whether to commit later.
5399 		 */
5400 		avail = seq_buf_get_buf(&s, &buf);
5401 		seq_buf_init(&ns, buf, avail);
5402 
5403 		dump_newline(&ns);
5404 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5405 			  cpu, rq->scx.nr_running, rq->scx.flags,
5406 			  rq->scx.cpu_released, rq->scx.ops_qseq,
5407 			  rq->scx.pnt_seq);
5408 		dump_line(&ns, "          curr=%s[%d] class=%ps",
5409 			  rq->curr->comm, rq->curr->pid,
5410 			  rq->curr->sched_class);
5411 		if (!cpumask_empty(rq->scx.cpus_to_kick))
5412 			dump_line(&ns, "  cpus_to_kick   : %*pb",
5413 				  cpumask_pr_args(rq->scx.cpus_to_kick));
5414 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5415 			dump_line(&ns, "  idle_to_kick   : %*pb",
5416 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5417 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
5418 			dump_line(&ns, "  cpus_to_preempt: %*pb",
5419 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
5420 		if (!cpumask_empty(rq->scx.cpus_to_wait))
5421 			dump_line(&ns, "  cpus_to_wait   : %*pb",
5422 				  cpumask_pr_args(rq->scx.cpus_to_wait));
5423 
5424 		used = seq_buf_used(&ns);
5425 		if (SCX_HAS_OP(dump_cpu)) {
5426 			ops_dump_init(&ns, "  ");
5427 			SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
5428 			ops_dump_exit();
5429 		}
5430 
5431 		/*
5432 		 * If idle && nothing generated by ops.dump_cpu(), there's
5433 		 * nothing interesting. Skip.
5434 		 */
5435 		if (idle && used == seq_buf_used(&ns))
5436 			goto next;
5437 
5438 		/*
5439 		 * $s may already have overflowed when $ns was created. If so,
5440 		 * calling commit on it will trigger BUG.
5441 		 */
5442 		if (avail) {
5443 			seq_buf_commit(&s, seq_buf_used(&ns));
5444 			if (seq_buf_has_overflowed(&ns))
5445 				seq_buf_set_overflow(&s);
5446 		}
5447 
5448 		if (rq->curr->sched_class == &ext_sched_class)
5449 			scx_dump_task(&s, &dctx, rq->curr, '*');
5450 
5451 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5452 			scx_dump_task(&s, &dctx, p, ' ');
5453 	next:
5454 		rq_unlock(rq, &rf);
5455 	}
5456 
5457 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5458 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5459 		       trunc_marker, sizeof(trunc_marker));
5460 
5461 	spin_unlock_irqrestore(&dump_lock, flags);
5462 }
5463 
5464 static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
5465 {
5466 	struct scx_exit_info *ei = scx_exit_info;
5467 
5468 	if (ei->kind >= SCX_EXIT_ERROR)
5469 		scx_dump_state(ei, scx_ops.exit_dump_len);
5470 
5471 	schedule_scx_ops_disable_work();
5472 }
5473 
5474 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
5475 
5476 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
5477 					     s64 exit_code,
5478 					     const char *fmt, ...)
5479 {
5480 	struct scx_exit_info *ei = scx_exit_info;
5481 	int none = SCX_EXIT_NONE;
5482 	va_list args;
5483 
5484 	if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
5485 		return;
5486 
5487 	ei->exit_code = exit_code;
5488 #ifdef CONFIG_STACKTRACE
5489 	if (kind >= SCX_EXIT_ERROR)
5490 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5491 #endif
5492 	va_start(args, fmt);
5493 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5494 	va_end(args);
5495 
5496 	/*
5497 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5498 	 * in scx_ops_disable_workfn().
5499 	 */
5500 	ei->kind = kind;
5501 	ei->reason = scx_exit_reason(ei->kind);
5502 
5503 	irq_work_queue(&scx_ops_error_irq_work);
5504 }
5505 
5506 static struct kthread_worker *scx_create_rt_helper(const char *name)
5507 {
5508 	struct kthread_worker *helper;
5509 
5510 	helper = kthread_run_worker(0, name);
5511 	if (helper)
5512 		sched_set_fifo(helper->task);
5513 	return helper;
5514 }
5515 
5516 static void check_hotplug_seq(const struct sched_ext_ops *ops)
5517 {
5518 	unsigned long long global_hotplug_seq;
5519 
5520 	/*
5521 	 * If a hotplug event has occurred between when a scheduler was
5522 	 * initialized and when we were able to attach, exit and notify user
5523 	 * space about it.
5524 	 */
5525 	if (ops->hotplug_seq) {
5526 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5527 		if (ops->hotplug_seq != global_hotplug_seq) {
5528 			scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5529 				     "expected hotplug seq %llu did not match actual %llu",
5530 				     ops->hotplug_seq, global_hotplug_seq);
5531 		}
5532 	}
5533 }
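/*
 * Illustrative sketch (editor's addition, not part of this file): the loader
 * side of the handshake above typically snapshots the global hotplug sequence
 * before attaching and stores it in the ops struct, so that the mismatch
 * check in check_hotplug_seq() can fire. The sysfs path and skeleton field
 * names below are assumptions about a userspace loader.
 *
 *	unsigned long long seq = 0;
 *	FILE *f = fopen("/sys/kernel/sched_ext/hotplug_seq", "r");
 *
 *	if (f) {
 *		if (fscanf(f, "%llu", &seq) != 1)
 *			seq = 0;
 *		fclose(f);
 *	}
 *	skel->struct_ops.example_ops->hotplug_seq = seq;
 */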
5534 
5535 static int validate_ops(const struct sched_ext_ops *ops)
5536 {
5537 	/*
5538 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5539 	 * ops.enqueue() callback isn't implemented.
5540 	 */
5541 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5542 		scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5543 		return -EINVAL;
5544 	}
5545 
5546 	return 0;
5547 }
5548 
5549 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5550 {
5551 	struct scx_task_iter sti;
5552 	struct task_struct *p;
5553 	unsigned long timeout;
5554 	int i, cpu, node, ret;
5555 
5556 	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5557 			   cpu_possible_mask)) {
5558 		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5559 		return -EINVAL;
5560 	}
5561 
5562 	mutex_lock(&scx_ops_enable_mutex);
5563 
5564 	if (!scx_ops_helper) {
5565 		WRITE_ONCE(scx_ops_helper,
5566 			   scx_create_rt_helper("sched_ext_ops_helper"));
5567 		if (!scx_ops_helper) {
5568 			ret = -ENOMEM;
5569 			goto err_unlock;
5570 		}
5571 	}
5572 
5573 	if (!global_dsqs) {
5574 		struct scx_dispatch_q **dsqs;
5575 
5576 		dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
5577 		if (!dsqs) {
5578 			ret = -ENOMEM;
5579 			goto err_unlock;
5580 		}
5581 
5582 		for_each_node_state(node, N_POSSIBLE) {
5583 			struct scx_dispatch_q *dsq;
5584 
5585 			dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5586 			if (!dsq) {
5587 				for_each_node_state(node, N_POSSIBLE)
5588 					kfree(dsqs[node]);
5589 				kfree(dsqs);
5590 				ret = -ENOMEM;
5591 				goto err_unlock;
5592 			}
5593 
5594 			init_dsq(dsq, SCX_DSQ_GLOBAL);
5595 			dsqs[node] = dsq;
5596 		}
5597 
5598 		global_dsqs = dsqs;
5599 	}
5600 
5601 	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5602 		ret = -EBUSY;
5603 		goto err_unlock;
5604 	}
5605 
5606 	scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5607 	if (!scx_root_kobj) {
5608 		ret = -ENOMEM;
5609 		goto err_unlock;
5610 	}
5611 
5612 	scx_root_kobj->kset = scx_kset;
5613 	ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5614 	if (ret < 0)
5615 		goto err;
5616 
5617 	scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5618 	if (!scx_exit_info) {
5619 		ret = -ENOMEM;
5620 		goto err_del;
5621 	}
5622 
5623 	/*
5624 	 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5625 	 * disable path. Failure triggers full disabling from here on.
5626 	 */
5627 	scx_ops = *ops;
5628 
5629 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5630 		     SCX_OPS_DISABLED);
5631 
5632 	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5633 	scx_warned_zero_slice = false;
5634 
5635 	atomic_long_set(&scx_nr_rejected, 0);
5636 
5637 	for_each_possible_cpu(cpu)
5638 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5639 
5640 	/*
5641 	 * Keep CPUs stable during enable so that the BPF scheduler can track
5642 	 * online CPUs by watching ->on/offline_cpu() after ->init().
5643 	 */
5644 	cpus_read_lock();
5645 
5646 	if (scx_ops.init) {
5647 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5648 		if (ret) {
5649 			ret = ops_sanitize_err("init", ret);
5650 			cpus_read_unlock();
5651 			scx_ops_error("ops.init() failed (%d)", ret);
5652 			goto err_disable;
5653 		}
5654 	}
5655 
5656 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5657 		if (((void (**)(void))ops)[i])
5658 			static_branch_enable_cpuslocked(&scx_has_op[i]);
5659 
5660 	check_hotplug_seq(ops);
5661 #ifdef CONFIG_SMP
5662 	update_selcpu_topology();
5663 #endif
5664 	cpus_read_unlock();
5665 
5666 	ret = validate_ops(ops);
5667 	if (ret)
5668 		goto err_disable;
5669 
5670 	WARN_ON_ONCE(scx_dsp_ctx);
5671 	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5672 	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5673 						   scx_dsp_max_batch),
5674 				     __alignof__(struct scx_dsp_ctx));
5675 	if (!scx_dsp_ctx) {
5676 		ret = -ENOMEM;
5677 		goto err_disable;
5678 	}
5679 
5680 	if (ops->timeout_ms)
5681 		timeout = msecs_to_jiffies(ops->timeout_ms);
5682 	else
5683 		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5684 
5685 	WRITE_ONCE(scx_watchdog_timeout, timeout);
5686 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5687 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5688 			   scx_watchdog_timeout / 2);
5689 
5690 	/*
5691 	 * Once __scx_ops_enabled is set, %current can be switched to SCX
5692 	 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5693 	 * userspace scheduling) may not function correctly before all tasks are
5694 	 * switched. Init in bypass mode to guarantee forward progress.
5695 	 */
5696 	scx_ops_bypass(true);
5697 
5698 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5699 		if (((void (**)(void))ops)[i])
5700 			static_branch_enable(&scx_has_op[i]);
5701 
5702 	if (ops->flags & SCX_OPS_ENQ_LAST)
5703 		static_branch_enable(&scx_ops_enq_last);
5704 
5705 	if (ops->flags & SCX_OPS_ENQ_EXITING)
5706 		static_branch_enable(&scx_ops_enq_exiting);
5707 	if (ops->flags & SCX_OPS_ENQ_MIGRATION_DISABLED)
5708 		static_branch_enable(&scx_ops_enq_migration_disabled);
5709 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5710 		static_branch_enable(&scx_ops_cpu_preempt);
5711 
5712 	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
5713 		reset_idle_masks();
5714 		static_branch_enable(&scx_builtin_idle_enabled);
5715 	} else {
5716 		static_branch_disable(&scx_builtin_idle_enabled);
5717 	}
5718 
5719 	/*
5720 	 * Lock out forks, cgroup on/offlining and moves before opening the
5721 	 * floodgate so that they don't wander into the operations prematurely.
5722 	 */
5723 	percpu_down_write(&scx_fork_rwsem);
5724 
5725 	WARN_ON_ONCE(scx_ops_init_task_enabled);
5726 	scx_ops_init_task_enabled = true;
5727 
5728 	/*
5729 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5730 	 * preventing new tasks from being added. No need to exclude tasks
5731 	 * leaving as sched_ext_free() can handle both prepped and enabled
5732 	 * tasks. Prep all tasks first and then enable them with preemption
5733 	 * disabled.
5734 	 *
5735 	 * All cgroups should be initialized before scx_ops_init_task() so that
5736 	 * the BPF scheduler can reliably track each task's cgroup membership
5737 	 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5738 	 * migrations while tasks are being initialized so that
5739 	 * scx_cgroup_can_attach() never sees uninitialized tasks.
5740 	 */
5741 	scx_cgroup_lock();
5742 	ret = scx_cgroup_init();
5743 	if (ret)
5744 		goto err_disable_unlock_all;
5745 
5746 	scx_task_iter_start(&sti);
5747 	while ((p = scx_task_iter_next_locked(&sti))) {
5748 		/*
5749 		 * @p may already be dead, have lost all its usage counts and
5750 		 * be waiting for an RCU grace period before being freed. @p can't
5751 		 * be initialized for SCX in such cases and should be ignored.
5752 		 */
5753 		if (!tryget_task_struct(p))
5754 			continue;
5755 
5756 		scx_task_iter_unlock(&sti);
5757 
5758 		ret = scx_ops_init_task(p, task_group(p), false);
5759 		if (ret) {
5760 			put_task_struct(p);
5761 			scx_task_iter_relock(&sti);
5762 			scx_task_iter_stop(&sti);
5763 			scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5764 				      ret, p->comm, p->pid);
5765 			goto err_disable_unlock_all;
5766 		}
5767 
5768 		scx_set_task_state(p, SCX_TASK_READY);
5769 
5770 		put_task_struct(p);
5771 		scx_task_iter_relock(&sti);
5772 	}
5773 	scx_task_iter_stop(&sti);
5774 	scx_cgroup_unlock();
5775 	percpu_up_write(&scx_fork_rwsem);
5776 
5777 	/*
5778 	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5779 	 * all eligible tasks.
5780 	 */
5781 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5782 	static_branch_enable(&__scx_ops_enabled);
5783 
5784 	/*
5785 	 * We're fully committed and can't fail. The task READY -> ENABLED
5786 	 * transitions here are synchronized against sched_ext_free() through
5787 	 * scx_tasks_lock.
5788 	 */
5789 	percpu_down_write(&scx_fork_rwsem);
5790 	scx_task_iter_start(&sti);
5791 	while ((p = scx_task_iter_next_locked(&sti))) {
5792 		const struct sched_class *old_class = p->sched_class;
5793 		const struct sched_class *new_class =
5794 			__setscheduler_class(p->policy, p->prio);
5795 		struct sched_enq_and_set_ctx ctx;
5796 
5797 		if (old_class != new_class && p->se.sched_delayed)
5798 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5799 
5800 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5801 
5802 		p->scx.slice = SCX_SLICE_DFL;
5803 		p->sched_class = new_class;
5804 		check_class_changing(task_rq(p), p, old_class);
5805 
5806 		sched_enq_and_set_task(&ctx);
5807 
5808 		check_class_changed(task_rq(p), p, old_class, p->prio);
5809 	}
5810 	scx_task_iter_stop(&sti);
5811 	percpu_up_write(&scx_fork_rwsem);
5812 
5813 	scx_ops_bypass(false);
5814 
5815 	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5816 		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5817 		goto err_disable;
5818 	}
5819 
5820 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5821 		static_branch_enable(&__scx_switched_all);
5822 
5823 	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5824 		scx_ops.name, scx_switched_all() ? "" : " (partial)");
5825 	kobject_uevent(scx_root_kobj, KOBJ_ADD);
5826 	mutex_unlock(&scx_ops_enable_mutex);
5827 
5828 	atomic_long_inc(&scx_enable_seq);
5829 
5830 	return 0;
5831 
5832 err_del:
5833 	kobject_del(scx_root_kobj);
5834 err:
5835 	kobject_put(scx_root_kobj);
5836 	scx_root_kobj = NULL;
5837 	if (scx_exit_info) {
5838 		free_exit_info(scx_exit_info);
5839 		scx_exit_info = NULL;
5840 	}
5841 err_unlock:
5842 	mutex_unlock(&scx_ops_enable_mutex);
5843 	return ret;
5844 
5845 err_disable_unlock_all:
5846 	scx_cgroup_unlock();
5847 	percpu_up_write(&scx_fork_rwsem);
5848 	scx_ops_bypass(false);
5849 err_disable:
5850 	mutex_unlock(&scx_ops_enable_mutex);
5851 	/*
5852 	 * Returning an error code here would not pass all the error information
5853 	 * to userspace. Record errno using scx_ops_error() for cases where
5854 	 * scx_ops_error() wasn't already invoked, and exit indicating success so
5855 	 * that the error is reported through ops.exit() with all the details.
5856 	 *
5857 	 * Flush scx_ops_disable_work to ensure that error is reported before
5858 	 * init completion.
5859 	 */
5860 	scx_ops_error("scx_ops_enable() failed (%d)", ret);
5861 	kthread_flush_work(&scx_ops_disable_work);
5862 	return 0;
5863 }
5864 
5865 
5866 /********************************************************************************
5867  * bpf_struct_ops plumbing.
5868  */
5869 #include <linux/bpf_verifier.h>
5870 #include <linux/bpf.h>
5871 #include <linux/btf.h>
5872 
5873 static const struct btf_type *task_struct_type;
5874 
5875 static bool bpf_scx_is_valid_access(int off, int size,
5876 				    enum bpf_access_type type,
5877 				    const struct bpf_prog *prog,
5878 				    struct bpf_insn_access_aux *info)
5879 {
5880 	if (type != BPF_READ)
5881 		return false;
5882 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5883 		return false;
5884 	if (off % size != 0)
5885 		return false;
5886 
5887 	return btf_ctx_access(off, size, type, prog, info);
5888 }
5889 
5890 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5891 				     const struct bpf_reg_state *reg, int off,
5892 				     int size)
5893 {
5894 	const struct btf_type *t;
5895 
5896 	t = btf_type_by_id(reg->btf, reg->btf_id);
5897 	if (t == task_struct_type) {
5898 		if (off >= offsetof(struct task_struct, scx.slice) &&
5899 		    off + size <= offsetofend(struct task_struct, scx.slice))
5900 			return SCALAR_VALUE;
5901 		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5902 		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5903 			return SCALAR_VALUE;
5904 		if (off >= offsetof(struct task_struct, scx.disallow) &&
5905 		    off + size <= offsetofend(struct task_struct, scx.disallow))
5906 			return SCALAR_VALUE;
5907 	}
5908 
5909 	return -EACCES;
5910 }
5911 
5912 static const struct bpf_func_proto *
5913 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5914 {
5915 	switch (func_id) {
5916 	case BPF_FUNC_task_storage_get:
5917 		return &bpf_task_storage_get_proto;
5918 	case BPF_FUNC_task_storage_delete:
5919 		return &bpf_task_storage_delete_proto;
5920 	default:
5921 		return bpf_base_func_proto(func_id, prog);
5922 	}
5923 }
5924 
5925 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5926 	.get_func_proto = bpf_scx_get_func_proto,
5927 	.is_valid_access = bpf_scx_is_valid_access,
5928 	.btf_struct_access = bpf_scx_btf_struct_access,
5929 };
5930 
5931 static int bpf_scx_init_member(const struct btf_type *t,
5932 			       const struct btf_member *member,
5933 			       void *kdata, const void *udata)
5934 {
5935 	const struct sched_ext_ops *uops = udata;
5936 	struct sched_ext_ops *ops = kdata;
5937 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5938 	int ret;
5939 
5940 	switch (moff) {
5941 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
5942 		if (*(u32 *)(udata + moff) > INT_MAX)
5943 			return -E2BIG;
5944 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
5945 		return 1;
5946 	case offsetof(struct sched_ext_ops, flags):
5947 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5948 			return -EINVAL;
5949 		ops->flags = *(u64 *)(udata + moff);
5950 		return 1;
5951 	case offsetof(struct sched_ext_ops, name):
5952 		ret = bpf_obj_name_cpy(ops->name, uops->name,
5953 				       sizeof(ops->name));
5954 		if (ret < 0)
5955 			return ret;
5956 		if (ret == 0)
5957 			return -EINVAL;
5958 		return 1;
5959 	case offsetof(struct sched_ext_ops, timeout_ms):
5960 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5961 		    SCX_WATCHDOG_MAX_TIMEOUT)
5962 			return -E2BIG;
5963 		ops->timeout_ms = *(u32 *)(udata + moff);
5964 		return 1;
5965 	case offsetof(struct sched_ext_ops, exit_dump_len):
5966 		ops->exit_dump_len =
5967 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5968 		return 1;
5969 	case offsetof(struct sched_ext_ops, hotplug_seq):
5970 		ops->hotplug_seq = *(u64 *)(udata + moff);
5971 		return 1;
5972 	}
5973 
5974 	return 0;
5975 }
5976 
5977 static int bpf_scx_check_member(const struct btf_type *t,
5978 				const struct btf_member *member,
5979 				const struct bpf_prog *prog)
5980 {
5981 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5982 
5983 	switch (moff) {
5984 	case offsetof(struct sched_ext_ops, init_task):
5985 #ifdef CONFIG_EXT_GROUP_SCHED
5986 	case offsetof(struct sched_ext_ops, cgroup_init):
5987 	case offsetof(struct sched_ext_ops, cgroup_exit):
5988 	case offsetof(struct sched_ext_ops, cgroup_prep_move):
5989 #endif
5990 	case offsetof(struct sched_ext_ops, cpu_online):
5991 	case offsetof(struct sched_ext_ops, cpu_offline):
5992 	case offsetof(struct sched_ext_ops, init):
5993 	case offsetof(struct sched_ext_ops, exit):
5994 		break;
5995 	default:
5996 		if (prog->sleepable)
5997 			return -EINVAL;
5998 	}
5999 
6000 	return 0;
6001 }
6002 
6003 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
6004 {
6005 	return scx_ops_enable(kdata, link);
6006 }
6007 
6008 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
6009 {
6010 	scx_ops_disable(SCX_EXIT_UNREG);
6011 	kthread_flush_work(&scx_ops_disable_work);
6012 }
6013 
6014 static int bpf_scx_init(struct btf *btf)
6015 {
6016 	task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
6017 
6018 	return 0;
6019 }
6020 
6021 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
6022 {
6023 	/*
6024 	 * sched_ext does not support updating the actively-loaded BPF
6025 	 * scheduler, as registering a BPF scheduler can always fail if the
6026 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
6027 	 * etc. Similarly, we can always race with unregistration happening
6028 	 * elsewhere, such as with sysrq.
6029 	 */
6030 	return -EOPNOTSUPP;
6031 }
6032 
6033 static int bpf_scx_validate(void *kdata)
6034 {
6035 	return 0;
6036 }
6037 
6038 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
6039 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
6040 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
6041 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
6042 static void sched_ext_ops__tick(struct task_struct *p) {}
6043 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
6044 static void sched_ext_ops__running(struct task_struct *p) {}
6045 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
6046 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
6047 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
6048 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
6049 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
6050 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
6051 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
6052 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
6053 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
6054 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
6055 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
6056 static void sched_ext_ops__enable(struct task_struct *p) {}
6057 static void sched_ext_ops__disable(struct task_struct *p) {}
6058 #ifdef CONFIG_EXT_GROUP_SCHED
6059 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
6060 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
6061 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
6062 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6063 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6064 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
6065 #endif
6066 static void sched_ext_ops__cpu_online(s32 cpu) {}
6067 static void sched_ext_ops__cpu_offline(s32 cpu) {}
6068 static s32 sched_ext_ops__init(void) { return -EINVAL; }
6069 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
6070 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
6071 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
6072 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
6073 
6074 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
6075 	.select_cpu		= sched_ext_ops__select_cpu,
6076 	.enqueue		= sched_ext_ops__enqueue,
6077 	.dequeue		= sched_ext_ops__dequeue,
6078 	.dispatch		= sched_ext_ops__dispatch,
6079 	.tick			= sched_ext_ops__tick,
6080 	.runnable		= sched_ext_ops__runnable,
6081 	.running		= sched_ext_ops__running,
6082 	.stopping		= sched_ext_ops__stopping,
6083 	.quiescent		= sched_ext_ops__quiescent,
6084 	.yield			= sched_ext_ops__yield,
6085 	.core_sched_before	= sched_ext_ops__core_sched_before,
6086 	.set_weight		= sched_ext_ops__set_weight,
6087 	.set_cpumask		= sched_ext_ops__set_cpumask,
6088 	.update_idle		= sched_ext_ops__update_idle,
6089 	.cpu_acquire		= sched_ext_ops__cpu_acquire,
6090 	.cpu_release		= sched_ext_ops__cpu_release,
6091 	.init_task		= sched_ext_ops__init_task,
6092 	.exit_task		= sched_ext_ops__exit_task,
6093 	.enable			= sched_ext_ops__enable,
6094 	.disable		= sched_ext_ops__disable,
6095 #ifdef CONFIG_EXT_GROUP_SCHED
6096 	.cgroup_init		= sched_ext_ops__cgroup_init,
6097 	.cgroup_exit		= sched_ext_ops__cgroup_exit,
6098 	.cgroup_prep_move	= sched_ext_ops__cgroup_prep_move,
6099 	.cgroup_move		= sched_ext_ops__cgroup_move,
6100 	.cgroup_cancel_move	= sched_ext_ops__cgroup_cancel_move,
6101 	.cgroup_set_weight	= sched_ext_ops__cgroup_set_weight,
6102 #endif
6103 	.cpu_online		= sched_ext_ops__cpu_online,
6104 	.cpu_offline		= sched_ext_ops__cpu_offline,
6105 	.init			= sched_ext_ops__init,
6106 	.exit			= sched_ext_ops__exit,
6107 	.dump			= sched_ext_ops__dump,
6108 	.dump_cpu		= sched_ext_ops__dump_cpu,
6109 	.dump_task		= sched_ext_ops__dump_task,
6110 };
6111 
6112 static struct bpf_struct_ops bpf_sched_ext_ops = {
6113 	.verifier_ops = &bpf_scx_verifier_ops,
6114 	.reg = bpf_scx_reg,
6115 	.unreg = bpf_scx_unreg,
6116 	.check_member = bpf_scx_check_member,
6117 	.init_member = bpf_scx_init_member,
6118 	.init = bpf_scx_init,
6119 	.update = bpf_scx_update,
6120 	.validate = bpf_scx_validate,
6121 	.name = "sched_ext_ops",
6122 	.owner = THIS_MODULE,
6123 	.cfi_stubs = &__bpf_ops_sched_ext_ops
6124 };
6125 
6126 
6127 /********************************************************************************
6128  * System integration and init.
6129  */
6130 
6131 static void sysrq_handle_sched_ext_reset(u8 key)
6132 {
6133 	if (scx_ops_helper)
6134 		scx_ops_disable(SCX_EXIT_SYSRQ);
6135 	else
6136 		pr_info("sched_ext: BPF scheduler not yet used\n");
6137 }
6138 
6139 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
6140 	.handler	= sysrq_handle_sched_ext_reset,
6141 	.help_msg	= "reset-sched-ext(S)",
6142 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
6143 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
6144 };
6145 
6146 static void sysrq_handle_sched_ext_dump(u8 key)
6147 {
6148 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
6149 
6150 	if (scx_enabled())
6151 		scx_dump_state(&ei, 0);
6152 }
6153 
6154 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
6155 	.handler	= sysrq_handle_sched_ext_dump,
6156 	.help_msg	= "dump-sched-ext(D)",
6157 	.action_msg	= "Trigger sched_ext debug dump",
6158 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
6159 };
6160 
6161 static bool can_skip_idle_kick(struct rq *rq)
6162 {
6163 	lockdep_assert_rq_held(rq);
6164 
6165 	/*
6166 	 * We can skip idle kicking if @rq is going to go through at least one
6167 	 * full SCX scheduling cycle before going idle. Just checking whether
6168 	 * curr is not idle is insufficient because we could be racing
6169 	 * balance_one() trying to pull the next task from a remote rq, which
6170 	 * may fail, and @rq may become idle afterwards.
6171 	 *
6172 	 * The race window is small and we don't and can't guarantee that @rq is
6173 	 * only kicked while idle anyway. Skip only when sure.
6174 	 */
6175 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
6176 }
6177 
6178 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
6179 {
6180 	struct rq *rq = cpu_rq(cpu);
6181 	struct scx_rq *this_scx = &this_rq->scx;
6182 	bool should_wait = false;
6183 	unsigned long flags;
6184 
6185 	raw_spin_rq_lock_irqsave(rq, flags);
6186 
6187 	/*
6188 	 * During CPU hotplug, a CPU may depend on kicking itself to make
6189 	 * forward progress. Allow kicking self regardless of online state.
6190 	 */
6191 	if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
6192 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
6193 			if (rq->curr->sched_class == &ext_sched_class)
6194 				rq->curr->scx.slice = 0;
6195 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6196 		}
6197 
6198 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
6199 			pseqs[cpu] = rq->scx.pnt_seq;
6200 			should_wait = true;
6201 		}
6202 
6203 		resched_curr(rq);
6204 	} else {
6205 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6206 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6207 	}
6208 
6209 	raw_spin_rq_unlock_irqrestore(rq, flags);
6210 
6211 	return should_wait;
6212 }
6213 
6214 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
6215 {
6216 	struct rq *rq = cpu_rq(cpu);
6217 	unsigned long flags;
6218 
6219 	raw_spin_rq_lock_irqsave(rq, flags);
6220 
6221 	if (!can_skip_idle_kick(rq) &&
6222 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
6223 		resched_curr(rq);
6224 
6225 	raw_spin_rq_unlock_irqrestore(rq, flags);
6226 }
6227 
6228 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
6229 {
6230 	struct rq *this_rq = this_rq();
6231 	struct scx_rq *this_scx = &this_rq->scx;
6232 	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
6233 	bool should_wait = false;
6234 	s32 cpu;
6235 
6236 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
6237 		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
6238 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
6239 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6240 	}
6241 
6242 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
6243 		kick_one_cpu_if_idle(cpu, this_rq);
6244 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6245 	}
6246 
6247 	if (!should_wait)
6248 		return;
6249 
6250 	for_each_cpu(cpu, this_scx->cpus_to_wait) {
6251 		unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
6252 
6253 		if (cpu != cpu_of(this_rq)) {
6254 			/*
6255 			 * Pairs with smp_store_release() issued by this CPU in
6256 			 * switch_class() on the resched path.
6257 			 *
6258 			 * We busy-wait here to guarantee that no other task can
6259 			 * be scheduled on our core before the target CPU has
6260 			 * entered the resched path.
6261 			 */
6262 			while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
6263 				cpu_relax();
6264 		}
6265 
6266 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6267 	}
6268 }
6269 
6270 /**
6271  * print_scx_info - print out sched_ext scheduler state
6272  * @log_lvl: the log level to use when printing
6273  * @p: target task
6274  *
6275  * If a sched_ext scheduler is enabled, print the name and state of the
6276  * scheduler. If @p is on sched_ext, print further information about the task.
6277  *
6278  * This function can be safely called on any task as long as the task_struct
6279  * itself is accessible. While safe, this function isn't synchronized and may
6280  * print out mixed-up or garbled output of limited length.
6281  */
6282 void print_scx_info(const char *log_lvl, struct task_struct *p)
6283 {
6284 	enum scx_ops_enable_state state = scx_ops_enable_state();
6285 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
6286 	char runnable_at_buf[22] = "?";
6287 	struct sched_class *class;
6288 	unsigned long runnable_at;
6289 
6290 	if (state == SCX_OPS_DISABLED)
6291 		return;
6292 
6293 	/*
6294 	 * Carefully check if the task was running on sched_ext, and then
6295 	 * carefully copy the time it's been runnable, and its state.
6296 	 */
6297 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6298 	    class != &ext_sched_class) {
6299 		printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
6300 		       scx_ops_enable_state_str[state], all);
6301 		return;
6302 	}
6303 
6304 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6305 				      sizeof(runnable_at)))
6306 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6307 			  jiffies_delta_msecs(runnable_at, jiffies));
6308 
6309 	/* print everything onto one line to conserve console space */
6310 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6311 	       log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
6312 	       runnable_at_buf);
6313 }
6314 
6315 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6316 {
6317 	/*
6318 	 * SCX schedulers often have userspace components which are sometimes
6319 	 * involved in critical scheduling paths. PM operations involve freezing
6320 	 * userspace which can lead to scheduling misbehaviors including stalls.
6321 	 * Let's bypass while PM operations are in progress.
6322 	 */
6323 	switch (event) {
6324 	case PM_HIBERNATION_PREPARE:
6325 	case PM_SUSPEND_PREPARE:
6326 	case PM_RESTORE_PREPARE:
6327 		scx_ops_bypass(true);
6328 		break;
6329 	case PM_POST_HIBERNATION:
6330 	case PM_POST_SUSPEND:
6331 	case PM_POST_RESTORE:
6332 		scx_ops_bypass(false);
6333 		break;
6334 	}
6335 
6336 	return NOTIFY_OK;
6337 }
6338 
6339 static struct notifier_block scx_pm_notifier = {
6340 	.notifier_call = scx_pm_handler,
6341 };
6342 
6343 void __init init_sched_ext_class(void)
6344 {
6345 	s32 cpu, v;
6346 
6347 	/*
6348 	 * The following is to prevent the compiler from optimizing out the enum
6349 	 * definitions so that BPF scheduler implementations can use them
6350 	 * through the generated vmlinux.h.
6351 	 */
6352 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6353 		   SCX_TG_ONLINE);
6354 
6355 	BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
6356 #ifdef CONFIG_SMP
6357 	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
6358 	BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
6359 #endif
6360 	scx_kick_cpus_pnt_seqs =
6361 		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6362 			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
6363 	BUG_ON(!scx_kick_cpus_pnt_seqs);
6364 
6365 	for_each_possible_cpu(cpu) {
6366 		struct rq *rq = cpu_rq(cpu);
6367 
6368 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6369 		INIT_LIST_HEAD(&rq->scx.runnable_list);
6370 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6371 
6372 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
6373 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
6374 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
6375 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
6376 		init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6377 		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6378 
6379 		if (cpu_online(cpu))
6380 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6381 	}
6382 
6383 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6384 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6385 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6386 }
6387 
6388 
6389 /********************************************************************************
6390  * Helpers that can be called from the BPF scheduler.
6391  */
6392 #include <linux/btf_ids.h>
6393 
6394 __bpf_kfunc_start_defs();
6395 
6396 static bool check_builtin_idle_enabled(void)
6397 {
6398 	if (static_branch_likely(&scx_builtin_idle_enabled))
6399 		return true;
6400 
6401 	scx_ops_error("built-in idle tracking is disabled");
6402 	return false;
6403 }
6404 
6405 /**
6406  * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
6407  * @p: task_struct to select a CPU for
6408  * @prev_cpu: CPU @p was on previously
6409  * @wake_flags: %SCX_WAKE_* flags
6410  * @is_idle: out parameter indicating whether the returned CPU is idle
6411  *
6412  * Can only be called from ops.select_cpu() if the built-in CPU selection is
6413  * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
6414  * @p, @prev_cpu and @wake_flags match ops.select_cpu().
6415  *
6416  * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
6417  * currently idle and thus a good candidate for direct dispatching.
6418  */
6419 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
6420 				       u64 wake_flags, bool *is_idle)
6421 {
6422 	if (!check_builtin_idle_enabled())
6423 		goto prev_cpu;
6424 
6425 	if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
6426 		goto prev_cpu;
6427 
6428 #ifdef CONFIG_SMP
6429 	return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
6430 #endif
6431 
6432 prev_cpu:
6433 	*is_idle = false;
6434 	return prev_cpu;
6435 }
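/*
 * Illustrative usage sketch (editor's addition, not part of this file): an
 * ops.select_cpu() implementation that defers to the built-in idle tracking
 * and directly dispatches when an idle CPU was found. Assumes the BPF-side
 * scx conveniences (BPF_STRUCT_OPS(), SCX_SLICE_DFL, etc.) from
 * tools/sched_ext.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */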
6436 
6437 __bpf_kfunc_end_defs();
6438 
6439 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
6440 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
6441 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
6442 
6443 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
6444 	.owner			= THIS_MODULE,
6445 	.set			= &scx_kfunc_ids_select_cpu,
6446 };
6447 
6448 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6449 {
6450 	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6451 		return false;
6452 
6453 	lockdep_assert_irqs_disabled();
6454 
6455 	if (unlikely(!p)) {
6456 		scx_ops_error("called with NULL task");
6457 		return false;
6458 	}
6459 
6460 	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6461 		scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
6462 		return false;
6463 	}
6464 
6465 	return true;
6466 }
6467 
6468 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6469 				  u64 enq_flags)
6470 {
6471 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6472 	struct task_struct *ddsp_task;
6473 
6474 	ddsp_task = __this_cpu_read(direct_dispatch_task);
6475 	if (ddsp_task) {
6476 		mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6477 		return;
6478 	}
6479 
6480 	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6481 		scx_ops_error("dispatch buffer overflow");
6482 		return;
6483 	}
6484 
6485 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6486 		.task = p,
6487 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6488 		.dsq_id = dsq_id,
6489 		.enq_flags = enq_flags,
6490 	};
6491 }
6492 
6493 __bpf_kfunc_start_defs();
6494 
6495 /**
6496  * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6497  * @p: task_struct to insert
6498  * @dsq_id: DSQ to insert into
6499  * @slice: duration @p can run for in nsecs, 0 to keep the current value
6500  * @enq_flags: SCX_ENQ_*
6501  *
6502  * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6503  * call this function spuriously. Can be called from ops.enqueue(),
6504  * ops.select_cpu(), and ops.dispatch().
6505  *
6506  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6507  * and @p must match the task being enqueued.
6508  *
6509  * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6510  * will be directly inserted into the corresponding dispatch queue after
6511  * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6512  * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6513  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6514  * task is inserted.
6515  *
6516  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6517  * and this function can be called up to ops.dispatch_max_batch times to insert
6518  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of remaining
6519  * slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the counter.
6520  *
6521  * This function doesn't have any locking restrictions and may be called under
6522  * BPF locks (in the future when BPF introduces more flexible locking).
6523  *
6524  * @p is allowed to run for @slice. The scheduling path is triggered on slice
6525  * exhaustion. If zero, the current residual slice is maintained. If
6526  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6527  * scx_bpf_kick_cpu() to trigger scheduling.
6528  */
6529 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6530 				    u64 enq_flags)
6531 {
6532 	if (!scx_dsq_insert_preamble(p, enq_flags))
6533 		return;
6534 
6535 	if (slice)
6536 		p->scx.slice = slice;
6537 	else
6538 		p->scx.slice = p->scx.slice ?: 1;
6539 
6540 	scx_dsq_insert_commit(p, dsq_id, enq_flags);
6541 }
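/*
 * Illustrative usage sketch (editor's addition): the simplest ops.enqueue()
 * inserts every task into the global FIFO with the default slice. Assumes
 * BPF_STRUCT_OPS() and SCX_SLICE_DFL from the BPF-side scx headers.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */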
6542 
6543 /* for backward compatibility, will be removed in v6.15 */
6544 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6545 				  u64 enq_flags)
6546 {
6547 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6548 	scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6549 }
6550 
6551 /**
6552  * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6553  * @p: task_struct to insert
6554  * @dsq_id: DSQ to insert into
6555  * @slice: duration @p can run for in nsecs, 0 to keep the current value
6556  * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6557  * @enq_flags: SCX_ENQ_*
6558  *
6559  * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6560  * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6561  * are identical to scx_bpf_dsq_insert().
6562  *
6563  * @vtime ordering is according to time_before64() which considers wrapping. A
6564  * numerically larger vtime may indicate an earlier position in the ordering and
6565  * vice-versa.
6566  *
6567  * A DSQ can only be used as a FIFO or priority queue at any given time and this
6568  * function must not be called on a DSQ which already has one or more FIFO tasks
6569  * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6570  * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6571  */
6572 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6573 					  u64 slice, u64 vtime, u64 enq_flags)
6574 {
6575 	if (!scx_dsq_insert_preamble(p, enq_flags))
6576 		return;
6577 
6578 	if (slice)
6579 		p->scx.slice = slice;
6580 	else
6581 		p->scx.slice = p->scx.slice ?: 1;
6582 
6583 	p->scx.dsq_vtime = vtime;
6584 
6585 	scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6586 }
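/*
 * Illustrative usage sketch (editor's addition): a weighted-fair ops.enqueue()
 * might queue onto a custom vtime-ordered DSQ instead. SHARED_DSQ is a
 * hypothetical DSQ id that the scheduler created with scx_bpf_create_dsq()
 * from ops.init(); p->scx.dsq_vtime is assumed to be advanced by the
 * scheduler's ops.running()/ops.stopping() callbacks.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 */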
6587 
6588 /* for backward compatibility, will be removed in v6.15 */
6589 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6590 					u64 slice, u64 vtime, u64 enq_flags)
6591 {
6592 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6593 	scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6594 }
6595 
6596 __bpf_kfunc_end_defs();
6597 
6598 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6599 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6600 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6601 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6602 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6603 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6604 
6605 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6606 	.owner			= THIS_MODULE,
6607 	.set			= &scx_kfunc_ids_enqueue_dispatch,
6608 };
6609 
6610 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6611 			 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6612 {
6613 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6614 	struct rq *this_rq, *src_rq, *locked_rq;
6615 	bool dispatched = false;
6616 	bool in_balance;
6617 	unsigned long flags;
6618 
6619 	if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6620 		return false;
6621 
6622 	/*
6623 	 * Can be called from either ops.dispatch() locking this_rq() or any
6624 	 * context where no rq lock is held. If latter, lock @p's task_rq which
6625 	 * context where no rq lock is held. If the latter, lock @p's task_rq which
6626 	 */
6627 	src_rq = task_rq(p);
6628 
6629 	local_irq_save(flags);
6630 	this_rq = this_rq();
6631 	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6632 
6633 	if (in_balance) {
6634 		if (this_rq != src_rq) {
6635 			raw_spin_rq_unlock(this_rq);
6636 			raw_spin_rq_lock(src_rq);
6637 		}
6638 	} else {
6639 		raw_spin_rq_lock(src_rq);
6640 	}
6641 
6642 	/*
6643 	 * If the BPF scheduler keeps calling this function repeatedly, it can
6644 	 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
6645 	 * breather if necessary.
6646 	 */
6647 	scx_ops_breather(src_rq);
6648 
6649 	locked_rq = src_rq;
6650 	raw_spin_lock(&src_dsq->lock);
6651 
6652 	/*
6653 	 * Did someone else get to it? @p could have already left $src_dsq, got
6654 	 * re-enqueued, or be in the process of being consumed by someone else.
6655 	 */
6656 	if (unlikely(p->scx.dsq != src_dsq ||
6657 		     u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6658 		     p->scx.holding_cpu >= 0) ||
6659 	    WARN_ON_ONCE(src_rq != task_rq(p))) {
6660 		raw_spin_unlock(&src_dsq->lock);
6661 		goto out;
6662 	}
6663 
6664 	/* @p is still on $src_dsq and stable, determine the destination */
6665 	dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6666 
6667 	/*
6668 	 * Apply vtime and slice updates before moving so that the new time is
6669 	 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6670 	 * this is safe as we're locking it.
6671 	 */
6672 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6673 		p->scx.dsq_vtime = kit->vtime;
6674 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6675 		p->scx.slice = kit->slice;
6676 
6677 	/* execute move */
6678 	locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
6679 	dispatched = true;
6680 out:
6681 	if (in_balance) {
6682 		if (this_rq != locked_rq) {
6683 			raw_spin_rq_unlock(locked_rq);
6684 			raw_spin_rq_lock(this_rq);
6685 		}
6686 	} else {
6687 		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6688 	}
6689 
6690 	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6691 			       __SCX_DSQ_ITER_HAS_VTIME);
6692 	return dispatched;
6693 }
6694 
6695 __bpf_kfunc_start_defs();
6696 
6697 /**
6698  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6699  *
6700  * Can only be called from ops.dispatch().
6701  */
6702 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6703 {
6704 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6705 		return 0;
6706 
6707 	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6708 }
6709 
6710 /**
6711  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6712  *
6713  * Cancel the latest dispatch. Can be called multiple times to cancel further
6714  * dispatches. Can only be called from ops.dispatch().
6715  */
6716 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6717 {
6718 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6719 
6720 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6721 		return;
6722 
6723 	if (dspc->cursor > 0)
6724 		dspc->cursor--;
6725 	else
6726 		scx_ops_error("dispatch buffer underflow");
6727 }
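/*
 * Illustrative usage sketch (editor's addition): ops.dispatch() can bound how
 * many tasks it inserts per invocation with the remaining slot count;
 * scx_bpf_dispatch_cancel() would undo the most recent insertion if the
 * scheduler changes its mind before returning. next_task_to_run() stands in
 * for hypothetical scheduler-side selection logic.
 *
 *	while (scx_bpf_dispatch_nr_slots()) {
 *		struct task_struct *p = next_task_to_run();
 *
 *		if (!p)
 *			break;
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *	}
 */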
6728 
6729 /**
6730  * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6731  * @dsq_id: DSQ to move task from
6732  *
6733  * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6734  * local DSQ for execution. Can only be called from ops.dispatch().
6735  *
6736  * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6737  * before trying to move from the specified DSQ. It may also grab rq locks and
6738  * thus can't be called under any BPF locks.
6739  *
6740  * Returns %true if a task has been moved, %false if there isn't any task to
6741  * move.
6742  */
6743 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6744 {
6745 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6746 	struct scx_dispatch_q *dsq;
6747 
6748 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6749 		return false;
6750 
6751 	flush_dispatch_buf(dspc->rq);
6752 
6753 	dsq = find_user_dsq(dsq_id);
6754 	if (unlikely(!dsq)) {
6755 		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6756 		return false;
6757 	}
6758 
6759 	if (consume_dispatch_q(dspc->rq, dsq)) {
6760 		/*
6761 		 * A successfully consumed task can be dequeued before it starts
6762 		 * running while the CPU is trying to migrate other dispatched
6763 		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6764 		 * local DSQ.
6765 		 */
6766 		dspc->nr_tasks++;
6767 		return true;
6768 	} else {
6769 		return false;
6770 	}
6771 }
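
/*
 * Illustrative sketch (not part of the kernel build): the common
 * ops.dispatch() pattern of pulling work from a shared user DSQ into the
 * local DSQ of the dispatching CPU. SHARED_DSQ is a hypothetical DSQ ID
 * created with scx_bpf_create_dsq() and BPF_STRUCT_OPS() is assumed from the
 * scx tooling headers.
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */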
6772 
6773 /* for backward compatibility, will be removed in v6.15 */
6774 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6775 {
6776 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6777 	return scx_bpf_dsq_move_to_local(dsq_id);
6778 }
6779 
6780 /**
6781  * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6782  * @it__iter: DSQ iterator in progress
6783  * @slice: duration the moved task can run for in nsecs
6784  *
6785  * Override the slice of the next task that will be moved from @it__iter using
6786  * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6787  * slice duration is kept.
6788  */
6789 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6790 					    u64 slice)
6791 {
6792 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6793 
6794 	kit->slice = slice;
6795 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6796 }
6797 
6798 /* for backward compatibility, will be removed in v6.15 */
6799 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6800 			struct bpf_iter_scx_dsq *it__iter, u64 slice)
6801 {
6802 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6803 	scx_bpf_dsq_move_set_slice(it__iter, slice);
6804 }
6805 
6806 /**
6807  * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6808  * @it__iter: DSQ iterator in progress
6809  * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6810  *
6811  * Override the vtime of the next task that will be moved from @it__iter using
6812  * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
6813  * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6814  * override is ignored and cleared.
6815  */
6816 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6817 					    u64 vtime)
6818 {
6819 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6820 
6821 	kit->vtime = vtime;
6822 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6823 }
6824 
6825 /* for backward compatibility, will be removed in v6.15 */
6826 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6827 			struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6828 {
6829 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6830 	scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6831 }
6832 
6833 /**
6834  * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6835  * @it__iter: DSQ iterator in progress
6836  * @p: task to transfer
6837  * @dsq_id: DSQ to move @p to
6838  * @enq_flags: SCX_ENQ_*
6839  *
6840  * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6841  * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6842  * be the destination.
6843  *
6844  * For the transfer to be successful, @p must still be on the DSQ and have been
6845  * queued before the DSQ iteration started. This function doesn't care whether
6846  * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6847  * been queued before the iteration started.
6848  *
6849  * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6850  *
6851  * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6852  * lock (e.g. BPF timers or SYSCALL programs).
6853  *
6854  * Returns %true if @p has been consumed, %false if @p had already been consumed
6855  * or dequeued.
6856  */
6857 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6858 				  struct task_struct *p, u64 dsq_id,
6859 				  u64 enq_flags)
6860 {
6861 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6862 			    p, dsq_id, enq_flags);
6863 }
6864 
6865 /* for backward compatibility, will be removed in v6.15 */
6866 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6867 					   struct task_struct *p, u64 dsq_id,
6868 					   u64 enq_flags)
6869 {
6870 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6871 	return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6872 }
6873 
6874 /**
6875  * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6876  * @it__iter: DSQ iterator in progress
6877  * @p: task to transfer
6878  * @dsq_id: DSQ to move @p to
6879  * @enq_flags: SCX_ENQ_*
6880  *
6881  * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6882  * priority queue of the DSQ specified by @dsq_id. The destination must be a
6883  * user DSQ as only user DSQs support priority queue.
6884  *
6885  * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6886  * and scx_bpf_dsq_move_set_vtime() to update.
6887  *
6888  * All other aspects are identical to scx_bpf_dsq_move(). See
6889  * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6890  */
6891 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6892 					struct task_struct *p, u64 dsq_id,
6893 					u64 enq_flags)
6894 {
6895 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6896 			    p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6897 }
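
/*
 * Illustrative sketch (not part of the kernel build): scanning a user DSQ
 * with the iterator kfuncs defined later in this file and promoting a
 * selected task into a vtime-ordered DSQ, overriding its vtime first.
 * SHARED_DSQ, HIPRI_DSQ and should_promote() are hypothetical; in practice
 * the bpf_for_each() convenience macro from the scx tooling headers is
 * typically used instead of open-coding the iterator.
 *
 *	struct bpf_iter_scx_dsq it;
 *	struct task_struct *p;
 *
 *	if (!bpf_iter_scx_dsq_new(&it, SHARED_DSQ, 0)) {
 *		while ((p = bpf_iter_scx_dsq_next(&it))) {
 *			if (!should_promote(p))
 *				continue;
 *
 *			scx_bpf_dsq_move_set_vtime(&it, p->scx.dsq_vtime);
 *			if (scx_bpf_dsq_move_vtime(&it, p, HIPRI_DSQ, 0))
 *				break;
 *		}
 *		bpf_iter_scx_dsq_destroy(&it);
 *	}
 */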
6898 
6899 /* for backward compatibility, will be removed in v6.15 */
6900 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6901 						 struct task_struct *p, u64 dsq_id,
6902 						 u64 enq_flags)
6903 {
6904 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6905 	return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6906 }
6907 
6908 __bpf_kfunc_end_defs();
6909 
6910 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6911 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6912 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6913 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6914 BTF_ID_FLAGS(func, scx_bpf_consume)
6915 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6916 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6917 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6918 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6919 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6920 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6921 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6922 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6923 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6924 
6925 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6926 	.owner			= THIS_MODULE,
6927 	.set			= &scx_kfunc_ids_dispatch,
6928 };
6929 
6930 __bpf_kfunc_start_defs();
6931 
6932 /**
6933  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6934  *
6935  * Iterate over all of the tasks currently enqueued on the local DSQ of the
6936  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6937  * processed tasks. Can only be called from ops.cpu_release().
6938  */
6939 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6940 {
6941 	LIST_HEAD(tasks);
6942 	u32 nr_enqueued = 0;
6943 	struct rq *rq;
6944 	struct task_struct *p, *n;
6945 
6946 	if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6947 		return 0;
6948 
6949 	rq = cpu_rq(smp_processor_id());
6950 	lockdep_assert_rq_held(rq);
6951 
6952 	/*
6953 	 * The BPF scheduler may choose to dispatch tasks back to
6954 	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6955 	 * first to avoid processing the same tasks repeatedly.
6956 	 */
6957 	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6958 				 scx.dsq_list.node) {
6959 		/*
6960 		 * If @p is being migrated, @p's current CPU may not agree with
6961 		 * its allowed CPUs and the migration_cpu_stop is about to
6962 		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6963 		 *
6964 		 * While racing sched property changes may also dequeue and
6965 		 * re-enqueue a migrating task while its current CPU and allowed
6966 		 * CPUs disagree, they use %ENQUEUE_RESTORE which, for running
6967 		 * tasks, bypasses ops.enqueue() and goes straight to the current
6968 		 * local DSQ, and thus are not visible to the BPF scheduler.
6969 		 */
6970 		if (p->migration_pending)
6971 			continue;
6972 
6973 		dispatch_dequeue(rq, p);
6974 		list_add_tail(&p->scx.dsq_list.node, &tasks);
6975 	}
6976 
6977 	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6978 		list_del_init(&p->scx.dsq_list.node);
6979 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6980 		nr_enqueued++;
6981 	}
6982 
6983 	return nr_enqueued;
6984 }
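
/*
 * Illustrative sketch (not part of the kernel build): when a higher priority
 * sched class takes over the CPU, ops.cpu_release() can hand the tasks that
 * were already sitting on the local DSQ back to the BPF scheduler so they can
 * be placed elsewhere. BPF_STRUCT_OPS() is assumed from the scx tooling
 * headers.
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */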
6985 
6986 __bpf_kfunc_end_defs();
6987 
6988 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6989 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6990 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6991 
6992 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6993 	.owner			= THIS_MODULE,
6994 	.set			= &scx_kfunc_ids_cpu_release,
6995 };
6996 
6997 __bpf_kfunc_start_defs();
6998 
6999 /**
7000  * scx_bpf_create_dsq - Create a custom DSQ
7001  * @dsq_id: DSQ to create
7002  * @node: NUMA node to allocate from
7003  *
7004  * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
7005  * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
7006  */
7007 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
7008 {
7009 	if (unlikely(node >= (int)nr_node_ids ||
7010 		     (node < 0 && node != NUMA_NO_NODE)))
7011 		return -EINVAL;
7012 	return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
7013 }
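
/*
 * Illustrative sketch (not part of the kernel build): shared DSQs are
 * typically created from the sleepable ops.init() callback. SHARED_DSQ is a
 * hypothetical ID chosen by the scheduler and -1 (%NUMA_NO_NODE) lets the
 * kernel pick the allocation node. BPF_STRUCT_OPS_SLEEPABLE() is assumed from
 * the scx tooling headers.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	}
 */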
7014 
7015 __bpf_kfunc_end_defs();
7016 
7017 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
7018 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
7019 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
7020 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
7021 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
7022 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
7023 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
7024 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
7025 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
7026 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
7027 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
7028 
7029 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
7030 	.owner			= THIS_MODULE,
7031 	.set			= &scx_kfunc_ids_unlocked,
7032 };
7033 
7034 __bpf_kfunc_start_defs();
7035 
7036 /**
7037  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
7038  * @cpu: cpu to kick
7039  * @flags: %SCX_KICK_* flags
7040  *
7041  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
7042  * trigger rescheduling on a busy CPU. This can be called from any online
7043  * scx_ops operation and the actual kicking is performed asynchronously through
7044  * an irq work.
7045  */
7046 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
7047 {
7048 	struct rq *this_rq;
7049 	unsigned long irq_flags;
7050 
7051 	if (!ops_cpu_valid(cpu, NULL))
7052 		return;
7053 
7054 	local_irq_save(irq_flags);
7055 
7056 	this_rq = this_rq();
7057 
7058 	/*
7059 	 * While bypassing for PM ops, IRQ handling may not be online which can
7060 	 * lead to irq_work_queue() malfunctions such as an infinite busy wait
7061 	 * for the IRQ status update. Suppress kicking.
7062 	 */
7063 	if (scx_rq_bypassing(this_rq))
7064 		goto out;
7065 
7066 	/*
7067 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
7068 	 * rq locks. We can probably be smarter and avoid bouncing if called
7069 	 * from ops which don't hold a rq lock.
7070 	 */
7071 	if (flags & SCX_KICK_IDLE) {
7072 		struct rq *target_rq = cpu_rq(cpu);
7073 
7074 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
7075 			scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
7076 
7077 		if (raw_spin_rq_trylock(target_rq)) {
7078 			if (can_skip_idle_kick(target_rq)) {
7079 				raw_spin_rq_unlock(target_rq);
7080 				goto out;
7081 			}
7082 			raw_spin_rq_unlock(target_rq);
7083 		}
7084 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
7085 	} else {
7086 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
7087 
7088 		if (flags & SCX_KICK_PREEMPT)
7089 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
7090 		if (flags & SCX_KICK_WAIT)
7091 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
7092 	}
7093 
7094 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
7095 out:
7096 	local_irq_restore(irq_flags);
7097 }
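
/*
 * Illustrative sketch (not part of the kernel build): after queueing a task on
 * a shared DSQ from ops.enqueue(), an idle CPU can be nudged to come and pick
 * it up. %SCX_KICK_IDLE only kicks the target if it is currently idle.
 * SHARED_DSQ and pick_target_cpu() are hypothetical; scx_bpf_dsq_insert() is
 * defined earlier in this file.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		s32 cpu = pick_target_cpu(p);
 *
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 *	}
 */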
7098 
7099 /**
7100  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
7101  * @dsq_id: id of the DSQ
7102  *
7103  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
7104  * -%ENOENT is returned.
7105  */
7106 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
7107 {
7108 	struct scx_dispatch_q *dsq;
7109 	s32 ret;
7110 
7111 	preempt_disable();
7112 
7113 	if (dsq_id == SCX_DSQ_LOCAL) {
7114 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
7115 		goto out;
7116 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
7117 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
7118 
7119 		if (ops_cpu_valid(cpu, NULL)) {
7120 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
7121 			goto out;
7122 		}
7123 	} else {
7124 		dsq = find_user_dsq(dsq_id);
7125 		if (dsq) {
7126 			ret = READ_ONCE(dsq->nr);
7127 			goto out;
7128 		}
7129 	}
7130 	ret = -ENOENT;
7131 out:
7132 	preempt_enable();
7133 	return ret;
7134 }
7135 
7136 /**
7137  * scx_bpf_destroy_dsq - Destroy a custom DSQ
7138  * @dsq_id: DSQ to destroy
7139  *
7140  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
7141  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
7142  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
7143  * which doesn't exist. Can be called from any online scx_ops operations.
7144  */
7145 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
7146 {
7147 	destroy_dsq(dsq_id);
7148 }
7149 
7150 /**
7151  * bpf_iter_scx_dsq_new - Create a DSQ iterator
7152  * @it: iterator to initialize
7153  * @dsq_id: DSQ to iterate
7154  * @flags: %SCX_DSQ_ITER_*
7155  *
7156  * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
7157  * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
7158  * tasks which are already queued when this function is invoked.
7159  */
7160 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
7161 				     u64 flags)
7162 {
7163 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7164 
7165 	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
7166 		     sizeof(struct bpf_iter_scx_dsq));
7167 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
7168 		     __alignof__(struct bpf_iter_scx_dsq));
7169 
7170 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
7171 		return -EINVAL;
7172 
7173 	kit->dsq = find_user_dsq(dsq_id);
7174 	if (!kit->dsq)
7175 		return -ENOENT;
7176 
7177 	INIT_LIST_HEAD(&kit->cursor.node);
7178 	kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
7179 	kit->cursor.priv = READ_ONCE(kit->dsq->seq);
7180 
7181 	return 0;
7182 }
7183 
7184 /**
7185  * bpf_iter_scx_dsq_next - Progress a DSQ iterator
7186  * @it: iterator to progress
7187  *
7188  * Return the next task. See bpf_iter_scx_dsq_new().
7189  */
7190 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
7191 {
7192 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7193 	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
7194 	struct task_struct *p;
7195 	unsigned long flags;
7196 
7197 	if (!kit->dsq)
7198 		return NULL;
7199 
7200 	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7201 
7202 	if (list_empty(&kit->cursor.node))
7203 		p = NULL;
7204 	else
7205 		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
7206 
7207 	/*
7208 	 * Only tasks which were queued before the iteration started are
7209 	 * visible. This bounds BPF iterations and guarantees that vtime never
7210 	 * jumps in the other direction while iterating.
7211 	 */
7212 	do {
7213 		p = nldsq_next_task(kit->dsq, p, rev);
7214 	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
7215 
7216 	if (p) {
7217 		if (rev)
7218 			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
7219 		else
7220 			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
7221 	} else {
7222 		list_del_init(&kit->cursor.node);
7223 	}
7224 
7225 	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7226 
7227 	return p;
7228 }
7229 
7230 /**
7231  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
7232  * @it: iterator to destroy
7233  *
7234  * Undo bpf_iter_scx_dsq_new().
7235  */
7236 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
7237 {
7238 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7239 
7240 	if (!kit->dsq)
7241 		return;
7242 
7243 	if (!list_empty(&kit->cursor.node)) {
7244 		unsigned long flags;
7245 
7246 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7247 		list_del_init(&kit->cursor.node);
7248 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7249 	}
7250 	kit->dsq = NULL;
7251 }
7252 
7253 __bpf_kfunc_end_defs();
7254 
7255 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
7256 			 char *fmt, unsigned long long *data, u32 data__sz)
7257 {
7258 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
7259 	s32 ret;
7260 
7261 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
7262 	    (data__sz && !data)) {
7263 		scx_ops_error("invalid data=%p and data__sz=%u",
7264 			      (void *)data, data__sz);
7265 		return -EINVAL;
7266 	}
7267 
7268 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
7269 	if (ret < 0) {
7270 		scx_ops_error("failed to read data fields (%d)", ret);
7271 		return ret;
7272 	}
7273 
7274 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
7275 				  &bprintf_data);
7276 	if (ret < 0) {
7277 		scx_ops_error("format preparation failed (%d)", ret);
7278 		return ret;
7279 	}
7280 
7281 	ret = bstr_printf(line_buf, line_size, fmt,
7282 			  bprintf_data.bin_args);
7283 	bpf_bprintf_cleanup(&bprintf_data);
7284 	if (ret < 0) {
7285 		scx_ops_error("(\"%s\", %p, %u) failed to format",
7286 			      fmt, data, data__sz);
7287 		return ret;
7288 	}
7289 
7290 	return ret;
7291 }
7292 
7293 static s32 bstr_format(struct scx_bstr_buf *buf,
7294 		       char *fmt, unsigned long long *data, u32 data__sz)
7295 {
7296 	return __bstr_format(buf->data, buf->line, sizeof(buf->line),
7297 			     fmt, data, data__sz);
7298 }
7299 
7300 __bpf_kfunc_start_defs();
7301 
7302 /**
7303  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
7304  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
7305  * @fmt: error message format string
7306  * @data: format string parameters packaged using ___bpf_fill() macro
7307  * @data__sz: @data len, must end in '__sz' for the verifier
7308  *
7309  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
7310  * disabling.
7311  */
7312 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
7313 				   unsigned long long *data, u32 data__sz)
7314 {
7315 	unsigned long flags;
7316 
7317 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7318 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7319 		scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
7320 				  scx_exit_bstr_buf.line);
7321 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7322 }
7323 
7324 /**
7325  * scx_bpf_error_bstr - Indicate fatal error
7326  * @fmt: error message format string
7327  * @data: format string parameters packaged using ___bpf_fill() macro
7328  * @data__sz: @data len, must end in '__sz' for the verifier
7329  *
7330  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
7331  * disabling.
7332  */
7333 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
7334 				    u32 data__sz)
7335 {
7336 	unsigned long flags;
7337 
7338 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7339 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7340 		scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
7341 				  scx_exit_bstr_buf.line);
7342 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7343 }
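
/*
 * Illustrative sketch (not part of the kernel build): BPF schedulers normally
 * reach the two _bstr kfuncs above through printf-like wrapper macros (e.g.
 * scx_bpf_error() and scx_bpf_exit() in the scx tooling headers) which package
 * the variadic arguments into the @data/@data__sz pair. SHARED_DSQ is
 * hypothetical.
 *
 *	ret = scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	if (ret)
 *		scx_bpf_error("failed to create DSQ 0x%llx (%d)", SHARED_DSQ, ret);
 */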
7344 
7345 /**
7346  * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7347  * @fmt: format string
7348  * @data: format string parameters packaged using ___bpf_fill() macro
7349  * @data__sz: @data len, must end in '__sz' for the verifier
7350  *
7351  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7352  * dump_task() to generate extra debug dump specific to the BPF scheduler.
7353  *
7354  * The extra dump may be multiple lines. A single line may be split over
7355  * multiple calls. The last line is automatically terminated.
7356  */
7357 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7358 				   u32 data__sz)
7359 {
7360 	struct scx_dump_data *dd = &scx_dump_data;
7361 	struct scx_bstr_buf *buf = &dd->buf;
7362 	s32 ret;
7363 
7364 	if (raw_smp_processor_id() != dd->cpu) {
7365 		scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7366 		return;
7367 	}
7368 
7369 	/* append the formatted string to the line buf */
7370 	ret = __bstr_format(buf->data, buf->line + dd->cursor,
7371 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7372 	if (ret < 0) {
7373 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7374 			  dd->prefix, fmt, data, data__sz, ret);
7375 		return;
7376 	}
7377 
7378 	dd->cursor += ret;
7379 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7380 
7381 	if (!dd->cursor)
7382 		return;
7383 
7384 	/*
7385 	 * If the line buf overflowed or ends in a newline, flush it into the
7386 	 * dump. This is to allow the caller to generate a single line over
7387 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7388 	 * the line buf, the only case which can lead to an unexpected
7389 	 * truncation is when the caller keeps generating newlines in the middle
7390 	 * of a line instead of at the end. Don't do that.
7391 	 */
7392 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7393 		ops_dump_flush();
7394 }
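
/*
 * Illustrative sketch (not part of the kernel build): ops.dump() and friends
 * can append scheduler-specific state to the debug dump through the
 * scx_bpf_dump() wrapper macro mentioned above, which ultimately feeds this
 * kfunc. nr_queued_in_bpf() is hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
 *	{
 *		scx_bpf_dump("example: queued=%llu\n", nr_queued_in_bpf());
 *	}
 */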
7395 
7396 /**
7397  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7398  * @cpu: CPU of interest
7399  *
7400  * Return the maximum relative capacity of @cpu in relation to the most
7401  * performant CPU in the system. The return value is in the range [1,
7402  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7403  */
7404 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7405 {
7406 	if (ops_cpu_valid(cpu, NULL))
7407 		return arch_scale_cpu_capacity(cpu);
7408 	else
7409 		return SCX_CPUPERF_ONE;
7410 }
7411 
7412 /**
7413  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7414  * @cpu: CPU of interest
7415  *
7416  * Return the current relative performance of @cpu in relation to its maximum.
7417  * The return value is in the range [1, %SCX_CPUPERF_ONE].
7418  *
7419  * The current performance level of a CPU in relation to the maximum performance
7420  * available in the system can be calculated as follows:
7421  *
7422  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7423  *
7424  * The result is in the range [1, %SCX_CPUPERF_ONE].
7425  */
7426 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7427 {
7428 	if (ops_cpu_valid(cpu, NULL))
7429 		return arch_scale_freq_capacity(cpu);
7430 	else
7431 		return SCX_CPUPERF_ONE;
7432 }
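
/*
 * Worked example for the formula above (illustrative numbers only): on a CPU
 * with scx_bpf_cpuperf_cap() == 512 that is currently running at
 * scx_bpf_cpuperf_cur() == 768, the performance relative to the most
 * performant CPU in the system is 512 * 768 / 1024 = 384, i.e. 37.5% of
 * %SCX_CPUPERF_ONE.
 */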
7433 
7434 /**
7435  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7436  * @cpu: CPU of interest
7437  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7438  *
7439  * Set the target performance level of @cpu to @perf. @perf is in linear
7440  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7441  * schedutil cpufreq governor chooses the target frequency.
7442  *
7443  * The actual performance level chosen, CPU grouping, and the overhead and
7444  * latency of the operations are dependent on the hardware and cpufreq driver in
7445  * use. Consult hardware and cpufreq documentation for more information. The
7446  * current performance level can be monitored using scx_bpf_cpuperf_cur().
7447  */
7448 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7449 {
7450 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
7451 		scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7452 		return;
7453 	}
7454 
7455 	if (ops_cpu_valid(cpu, NULL)) {
7456 		struct rq *rq = cpu_rq(cpu);
7457 
7458 		rq->scx.cpuperf_target = perf;
7459 
7460 		rcu_read_lock_sched_notrace();
7461 		cpufreq_update_util(cpu_rq(cpu), 0);
7462 		rcu_read_unlock_sched_notrace();
7463 	}
7464 }
7465 
7466 /**
7467  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7468  *
7469  * All valid CPU IDs in the system are smaller than the returned value.
7470  */
7471 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7472 {
7473 	return nr_cpu_ids;
7474 }
7475 
7476 /**
7477  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7478  */
7479 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7480 {
7481 	return cpu_possible_mask;
7482 }
7483 
7484 /**
7485  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7486  */
7487 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7488 {
7489 	return cpu_online_mask;
7490 }
7491 
7492 /**
7493  * scx_bpf_put_cpumask - Release a possible/online cpumask
7494  * @cpumask: cpumask to release
7495  */
7496 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7497 {
7498 	/*
7499 	 * Empty function body because we aren't actually acquiring or releasing
7500 	 * a reference to a global cpumask, which is read-only in the caller and
7501 	 * is never released. The acquire / release semantics here are just used
7502 	 * to make the cpumask a trusted pointer in the caller.
7503 	 */
7504 }
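
/*
 * Illustrative sketch (not part of the kernel build): the get/put pairs above
 * only convey pointer trust to the BPF verifier; typical usage brackets a
 * read-only scan of the mask. The bpf_for() numeric iterator macro and
 * do_something() are assumptions.
 *
 *	const struct cpumask *online = scx_bpf_get_online_cpumask();
 *	s32 cpu;
 *
 *	bpf_for(cpu, 0, scx_bpf_nr_cpu_ids()) {
 *		if (bpf_cpumask_test_cpu(cpu, online))
 *			do_something(cpu);
 *	}
 *	scx_bpf_put_cpumask(online);
 */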
7505 
7506 /**
7507  * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
7508  * per-CPU cpumask.
7509  *
7510  * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7511  */
7512 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
7513 {
7514 	if (!check_builtin_idle_enabled())
7515 		return cpu_none_mask;
7516 
7517 #ifdef CONFIG_SMP
7518 	return idle_masks.cpu;
7519 #else
7520 	return cpu_none_mask;
7521 #endif
7522 }
7523 
7524 /**
7525  * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
7526  * per-physical-core cpumask. Can be used to determine if an entire physical
7527  * core is free.
7528  *
7529  * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7530  */
7531 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
7532 {
7533 	if (!check_builtin_idle_enabled())
7534 		return cpu_none_mask;
7535 
7536 #ifdef CONFIG_SMP
7537 	if (sched_smt_active())
7538 		return idle_masks.smt;
7539 	else
7540 		return idle_masks.cpu;
7541 #else
7542 	return cpu_none_mask;
7543 #endif
7544 }
7545 
7546 /**
7547  * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
7548  * either the percpu or SMT idle-tracking cpumask.
7549  * @idle_mask: &cpumask to use
7550  */
7551 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
7552 {
7553 	/*
7554 	 * Empty function body because we aren't actually acquiring or releasing
7555 	 * a reference to a global idle cpumask, which is read-only in the
7556 	 * caller and is never released. The acquire / release semantics here
7557 	 * are just used to make the cpumask a trusted pointer in the caller.
7558 	 */
7559 }
7560 
7561 /**
7562  * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
7563  * @cpu: cpu to test and clear idle for
7564  *
7565  * Returns %true if @cpu was idle and its idle state was successfully cleared.
7566  * %false otherwise.
7567  *
7568  * Unavailable if ops.update_idle() is implemented and
7569  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7570  */
7571 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
7572 {
7573 	if (!check_builtin_idle_enabled())
7574 		return false;
7575 
7576 	if (ops_cpu_valid(cpu, NULL))
7577 		return test_and_clear_cpu_idle(cpu);
7578 	else
7579 		return false;
7580 }
7581 
7582 /**
7583  * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
7584  * @cpus_allowed: Allowed cpumask
7585  * @flags: %SCX_PICK_IDLE_CPU_* flags
7586  *
7587  * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
7588  * number on success. -%EBUSY if no matching cpu was found.
7589  *
7590  * Idle CPU tracking may race against CPU scheduling state transitions. For
7591  * example, this function may return -%EBUSY as CPUs are transitioning into the
7592  * idle state. If the caller then assumes that there will be dispatch events on
7593  * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
7594  * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
7595  * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
7596  * event in the near future.
7597  *
7598  * Unavailable if ops.update_idle() is implemented and
7599  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7600  */
7601 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
7602 				      u64 flags)
7603 {
7604 	if (!check_builtin_idle_enabled())
7605 		return -EBUSY;
7606 
7607 	return scx_pick_idle_cpu(cpus_allowed, flags);
7608 }
7609 
7610 /**
7611  * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
7612  * @cpus_allowed: Allowed cpumask
7613  * @flags: %SCX_PICK_IDLE_CPU_* flags
7614  *
7615  * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
7616  * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
7617  * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
7618  * empty.
7619  *
7620  * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
7621  * set, this function can't tell which CPUs are idle and will always pick any
7622  * CPU.
7623  */
7624 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
7625 				     u64 flags)
7626 {
7627 	s32 cpu;
7628 
7629 	if (static_branch_likely(&scx_builtin_idle_enabled)) {
7630 		cpu = scx_pick_idle_cpu(cpus_allowed, flags);
7631 		if (cpu >= 0)
7632 			return cpu;
7633 	}
7634 
7635 	cpu = cpumask_any_distribute(cpus_allowed);
7636 	if (cpu < nr_cpu_ids)
7637 		return cpu;
7638 	else
7639 		return -EBUSY;
7640 }
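
/*
 * Illustrative sketch (not part of the kernel build): a simple ops.select_cpu()
 * that prefers a fully idle SMT core (%SCX_PICK_IDLE_CORE), then any idle CPU,
 * and finally falls back to @prev_cpu. scx_bpf_pick_any_cpu() could replace the
 * final fallback when any allowed CPU will do. BPF_STRUCT_OPS() is assumed from
 * the scx tooling headers.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu;
 *
 *		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
 *		if (cpu >= 0)
 *			return cpu;
 *
 *		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *		if (cpu >= 0)
 *			return cpu;
 *
 *		return prev_cpu;
 *	}
 */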
7641 
7642 /**
7643  * scx_bpf_task_running - Is task currently running?
7644  * @p: task of interest
7645  */
7646 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7647 {
7648 	return task_rq(p)->curr == p;
7649 }
7650 
7651 /**
7652  * scx_bpf_task_cpu - CPU a task is currently associated with
7653  * @p: task of interest
7654  */
7655 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7656 {
7657 	return task_cpu(p);
7658 }
7659 
7660 /**
7661  * scx_bpf_cpu_rq - Fetch the rq of a CPU
7662  * @cpu: CPU of the rq
7663  */
7664 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7665 {
7666 	if (!ops_cpu_valid(cpu, NULL))
7667 		return NULL;
7668 
7669 	return cpu_rq(cpu);
7670 }
7671 
7672 /**
7673  * scx_bpf_task_cgroup - Return the sched cgroup of a task
7674  * @p: task of interest
7675  *
7676  * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7677  * from the scheduler's POV. SCX operations should use this function to
7678  * determine @p's current cgroup as, unlike following @p->cgroups,
7679  * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7680  * rq-locked operations. Can be called on the parameter tasks of rq-locked
7681  * operations. The restriction guarantees that @p's rq is locked by the caller.
7682  */
7683 #ifdef CONFIG_CGROUP_SCHED
7684 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7685 {
7686 	struct task_group *tg = p->sched_task_group;
7687 	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7688 
7689 	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7690 		goto out;
7691 
7692 	cgrp = tg_cgrp(tg);
7693 
7694 out:
7695 	cgroup_get(cgrp);
7696 	return cgrp;
7697 }
7698 #endif
7699 
7700 /**
7701  * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7702  * clock for the current CPU. The clock returned is in nanoseconds.
7703  *
7704  * It provides the following properties:
7705  *
7706  * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7707  *  to account for execution time and track tasks' runtime properties.
7708  *  Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
7709  *  eventually reads a hardware timestamp counter -- is neither performant nor
7710  *  scalable. scx_bpf_now() aims to provide a high-performance clock by
7711  *  using the rq clock in the scheduler core whenever possible.
7712  *
7713  * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7714  *  scheduler use cases, the required clock resolution is lower than the most
7715  *  accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7716  *  uses the rq clock in the scheduler core whenever it is valid. It considers
7717  *  that the rq clock is valid from the time the rq clock is updated
7718  *  (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7719  *
7720  * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7721  *  guarantees the clock never goes backward when comparing them in the same
7722  *  CPU. On the other hand, when comparing clocks in different CPUs, there
7723  *  is no such guarantee -- the clock can go backward. It provides a
7724  *  monotonically *non-decreasing* clock so that it would provide the same
7725  *  clock values in two different scx_bpf_now() calls in the same CPU
7726  *  during the same period of when the rq clock is valid.
7727  */
7728 __bpf_kfunc u64 scx_bpf_now(void)
7729 {
7730 	struct rq *rq;
7731 	u64 clock;
7732 
7733 	preempt_disable();
7734 
7735 	rq = this_rq();
7736 	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7737 		/*
7738 		 * If the rq clock is valid, use the cached rq clock.
7739 		 *
7740 		 * Note that scx_bpf_now() is re-entrant between a process
7741 		 * context and an interrupt context (e.g., timer interrupt).
7742 		 * However, we don't need to consider the race between them
7743 		 * because such race is not observable from a caller.
7744 		 */
7745 		clock = READ_ONCE(rq->scx.clock);
7746 	} else {
7747 		/*
7748 		 * Otherwise, return a fresh rq clock.
7749 		 *
7750 		 * The rq clock is updated outside of the rq lock.
7751 		 * In this case, keep the updated rq clock invalid so the next
7752 		 * kfunc call outside the rq lock gets a fresh rq clock.
7753 		 */
7754 		clock = sched_clock_cpu(cpu_of(rq));
7755 	}
7756 
7757 	preempt_enable();
7758 
7759 	return clock;
7760 }
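
/*
 * Illustrative sketch (not part of the kernel build): using scx_bpf_now() to
 * account per-task runtime from ops.running() / ops.stopping(). Both samples
 * are taken on the CPU the task runs on, which is where the non-decreasing
 * guarantee applies. The per-task started_at / total_runtime storage accessors
 * are hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_running, struct task_struct *p)
 *	{
 *		started_at(p) = scx_bpf_now();
 *	}
 *
 *	void BPF_STRUCT_OPS(example_stopping, struct task_struct *p, bool runnable)
 *	{
 *		total_runtime(p) += scx_bpf_now() - started_at(p);
 *	}
 */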
7761 
7762 __bpf_kfunc_end_defs();
7763 
7764 BTF_KFUNCS_START(scx_kfunc_ids_any)
7765 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7766 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7767 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7768 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7769 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7770 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7771 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7772 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7773 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7774 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7775 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7776 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7777 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7778 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7779 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7780 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7781 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7782 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7783 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7784 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7785 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7786 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7787 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7788 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7789 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7790 #ifdef CONFIG_CGROUP_SCHED
7791 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7792 #endif
7793 BTF_ID_FLAGS(func, scx_bpf_now)
7794 BTF_KFUNCS_END(scx_kfunc_ids_any)
7795 
7796 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7797 	.owner			= THIS_MODULE,
7798 	.set			= &scx_kfunc_ids_any,
7799 };
7800 
7801 static int __init scx_init(void)
7802 {
7803 	int ret;
7804 
7805 	/*
7806 	 * kfunc registration can't be done from init_sched_ext_class() as
7807 	 * register_btf_kfunc_id_set() needs most of the system to be up.
7808 	 *
7809 	 * Some kfuncs are context-sensitive and can only be called from
7810 	 * specific SCX ops. They are grouped into BTF sets accordingly.
7811 	 * Unfortunately, BPF currently doesn't have a way of enforcing such
7812 	 * restrictions. Eventually, the verifier should be able to enforce
7813 	 * them. For now, register them the same and make each kfunc explicitly
7814 	 * check using scx_kf_allowed().
7815 	 */
7816 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7817 					     &scx_kfunc_set_select_cpu)) ||
7818 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7819 					     &scx_kfunc_set_enqueue_dispatch)) ||
7820 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7821 					     &scx_kfunc_set_dispatch)) ||
7822 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7823 					     &scx_kfunc_set_cpu_release)) ||
7824 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7825 					     &scx_kfunc_set_unlocked)) ||
7826 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7827 					     &scx_kfunc_set_unlocked)) ||
7828 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7829 					     &scx_kfunc_set_any)) ||
7830 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7831 					     &scx_kfunc_set_any)) ||
7832 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7833 					     &scx_kfunc_set_any))) {
7834 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7835 		return ret;
7836 	}
7837 
7838 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7839 	if (ret) {
7840 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7841 		return ret;
7842 	}
7843 
7844 	ret = register_pm_notifier(&scx_pm_notifier);
7845 	if (ret) {
7846 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7847 		return ret;
7848 	}
7849 
7850 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7851 	if (!scx_kset) {
7852 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7853 		return -ENOMEM;
7854 	}
7855 
7856 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7857 	if (ret < 0) {
7858 		pr_err("sched_ext: Failed to add global attributes\n");
7859 		return ret;
7860 	}
7861 
7862 	return 0;
7863 }
7864 __initcall(scx_init);
7865