xref: /linux/kernel/sched/ext.c (revision 86f5536004a61a0c797c14a248fc976f03f55cd5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
10 
11 enum scx_consts {
12 	SCX_DSP_DFL_MAX_BATCH		= 32,
13 	SCX_DSP_MAX_LOOPS		= 32,
14 	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,
15 
16 	SCX_EXIT_BT_LEN			= 64,
17 	SCX_EXIT_MSG_LEN		= 1024,
18 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
19 
20 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
21 
22 	/*
23 	 * Iterating all tasks may take a while. Periodically drop
24 	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
25 	 */
26 	SCX_OPS_TASK_ITER_BATCH		= 32,
27 };
28 
29 enum scx_exit_kind {
30 	SCX_EXIT_NONE,
31 	SCX_EXIT_DONE,
32 
33 	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
34 	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
35 	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
36 	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
37 
38 	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
39 	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
40 	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
41 };
42 
43 /*
44  * An exit code can be specified when exiting with scx_bpf_exit() or
45  * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
46  * respectively. The codes are 64-bit values in the following format:
47  *
48  *   Bits: [63  ..  48 47   ..  32 31 .. 0]
49  *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
50  *
51  *   SYS ACT: System-defined exit actions
52  *   SYS RSN: System-defined exit reasons
53  *   USR    : User-defined exit codes and reasons
54  *
55  * Using the above, users may communicate intention and context by ORing system
56  * actions and/or system reasons with a user-defined exit code.
57  */
58 enum scx_exit_code {
59 	/* Reasons */
60 	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,
61 
62 	/* Actions */
63 	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
64 };
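/*
 * For example (hedged sketch), a BPF scheduler which wants its user-space
 * loader to restart it after exiting due to a hotplug event could combine the
 * fields as follows, with 42 standing in for a user-defined code:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 42,
 *		     "cpu %d went offline", cpu);
 *
 * The loader can then inspect scx_exit_info.exit_code to decide what to do.
 */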
65 
66 /*
67  * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
68  * being disabled.
69  */
70 struct scx_exit_info {
71 	/* %SCX_EXIT_* - broad category of the exit reason */
72 	enum scx_exit_kind	kind;
73 
74 	/* exit code if gracefully exiting */
75 	s64			exit_code;
76 
77 	/* textual representation of the above */
78 	const char		*reason;
79 
80 	/* backtrace if exiting due to an error */
81 	unsigned long		*bt;
82 	u32			bt_len;
83 
84 	/* informational message */
85 	char			*msg;
86 
87 	/* debug dump */
88 	char			*dump;
89 };
90 
91 /* sched_ext_ops.flags */
92 enum scx_ops_flags {
93 	/*
94 	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
95 	 */
96 	SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
97 
98 	/*
99 	 * By default, if there are no other tasks to run on the CPU, the ext core
100 	 * keeps running the current task even after its slice expires. If this
101 	 * flag is specified, such tasks are passed to ops.enqueue() with
102 	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
103 	 */
104 	SCX_OPS_ENQ_LAST	= 1LLU << 1,
105 
106 	/*
107 	 * An exiting task may schedule after PF_EXITING is set. In such cases,
108 	 * bpf_task_from_pid() may not be able to find the task and if the BPF
109 	 * scheduler depends on pid lookup for dispatching, the task will be
110 	 * lost leading to various issues including RCU grace period stalls.
111 	 *
112 	 * To mask this problem, by default, unhashed tasks are automatically
113 	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
114 	 * depend on pid lookups and wants to handle these tasks directly, the
115 	 * following flag can be used.
116 	 */
117 	SCX_OPS_ENQ_EXITING	= 1LLU << 2,
118 
119 	/*
120 	 * If set, only tasks with policy set to SCHED_EXT are attached to
121 	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
122 	 */
123 	SCX_OPS_SWITCH_PARTIAL	= 1LLU << 3,
124 
125 	/*
126 	 * CPU cgroup support flags
127 	 */
128 	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* cpu.weight */
129 
130 	SCX_OPS_ALL_FLAGS	= SCX_OPS_KEEP_BUILTIN_IDLE |
131 				  SCX_OPS_ENQ_LAST |
132 				  SCX_OPS_ENQ_EXITING |
133 				  SCX_OPS_SWITCH_PARTIAL |
134 				  SCX_OPS_HAS_CGROUP_WEIGHT,
135 };
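/*
 * A hedged sketch of how a BPF scheduler might specify these flags when
 * defining its ops table, using the SCX_OPS_DEFINE() helper from the example
 * tooling (all names other than the flags are illustrative):
 *
 *	SCX_OPS_DEFINE(mysched_ops,
 *		       .enqueue	= (void *)mysched_enqueue,
 *		       .init	= (void *)mysched_init,
 *		       .flags	= SCX_OPS_KEEP_BUILTIN_IDLE |
 *				  SCX_OPS_ENQ_LAST,
 *		       .name	= "mysched");
 */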
136 
137 /* argument container for ops.init_task() */
138 struct scx_init_task_args {
139 	/*
140 	 * Set if ops.init_task() is being invoked on the fork path, as opposed
141 	 * to the scheduler transition path.
142 	 */
143 	bool			fork;
144 #ifdef CONFIG_EXT_GROUP_SCHED
145 	/* the cgroup the task is joining */
146 	struct cgroup		*cgroup;
147 #endif
148 };
149 
150 /* argument container for ops.exit_task() */
151 struct scx_exit_task_args {
152 	/* Whether the task exited before running on sched_ext. */
153 	bool cancelled;
154 };
155 
156 /* argument container for ops->cgroup_init() */
157 struct scx_cgroup_init_args {
158 	/* the weight of the cgroup [1..10000] */
159 	u32			weight;
160 };
161 
162 enum scx_cpu_preempt_reason {
163 	/* next task is being scheduled by &sched_class_rt */
164 	SCX_CPU_PREEMPT_RT,
165 	/* next task is being scheduled by &sched_class_dl */
166 	SCX_CPU_PREEMPT_DL,
167 	/* next task is being scheduled by &sched_class_stop */
168 	SCX_CPU_PREEMPT_STOP,
169 	/* unknown reason for SCX being preempted */
170 	SCX_CPU_PREEMPT_UNKNOWN,
171 };
172 
173 /*
174  * Argument container for ops->cpu_acquire(). Currently empty, but may be
175  * expanded in the future.
176  */
177 struct scx_cpu_acquire_args {};
178 
179 /* argument container for ops->cpu_release() */
180 struct scx_cpu_release_args {
181 	/* the reason the CPU was preempted */
182 	enum scx_cpu_preempt_reason reason;
183 
184 	/* the task that's going to be scheduled on the CPU */
185 	struct task_struct	*task;
186 };
187 
188 /*
189  * Informational context provided to dump operations.
190  */
191 struct scx_dump_ctx {
192 	enum scx_exit_kind	kind;
193 	s64			exit_code;
194 	const char		*reason;
195 	u64			at_ns;
196 	u64			at_jiffies;
197 };
198 
199 /**
200  * struct sched_ext_ops - Operation table for BPF scheduler implementation
201  *
202  * A BPF scheduler can implement an arbitrary scheduling policy by
203  * implementing and loading operations in this table. Note that a userland
204  * scheduling policy can also be implemented using the BPF scheduler
205  * as a shim layer.
206  */
207 struct sched_ext_ops {
208 	/**
209 	 * @select_cpu: Pick the target CPU for a task which is being woken up
210 	 * @p: task being woken up
211 	 * @prev_cpu: the cpu @p was on before sleeping
212 	 * @wake_flags: SCX_WAKE_*
213 	 *
214 	 * The decision made here isn't final. @p may be moved to any CPU while it
215 	 * is getting dispatched for execution later. However, as @p is not on
216 	 * the rq at this point, getting the eventual execution CPU right here
217 	 * saves a small bit of overhead down the line.
218 	 *
219 	 * If an idle CPU is returned, the CPU is kicked and will try to
220 	 * dispatch. While an explicit custom mechanism can be added,
221 	 * select_cpu() serves as the default way to wake up idle CPUs.
222 	 *
223 	 * @p may be inserted into a DSQ directly by calling
224 	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
225 	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
226 	 * of the CPU returned by this operation.
227 	 *
228 	 * Note that select_cpu() is never called for tasks that can only run
229 	 * on a single CPU or tasks with migration disabled, as they don't have
230 	 * the option to select a different CPU. See select_task_rq() for
231 	 * details.
232 	 */
233 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
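	/*
	 * A minimal sketch of a typical implementation which defers to the
	 * built-in idle tracking (illustrative only, not part of this file;
	 * "sched_select_cpu" is a made-up name):
	 *
	 *	s32 BPF_STRUCT_OPS(sched_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *					     &is_idle);
	 *		if (is_idle)	// idle CPU found, skip ops.enqueue()
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL,
	 *					   SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */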
234 
235 	/**
236 	 * @enqueue: Enqueue a task on the BPF scheduler
237 	 * @p: task being enqueued
238 	 * @enq_flags: %SCX_ENQ_*
239 	 *
240 	 * @p is ready to run. Insert directly into a DSQ by calling
241 	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
242 	 * inserted, the BPF scheduler owns @p and, if it fails to dispatch @p,
243 	 * the task will stall.
244 	 *
245 	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
246 	 * skipped.
247 	 */
248 	void (*enqueue)(struct task_struct *p, u64 enq_flags);
249 
250 	/**
251 	 * @dequeue: Remove a task from the BPF scheduler
252 	 * @p: task being dequeued
253 	 * @deq_flags: %SCX_DEQ_*
254 	 *
255 	 * Remove @p from the BPF scheduler. This is usually called to isolate
256 	 * the task while updating its scheduling properties (e.g. priority).
257 	 *
258 	 * The ext core keeps track of whether the BPF side owns a given task or
259 	 * not and can gracefully ignore spurious dispatches from BPF side,
260 	 * which makes it safe to not implement this method. However, depending
261 	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
262 	 * scheduling position not being updated across a priority change.
263 	 */
264 	void (*dequeue)(struct task_struct *p, u64 deq_flags);
265 
266 	/**
267 	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
268 	 * @cpu: CPU to dispatch tasks for
269 	 * @prev: previous task being switched out
270 	 *
271 	 * Called when a CPU's local DSQ is empty. The operation should dispatch
272 	 * one or more tasks from the BPF scheduler into the DSQs using
273 	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
274 	 * using scx_bpf_dsq_move_to_local().
275 	 *
276 	 * The maximum number of times scx_bpf_dsq_insert() can be called
277 	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
278 	 * ops.dispatch_max_batch. See the comments on top of the two functions
279 	 * for more details.
280 	 *
281 	 * When not %NULL, @prev is an SCX task with its slice depleted. If
282 	 * @prev is still runnable, as indicated by %SCX_TASK_QUEUED being set in
283 	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
284 	 * ops.dispatch() returns. To keep executing @prev, return without
285 	 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
286 	 */
287 	void (*dispatch)(s32 cpu, struct task_struct *prev);
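	/*
	 * Illustrative sketch, assuming a custom DSQ (the "MY_DSQ" ID below is
	 * made up) that was created in ops.init():
	 *
	 *	void BPF_STRUCT_OPS(sched_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		// move one task from MY_DSQ to this CPU's local DSQ
	 *		if (!scx_bpf_dsq_move_to_local(MY_DSQ) &&
	 *		    prev && (prev->scx.flags & SCX_TASK_QUEUED))
	 *			prev->scx.slice = SCX_SLICE_DFL;	// keep running @prev
	 *	}
	 */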
288 
289 	/**
290 	 * @tick: Periodic tick
291 	 * @p: task running currently
292 	 *
293 	 * This operation is called every 1/HZ seconds on CPUs which are
294 	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
295 	 * immediate dispatch cycle on the CPU.
296 	 */
297 	void (*tick)(struct task_struct *p);
298 
299 	/**
300 	 * @runnable: A task is becoming runnable on its associated CPU
301 	 * @p: task becoming runnable
302 	 * @enq_flags: %SCX_ENQ_*
303 	 *
304 	 * This and the following three functions can be used to track a task's
305 	 * execution state transitions. A task becomes ->runnable() on a CPU,
306 	 * and then goes through one or more ->running() and ->stopping() pairs
307 	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
308 	 * done running on the CPU.
309 	 *
310 	 * @p is becoming runnable on the CPU because it's
311 	 *
312 	 * - waking up (%SCX_ENQ_WAKEUP)
313 	 * - being moved from another CPU
314 	 * - being restored after being temporarily taken off the queue for an
315 	 *   attribute change.
316 	 *
317 	 * This and ->enqueue() are related but not coupled. This operation
318 	 * notifies @p's state transition and may not be followed by ->enqueue()
319 	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
320 	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
321 	 * task may be ->enqueue()'d without being preceded by this operation
322 	 * e.g. after exhausting its slice.
323 	 */
324 	void (*runnable)(struct task_struct *p, u64 enq_flags);
325 
326 	/**
327 	 * @running: A task is starting to run on its associated CPU
328 	 * @p: task starting to run
329 	 *
330 	 * See ->runnable() for explanation on the task state notifiers.
331 	 */
332 	void (*running)(struct task_struct *p);
333 
334 	/**
335 	 * @stopping: A task is stopping execution
336 	 * @p: task that is stopping
337 	 * @runnable: is task @p still runnable?
338 	 *
339 	 * See ->runnable() for explanation on the task state notifiers. If
340 	 * !@runnable, ->quiescent() will be invoked after this operation
341 	 * returns.
342 	 */
343 	void (*stopping)(struct task_struct *p, bool runnable);
344 
345 	/**
346 	 * @quiescent: A task is becoming not runnable on its associated CPU
347 	 * @p: task becoming not runnable
348 	 * @deq_flags: %SCX_DEQ_*
349 	 *
350 	 * See ->runnable() for explanation on the task state notifiers.
351 	 *
352 	 * @p is becoming quiescent on the CPU because it's
353 	 *
354 	 * - sleeping (%SCX_DEQ_SLEEP)
355 	 * - being moved to another CPU
356 	 * - being temporarily taken off the queue for an attribute change
357 	 *   (%SCX_DEQ_SAVE)
358 	 *
359 	 * This and ->dequeue() are related but not coupled. This operation
360 	 * notifies @p's state transition and may not be preceded by ->dequeue()
361 	 * e.g. when @p is being dispatched to a remote CPU.
362 	 */
363 	void (*quiescent)(struct task_struct *p, u64 deq_flags);
364 
365 	/**
366 	 * @yield: Yield CPU
367 	 * @from: yielding task
368 	 * @to: optional yield target task
369 	 *
370 	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
371 	 * The BPF scheduler should ensure that other available tasks are
372 	 * dispatched before the yielding task. Return value is ignored in this
373 	 * case.
374 	 *
375 	 * If @to is non-NULL, @from wants to yield the CPU to @to. If the BPF
376 	 * scheduler can implement the request, return %true; otherwise, %false.
377 	 */
378 	bool (*yield)(struct task_struct *from, struct task_struct *to);
379 
380 	/**
381 	 * @core_sched_before: Task ordering for core-sched
382 	 * @a: task A
383 	 * @b: task B
384 	 *
385 	 * Used by core-sched to determine the ordering between two tasks. See
386 	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
387 	 * core-sched.
388 	 *
389 	 * Both @a and @b are runnable and may or may not currently be queued on
390 	 * the BPF scheduler. Should return %true if @a should run before @b.
391 	 * %false if there's no required ordering or @b should run before @a.
392 	 *
393 	 * If not specified, the default is ordering them according to when they
394 	 * became runnable.
395 	 */
396 	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
397 
398 	/**
399 	 * @set_weight: Set task weight
400 	 * @p: task to set weight for
401 	 * @weight: new weight [1..10000]
402 	 *
403 	 * Update @p's weight to @weight.
404 	 */
405 	void (*set_weight)(struct task_struct *p, u32 weight);
406 
407 	/**
408 	 * @set_cpumask: Set CPU affinity
409 	 * @p: task to set CPU affinity for
410 	 * @cpumask: cpumask of cpus that @p can run on
411 	 *
412 	 * Update @p's CPU affinity to @cpumask.
413 	 */
414 	void (*set_cpumask)(struct task_struct *p,
415 			    const struct cpumask *cpumask);
416 
417 	/**
418 	 * @update_idle: Update the idle state of a CPU
419 	 * @cpu: CPU to update the idle state for
420 	 * @idle: whether entering or exiting the idle state
421 	 *
422 	 * This operation is called when @cpu enters or leaves the idle
423 	 * state. By default, implementing this operation disables the built-in
424 	 * idle CPU tracking and the following helpers become unavailable:
425 	 *
426 	 * - scx_bpf_select_cpu_dfl()
427 	 * - scx_bpf_test_and_clear_cpu_idle()
428 	 * - scx_bpf_pick_idle_cpu()
429 	 *
430 	 * The user must also implement ops.select_cpu() as the default
431 	 * implementation relies on scx_bpf_select_cpu_dfl().
432 	 *
433 	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
434 	 * tracking.
435 	 */
436 	void (*update_idle)(s32 cpu, bool idle);
437 
438 	/**
439 	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
440 	 * @cpu: The CPU being acquired by the BPF scheduler.
441 	 * @args: Acquire arguments, see the struct definition.
442 	 *
443 	 * A CPU that was previously released from the BPF scheduler is now once
444 	 * again under its control.
445 	 */
446 	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
447 
448 	/**
449 	 * @cpu_release: A CPU is taken away from the BPF scheduler
450 	 * @cpu: The CPU being released by the BPF scheduler.
451 	 * @args: Release arguments, see the struct definition.
452 	 *
453 	 * The specified CPU is no longer under the control of the BPF
454 	 * scheduler. This could be because it was preempted by a higher
455 	 * priority sched_class, though there may be other reasons as well. The
456 	 * caller should consult @args->reason to determine the cause.
457 	 */
458 	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
459 
460 	/**
461 	 * @init_task: Initialize a task to run in a BPF scheduler
462 	 * @p: task to initialize for BPF scheduling
463 	 * @args: init arguments, see the struct definition
464 	 *
465 	 * Either we're loading a BPF scheduler or a new task is being forked.
466 	 * Initialize @p for BPF scheduling. This operation may block and can
467 	 * be used for allocations, and is called exactly once for a task.
468 	 *
469 	 * Return 0 for success, -errno for failure. An error return while
470 	 * loading will abort loading of the BPF scheduler. During a fork, it
471 	 * will abort that specific fork.
472 	 */
473 	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
474 
475 	/**
476 	 * @exit_task: Exit a previously-running task from the system
477 	 * @p: task to exit
478 	 * @args: exit arguments, see the struct definition
479 	 *
480 	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
481 	 * necessary cleanup for @p.
482 	 */
483 	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
484 
485 	/**
486 	 * @enable: Enable BPF scheduling for a task
487 	 * @p: task to enable BPF scheduling for
488 	 *
489 	 * Enable @p for BPF scheduling. enable() is called on @p any time it
490 	 * enters SCX, and is always paired with a matching disable().
491 	 */
492 	void (*enable)(struct task_struct *p);
493 
494 	/**
495 	 * @disable: Disable BPF scheduling for a task
496 	 * @p: task to disable BPF scheduling for
497 	 *
498 	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
499 	 * Disable BPF scheduling for @p. A disable() call is always matched
500 	 * with a prior enable() call.
501 	 */
502 	void (*disable)(struct task_struct *p);
503 
504 	/**
505 	 * @dump: Dump BPF scheduler state on error
506 	 * @ctx: debug dump context
507 	 *
508 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
509 	 */
510 	void (*dump)(struct scx_dump_ctx *ctx);
511 
512 	/**
513 	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
514 	 * @ctx: debug dump context
515 	 * @cpu: CPU to generate debug dump for
516 	 * @idle: @cpu is currently idle without any runnable tasks
517 	 *
518 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
519 	 * @cpu. If @idle is %true and this operation doesn't produce any
520 	 * output, @cpu is skipped for dump.
521 	 */
522 	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
523 
524 	/**
525 	 * @dump_task: Dump BPF scheduler state for a runnable task on error
526 	 * @ctx: debug dump context
527 	 * @p: runnable task to generate debug dump for
528 	 *
529 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
530 	 * @p.
531 	 */
532 	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
533 
534 #ifdef CONFIG_EXT_GROUP_SCHED
535 	/**
536 	 * @cgroup_init: Initialize a cgroup
537 	 * @cgrp: cgroup being initialized
538 	 * @args: init arguments, see the struct definition
539 	 *
540 	 * Either the BPF scheduler is being loaded or @cgrp is being created;
541 	 * initialize @cgrp for sched_ext. This operation may block.
542 	 *
543 	 * Return 0 for success, -errno for failure. An error return while
544 	 * loading will abort loading of the BPF scheduler. During cgroup
545 	 * creation, it will abort the specific cgroup creation.
546 	 */
547 	s32 (*cgroup_init)(struct cgroup *cgrp,
548 			   struct scx_cgroup_init_args *args);
549 
550 	/**
551 	 * @cgroup_exit: Exit a cgroup
552 	 * @cgrp: cgroup being exited
553 	 *
554 	 * Either the BPF scheduler is being unloaded or @cgrp is being
555 	 * destroyed; exit @cgrp for sched_ext. This operation may block.
556 	 */
557 	void (*cgroup_exit)(struct cgroup *cgrp);
558 
559 	/**
560 	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
561 	 * @p: task being moved
562 	 * @from: cgroup @p is being moved from
563 	 * @to: cgroup @p is being moved to
564 	 *
565 	 * Prepare @p for move from cgroup @from to @to. This operation may
566 	 * block and can be used for allocations.
567 	 *
568 	 * Return 0 for success, -errno for failure. An error return aborts the
569 	 * migration.
570 	 */
571 	s32 (*cgroup_prep_move)(struct task_struct *p,
572 				struct cgroup *from, struct cgroup *to);
573 
574 	/**
575 	 * @cgroup_move: Commit cgroup move
576 	 * @p: task being moved
577 	 * @from: cgroup @p is being moved from
578 	 * @to: cgroup @p is being moved to
579 	 *
580 	 * Commit the move. @p is dequeued during this operation.
581 	 */
582 	void (*cgroup_move)(struct task_struct *p,
583 			    struct cgroup *from, struct cgroup *to);
584 
585 	/**
586 	 * @cgroup_cancel_move: Cancel cgroup move
587 	 * @p: task whose cgroup move is being canceled
588 	 * @from: cgroup @p was being moved from
589 	 * @to: cgroup @p was being moved to
590 	 *
591 	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
592 	 * Undo the preparation.
593 	 */
594 	void (*cgroup_cancel_move)(struct task_struct *p,
595 				   struct cgroup *from, struct cgroup *to);
596 
597 	/**
598 	 * @cgroup_set_weight: A cgroup's weight is being changed
599 	 * @cgrp: cgroup whose weight is being updated
600 	 * @weight: new weight [1..10000]
601 	 *
602 	 * Update @cgrp's weight to @weight.
603 	 */
604 	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
605 #endif	/* CONFIG_EXT_GROUP_SCHED */
606 
607 	/*
608 	 * All online ops must come before ops.cpu_online().
609 	 */
610 
611 	/**
612 	 * @cpu_online: A CPU became online
613 	 * @cpu: CPU which just came up
614 	 *
615 	 * @cpu just came online. @cpu will not call ops.enqueue() or
616 	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
617 	 */
618 	void (*cpu_online)(s32 cpu);
619 
620 	/**
621 	 * @cpu_offline: A CPU is going offline
622 	 * @cpu: CPU which is going offline
623 	 *
624 	 * @cpu is going offline. @cpu will not call ops.enqueue() or
625 	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
626 	 */
627 	void (*cpu_offline)(s32 cpu);
628 
629 	/*
630 	 * All CPU hotplug ops must come before ops.init().
631 	 */
632 
633 	/**
634 	 * @init: Initialize the BPF scheduler
635 	 */
636 	s32 (*init)(void);
637 
638 	/**
639 	 * @exit: Clean up after the BPF scheduler
640 	 * @info: Exit info
641 	 *
642 	 * ops.exit() is also called on ops.init() failure, which is a bit
643 	 * unusual. This is to allow rich reporting through @info on how
644 	 * ops.init() failed.
645 	 */
646 	void (*exit)(struct scx_exit_info *info);
647 
648 	/**
649 	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
650 	 */
651 	u32 dispatch_max_batch;
652 
653 	/**
654 	 * @flags: %SCX_OPS_* flags
655 	 */
656 	u64 flags;
657 
658 	/**
659 	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
660 	 * runnable task should be able to wait before being scheduled. The
661 	 * maximum timeout may not exceed the default timeout of 30 seconds.
662 	 *
663 	 * Defaults to the maximum allowed timeout value of 30 seconds.
664 	 */
665 	u32 timeout_ms;
666 
667 	/**
668 	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
669 	 * value of 32768 is used.
670 	 */
671 	u32 exit_dump_len;
672 
673 	/**
674 	 * @hotplug_seq: A sequence number that may be set by the scheduler to
675 	 * detect when a hotplug event has occurred during the loading process.
676 	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
677 	 * load if the sequence number does not match @scx_hotplug_seq on the
678 	 * enable path.
679 	 */
680 	u64 hotplug_seq;
681 
682 	/**
683 	 * @name: BPF scheduler's name
684 	 *
685 	 * Must be a non-zero valid BPF object name including only isalnum(),
686 	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
687 	 * BPF scheduler is enabled.
688 	 */
689 	char name[SCX_OPS_NAME_LEN];
690 };
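/*
 * Putting the table together, a minimal global-FIFO scheduler might look like
 * the following BPF-side sketch (illustrative only; FIFO_DSQ is a made-up DSQ
 * ID and the BPF_STRUCT_OPS*() / SCX_OPS_DEFINE() macros come from the
 * example tooling):
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(fifo_init)
 *	{
 *		return scx_bpf_create_dsq(FIFO_DSQ, -1);	// any NUMA node
 *	}
 *
 *	void BPF_STRUCT_OPS(fifo_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, FIFO_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 *	void BPF_STRUCT_OPS(fifo_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(FIFO_DSQ);
 *	}
 *
 *	SCX_OPS_DEFINE(fifo_ops,
 *		       .init		= (void *)fifo_init,
 *		       .enqueue		= (void *)fifo_enqueue,
 *		       .dispatch	= (void *)fifo_dispatch,
 *		       .name		= "fifo");
 */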
691 
692 enum scx_opi {
693 	SCX_OPI_BEGIN			= 0,
694 	SCX_OPI_NORMAL_BEGIN		= 0,
695 	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
696 	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
697 	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
698 	SCX_OPI_END			= SCX_OP_IDX(init),
699 };
700 
701 enum scx_wake_flags {
702 	/* expose select WF_* flags as enums */
703 	SCX_WAKE_FORK		= WF_FORK,
704 	SCX_WAKE_TTWU		= WF_TTWU,
705 	SCX_WAKE_SYNC		= WF_SYNC,
706 };
707 
708 enum scx_enq_flags {
709 	/* expose select ENQUEUE_* flags as enums */
710 	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
711 	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
712 	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,
713 
714 	/* high 32bits are SCX specific */
715 
716 	/*
717 	 * Set the following to trigger preemption when calling
718 	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
719 	 * current task is cleared to zero and the CPU is kicked into the
720 	 * scheduling path. Implies %SCX_ENQ_HEAD.
721 	 */
722 	SCX_ENQ_PREEMPT		= 1LLU << 32,
723 
724 	/*
725 	 * The task being enqueued was previously enqueued on the current CPU's
726 	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
727 	 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
728 	 * invoked in a ->cpu_release() callback, and the task is again
729 	 * dispatched back to %SCX_DSQ_LOCAL by the current ->enqueue(), the
730 	 * task will not be scheduled on the CPU until at least the next invocation
731 	 * of the ->cpu_acquire() callback.
732 	 */
733 	SCX_ENQ_REENQ		= 1LLU << 40,
734 
735 	/*
736 	 * The task being enqueued is the only task available for the CPU. By
737 	 * default, ext core keeps executing such tasks but when
738 	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
739 	 * %SCX_ENQ_LAST flag set.
740 	 *
741 	 * The BPF scheduler is responsible for triggering a follow-up
742 	 * scheduling event. Otherwise, execution may stall.
743 	 */
744 	SCX_ENQ_LAST		= 1LLU << 41,
745 
746 	/* high 8 bits are internal */
747 	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,
748 
749 	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
750 	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
751 };
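/*
 * Example (hedged): inserting a latency-critical task at the head of a remote
 * CPU's local DSQ while preempting whatever is running there:
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target_cpu, SCX_SLICE_DFL,
 *			   SCX_ENQ_PREEMPT);
 */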
752 
753 enum scx_deq_flags {
754 	/* expose select DEQUEUE_* flags as enums */
755 	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,
756 
757 	/* high 32bits are SCX specific */
758 
759 	/*
760 	 * The generic core-sched layer decided to execute the task even though
761 	 * it hasn't been dispatched yet. Dequeue from the BPF side.
762 	 */
763 	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
764 };
765 
766 enum scx_pick_idle_cpu_flags {
767 	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
768 };
769 
770 enum scx_kick_flags {
771 	/*
772 	 * Kick the target CPU if idle. Guarantees that the target CPU goes
773 	 * through at least one full scheduling cycle before going idle. If the
774 	 * target CPU can be determined to be currently not idle and going to go
775 	 * through a scheduling cycle before going idle, noop.
776 	 */
777 	SCX_KICK_IDLE		= 1LLU << 0,
778 
779 	/*
780 	 * Preempt the current task and execute the dispatch path. If the
781 	 * current task of the target CPU is an SCX task, its ->scx.slice is
782 	 * cleared to zero before the scheduling path is invoked so that the
783 	 * task expires and the dispatch path is invoked.
784 	 */
785 	SCX_KICK_PREEMPT	= 1LLU << 1,
786 
787 	/*
788 	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
789 	 * return after the target CPU finishes picking the next task.
790 	 */
791 	SCX_KICK_WAIT		= 1LLU << 2,
792 };
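/*
 * Usage sketch: after queueing work that a specific CPU should pick up, nudge
 * that CPU without disturbing it if it's already busy:
 *
 *	scx_bpf_kick_cpu(target_cpu, SCX_KICK_IDLE);
 */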
793 
794 enum scx_tg_flags {
795 	SCX_TG_ONLINE		= 1U << 0,
796 	SCX_TG_INITED		= 1U << 1,
797 };
798 
799 enum scx_ops_enable_state {
800 	SCX_OPS_ENABLING,
801 	SCX_OPS_ENABLED,
802 	SCX_OPS_DISABLING,
803 	SCX_OPS_DISABLED,
804 };
805 
806 static const char *scx_ops_enable_state_str[] = {
807 	[SCX_OPS_ENABLING]	= "enabling",
808 	[SCX_OPS_ENABLED]	= "enabled",
809 	[SCX_OPS_DISABLING]	= "disabling",
810 	[SCX_OPS_DISABLED]	= "disabled",
811 };
812 
813 /*
814  * sched_ext_entity->ops_state
815  *
816  * Used to track the task ownership between the SCX core and the BPF scheduler.
817  * State transitions look as follows:
818  *
819  * NONE -> QUEUEING -> QUEUED -> DISPATCHING
820  *   ^              |                 |
821  *   |              v                 v
822  *   \-------------------------------/
823  *
824  * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
825  * sites for explanations on the conditions being waited upon and why they are
826  * safe. Transitions out of them into NONE or QUEUED must store_release and the
827  * waiters should load_acquire.
828  *
829  * Tracking scx_ops_state enables sched_ext core to reliably determine whether
830  * any given task can be dispatched by the BPF scheduler at all times and thus
831  * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
832  * to try to dispatch any task anytime regardless of its state as the SCX core
833  * can safely reject invalid dispatches.
834  */
835 enum scx_ops_state {
836 	SCX_OPSS_NONE,		/* owned by the SCX core */
837 	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
838 	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
839 	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */
840 
841 	/*
842 	 * QSEQ brands each QUEUED instance so that, when dispatch races
843 	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
844 	 * on the task being dispatched.
845 	 *
846 	 * As some 32bit archs can't do 64bit store_release/load_acquire,
847 	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
848 	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
849 	 * and runs with IRQ disabled. 30 bits should be sufficient.
850 	 */
851 	SCX_OPSS_QSEQ_SHIFT	= 2,
852 };
853 
854 /* Use macros to ensure that the type is unsigned long for the masks */
855 #define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
856 #define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
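/*
 * For example, a raw ->ops_state value can be unpacked as follows (a sketch
 * mirroring how the dispatch path consumes it):
 *
 *	unsigned long opss = atomic_long_read_acquire(&p->scx.ops_state);
 *	unsigned long state = opss & SCX_OPSS_STATE_MASK;	// SCX_OPSS_*
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;		// claim brand
 */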
857 
858 /*
859  * During exit, a task may schedule after losing its PIDs. When disabling the
860  * BPF scheduler, we need to be able to iterate tasks in every state to
861  * guarantee system safety. Maintain a dedicated task list which contains every
862  * task between its fork and eventual free.
863  */
864 static DEFINE_SPINLOCK(scx_tasks_lock);
865 static LIST_HEAD(scx_tasks);
866 
867 /* ops enable/disable */
868 static struct kthread_worker *scx_ops_helper;
869 static DEFINE_MUTEX(scx_ops_enable_mutex);
870 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
871 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
872 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
873 static unsigned long scx_in_softlockup;
874 static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
875 static int scx_ops_bypass_depth;
876 static bool scx_ops_init_task_enabled;
877 static bool scx_switching_all;
878 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
879 
880 static struct sched_ext_ops scx_ops;
881 static bool scx_warned_zero_slice;
882 
883 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
884 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
885 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
886 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
887 
888 #ifdef CONFIG_SMP
889 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
890 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
891 #endif
892 
893 static struct static_key_false scx_has_op[SCX_OPI_END] =
894 	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
895 
896 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
897 static struct scx_exit_info *scx_exit_info;
898 
899 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
900 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
901 
902 /*
903  * A monotonically increasing sequence number that is incremented every time a
904  * scheduler is enabled. This can be used to check whether any custom sched_ext
905  * scheduler has ever been used in the system.
906  */
907 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
908 
909 /*
910  * The maximum amount of time in jiffies that a task may be runnable without
911  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
912  * scx_ops_error().
913  */
914 static unsigned long scx_watchdog_timeout;
915 
916 /*
917  * The last time the delayed work was run. This delayed work relies on
918  * ksoftirqd being able to run to service timer interrupts, so it's possible
919  * that this work itself could get wedged. To account for this, we check that
920  * it's not stalled in the timer tick, and trigger an error if it is.
921  */
922 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
923 
924 static struct delayed_work scx_watchdog_work;
925 
926 /* idle tracking */
927 #ifdef CONFIG_SMP
928 #ifdef CONFIG_CPUMASK_OFFSTACK
929 #define CL_ALIGNED_IF_ONSTACK
930 #else
931 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
932 #endif
933 
934 static struct {
935 	cpumask_var_t cpu;
936 	cpumask_var_t smt;
937 } idle_masks CL_ALIGNED_IF_ONSTACK;
938 
939 #endif	/* CONFIG_SMP */
940 
941 /* for %SCX_KICK_WAIT */
942 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
943 
944 /*
945  * Direct dispatch marker.
946  *
947  * Non-NULL values are used for direct dispatch from the enqueue path. A valid
948  * pointer points to the task currently being enqueued. An ERR_PTR value is used
949  * to indicate that direct dispatch has already happened.
950  */
951 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
952 
953 /*
954  * Dispatch queues.
955  *
956  * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
957  * to avoid live-locking in bypass mode where all tasks are dispatched to
958  * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
959  * sufficient, it can be further split.
960  */
961 static struct scx_dispatch_q **global_dsqs;
962 
963 static const struct rhashtable_params dsq_hash_params = {
964 	.key_len		= sizeof_field(struct scx_dispatch_q, id),
965 	.key_offset		= offsetof(struct scx_dispatch_q, id),
966 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
967 };
968 
969 static struct rhashtable dsq_hash;
970 static LLIST_HEAD(dsqs_to_free);
971 
972 /* dispatch buf */
973 struct scx_dsp_buf_ent {
974 	struct task_struct	*task;
975 	unsigned long		qseq;
976 	u64			dsq_id;
977 	u64			enq_flags;
978 };
979 
980 static u32 scx_dsp_max_batch;
981 
982 struct scx_dsp_ctx {
983 	struct rq		*rq;
984 	u32			cursor;
985 	u32			nr_tasks;
986 	struct scx_dsp_buf_ent	buf[];
987 };
988 
989 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
990 
991 /* string formatting from BPF */
992 struct scx_bstr_buf {
993 	u64			data[MAX_BPRINTF_VARARGS];
994 	char			line[SCX_EXIT_MSG_LEN];
995 };
996 
997 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
998 static struct scx_bstr_buf scx_exit_bstr_buf;
999 
1000 /* ops debug dump */
1001 struct scx_dump_data {
1002 	s32			cpu;
1003 	bool			first;
1004 	s32			cursor;
1005 	struct seq_buf		*s;
1006 	const char		*prefix;
1007 	struct scx_bstr_buf	buf;
1008 };
1009 
1010 static struct scx_dump_data scx_dump_data = {
1011 	.cpu			= -1,
1012 };
1013 
1014 /* /sys/kernel/sched_ext interface */
1015 static struct kset *scx_kset;
1016 static struct kobject *scx_root_kobj;
1017 
1018 #define CREATE_TRACE_POINTS
1019 #include <trace/events/sched_ext.h>
1020 
1021 static void process_ddsp_deferred_locals(struct rq *rq);
1022 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1023 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1024 					     s64 exit_code,
1025 					     const char *fmt, ...);
1026 
1027 #define scx_ops_error_kind(err, fmt, args...)					\
1028 	scx_ops_exit_kind((err), 0, fmt, ##args)
1029 
1030 #define scx_ops_exit(code, fmt, args...)					\
1031 	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1032 
1033 #define scx_ops_error(fmt, args...)						\
1034 	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1035 
1036 #define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1037 
1038 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1039 {
1040 	if (time_after(at, now))
1041 		return jiffies_to_msecs(at - now);
1042 	else
1043 		return -(long)jiffies_to_msecs(now - at);
1044 }
1045 
1046 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1047 static u32 higher_bits(u32 flags)
1048 {
1049 	return ~((1 << fls(flags)) - 1);
1050 }
1051 
1052 /* return the mask with only the highest bit set */
1053 static u32 highest_bit(u32 flags)
1054 {
1055 	int bit = fls(flags);
1056 	return ((u64)1 << bit) >> 1;
1057 }
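/*
 * e.g. for flags == 0x50 (bits 4 and 6 set), fls() returns 7, so higher_bits()
 * yields a mask with bits [7, 31] set and highest_bit() yields 0x40 (bit 6
 * alone).
 */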
1058 
1059 static bool u32_before(u32 a, u32 b)
1060 {
1061 	return (s32)(a - b) < 0;
1062 }
1063 
1064 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1065 {
1066 	return global_dsqs[cpu_to_node(task_cpu(p))];
1067 }
1068 
1069 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1070 {
1071 	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1072 }
1073 
1074 /*
1075  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1076  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1077  * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1078  * whether it's running from an allowed context.
1079  *
1080  * @mask is constant, always inline to cull the mask calculations.
1081  */
1082 static __always_inline void scx_kf_allow(u32 mask)
1083 {
1084 	/* nesting is allowed only in increasing scx_kf_mask order */
1085 	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1086 		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1087 		  current->scx.kf_mask, mask);
1088 	current->scx.kf_mask |= mask;
1089 	barrier();
1090 }
1091 
1092 static void scx_kf_disallow(u32 mask)
1093 {
1094 	barrier();
1095 	current->scx.kf_mask &= ~mask;
1096 }
1097 
1098 #define SCX_CALL_OP(mask, op, args...)						\
1099 do {										\
1100 	if (mask) {								\
1101 		scx_kf_allow(mask);						\
1102 		scx_ops.op(args);						\
1103 		scx_kf_disallow(mask);						\
1104 	} else {								\
1105 		scx_ops.op(args);						\
1106 	}									\
1107 } while (0)
1108 
1109 #define SCX_CALL_OP_RET(mask, op, args...)					\
1110 ({										\
1111 	__typeof__(scx_ops.op(args)) __ret;					\
1112 	if (mask) {								\
1113 		scx_kf_allow(mask);						\
1114 		__ret = scx_ops.op(args);					\
1115 		scx_kf_disallow(mask);						\
1116 	} else {								\
1117 		__ret = scx_ops.op(args);					\
1118 	}									\
1119 	__ret;									\
1120 })
1121 
1122 /*
1123  * Some kfuncs are allowed only on the tasks that are subjects of the
1124  * in-progress scx_ops operation, e.g., for locking guarantees. To enforce such
1125  * restrictions, the following SCX_CALL_OP_*() variants should be used when
1126  * invoking scx_ops operations that take task arguments. These can only be used
1127  * for non-nesting operations due to the way the tasks are tracked.
1128  *
1129  * kfuncs which can only operate on such tasks can in turn use
1130  * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1131  * the specific task.
1132  */
1133 #define SCX_CALL_OP_TASK(mask, op, task, args...)				\
1134 do {										\
1135 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1136 	current->scx.kf_tasks[0] = task;					\
1137 	SCX_CALL_OP(mask, op, task, ##args);					\
1138 	current->scx.kf_tasks[0] = NULL;					\
1139 } while (0)
1140 
1141 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...)				\
1142 ({										\
1143 	__typeof__(scx_ops.op(task, ##args)) __ret;				\
1144 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1145 	current->scx.kf_tasks[0] = task;					\
1146 	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);			\
1147 	current->scx.kf_tasks[0] = NULL;					\
1148 	__ret;									\
1149 })
1150 
1151 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)			\
1152 ({										\
1153 	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
1154 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1155 	current->scx.kf_tasks[0] = task0;					\
1156 	current->scx.kf_tasks[1] = task1;					\
1157 	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);		\
1158 	current->scx.kf_tasks[0] = NULL;					\
1159 	current->scx.kf_tasks[1] = NULL;					\
1160 	__ret;									\
1161 })
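/*
 * A typical call site looks like the following (sketch based on how the rest
 * of this file invokes ops):
 *
 *	if (SCX_HAS_OP(running))
 *		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
 *
 * which marks @p as the operation's subject task for the duration of the
 * callback so that task-scoped kfuncs can verify their arguments.
 */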
1162 
1163 /* @mask is constant, always inline to cull unnecessary branches */
1164 static __always_inline bool scx_kf_allowed(u32 mask)
1165 {
1166 	if (unlikely(!(current->scx.kf_mask & mask))) {
1167 		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1168 			      mask, current->scx.kf_mask);
1169 		return false;
1170 	}
1171 
1172 	/*
1173 	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1174 	 * DISPATCH must not be called if we're running DEQUEUE which is nested
1175 	 * inside ops.dispatch(). We don't need to check boundaries for any
1176 	 * blocking kfuncs as the verifier ensures they're only called from
1177 	 * sleepable progs.
1178 	 */
1179 	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1180 		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1181 		scx_ops_error("cpu_release kfunc called from a nested operation");
1182 		return false;
1183 	}
1184 
1185 	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1186 		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1187 		scx_ops_error("dispatch kfunc called from a nested operation");
1188 		return false;
1189 	}
1190 
1191 	return true;
1192 }
1193 
1194 /* see SCX_CALL_OP_TASK() */
1195 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1196 							struct task_struct *p)
1197 {
1198 	if (!scx_kf_allowed(mask))
1199 		return false;
1200 
1201 	if (unlikely((p != current->scx.kf_tasks[0] &&
1202 		      p != current->scx.kf_tasks[1]))) {
1203 		scx_ops_error("called on a task not being operated on");
1204 		return false;
1205 	}
1206 
1207 	return true;
1208 }
1209 
1210 static bool scx_kf_allowed_if_unlocked(void)
1211 {
1212 	return !current->scx.kf_mask;
1213 }
1214 
1215 /**
1216  * nldsq_next_task - Iterate to the next task in a non-local DSQ
1217  * @dsq: user DSQ being iterated
1218  * @cur: current position, %NULL to start iteration
1219  * @rev: walk backwards
1220  *
1221  * Returns %NULL when iteration is finished.
1222  */
1223 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1224 					   struct task_struct *cur, bool rev)
1225 {
1226 	struct list_head *list_node;
1227 	struct scx_dsq_list_node *dsq_lnode;
1228 
1229 	lockdep_assert_held(&dsq->lock);
1230 
1231 	if (cur)
1232 		list_node = &cur->scx.dsq_list.node;
1233 	else
1234 		list_node = &dsq->list;
1235 
1236 	/* find the next task, need to skip BPF iteration cursors */
1237 	do {
1238 		if (rev)
1239 			list_node = list_node->prev;
1240 		else
1241 			list_node = list_node->next;
1242 
1243 		if (list_node == &dsq->list)
1244 			return NULL;
1245 
1246 		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1247 					 node);
1248 	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1249 
1250 	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1251 }
1252 
1253 #define nldsq_for_each_task(p, dsq)						\
1254 	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
1255 	     (p) = nldsq_next_task((dsq), (p), false))
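/*
 * Usage sketch: walk a non-local DSQ under its lock, e.g. when consuming or
 * dumping tasks:
 *
 *	struct task_struct *p;
 *
 *	raw_spin_lock(&dsq->lock);
 *	nldsq_for_each_task(p, dsq) {
 *		// @p is visited in dispatch order; BPF iterator
 *		// cursors are skipped automatically
 *	}
 *	raw_spin_unlock(&dsq->lock);
 */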
1256 
1257 
1258 /*
1259  * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1260  * dispatch order. BPF-visible iterator is opaque and larger to allow future
1261  * changes without breaking backward compatibility. Can be used with
1262  * bpf_for_each(). See bpf_iter_scx_dsq_*().
1263  */
1264 enum scx_dsq_iter_flags {
1265 	/* iterate in the reverse dispatch order */
1266 	SCX_DSQ_ITER_REV		= 1U << 16,
1267 
1268 	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
1269 	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
1270 
1271 	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
1272 	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
1273 					  __SCX_DSQ_ITER_HAS_SLICE |
1274 					  __SCX_DSQ_ITER_HAS_VTIME,
1275 };
1276 
1277 struct bpf_iter_scx_dsq_kern {
1278 	struct scx_dsq_list_node	cursor;
1279 	struct scx_dispatch_q		*dsq;
1280 	u64				slice;
1281 	u64				vtime;
1282 } __attribute__((aligned(8)));
1283 
1284 struct bpf_iter_scx_dsq {
1285 	u64				__opaque[6];
1286 } __attribute__((aligned(8)));
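/*
 * From the BPF side, the iterator is typically consumed with bpf_for_each()
 * (hedged example, @dsq_id naming a previously created DSQ):
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, dsq_id, SCX_DSQ_ITER_REV) {
 *		// tasks visited in reverse dispatch order
 *	}
 */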
1287 
1288 
1289 /*
1290  * SCX task iterator.
1291  */
1292 struct scx_task_iter {
1293 	struct sched_ext_entity		cursor;
1294 	struct task_struct		*locked;
1295 	struct rq			*rq;
1296 	struct rq_flags			rf;
1297 	u32				cnt;
1298 };
1299 
1300 /**
1301  * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1302  * @iter: iterator to init
1303  *
1304  * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1305  * must eventually be stopped with scx_task_iter_stop().
1306  *
1307  * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1308  * between this and the first next() call or between any two next() calls. If
1309  * the locks are released between two next() calls, the caller is responsible
1310  * for ensuring that the task being iterated remains accessible either through
1311  * RCU read lock or obtaining a reference count.
1312  *
1313  * All tasks which existed when the iteration started are guaranteed to be
1314  * visited as long as they still exist.
1315  */
1316 static void scx_task_iter_start(struct scx_task_iter *iter)
1317 {
1318 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1319 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1320 
1321 	spin_lock_irq(&scx_tasks_lock);
1322 
1323 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1324 	list_add(&iter->cursor.tasks_node, &scx_tasks);
1325 	iter->locked = NULL;
1326 	iter->cnt = 0;
1327 }
1328 
1329 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1330 {
1331 	if (iter->locked) {
1332 		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1333 		iter->locked = NULL;
1334 	}
1335 }
1336 
1337 /**
1338  * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1339  * @iter: iterator to unlock
1340  *
1341  * If @iter is in the middle of a locked iteration, it may be locking the rq of
1342  * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1343  * This function can be safely called anytime during an iteration.
1344  */
1345 static void scx_task_iter_unlock(struct scx_task_iter *iter)
1346 {
1347 	__scx_task_iter_rq_unlock(iter);
1348 	spin_unlock_irq(&scx_tasks_lock);
1349 }
1350 
1351 /**
1352  * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1353  * @iter: iterator to re-lock
1354  *
1355  * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1356  * doesn't re-lock the rq lock. Must be called before other iterator operations.
1357  */
1358 static void scx_task_iter_relock(struct scx_task_iter *iter)
1359 {
1360 	spin_lock_irq(&scx_tasks_lock);
1361 }
1362 
1363 /**
1364  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1365  * @iter: iterator to exit
1366  *
1367  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1368  * which is released on return. If the iterator holds a task's rq lock, that rq
1369  * lock is also released. See scx_task_iter_start() for details.
1370  */
1371 static void scx_task_iter_stop(struct scx_task_iter *iter)
1372 {
1373 	list_del_init(&iter->cursor.tasks_node);
1374 	scx_task_iter_unlock(iter);
1375 }
1376 
1377 /**
1378  * scx_task_iter_next - Next task
1379  * @iter: iterator to walk
1380  *
1381  * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1382  * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
1383  * stalls by holding scx_tasks_lock for too long.
1384  */
1385 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1386 {
1387 	struct list_head *cursor = &iter->cursor.tasks_node;
1388 	struct sched_ext_entity *pos;
1389 
1390 	if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
1391 		scx_task_iter_unlock(iter);
1392 		cond_resched();
1393 		scx_task_iter_relock(iter);
1394 	}
1395 
1396 	list_for_each_entry(pos, cursor, tasks_node) {
1397 		if (&pos->tasks_node == &scx_tasks)
1398 			return NULL;
1399 		if (!(pos->flags & SCX_TASK_CURSOR)) {
1400 			list_move(cursor, &pos->tasks_node);
1401 			return container_of(pos, struct task_struct, scx);
1402 		}
1403 	}
1404 
1405 	/* can't happen, should always terminate at scx_tasks above */
1406 	BUG();
1407 }
1408 
1409 /**
1410  * scx_task_iter_next_locked - Next non-idle task with its rq locked
1411  * @iter: iterator to walk
1412  *
1413  * Visit the next non-idle task with its rq lock held. Idle tasks (the
1414  * init_tasks) are filtered out as explained in the function body. See
1415  * scx_task_iter_start() for details.
1416  */
1417 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1418 {
1419 	struct task_struct *p;
1420 
1421 	__scx_task_iter_rq_unlock(iter);
1422 
1423 	while ((p = scx_task_iter_next(iter))) {
1424 		/*
1425 		 * scx_task_iter is used to prepare and move tasks into SCX
1426 		 * while loading the BPF scheduler and vice-versa while
1427 		 * unloading. The init_tasks ("swappers") should be excluded
1428 		 * from the iteration because:
1429 		 *
1430 		 * - It's unsafe to use __setscheduler_prio() on an init_task to
1431 		 *   determine the sched_class to use as it won't preserve its
1432 		 *   idle_sched_class.
1433 		 *
1434 		 * - ops.init/exit_task() can easily be confused if called with
1435 		 *   init_tasks as they, e.g., share PID 0.
1436 		 *
1437 		 * As init_tasks are never scheduled through SCX, they can be
1438 		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1439 		 * doesn't work here:
1440 		 *
1441 		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1442 		 *   yet been onlined.
1443 		 *
1444 		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1445 		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
1446 		 *
1447 		 * Test for idle_sched_class as only init_tasks are on it.
1448 		 */
1449 		if (p->sched_class != &idle_sched_class)
1450 			break;
1451 	}
1452 	if (!p)
1453 		return NULL;
1454 
1455 	iter->rq = task_rq_lock(p, &iter->rf);
1456 	iter->locked = p;
1457 
1458 	return p;
1459 }
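/*
 * Together, the above form the canonical iteration pattern used by the enable
 * and disable paths (sketch):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti))) {
 *		// @p's rq is locked here; the iterator periodically
 *		// drops and re-acquires scx_tasks_lock to avoid stalls
 *	}
 *	scx_task_iter_stop(&sti);
 */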
1460 
1461 static enum scx_ops_enable_state scx_ops_enable_state(void)
1462 {
1463 	return atomic_read(&scx_ops_enable_state_var);
1464 }
1465 
1466 static enum scx_ops_enable_state
1467 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1468 {
1469 	return atomic_xchg(&scx_ops_enable_state_var, to);
1470 }
1471 
1472 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1473 					enum scx_ops_enable_state from)
1474 {
1475 	int from_v = from;
1476 
1477 	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1478 }
1479 
1480 static bool scx_rq_bypassing(struct rq *rq)
1481 {
1482 	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1483 }
1484 
1485 /**
1486  * wait_ops_state - Busy-wait the specified ops state to end
1487  * @p: target task
1488  * @opss: state to wait the end of
1489  *
1490  * Busy-wait for @p to transition out of @opss. This can only be used when the
1491  * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This
1492  * function also has load_acquire semantics to ensure that the caller can see
1493  * the updates made in the enqueueing and dispatching paths.
1494  */
1495 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1496 {
1497 	do {
1498 		cpu_relax();
1499 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1500 }
1501 
1502 /**
1503  * ops_cpu_valid - Verify a cpu number
1504  * @cpu: cpu number which came from a BPF ops
1505  * @where: extra information reported on error
1506  *
1507  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1508  * Verify that it is in range and one of the possible cpus. If invalid, trigger
1509  * an ops error.
1510  */
1511 static bool ops_cpu_valid(s32 cpu, const char *where)
1512 {
1513 	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1514 		return true;
1515 	} else {
1516 		scx_ops_error("invalid CPU %d%s%s", cpu,
1517 			      where ? " " : "", where ?: "");
1518 		return false;
1519 	}
1520 }
1521 
1522 /**
1523  * ops_sanitize_err - Sanitize a -errno value
1524  * @ops_name: operation to blame on failure
1525  * @err: -errno value to sanitize
1526  *
1527  * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1528  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1529  * cause misbehaviors. For example, a large negative return from
1530  * ops.init_task() triggers an oops when passed up the call chain because the
1531  * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1532  * handled as a pointer.
1533  */
1534 static int ops_sanitize_err(const char *ops_name, s32 err)
1535 {
1536 	if (err < 0 && err >= -MAX_ERRNO)
1537 		return err;
1538 
1539 	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1540 	return -EPROTO;
1541 }
1542 
1543 static void run_deferred(struct rq *rq)
1544 {
1545 	process_ddsp_deferred_locals(rq);
1546 }
1547 
1548 #ifdef CONFIG_SMP
1549 static void deferred_bal_cb_workfn(struct rq *rq)
1550 {
1551 	run_deferred(rq);
1552 }
1553 #endif
1554 
1555 static void deferred_irq_workfn(struct irq_work *irq_work)
1556 {
1557 	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1558 
1559 	raw_spin_rq_lock(rq);
1560 	run_deferred(rq);
1561 	raw_spin_rq_unlock(rq);
1562 }
1563 
1564 /**
1565  * schedule_deferred - Schedule execution of deferred actions on an rq
1566  * @rq: target rq
1567  *
1568  * Schedule execution of deferred actions on @rq. Must be called with @rq
1569  * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1570  * can unlock @rq to e.g. migrate tasks to other rqs.
1571  */
1572 static void schedule_deferred(struct rq *rq)
1573 {
1574 	lockdep_assert_rq_held(rq);
1575 
1576 #ifdef CONFIG_SMP
1577 	/*
1578 	 * If in the middle of waking up a task, task_woken_scx() will be called
1579 	 * afterwards which will then run the deferred actions, no need to
1580 	 * schedule anything.
1581 	 */
1582 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1583 		return;
1584 
1585 	/*
1586 	 * If in balance, the balance callbacks will be called before rq lock is
1587 	 * released. Schedule one.
1588 	 */
1589 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1590 		queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1591 				       deferred_bal_cb_workfn);
1592 		return;
1593 	}
1594 #endif
1595 	/*
1596 	 * No scheduler hooks available. Queue an irq work. They are executed on
1597 	 * IRQ re-enable which may take a bit longer than the scheduler hooks.
1598 	 * The above WAKEUP and BALANCE paths should cover most of the cases and
1599 	 * the time to IRQ re-enable shouldn't be long.
1600 	 */
1601 	irq_work_queue(&rq->scx.deferred_irq_work);
1602 }
1603 
1604 /**
1605  * touch_core_sched - Update timestamp used for core-sched task ordering
1606  * @rq: rq to read clock from, must be locked
1607  * @p: task to update the timestamp for
1608  *
1609  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1610  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1611  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1612  * exhaustion).
1613  */
1614 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1615 {
1616 	lockdep_assert_rq_held(rq);
1617 
1618 #ifdef CONFIG_SCHED_CORE
1619 	/*
1620 	 * It's okay to update the timestamp spuriously. Use
1621 	 * sched_core_disabled() which is cheaper than enabled().
1622 	 *
1623 	 * As this is used to determine ordering between tasks of sibling CPUs,
1624 	 * it may be better to use per-core dispatch sequence instead.
1625 	 */
1626 	if (!sched_core_disabled())
1627 		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1628 #endif
1629 }
1630 
1631 /**
1632  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1633  * @rq: rq to read clock from, must be locked
1634  * @p: task being dispatched
1635  *
1636  * If the BPF scheduler implements custom core-sched ordering via
1637  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1638  * ordering within each local DSQ. This function is called from dispatch paths
1639  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1640  */
1641 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1642 {
1643 	lockdep_assert_rq_held(rq);
1644 
1645 #ifdef CONFIG_SCHED_CORE
1646 	if (SCX_HAS_OP(core_sched_before))
1647 		touch_core_sched(rq, p);
1648 #endif
1649 }
1650 
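/*
 * Account the runtime consumed by @rq->curr and drain it from the task's
 * slice. Slice exhaustion also refreshes the core-sched timestamp, marking
 * the end of the task's current turn for core-sched ordering.
 */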
1651 static void update_curr_scx(struct rq *rq)
1652 {
1653 	struct task_struct *curr = rq->curr;
1654 	s64 delta_exec;
1655 
1656 	delta_exec = update_curr_common(rq);
1657 	if (unlikely(delta_exec <= 0))
1658 		return;
1659 
1660 	if (curr->scx.slice != SCX_SLICE_INF) {
1661 		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1662 		if (!curr->scx.slice)
1663 			touch_core_sched(rq, curr);
1664 	}
1665 }
1666 
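/*
 * rb_add() comparator for PRIQ DSQs: order tasks by ascending
 * @scx.dsq_vtime. time_before64() keeps the comparison correct across u64
 * wraparound.
 */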
1667 static bool scx_dsq_priq_less(struct rb_node *node_a,
1668 			      const struct rb_node *node_b)
1669 {
1670 	const struct task_struct *a =
1671 		container_of(node_a, struct task_struct, scx.dsq_priq);
1672 	const struct task_struct *b =
1673 		container_of(node_b, struct task_struct, scx.dsq_priq);
1674 
1675 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1676 }
1677 
1678 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1679 {
1680 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1681 	WRITE_ONCE(dsq->nr, dsq->nr + delta);
1682 }
1683 
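/*
 * Enqueue @p on @dsq. Non-local DSQs are protected by @dsq->lock which is
 * acquired here; the local DSQ is protected by the already-held rq lock.
 * Depending on @enq_flags, @p is queued in FIFO order or, for user DSQs,
 * vtime (PRIQ) order, and a local-DSQ enqueue may trigger preemption.
 */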
1684 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1685 			     u64 enq_flags)
1686 {
1687 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
1688 
1689 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1690 	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1691 		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
1692 
1693 	if (!is_local) {
1694 		raw_spin_lock(&dsq->lock);
1695 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1696 			scx_ops_error("attempting to dispatch to a destroyed dsq");
1697 			/* fall back to the global dsq */
1698 			raw_spin_unlock(&dsq->lock);
1699 			dsq = find_global_dsq(p);
1700 			raw_spin_lock(&dsq->lock);
1701 		}
1702 	}
1703 
1704 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1705 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1706 		/*
1707 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1708 		 * their FIFO queues. To avoid confusion and accidentally
1709 		 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1710 		 * disallow any internal DSQ from doing vtime ordering of
1711 		 * tasks.
1712 		 */
1713 		scx_ops_error("cannot use vtime ordering for built-in DSQs");
1714 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1715 	}
1716 
1717 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1718 		struct rb_node *rbp;
1719 
1720 		/*
1721 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1722 		 * linked to both the rbtree and list on PRIQs, this can only be
1723 		 * tested easily when adding the first task.
1724 		 */
1725 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1726 			     nldsq_next_task(dsq, NULL, false)))
1727 			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1728 				      dsq->id);
1729 
1730 		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1731 		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1732 
1733 		/*
1734 		 * Find the previous task and insert after it on the list so
1735 		 * that @dsq->list is vtime ordered.
1736 		 */
1737 		rbp = rb_prev(&p->scx.dsq_priq);
1738 		if (rbp) {
1739 			struct task_struct *prev =
1740 				container_of(rbp, struct task_struct,
1741 					     scx.dsq_priq);
1742 			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1743 		} else {
1744 			list_add(&p->scx.dsq_list.node, &dsq->list);
1745 		}
1746 	} else {
1747 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1748 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1749 			scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1750 				      dsq->id);
1751 
1752 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1753 			list_add(&p->scx.dsq_list.node, &dsq->list);
1754 		else
1755 			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1756 	}
1757 
1758 	/* seq records the order tasks are queued, used by BPF DSQ iterator */
1759 	dsq->seq++;
1760 	p->scx.dsq_seq = dsq->seq;
1761 
1762 	dsq_mod_nr(dsq, 1);
1763 	p->scx.dsq = dsq;
1764 
1765 	/*
1766 	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1767 	 * direct dispatch path, but we clear them here because the direct
1768 	 * dispatch verdict may be overridden on the enqueue path during e.g.
1769 	 * bypass.
1770 	 */
1771 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1772 	p->scx.ddsp_enq_flags = 0;
1773 
1774 	/*
1775 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1776 	 * match waiters' load_acquire.
1777 	 */
1778 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1779 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1780 
1781 	if (is_local) {
1782 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1783 		bool preempt = false;
1784 
1785 		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1786 		    rq->curr->sched_class == &ext_sched_class) {
1787 			rq->curr->scx.slice = 0;
1788 			preempt = true;
1789 		}
1790 
1791 		if (preempt || sched_class_above(&ext_sched_class,
1792 						 rq->curr->sched_class))
1793 			resched_curr(rq);
1794 	} else {
1795 		raw_spin_unlock(&dsq->lock);
1796 	}
1797 }
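
/*
 * Illustrative sketch (BPF side, not part of this file): the PRIQ path above
 * services vtime insertions from the BPF scheduler. Assuming a user DSQ
 * MY_DSQ_ID created with scx_bpf_create_dsq(), something like:
 *
 *	void BPF_STRUCT_OPS(myops_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert_vtime(p, MY_DSQ_ID, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 *
 * reaches dispatch_enqueue() with %SCX_ENQ_DSQ_PRIQ set.
 */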
1798 
1799 static void task_unlink_from_dsq(struct task_struct *p,
1800 				 struct scx_dispatch_q *dsq)
1801 {
1802 	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1803 
1804 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1805 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
1806 		RB_CLEAR_NODE(&p->scx.dsq_priq);
1807 		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1808 	}
1809 
1810 	list_del_init(&p->scx.dsq_list.node);
1811 	dsq_mod_nr(dsq, -1);
1812 }
1813 
1814 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1815 {
1816 	struct scx_dispatch_q *dsq = p->scx.dsq;
1817 	bool is_local = dsq == &rq->scx.local_dsq;
1818 
1819 	if (!dsq) {
1820 		/*
1821 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1822 		 * Unlinking is all that's needed to cancel.
1823 		 */
1824 		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1825 			list_del_init(&p->scx.dsq_list.node);
1826 
1827 		/*
1828 		 * When dispatching directly from the BPF scheduler to a local
1829 		 * DSQ, the task isn't associated with any DSQ but
1830 		 * @p->scx.holding_cpu may be set under the protection of
1831 		 * %SCX_OPSS_DISPATCHING.
1832 		 */
1833 		if (p->scx.holding_cpu >= 0)
1834 			p->scx.holding_cpu = -1;
1835 
1836 		return;
1837 	}
1838 
1839 	if (!is_local)
1840 		raw_spin_lock(&dsq->lock);
1841 
1842 	/*
1843 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1844 	 * change underneath us.
1845 	 */
1846 	if (p->scx.holding_cpu < 0) {
1847 		/* @p must still be on @dsq, dequeue */
1848 		task_unlink_from_dsq(p, dsq);
1849 	} else {
1850 		/*
1851 		 * We're racing against dispatch_to_local_dsq() which already
1852 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1853 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1854 		 * the race.
1855 		 */
1856 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1857 		p->scx.holding_cpu = -1;
1858 	}
1859 	p->scx.dsq = NULL;
1860 
1861 	if (!is_local)
1862 		raw_spin_unlock(&dsq->lock);
1863 }
1864 
1865 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1866 						    struct task_struct *p)
1867 {
1868 	struct scx_dispatch_q *dsq;
1869 
1870 	if (dsq_id == SCX_DSQ_LOCAL)
1871 		return &rq->scx.local_dsq;
1872 
1873 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1874 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1875 
1876 		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1877 			return find_global_dsq(p);
1878 
1879 		return &cpu_rq(cpu)->scx.local_dsq;
1880 	}
1881 
1882 	if (dsq_id == SCX_DSQ_GLOBAL)
1883 		dsq = find_global_dsq(p);
1884 	else
1885 		dsq = find_user_dsq(dsq_id);
1886 
1887 	if (unlikely(!dsq)) {
1888 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1889 			      dsq_id, p->comm, p->pid);
1890 		return find_global_dsq(p);
1891 	}
1892 
1893 	return dsq;
1894 }
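
/*
 * For reference, the dsq_id encodings resolved above: %SCX_DSQ_LOCAL maps to
 * @rq's own local DSQ, %SCX_DSQ_LOCAL_ON | cpu to that CPU's local DSQ,
 * %SCX_DSQ_GLOBAL to the global DSQ for @p, and any other value is looked up
 * as a user-created DSQ, falling back to the global DSQ on errors.
 */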
1895 
1896 static void mark_direct_dispatch(struct task_struct *ddsp_task,
1897 				 struct task_struct *p, u64 dsq_id,
1898 				 u64 enq_flags)
1899 {
1900 	/*
1901 	 * Mark that dispatch already happened from ops.select_cpu() or
1902 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1903 	 * which can never match a valid task pointer.
1904 	 */
1905 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1906 
1907 	/* @p must match the task on the enqueue path */
1908 	if (unlikely(p != ddsp_task)) {
1909 		if (IS_ERR(ddsp_task))
1910 			scx_ops_error("%s[%d] already direct-dispatched",
1911 				      p->comm, p->pid);
1912 		else
1913 			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1914 				      ddsp_task->comm, ddsp_task->pid,
1915 				      p->comm, p->pid);
1916 		return;
1917 	}
1918 
1919 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1920 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1921 
1922 	p->scx.ddsp_dsq_id = dsq_id;
1923 	p->scx.ddsp_enq_flags = enq_flags;
1924 }
1925 
1926 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1927 {
1928 	struct rq *rq = task_rq(p);
1929 	struct scx_dispatch_q *dsq =
1930 		find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
1931 
1932 	touch_core_sched_dispatch(rq, p);
1933 
1934 	p->scx.ddsp_enq_flags |= enq_flags;
1935 
1936 	/*
1937 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
1938 	 * double lock a remote rq and enqueue to its local DSQ. For
1939 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1940 	 * the enqueue so that it's executed when @rq can be unlocked.
1941 	 */
1942 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1943 		unsigned long opss;
1944 
1945 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1946 
1947 		switch (opss) {
1948 		case SCX_OPSS_NONE:
1949 			break;
1950 		case SCX_OPSS_QUEUEING:
1951 			/*
1952 			 * As @p was never passed to the BPF side, _release is
1953 			 * not strictly necessary. Still do it for consistency.
1954 			 */
1955 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1956 			break;
1957 		default:
1958 			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1959 				  p->comm, p->pid, opss);
1960 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1961 			break;
1962 		}
1963 
1964 		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1965 		list_add_tail(&p->scx.dsq_list.node,
1966 			      &rq->scx.ddsp_deferred_locals);
1967 		schedule_deferred(rq);
1968 		return;
1969 	}
1970 
1971 	dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1972 }
1973 
1974 static bool scx_rq_online(struct rq *rq)
1975 {
1976 	/*
1977 	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1978 	 * the online state as seen from the BPF scheduler. cpu_active() test
1979 	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1980 	 * stay set until the current scheduling operation is complete even if
1981 	 * we aren't locking @rq.
1982 	 */
1983 	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1984 }
1985 
1986 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1987 			    int sticky_cpu)
1988 {
1989 	struct task_struct **ddsp_taskp;
1990 	unsigned long qseq;
1991 
1992 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1993 
1994 	/* rq migration */
1995 	if (sticky_cpu == cpu_of(rq))
1996 		goto local_norefill;
1997 
1998 	/*
1999 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2000 	 * is offline and are just running the hotplug path. Don't bother the
2001 	 * BPF scheduler.
2002 	 */
2003 	if (!scx_rq_online(rq))
2004 		goto local;
2005 
2006 	if (scx_rq_bypassing(rq))
2007 		goto global;
2008 
2009 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2010 		goto direct;
2011 
2012 	/* see %SCX_OPS_ENQ_EXITING */
2013 	if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
2014 	    unlikely(p->flags & PF_EXITING))
2015 		goto local;
2016 
2017 	if (!SCX_HAS_OP(enqueue))
2018 		goto global;
2019 
2020 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2021 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2022 
2023 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2024 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2025 
2026 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2027 	WARN_ON_ONCE(*ddsp_taskp);
2028 	*ddsp_taskp = p;
2029 
2030 	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
2031 
2032 	*ddsp_taskp = NULL;
2033 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2034 		goto direct;
2035 
2036 	/*
2037 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2038 	 * dequeue may be waiting. The store_release matches their load_acquire.
2039 	 */
2040 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2041 	return;
2042 
2043 direct:
2044 	direct_dispatch(p, enq_flags);
2045 	return;
2046 
2047 local:
2048 	/*
2049 	 * For task-ordering, slice refill must be treated as implying the end
2050 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2051 	 * higher priority it becomes from scx_prio_less()'s POV.
2052 	 */
2053 	touch_core_sched(rq, p);
2054 	p->scx.slice = SCX_SLICE_DFL;
2055 local_norefill:
2056 	dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2057 	return;
2058 
2059 global:
2060 	touch_core_sched(rq, p);	/* see the comment in local: */
2061 	p->scx.slice = SCX_SLICE_DFL;
2062 	dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2063 }
2064 
2065 static bool task_runnable(const struct task_struct *p)
2066 {
2067 	return !list_empty(&p->scx.runnable_node);
2068 }
2069 
2070 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2071 {
2072 	lockdep_assert_rq_held(rq);
2073 
2074 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2075 		p->scx.runnable_at = jiffies;
2076 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2077 	}
2078 
2079 	/*
2080 	 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2081 	 * appended to the runnable_list.
2082 	 */
2083 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2084 }
2085 
2086 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2087 {
2088 	list_del_init(&p->scx.runnable_node);
2089 	if (reset_runnable_at)
2090 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2091 }
2092 
2093 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2094 {
2095 	int sticky_cpu = p->scx.sticky_cpu;
2096 
2097 	if (enq_flags & ENQUEUE_WAKEUP)
2098 		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2099 
2100 	enq_flags |= rq->scx.extra_enq_flags;
2101 
2102 	if (sticky_cpu >= 0)
2103 		p->scx.sticky_cpu = -1;
2104 
2105 	/*
2106 	 * Restoring a running task will be immediately followed by
2107 	 * set_next_task_scx() which expects the task to not be on the BPF
2108 	 * scheduler as tasks can only start running through local DSQs. Force
2109 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2110 	 */
2111 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2112 		sticky_cpu = cpu_of(rq);
2113 
2114 	if (p->scx.flags & SCX_TASK_QUEUED) {
2115 		WARN_ON_ONCE(!task_runnable(p));
2116 		goto out;
2117 	}
2118 
2119 	set_task_runnable(rq, p);
2120 	p->scx.flags |= SCX_TASK_QUEUED;
2121 	rq->scx.nr_running++;
2122 	add_nr_running(rq, 1);
2123 
2124 	if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2125 		SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2126 
2127 	if (enq_flags & SCX_ENQ_WAKEUP)
2128 		touch_core_sched(rq, p);
2129 
2130 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2131 out:
2132 	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2133 }
2134 
2135 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2136 {
2137 	unsigned long opss;
2138 
2139 	/* dequeue is always temporary, don't reset runnable_at */
2140 	clr_task_runnable(p, false);
2141 
2142 	/* acquire ensures that we see the preceding updates on QUEUED */
2143 	opss = atomic_long_read_acquire(&p->scx.ops_state);
2144 
2145 	switch (opss & SCX_OPSS_STATE_MASK) {
2146 	case SCX_OPSS_NONE:
2147 		break;
2148 	case SCX_OPSS_QUEUEING:
2149 		/*
2150 		 * QUEUEING is started and finished while holding @p's rq lock.
2151 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2152 		 */
2153 		BUG();
2154 	case SCX_OPSS_QUEUED:
2155 		if (SCX_HAS_OP(dequeue))
2156 			SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2157 
2158 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2159 					    SCX_OPSS_NONE))
2160 			break;
2161 		fallthrough;
2162 	case SCX_OPSS_DISPATCHING:
2163 		/*
2164 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
2165 		 * wait for the transfer to complete so that @p doesn't get
2166 		 * added to its DSQ after dequeueing is complete.
2167 		 *
2168 		 * As we're waiting on DISPATCHING with the rq locked, the
2169 		 * dispatching side shouldn't try to lock the rq while
2170 		 * DISPATCHING is set. See dispatch_to_local_dsq().
2171 		 *
2172 		 * DISPATCHING shouldn't have qseq set and control can reach
2173 		 * here with NONE @opss from the above QUEUED case block.
2174 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2175 		 */
2176 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
2177 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2178 		break;
2179 	}
2180 }
2181 
2182 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2183 {
2184 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2185 		WARN_ON_ONCE(task_runnable(p));
2186 		return true;
2187 	}
2188 
2189 	ops_dequeue(p, deq_flags);
2190 
2191 	/*
2192 	 * A currently running task which is going off @rq first gets dequeued
2193 	 * and then stops running. As we want running <-> stopping transitions
2194 	 * to be contained within runnable <-> quiescent transitions, trigger
2195 	 * ->stopping() early here instead of in put_prev_task_scx().
2196 	 *
2197 	 * @p may go through multiple stopping <-> running transitions between
2198 	 * here and put_prev_task_scx() if task attribute changes occur while
2199 	 * balance_scx() leaves @rq unlocked. However, they don't contain any
2200 	 * information meaningful to the BPF scheduler and can be suppressed by
2201 	 * skipping the callbacks if the task is !QUEUED.
2202 	 */
2203 	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2204 		update_curr_scx(rq);
2205 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2206 	}
2207 
2208 	if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2209 		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2210 
2211 	if (deq_flags & SCX_DEQ_SLEEP)
2212 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2213 	else
2214 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2215 
2216 	p->scx.flags &= ~SCX_TASK_QUEUED;
2217 	rq->scx.nr_running--;
2218 	sub_nr_running(rq, 1);
2219 
2220 	dispatch_dequeue(rq, p);
2221 	return true;
2222 }
2223 
2224 static void yield_task_scx(struct rq *rq)
2225 {
2226 	struct task_struct *p = rq->curr;
2227 
2228 	if (SCX_HAS_OP(yield))
2229 		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2230 	else
2231 		p->scx.slice = 0;
2232 }
2233 
2234 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2235 {
2236 	struct task_struct *from = rq->curr;
2237 
2238 	if (SCX_HAS_OP(yield))
2239 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2240 	else
2241 		return false;
2242 }
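
/*
 * Illustrative sketch (BPF side, hypothetical scheduler): ops.yield()
 * receives the yielding task and, for yield_to(), the target. Returning
 * %false from the two-task form indicates the yield wasn't honored,
 * matching the default behavior above:
 *
 *	bool BPF_STRUCT_OPS(myops_yield, struct task_struct *from,
 *			    struct task_struct *to)
 *	{
 *		if (!to) {
 *			from->scx.slice = 0;
 *			return true;
 *		}
 *		return false;
 *	}
 */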
2243 
2244 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2245 					 struct scx_dispatch_q *src_dsq,
2246 					 struct rq *dst_rq)
2247 {
2248 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2249 
2250 	/* @src_dsq is locked and @p is on @dst_rq */
2251 	lockdep_assert_held(&src_dsq->lock);
2252 	lockdep_assert_rq_held(dst_rq);
2253 
2254 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2255 
2256 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2257 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2258 	else
2259 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2260 
2261 	dsq_mod_nr(dst_dsq, 1);
2262 	p->scx.dsq = dst_dsq;
2263 }
2264 
2265 #ifdef CONFIG_SMP
2266 /**
2267  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2268  * @p: task to move
2269  * @enq_flags: %SCX_ENQ_*
2270  * @src_rq: rq to move the task from, locked on entry, released on return
2271  * @dst_rq: rq to move the task into, locked on return
2272  *
2273  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2274  */
2275 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2276 					  struct rq *src_rq, struct rq *dst_rq)
2277 {
2278 	lockdep_assert_rq_held(src_rq);
2279 
2280 	/* the following marks @p MIGRATING which excludes dequeue */
2281 	deactivate_task(src_rq, p, 0);
2282 	set_task_cpu(p, cpu_of(dst_rq));
2283 	p->scx.sticky_cpu = cpu_of(dst_rq);
2284 
2285 	raw_spin_rq_unlock(src_rq);
2286 	raw_spin_rq_lock(dst_rq);
2287 
2288 	/*
2289 	 * We want to pass scx-specific enq_flags but activate_task() will
2290 	 * truncate the upper 32 bit. As we own @rq, we can pass them through
2291 	 * @rq->scx.extra_enq_flags instead.
2292 	 */
2293 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2294 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2295 	dst_rq->scx.extra_enq_flags = enq_flags;
2296 	activate_task(dst_rq, p, 0);
2297 	dst_rq->scx.extra_enq_flags = 0;
2298 }
2299 
2300 /*
2301  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2302  * differences:
2303  *
2304  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2305  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2306  *   this CPU?".
2307  *
2308  *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2309  *   must be allowed to finish on the CPU that it's currently on regardless of
2310  *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2311  *   BPF scheduler shouldn't attempt to migrate a task which has migration
2312  *   disabled.
2313  *
2314  * - The BPF scheduler is bypassed while the rq is offline and we can always say
2315  *   no to the BPF scheduler initiated migrations while offline.
2316  */
2317 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2318 				      bool trigger_error)
2319 {
2320 	int cpu = cpu_of(rq);
2321 
2322 	/*
2323 	 * We don't require the BPF scheduler to avoid dispatching to offline
2324 	 * CPUs mostly for convenience but also because CPUs can go offline
2325 	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2326 	 * picked CPU is outside the allowed mask.
2327 	 */
2328 	if (!task_allowed_on_cpu(p, cpu)) {
2329 		if (trigger_error)
2330 			scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
2331 				      cpu_of(rq), p->comm, p->pid);
2332 		return false;
2333 	}
2334 
2335 	if (unlikely(is_migration_disabled(p)))
2336 		return false;
2337 
2338 	if (!scx_rq_online(rq))
2339 		return false;
2340 
2341 	return true;
2342 }
2343 
2344 /**
2345  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2346  * @p: target task
2347  * @dsq: locked DSQ @p is currently on
2348  * @src_rq: rq @p is currently on, stable with @dsq locked
2349  *
2350  * Called with @dsq locked but no rq's locked. We want to move @p to a different
2351  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2352  * required when transferring into a local DSQ. Even when transferring into a
2353  * non-local DSQ, it's better to use the same mechanism to protect against
2354  * dequeues and maintain the invariant that @p->scx.dsq can only change while
2355  * @src_rq is locked, which e.g. scx_dump_task() depends on.
2356  *
2357  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2358  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2359  * this may race with dequeue, which can't drop the rq lock or fail, do a little
2360  * dancing from our side.
2361  *
2362  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2363  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2364  * would be cleared to -1. While other cpus may have updated it to different
2365  * values afterwards, as this operation can't be preempted or recurse, the
2366  * holding_cpu can never become this CPU again before we're done. Thus, we can
2367  * tell whether we lost to dequeue by testing whether the holding_cpu still
2368  * points to this CPU. See dispatch_dequeue() for the counterpart.
2369  *
2370  * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2371  * still valid. %false if lost to dequeue.
2372  */
2373 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2374 				       struct scx_dispatch_q *dsq,
2375 				       struct rq *src_rq)
2376 {
2377 	s32 cpu = raw_smp_processor_id();
2378 
2379 	lockdep_assert_held(&dsq->lock);
2380 
2381 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2382 	task_unlink_from_dsq(p, dsq);
2383 	p->scx.holding_cpu = cpu;
2384 
2385 	raw_spin_unlock(&dsq->lock);
2386 	raw_spin_rq_lock(src_rq);
2387 
2388 	/* task_rq couldn't have changed if we're still the holding cpu */
2389 	return likely(p->scx.holding_cpu == cpu) &&
2390 		!WARN_ON_ONCE(src_rq != task_rq(p));
2391 }
2392 
2393 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2394 				struct scx_dispatch_q *dsq, struct rq *src_rq)
2395 {
2396 	raw_spin_rq_unlock(this_rq);
2397 
2398 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2399 		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2400 		return true;
2401 	} else {
2402 		raw_spin_rq_unlock(src_rq);
2403 		raw_spin_rq_lock(this_rq);
2404 		return false;
2405 	}
2406 }
2407 #else	/* CONFIG_SMP */
2408 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2409 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
2410 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2411 #endif	/* CONFIG_SMP */
2412 
2413 /**
2414  * move_task_between_dsqs() - Move a task from one DSQ to another
2415  * @p: target task
2416  * @enq_flags: %SCX_ENQ_*
2417  * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2418  * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2419  *
2420  * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2421  * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2422  * will change. As @p's task_rq is locked, this function doesn't need to use the
2423  * holding_cpu mechanism.
2424  *
2425  * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2426  * return value, is locked.
2427  */
2428 static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
2429 					 struct scx_dispatch_q *src_dsq,
2430 					 struct scx_dispatch_q *dst_dsq)
2431 {
2432 	struct rq *src_rq = task_rq(p), *dst_rq;
2433 
2434 	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2435 	lockdep_assert_held(&src_dsq->lock);
2436 	lockdep_assert_rq_held(src_rq);
2437 
2438 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2439 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2440 		if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
2441 			dst_dsq = find_global_dsq(p);
2442 			dst_rq = src_rq;
2443 		}
2444 	} else {
2445 		/* no need to migrate if destination is a non-local DSQ */
2446 		dst_rq = src_rq;
2447 	}
2448 
2449 	/*
2450 	 * Move @p into @dst_dsq. If @dst_dsq is the local DSQ of a different
2451 	 * CPU, @p will be migrated.
2452 	 */
2453 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
2454 		/* @p is going from a non-local DSQ to a local DSQ */
2455 		if (src_rq == dst_rq) {
2456 			task_unlink_from_dsq(p, src_dsq);
2457 			move_local_task_to_local_dsq(p, enq_flags,
2458 						     src_dsq, dst_rq);
2459 			raw_spin_unlock(&src_dsq->lock);
2460 		} else {
2461 			raw_spin_unlock(&src_dsq->lock);
2462 			move_remote_task_to_local_dsq(p, enq_flags,
2463 						      src_rq, dst_rq);
2464 		}
2465 	} else {
2466 		/*
2467 		 * @p is going from a non-local DSQ to a non-local DSQ. As
2468 		 * @src_dsq is already locked, do an abbreviated dequeue.
2469 		 */
2470 		task_unlink_from_dsq(p, src_dsq);
2471 		p->scx.dsq = NULL;
2472 		raw_spin_unlock(&src_dsq->lock);
2473 
2474 		dispatch_enqueue(dst_dsq, p, enq_flags);
2475 	}
2476 
2477 	return dst_rq;
2478 }
2479 
2480 /*
2481  * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2482  * banging on the same DSQ on a large NUMA system to the point where switching
2483  * to the bypass mode can take a long time. Inject artificial delays while the
2484  * bypass mode is switching to guarantee timely completion.
2485  */
2486 static void scx_ops_breather(struct rq *rq)
2487 {
2488 	u64 until;
2489 
2490 	lockdep_assert_rq_held(rq);
2491 
2492 	if (likely(!atomic_read(&scx_ops_breather_depth)))
2493 		return;
2494 
2495 	raw_spin_rq_unlock(rq);
2496 
2497 	until = ktime_get_ns() + NSEC_PER_MSEC;
2498 
2499 	do {
2500 		int cnt = 1024;
2501 		while (atomic_read(&scx_ops_breather_depth) && --cnt)
2502 			cpu_relax();
2503 	} while (atomic_read(&scx_ops_breather_depth) &&
2504 		 time_before64(ktime_get_ns(), until));
2505 
2506 	raw_spin_rq_lock(rq);
2507 }
2508 
2509 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2510 {
2511 	struct task_struct *p;
2512 retry:
2513 	/*
2514 	 * This retry loop can repeatedly race against scx_ops_bypass()
2515 	 * dequeueing tasks from @dsq trying to put the system into the bypass
2516 	 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can
2517 	 * live-lock the machine into soft lockups. Give a breather.
2518 	 */
2519 	scx_ops_breather(rq);
2520 
2521 	/*
2522 	 * The caller can't expect to successfully consume a task if the task's
2523 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
2524 	 * @dsq->list without locking and skip if it seems empty.
2525 	 */
2526 	if (list_empty(&dsq->list))
2527 		return false;
2528 
2529 	raw_spin_lock(&dsq->lock);
2530 
2531 	nldsq_for_each_task(p, dsq) {
2532 		struct rq *task_rq = task_rq(p);
2533 
2534 		if (rq == task_rq) {
2535 			task_unlink_from_dsq(p, dsq);
2536 			move_local_task_to_local_dsq(p, 0, dsq, rq);
2537 			raw_spin_unlock(&dsq->lock);
2538 			return true;
2539 		}
2540 
2541 		if (task_can_run_on_remote_rq(p, rq, false)) {
2542 			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2543 				return true;
2544 			goto retry;
2545 		}
2546 	}
2547 
2548 	raw_spin_unlock(&dsq->lock);
2549 	return false;
2550 }
2551 
2552 static bool consume_global_dsq(struct rq *rq)
2553 {
2554 	int node = cpu_to_node(cpu_of(rq));
2555 
2556 	return consume_dispatch_q(rq, global_dsqs[node]);
2557 }
2558 
2559 /**
2560  * dispatch_to_local_dsq - Dispatch a task to a local dsq
2561  * @rq: current rq which is locked
2562  * @dst_dsq: destination DSQ
2563  * @p: task to dispatch
2564  * @enq_flags: %SCX_ENQ_*
2565  *
2566  * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2567  * DSQ. This function performs all the synchronization dancing needed because
2568  * local DSQs are protected with rq locks.
2569  *
2570  * The caller must have exclusive ownership of @p (e.g. through
2571  * %SCX_OPSS_DISPATCHING).
2572  */
2573 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2574 				  struct task_struct *p, u64 enq_flags)
2575 {
2576 	struct rq *src_rq = task_rq(p);
2577 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2578 
2579 	/*
2580 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
2581 	 * be dequeued, its task_rq and cpus_allowed are stable too.
2582 	 *
2583 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
2584 	 */
2585 	if (rq == src_rq && rq == dst_rq) {
2586 		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2587 		return;
2588 	}
2589 
2590 #ifdef CONFIG_SMP
2591 	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2592 		dispatch_enqueue(find_global_dsq(p), p,
2593 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
2594 		return;
2595 	}
2596 
2597 	/*
2598 	 * @p is on a possibly remote @src_rq which we need to lock to move the
2599 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2600 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
2601 	 * DISPATCHING.
2602 	 *
2603 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2604 	 * we're moving from a DSQ and use the same mechanism - mark the task
2605 	 * under transfer with holding_cpu, release DISPATCHING and then follow
2606 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
2607 	 */
2608 	p->scx.holding_cpu = raw_smp_processor_id();
2609 
2610 	/* store_release ensures that dequeue sees the above */
2611 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2612 
2613 	/* switch to @src_rq lock */
2614 	if (rq != src_rq) {
2615 		raw_spin_rq_unlock(rq);
2616 		raw_spin_rq_lock(src_rq);
2617 	}
2618 
2619 	/* task_rq couldn't have changed if we're still the holding cpu */
2620 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2621 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
2622 		/*
2623 		 * If @p is staying on the same rq, there's no need to go
2624 		 * through the full deactivate/activate cycle. Optimize by
2625 		 * abbreviating move_remote_task_to_local_dsq().
2626 		 */
2627 		if (src_rq == dst_rq) {
2628 			p->scx.holding_cpu = -1;
2629 			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2630 		} else {
2631 			move_remote_task_to_local_dsq(p, enq_flags,
2632 						      src_rq, dst_rq);
2633 		}
2634 
2635 		/* if the destination CPU is idle, wake it up */
2636 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2637 			resched_curr(dst_rq);
2638 	}
2639 
2640 	/* switch back to @rq lock */
2641 	if (rq != dst_rq) {
2642 		raw_spin_rq_unlock(dst_rq);
2643 		raw_spin_rq_lock(rq);
2644 	}
2645 #else	/* CONFIG_SMP */
2646 	BUG();	/* control cannot reach here on UP */
2647 #endif	/* CONFIG_SMP */
2648 }
2649 
2650 /**
2651  * finish_dispatch - Asynchronously finish dispatching a task
2652  * @rq: current rq which is locked
2653  * @p: task to finish dispatching
2654  * @qseq_at_dispatch: qseq when @p started getting dispatched
2655  * @dsq_id: destination DSQ ID
2656  * @enq_flags: %SCX_ENQ_*
2657  *
2658  * Dispatching to local DSQs may need to wait for queueing to complete or
2659  * require rq lock dancing. As we don't want to do either while inside
2660  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2661  * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2662  * task and its qseq. Once ops.dispatch() returns, this function is called to
2663  * finish up.
2664  *
2665  * There is no guarantee that @p is still valid for dispatching or even that it
2666  * was valid in the first place. Make sure that the task is still owned by the
2667  * BPF scheduler and claim the ownership before dispatching.
2668  */
2669 static void finish_dispatch(struct rq *rq, struct task_struct *p,
2670 			    unsigned long qseq_at_dispatch,
2671 			    u64 dsq_id, u64 enq_flags)
2672 {
2673 	struct scx_dispatch_q *dsq;
2674 	unsigned long opss;
2675 
2676 	touch_core_sched_dispatch(rq, p);
2677 retry:
2678 	/*
2679 	 * No need for _acquire here. @p is accessed only after a successful
2680 	 * try_cmpxchg to DISPATCHING.
2681 	 */
2682 	opss = atomic_long_read(&p->scx.ops_state);
2683 
2684 	switch (opss & SCX_OPSS_STATE_MASK) {
2685 	case SCX_OPSS_DISPATCHING:
2686 	case SCX_OPSS_NONE:
2687 		/* someone else already got to it */
2688 		return;
2689 	case SCX_OPSS_QUEUED:
2690 		/*
2691 		 * If qseq doesn't match, @p has gone through at least one
2692 		 * dispatch/dequeue and re-enqueue cycle between
2693 		 * scx_bpf_dsq_insert() and here and we have no claim on it.
2694 		 */
2695 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2696 			return;
2697 
2698 		/*
2699 		 * While we know @p is accessible, we don't yet have a claim on
2700 		 * it - the BPF scheduler is allowed to dispatch tasks
2701 		 * spuriously and there can be a racing dequeue attempt. Let's
2702 		 * claim @p by atomically transitioning it from QUEUED to
2703 		 * DISPATCHING.
2704 		 */
2705 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2706 						   SCX_OPSS_DISPATCHING)))
2707 			break;
2708 		goto retry;
2709 	case SCX_OPSS_QUEUEING:
2710 		/*
2711 		 * do_enqueue_task() is in the process of transferring the task
2712 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
2713 		 * holding any kernel or BPF resource that the enqueue path may
2714 		 * depend upon, it's safe to wait.
2715 		 */
2716 		wait_ops_state(p, opss);
2717 		goto retry;
2718 	}
2719 
2720 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2721 
2722 	dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2723 
2724 	if (dsq->id == SCX_DSQ_LOCAL)
2725 		dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2726 	else
2727 		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2728 }
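
/*
 * Illustrative sketch (BPF side, hypothetical scheduler): each
 * scx_bpf_dsq_insert() issued from ops.dispatch() is buffered and completed
 * by finish_dispatch() once the callback returns:
 *
 *	void BPF_STRUCT_OPS(myops_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		struct task_struct *p = lookup_next_task(cpu);	(assumed helper)
 *
 *		if (p)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *	}
 */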
2729 
2730 static void flush_dispatch_buf(struct rq *rq)
2731 {
2732 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2733 	u32 u;
2734 
2735 	for (u = 0; u < dspc->cursor; u++) {
2736 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2737 
2738 		finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2739 				ent->enq_flags);
2740 	}
2741 
2742 	dspc->nr_tasks += dspc->cursor;
2743 	dspc->cursor = 0;
2744 }
2745 
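/*
 * Make sure @rq has something to run. Called with @rq locked. Consumes the
 * global DSQ and, if needed, invokes ops.dispatch() repeatedly until the
 * local DSQ is populated or the BPF scheduler runs out of tasks. Sets
 * %SCX_RQ_BAL_KEEP if @prev should keep running. Returns %true if @rq has a
 * runnable SCX task, %false otherwise.
 */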
2746 static int balance_one(struct rq *rq, struct task_struct *prev)
2747 {
2748 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2749 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
2750 	bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2751 	int nr_loops = SCX_DSP_MAX_LOOPS;
2752 
2753 	lockdep_assert_rq_held(rq);
2754 	rq->scx.flags |= SCX_RQ_IN_BALANCE;
2755 	rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
2756 
2757 	if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2758 	    unlikely(rq->scx.cpu_released)) {
2759 		/*
2760 		 * If the previous sched_class for the current CPU was not SCX,
2761 		 * notify the BPF scheduler that it again has control of the
2762 		 * core. This callback complements ->cpu_release(), which is
2763 		 * emitted in switch_class().
2764 		 */
2765 		if (SCX_HAS_OP(cpu_acquire))
2766 			SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
2767 		rq->scx.cpu_released = false;
2768 	}
2769 
2770 	if (prev_on_scx) {
2771 		update_curr_scx(rq);
2772 
2773 		/*
2774 		 * If @prev is runnable & has slice left, it has priority and
2775 		 * fetching more just increases latency for the fetched tasks.
2776 		 * Tell pick_task_scx() to keep running @prev. If the BPF
2777 		 * scheduler wants to handle this explicitly, it should
2778 		 * implement ->cpu_release().
2779 		 *
2780 		 * See scx_ops_disable_workfn() for the explanation on the
2781 		 * bypassing test.
2782 		 */
2783 		if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2784 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2785 			goto has_tasks;
2786 		}
2787 	}
2788 
2789 	/* if there already are tasks to run, nothing to do */
2790 	if (rq->scx.local_dsq.nr)
2791 		goto has_tasks;
2792 
2793 	if (consume_global_dsq(rq))
2794 		goto has_tasks;
2795 
2796 	if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2797 		goto no_tasks;
2798 
2799 	dspc->rq = rq;
2800 
2801 	/*
2802 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2803 	 * the local DSQ might still end up empty after a successful
2804 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2805 	 * produced some tasks, retry. The BPF scheduler may depend on this
2806 	 * looping behavior to simplify its implementation.
2807 	 */
2808 	do {
2809 		dspc->nr_tasks = 0;
2810 
2811 		SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2812 			    prev_on_scx ? prev : NULL);
2813 
2814 		flush_dispatch_buf(rq);
2815 
2816 		if (prev_on_rq && prev->scx.slice) {
2817 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2818 			goto has_tasks;
2819 		}
2820 		if (rq->scx.local_dsq.nr)
2821 			goto has_tasks;
2822 		if (consume_global_dsq(rq))
2823 			goto has_tasks;
2824 
2825 		/*
2826 		 * ops.dispatch() can trap us in this loop by repeatedly
2827 		 * dispatching ineligible tasks. Break out once in a while to
2828 		 * allow the watchdog to run. As IRQ can't be enabled in
2829 		 * balance(), we want to complete this scheduling cycle and then
2830 		 * start a new one. IOW, we want to call resched_curr() on the
2831 		 * next, most likely idle, task, not the current one. Use
2832 		 * scx_bpf_kick_cpu() for deferred kicking.
2833 		 */
2834 		if (unlikely(!--nr_loops)) {
2835 			scx_bpf_kick_cpu(cpu_of(rq), 0);
2836 			break;
2837 		}
2838 	} while (dspc->nr_tasks);
2839 
2840 no_tasks:
2841 	/*
2842 	 * Didn't find another task to run. Keep running @prev unless
2843 	 * %SCX_OPS_ENQ_LAST is in effect.
2844 	 */
2845 	if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
2846 	     scx_rq_bypassing(rq))) {
2847 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
2848 		goto has_tasks;
2849 	}
2850 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2851 	return false;
2852 
2853 has_tasks:
2854 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2855 	return true;
2856 }
2857 
2858 static int balance_scx(struct rq *rq, struct task_struct *prev,
2859 		       struct rq_flags *rf)
2860 {
2861 	int ret;
2862 
2863 	rq_unpin_lock(rq, rf);
2864 
2865 	ret = balance_one(rq, prev);
2866 
2867 #ifdef CONFIG_SCHED_SMT
2868 	/*
2869 	 * When core-sched is enabled, this ops.balance() call will be followed
2870 	 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2871 	 * siblings too.
2872 	 */
2873 	if (sched_core_enabled(rq)) {
2874 		const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2875 		int scpu;
2876 
2877 		for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2878 			struct rq *srq = cpu_rq(scpu);
2879 			struct task_struct *sprev = srq->curr;
2880 
2881 			WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2882 			update_rq_clock(srq);
2883 			balance_one(srq, sprev);
2884 		}
2885 	}
2886 #endif
2887 	rq_repin_lock(rq, rf);
2888 
2889 	return ret;
2890 }
2891 
2892 static void process_ddsp_deferred_locals(struct rq *rq)
2893 {
2894 	struct task_struct *p;
2895 
2896 	lockdep_assert_rq_held(rq);
2897 
2898 	/*
2899 	 * Now that @rq can be unlocked, execute the deferred enqueueing of
2900 	 * tasks directly dispatched to the local DSQs of other CPUs. See
2901 	 * direct_dispatch(). Keep popping from the head instead of using
2902 	 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
2903 	 * temporarily.
2904 	 */
2905 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2906 				struct task_struct, scx.dsq_list.node))) {
2907 		struct scx_dispatch_q *dsq;
2908 
2909 		list_del_init(&p->scx.dsq_list.node);
2910 
2911 		dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2912 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2913 			dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2914 	}
2915 }
2916 
2917 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2918 {
2919 	if (p->scx.flags & SCX_TASK_QUEUED) {
2920 		/*
2921 		 * Core-sched might decide to execute @p before it is
2922 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2923 		 */
2924 		ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2925 		dispatch_dequeue(rq, p);
2926 	}
2927 
2928 	p->se.exec_start = rq_clock_task(rq);
2929 
2930 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2931 	if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2932 		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2933 
2934 	clr_task_runnable(p, true);
2935 
2936 	/*
2937 	 * @p is getting newly scheduled or got kicked after someone updated its
2938 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2939 	 */
2940 	if ((p->scx.slice == SCX_SLICE_INF) !=
2941 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2942 		if (p->scx.slice == SCX_SLICE_INF)
2943 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2944 		else
2945 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2946 
2947 		sched_update_tick_dependency(rq);
2948 
2949 		/*
2950 		 * For now, let's refresh the load_avgs just when transitioning
2951 		 * in and out of nohz. In the future, we might want to add a
2952 		 * mechanism which calls the following periodically on
2953 		 * tick-stopped CPUs.
2954 		 */
2955 		update_other_load_avgs(rq);
2956 	}
2957 }
2958 
2959 static enum scx_cpu_preempt_reason
2960 preempt_reason_from_class(const struct sched_class *class)
2961 {
2962 #ifdef CONFIG_SMP
2963 	if (class == &stop_sched_class)
2964 		return SCX_CPU_PREEMPT_STOP;
2965 #endif
2966 	if (class == &dl_sched_class)
2967 		return SCX_CPU_PREEMPT_DL;
2968 	if (class == &rt_sched_class)
2969 		return SCX_CPU_PREEMPT_RT;
2970 	return SCX_CPU_PREEMPT_UNKNOWN;
2971 }
2972 
2973 static void switch_class(struct rq *rq, struct task_struct *next)
2974 {
2975 	const struct sched_class *next_class = next->sched_class;
2976 
2977 #ifdef CONFIG_SMP
2978 	/*
2979 	 * Pairs with the smp_load_acquire() issued by a CPU in
2980 	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
2981 	 * resched.
2982 	 */
2983 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
2984 #endif
2985 	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
2986 		return;
2987 
2988 	/*
2989 	 * The callback is conceptually meant to convey that the CPU is no
2990 	 * longer under the control of SCX. Therefore, don't invoke the callback
2991 	 * if the next class is below SCX (in which case the BPF scheduler has
2992 	 * actively decided not to schedule any tasks on the CPU).
2993 	 */
2994 	if (sched_class_above(&ext_sched_class, next_class))
2995 		return;
2996 
2997 	/*
2998 	 * At this point we know that SCX was preempted by a higher priority
2999 	 * sched_class, so invoke the ->cpu_release() callback if we have not
3000 	 * done so already. We only send the callback once between SCX being
3001 	 * preempted, and it regaining control of the CPU.
3002 	 *
3003 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3004 	 * next time that balance_scx() is invoked.
3005 	 */
3006 	if (!rq->scx.cpu_released) {
3007 		if (SCX_HAS_OP(cpu_release)) {
3008 			struct scx_cpu_release_args args = {
3009 				.reason = preempt_reason_from_class(next_class),
3010 				.task = next,
3011 			};
3012 
3013 			SCX_CALL_OP(SCX_KF_CPU_RELEASE,
3014 				    cpu_release, cpu_of(rq), &args);
3015 		}
3016 		rq->scx.cpu_released = true;
3017 	}
3018 }
3019 
3020 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3021 			      struct task_struct *next)
3022 {
3023 	update_curr_scx(rq);
3024 
3025 	/* see dequeue_task_scx() on why we skip when !QUEUED */
3026 	if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3027 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
3028 
3029 	if (p->scx.flags & SCX_TASK_QUEUED) {
3030 		set_task_runnable(rq, p);
3031 
3032 		/*
3033 		 * If @p has slice left and is being put, @p is getting
3034 		 * preempted by a higher priority scheduler class or core-sched
3035 		 * forcing a different task. Leave it at the head of the local
3036 		 * DSQ.
3037 		 */
3038 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
3039 			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3040 			goto switch_class;
3041 		}
3042 
3043 		/*
3044 		 * If @p is runnable but we're about to enter a lower
3045 		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3046 		 * ops.enqueue() that @p is the only one available for this cpu,
3047 		 * which should trigger an explicit follow-up scheduling event.
3048 		 */
3049 		if (sched_class_above(&ext_sched_class, next->sched_class)) {
3050 			WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
3051 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3052 		} else {
3053 			do_enqueue_task(rq, p, 0, -1);
3054 		}
3055 	}
3056 
3057 switch_class:
3058 	if (next && next->sched_class != &ext_sched_class)
3059 		switch_class(rq, next);
3060 }
3061 
3062 static struct task_struct *first_local_task(struct rq *rq)
3063 {
3064 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
3065 					struct task_struct, scx.dsq_list.node);
3066 }
3067 
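/*
 * Pick the next SCX task to run on @rq. Normally preceded by balance_scx()
 * which populates the local DSQ and decides via %SCX_RQ_BAL_KEEP whether
 * @prev keeps running; otherwise the first task on the local DSQ is picked.
 */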
3068 static struct task_struct *pick_task_scx(struct rq *rq)
3069 {
3070 	struct task_struct *prev = rq->curr;
3071 	struct task_struct *p;
3072 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
3073 	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3074 	bool kick_idle = false;
3075 
3076 	/*
3077 	 * WORKAROUND:
3078 	 *
3079 	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
3080 	 * have gone through balance_scx(). Unfortunately, there currently is a
3081 	 * bug where fair could say yes on balance() but no on pick_task(),
3082 	 * which then ends up calling pick_task_scx() without preceding
3083 	 * balance_scx().
3084 	 *
3085 	 * Keep running @prev if possible and avoid stalling from entering idle
3086 	 * without balancing.
3087 	 *
3088 	 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3089 	 * if pick_task_scx() is called without preceding balance_scx().
3090 	 */
3091 	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3092 		if (prev_on_scx) {
3093 			keep_prev = true;
3094 		} else {
3095 			keep_prev = false;
3096 			kick_idle = true;
3097 		}
3098 	} else if (unlikely(keep_prev && !prev_on_scx)) {
3099 		/* only allowed during transitions */
3100 		WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
3101 		keep_prev = false;
3102 	}
3103 
3104 	/*
3105 	 * If balance_scx() is telling us to keep running @prev, replenish slice
3106 	 * if necessary and keep running @prev. Otherwise, pop the first one
3107 	 * from the local DSQ.
3108 	 */
3109 	if (keep_prev) {
3110 		p = prev;
3111 		if (!p->scx.slice)
3112 			p->scx.slice = SCX_SLICE_DFL;
3113 	} else {
3114 		p = first_local_task(rq);
3115 		if (!p) {
3116 			if (kick_idle)
3117 				scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3118 			return NULL;
3119 		}
3120 
3121 		if (unlikely(!p->scx.slice)) {
3122 			if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
3123 				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3124 						p->comm, p->pid, __func__);
3125 				scx_warned_zero_slice = true;
3126 			}
3127 			p->scx.slice = SCX_SLICE_DFL;
3128 		}
3129 	}
3130 
3131 	return p;
3132 }
3133 
3134 #ifdef CONFIG_SCHED_CORE
3135 /**
3136  * scx_prio_less - Task ordering for core-sched
3137  * @a: task A
3138  * @b: task B
3139  * @in_fi: in forced idle state
3140  *
3141  * Core-sched is implemented as an additional scheduling layer on top of the
3142  * usual sched_class'es and needs to find out the expected task ordering. For
3143  * SCX, core-sched calls this function to interrogate the task ordering.
3144  *
3145  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3146  * to implement the default task ordering. The older the timestamp, the higher
3147  * priority the task - the global FIFO ordering matching the default scheduling
3148  * behavior.
3149  *
3150  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3151  * implement FIFO ordering within each local DSQ. See pick_task_scx().
3152  */
3153 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3154 		   bool in_fi)
3155 {
3156 	/*
3157 	 * The const qualifiers are dropped from task_struct pointers when
3158 	 * calling ops.core_sched_before(). Accesses are controlled by the
3159 	 * verifier.
3160 	 */
3161 	if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3162 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3163 					      (struct task_struct *)a,
3164 					      (struct task_struct *)b);
3165 	else
3166 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3167 }
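
/*
 * Illustrative sketch (BPF side, assumed vtime-based scheme): an
 * ops.core_sched_before() implementation returns %true if @a should run
 * before @b on SMT siblings:
 *
 *	bool BPF_STRUCT_OPS(myops_core_sched_before, struct task_struct *a,
 *			    struct task_struct *b)
 *	{
 *		return (s64)(a->scx.dsq_vtime - b->scx.dsq_vtime) < 0;
 *	}
 */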
3168 #endif	/* CONFIG_SCHED_CORE */
3169 
3170 #ifdef CONFIG_SMP
3171 
3172 static bool test_and_clear_cpu_idle(int cpu)
3173 {
3174 #ifdef CONFIG_SCHED_SMT
3175 	/*
3176 	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3177 	 * cluster is not wholly idle either way. This also prevents
3178 	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3179 	 */
3180 	if (sched_smt_active()) {
3181 		const struct cpumask *smt = cpu_smt_mask(cpu);
3182 
3183 		/*
3184 		 * If offline, @cpu is not its own sibling and
3185 		 * scx_pick_idle_cpu() can get caught in an infinite loop as
3186 		 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3187 		 * is eventually cleared.
3188 		 *
3189 		 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
3190 		 * reduce memory writes, which may help alleviate cache
3191 		 * coherence pressure.
3192 		 */
3193 		if (cpumask_intersects(smt, idle_masks.smt))
3194 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3195 		else if (cpumask_test_cpu(cpu, idle_masks.smt))
3196 			__cpumask_clear_cpu(cpu, idle_masks.smt);
3197 	}
3198 #endif
3199 	return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3200 }
3201 
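/*
 * Pick and claim an idle CPU from @cpus_allowed. Wholly idle SMT cores are
 * preferred when SMT is active; %SCX_PICK_IDLE_CORE in @flags restricts the
 * pick to such cores. Returns the claimed CPU on success, -%EBUSY if no
 * eligible idle CPU is found.
 */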
3202 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3203 {
3204 	int cpu;
3205 
3206 retry:
3207 	if (sched_smt_active()) {
3208 		cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3209 		if (cpu < nr_cpu_ids)
3210 			goto found;
3211 
3212 		if (flags & SCX_PICK_IDLE_CORE)
3213 			return -EBUSY;
3214 	}
3215 
3216 	cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3217 	if (cpu >= nr_cpu_ids)
3218 		return -EBUSY;
3219 
3220 found:
3221 	if (test_and_clear_cpu_idle(cpu))
3222 		return cpu;
3223 	else
3224 		goto retry;
3225 }
3226 
3227 /*
3228  * Return the number of CPUs in the LLC domain of @cpu (or zero if the LLC
3229  * domain is not defined).
3230  */
3231 static unsigned int llc_weight(s32 cpu)
3232 {
3233 	struct sched_domain *sd;
3234 
3235 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
3236 	if (!sd)
3237 		return 0;
3238 
3239 	return sd->span_weight;
3240 }
3241 
3242 /*
3243  * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
3244  * domain is not defined).
3245  */
3246 static struct cpumask *llc_span(s32 cpu)
3247 {
3248 	struct sched_domain *sd;
3249 
3250 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
3251 	if (!sd)
3252 		return NULL;
3253 
3254 	return sched_domain_span(sd);
3255 }
3256 
3257 /*
3258  * Return the number of CPUs in the NUMA domain of @cpu (or zero if the
3259  * NUMA domain is not defined).
3260  */
3261 static unsigned int numa_weight(s32 cpu)
3262 {
3263 	struct sched_domain *sd;
3264 	struct sched_group *sg;
3265 
3266 	sd = rcu_dereference(per_cpu(sd_numa, cpu));
3267 	if (!sd)
3268 		return 0;
3269 	sg = sd->groups;
3270 	if (!sg)
3271 		return 0;
3272 
3273 	return sg->group_weight;
3274 }
3275 
3276 /*
3277  * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
3278  * domain is not defined).
3279  */
3280 static struct cpumask *numa_span(s32 cpu)
3281 {
3282 	struct sched_domain *sd;
3283 	struct sched_group *sg;
3284 
3285 	sd = rcu_dereference(per_cpu(sd_numa, cpu));
3286 	if (!sd)
3287 		return NULL;
3288 	sg = sd->groups;
3289 	if (!sg)
3290 		return NULL;
3291 
3292 	return sched_group_span(sg);
3293 }
3294 
3295 /*
3296  * Return true if the LLC domains do not perfectly overlap with the NUMA
3297  * domains, false otherwise.
3298  */
3299 static bool llc_numa_mismatch(void)
3300 {
3301 	int cpu;
3302 
3303 	/*
3304 	 * We need to scan all online CPUs to verify whether their scheduling
3305 	 * domains overlap.
3306 	 *
3307 	 * While it is rare to encounter architectures with asymmetric NUMA
3308 	 * topologies, CPU hotplugging or virtualized environments can result
3309 	 * in asymmetric configurations.
3310 	 *
3311 	 * For example:
3312 	 *
3313 	 *  NUMA 0:
3314 	 *    - LLC 0: cpu0..cpu7
3315 	 *    - LLC 1: cpu8..cpu15 [offline]
3316 	 *
3317 	 *  NUMA 1:
3318 	 *    - LLC 0: cpu16..cpu23
3319 	 *    - LLC 1: cpu24..cpu31
3320 	 *
3321 	 * In this case, if we only checked the first online CPU (cpu0), we
3322 	 * would incorrectly conclude that the LLC and NUMA domains fully
3323 	 * overlap, when in fact NUMA 1 contains two distinct LLC
3324 	 * domains.
3325 	 */
3326 	for_each_online_cpu(cpu)
3327 		if (llc_weight(cpu) != numa_weight(cpu))
3328 			return true;
3329 
3330 	return false;
3331 }
3332 
3333 /*
3334  * Initialize topology-aware scheduling.
3335  *
3336  * Detect if the system has multiple LLC or multiple NUMA domains and enable
3337  * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
3338  * selection policy.
3339  *
3340  * Assumption (built into the kernel's internal topology representation): each
3341  * CPU belongs to a single LLC domain, and each LLC domain is entirely
3342  * contained within a single NUMA node.
3343  */
3344 static void update_selcpu_topology(void)
3345 {
3346 	bool enable_llc = false, enable_numa = false;
3347 	unsigned int nr_cpus;
3348 	s32 cpu = cpumask_first(cpu_online_mask);
3349 
3350 	/*
3351 	 * Enable LLC domain optimization only when there are multiple LLC
3352 	 * domains among the online CPUs. If all online CPUs are part of a
3353 	 * single LLC domain, the idle CPU selection logic can choose any
3354 	 * online CPU without bias.
3355 	 *
3356 	 * Note that it is sufficient to check the LLC domain of the first
3357 	 * online CPU to determine whether a single LLC domain includes all
3358 	 * CPUs.
3359 	 */
3360 	rcu_read_lock();
3361 	nr_cpus = llc_weight(cpu);
3362 	if (nr_cpus > 0) {
3363 		if (nr_cpus < num_online_cpus())
3364 			enable_llc = true;
3365 		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
3366 			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
3367 	}
3368 
3369 	/*
3370 	 * Enable NUMA optimization only when there are multiple NUMA domains
3371 	 * among the online CPUs and the NUMA domains don't perfectly overlap
3372 	 * with the LLC domains.
3373 	 *
3374 	 * If all CPUs belong to the same NUMA node and the same LLC domain,
3375 	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
3376 	 * for an idle CPU in the same domain twice is redundant.
3377 	 */
3378 	nr_cpus = numa_weight(cpu);
3379 	if (nr_cpus > 0) {
3380 		if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
3381 			enable_numa = true;
3382 		pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
3383 			 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
3384 	}
3385 	rcu_read_unlock();
3386 
3387 	pr_debug("sched_ext: LLC idle selection %s\n",
3388 		 str_enabled_disabled(enable_llc));
3389 	pr_debug("sched_ext: NUMA idle selection %s\n",
3390 		 str_enabled_disabled(enable_numa));
3391 
3392 	if (enable_llc)
3393 		static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
3394 	else
3395 		static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
3396 	if (enable_numa)
3397 		static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
3398 	else
3399 		static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
3400 }
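
/*
 * Worked example under an assumed topology: two NUMA nodes, each with two
 * LLCs of 8 CPUs (32 CPUs total). The first online CPU sees
 * llc_weight() == 8 < 32 online CPUs, so LLC optimization is enabled, and
 * numa_weight() == 16 < 32 with llc_weight() != numa_weight() on every CPU
 * (llc_numa_mismatch() returns true), so NUMA optimization is enabled too.
 * If each node instead had a single LLC spanning it, the weights would
 * match everywhere and the NUMA pass would be disabled as redundant.
 */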
3401 
3402 /*
3403  * Built-in CPU idle selection policy:
3404  *
3405  * 1. Prioritize full-idle cores:
3406  *   - always prioritize CPUs from fully idle cores (both logical CPUs are
3407  *     idle) to avoid interference caused by SMT.
3408  *
3409  * 2. Reuse the same CPU:
3410  *   - prefer the last used CPU to take advantage of cached data (L1, L2) and
3411  *     branch prediction optimizations.
3412  *
3413  * 3. Pick a CPU within the same LLC (Last-Level Cache):
3414  *   - if the above conditions aren't met, pick a CPU that shares the same LLC
3415  *     to maintain cache locality.
3416  *
3417  * 4. Pick a CPU within the same NUMA node, if enabled:
3418  *   - choose a CPU from the same NUMA node to reduce memory access latency.
3419  *
3420  * 5. Pick any idle CPU usable by the task.
3421  *
3422  * Steps 3 and 4 are performed only if the system has, respectively, multiple
3423  * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
3424  * scx_selcpu_topo_numa).
3425  *
3426  * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
3427  * we never call ops.select_cpu() for them, see select_task_rq().
3428  */
3429 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3430 			      u64 wake_flags, bool *found)
3431 {
3432 	const struct cpumask *llc_cpus = NULL;
3433 	const struct cpumask *numa_cpus = NULL;
3434 	s32 cpu;
3435 
3436 	*found = false;
3437 
3438 	/*
3439 	 * The RCU read lock protects the sched domains backing llc_cpus and numa_cpus.
3440 	 */
3441 	rcu_read_lock();
3442 
3443 	/*
3444 	 * Determine the scheduling domain only if the task is allowed to run
3445 	 * on all CPUs.
3446 	 *
3447 	 * This is done primarily for efficiency, as it avoids the overhead of
3448 	 * updating a cpumask every time we need to select an idle CPU (which
3449 	 * can be costly in large SMP systems), but it also aligns logically:
3450 	 * if a task's scheduling domain is restricted by user-space (through
3451 	 * CPU affinity), the task will simply use the flat scheduling domain
3452 	 * defined by user-space.
3453 	 */
3454 	if (p->nr_cpus_allowed >= num_possible_cpus()) {
3455 		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
3456 			numa_cpus = numa_span(prev_cpu);
3457 
3458 		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
3459 			llc_cpus = llc_span(prev_cpu);
3460 	}
3461 
3462 	/*
3463 	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
3464 	 */
3465 	if (wake_flags & SCX_WAKE_SYNC) {
3466 		cpu = smp_processor_id();
3467 
3468 		/*
3469 		 * If the waker's CPU is cache affine and prev_cpu is idle,
3470 		 * then avoid a migration.
3471 		 */
3472 		if (cpus_share_cache(cpu, prev_cpu) &&
3473 		    test_and_clear_cpu_idle(prev_cpu)) {
3474 			cpu = prev_cpu;
3475 			goto cpu_found;
3476 		}
3477 
3478 		/*
3479 		 * If the waker's local DSQ is empty and the system is
3480 		 * underutilized, try to wake up @p on the local DSQ of the waker.
3481 		 *
3482 		 * Checking only for an empty local DSQ is insufficient as it
3483 		 * could give the wakee an unfair advantage when the system is
3484 		 * oversaturated.
3485 		 *
3486 		 * Checking only for the presence of idle CPUs is also
3487 		 * insufficient as the local DSQ of the waker could have tasks
3488 		 * piled up on it even if there is an idle core elsewhere on
3489 		 * the system.
3490 		 */
3491 		if (!cpumask_empty(idle_masks.cpu) &&
3492 		    !(current->flags & PF_EXITING) &&
3493 		    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3494 			if (cpumask_test_cpu(cpu, p->cpus_ptr))
3495 				goto cpu_found;
3496 		}
3497 	}
3498 
3499 	/*
3500 	 * If the CPU has SMT, any wholly idle CPU is likely a better pick than
3501 	 * a partially idle @prev_cpu.
3502 	 */
3503 	if (sched_smt_active()) {
3504 		/*
3505 		 * Keep using @prev_cpu if it's part of a fully idle core.
3506 		 */
3507 		if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3508 		    test_and_clear_cpu_idle(prev_cpu)) {
3509 			cpu = prev_cpu;
3510 			goto cpu_found;
3511 		}
3512 
3513 		/*
3514 		 * Search for any fully idle core in the same LLC domain.
3515 		 */
3516 		if (llc_cpus) {
3517 			cpu = scx_pick_idle_cpu(llc_cpus, SCX_PICK_IDLE_CORE);
3518 			if (cpu >= 0)
3519 				goto cpu_found;
3520 		}
3521 
3522 		/*
3523 		 * Search for any fully idle core in the same NUMA node.
3524 		 */
3525 		if (numa_cpus) {
3526 			cpu = scx_pick_idle_cpu(numa_cpus, SCX_PICK_IDLE_CORE);
3527 			if (cpu >= 0)
3528 				goto cpu_found;
3529 		}
3530 
3531 		/*
3532 		 * Search for any fully idle core usable by the task.
3533 		 */
3534 		cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3535 		if (cpu >= 0)
3536 			goto cpu_found;
3537 	}
3538 
3539 	/*
3540 	 * Use @prev_cpu if it's idle.
3541 	 */
3542 	if (test_and_clear_cpu_idle(prev_cpu)) {
3543 		cpu = prev_cpu;
3544 		goto cpu_found;
3545 	}
3546 
3547 	/*
3548 	 * Search for any idle CPU in the same LLC domain.
3549 	 */
3550 	if (llc_cpus) {
3551 		cpu = scx_pick_idle_cpu(llc_cpus, 0);
3552 		if (cpu >= 0)
3553 			goto cpu_found;
3554 	}
3555 
3556 	/*
3557 	 * Search for any idle CPU in the same NUMA node.
3558 	 */
3559 	if (numa_cpus) {
3560 		cpu = scx_pick_idle_cpu(numa_cpus, 0);
3561 		if (cpu >= 0)
3562 			goto cpu_found;
3563 	}
3564 
3565 	/*
3566 	 * Search for any idle CPU usable by the task.
3567 	 */
3568 	cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3569 	if (cpu >= 0)
3570 		goto cpu_found;
3571 
3572 	rcu_read_unlock();
3573 	return prev_cpu;
3574 
3575 cpu_found:
3576 	rcu_read_unlock();
3577 
3578 	*found = true;
3579 	return cpu;
3580 }
3581 
3582 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3583 {
3584 	/*
3585 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3586 	 * can be a good migration opportunity with low cache and memory
3587 	 * footprint. Returning a CPU different from @prev_cpu triggers
3588 	 * immediate rq migration. However, for SCX, as the current rq
3589 	 * association doesn't dictate where the task is going to run, this
3590 	 * doesn't fit well. If necessary, we can later add a dedicated method
3591 	 * which can decide to preempt self to force it through the regular
3592 	 * scheduling path.
3593 	 */
3594 	if (unlikely(wake_flags & WF_EXEC))
3595 		return prev_cpu;
3596 
3597 	if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {
3598 		s32 cpu;
3599 		struct task_struct **ddsp_taskp;
3600 
3601 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3602 		WARN_ON_ONCE(*ddsp_taskp);
3603 		*ddsp_taskp = p;
3604 
3605 		cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3606 					   select_cpu, p, prev_cpu, wake_flags);
3607 		*ddsp_taskp = NULL;
3608 		if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3609 			return cpu;
3610 		else
3611 			return prev_cpu;
3612 	} else {
3613 		bool found;
3614 		s32 cpu;
3615 
3616 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3617 		if (found) {
3618 			p->scx.slice = SCX_SLICE_DFL;
3619 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3620 		}
3621 		return cpu;
3622 	}
3623 }
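
/*
 * BPF-side sketch (assumed, modeled on the in-tree scx example schedulers):
 * an ops.select_cpu() that defers to the default policy and, on success,
 * dispatches directly to the local DSQ via scx_bpf_dispatch() (renamed
 * scx_bpf_dsq_insert() in later kernels), which is what the
 * direct_dispatch_task dance above enables:
 *
 *	s32 BPF_STRUCT_OPS(my_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */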
3624 
3625 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3626 {
3627 	run_deferred(rq);
3628 }
3629 
3630 static void set_cpus_allowed_scx(struct task_struct *p,
3631 				 struct affinity_context *ac)
3632 {
3633 	set_cpus_allowed_common(p, ac);
3634 
3635 	/*
3636 	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3637 	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3638 	 * scheduler the effective one.
3639 	 *
3640 	 * Fine-grained memory write control is enforced by BPF, making the const
3641 	 * designation pointless. Cast it away when calling the operation.
3642 	 */
3643 	if (SCX_HAS_OP(set_cpumask))
3644 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3645 				 (struct cpumask *)p->cpus_ptr);
3646 }
3647 
3648 static void reset_idle_masks(void)
3649 {
3650 	/*
3651 	 * Consider all online cpus idle. Should converge to the actual state
3652 	 * quickly.
3653 	 */
3654 	cpumask_copy(idle_masks.cpu, cpu_online_mask);
3655 	cpumask_copy(idle_masks.smt, cpu_online_mask);
3656 }
3657 
3658 static void update_builtin_idle(int cpu, bool idle)
3659 {
3660 	assign_cpu(cpu, idle_masks.cpu, idle);
3661 
3662 #ifdef CONFIG_SCHED_SMT
3663 	if (sched_smt_active()) {
3664 		const struct cpumask *smt = cpu_smt_mask(cpu);
3665 
3666 		if (idle) {
3667 			/*
3668 			 * idle_masks.smt handling is racy but that's fine as
3669 			 * it's only for optimization and self-correcting.
3670 			 */
3671 			if (!cpumask_subset(smt, idle_masks.cpu))
3672 				return;
3673 			cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3674 		} else {
3675 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3676 		}
3677 	}
3678 #endif
3679 }
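
/*
 * Worked example: with SMT siblings {0, 1}, CPU0 going idle while CPU1 is
 * busy sets bit 0 in idle_masks.cpu but leaves idle_masks.smt untouched,
 * as the sibling mask is not yet a subset of idle_masks.cpu. Only when
 * CPU1 also goes idle is the whole sibling mask OR'd into idle_masks.smt,
 * and either sibling becoming busy clears both bits from it again.
 */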
3680 
3681 /*
3682  * Update the idle state of a CPU to @idle.
3683  *
3684  * If @do_notify is true, ops.update_idle() is invoked to notify the scx
3685  * scheduler of an actual idle state transition (idle to busy or vice
3686  * versa). If @do_notify is false, only the idle state in the idle masks is
3687  * refreshed without invoking ops.update_idle().
3688  *
3689  * This distinction is necessary, because an idle CPU can be "reserved" and
3690  * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
3691  * busy even if no tasks are dispatched. In this case, the CPU may return
3692  * to idle without a true state transition. Refreshing the idle masks
3693  * without invoking ops.update_idle() ensures accurate idle state tracking
3694  * while avoiding unnecessary updates and maintaining balanced state
3695  * transitions.
3696  */
3697 void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
3698 {
3699 	int cpu = cpu_of(rq);
3700 
3701 	lockdep_assert_rq_held(rq);
3702 
3703 	/*
3704 	 * Trigger ops.update_idle() only when transitioning from a task to
3705 	 * the idle thread and vice versa.
3706 	 *
3707 	 * Idle transitions are indicated by do_notify being set to true,
3708 	 * managed by put_prev_task_idle()/set_next_task_idle().
3709 	 */
3710 	if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
3711 		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3712 
3713 	/*
3714 	 * Update the idle masks:
3715 	 * - for real idle transitions (do_notify == true)
3716 	 * - for idle-to-idle transitions (indicated by the previous task
3717 	 *   being the idle thread, managed by pick_task_idle())
3718 	 *
3719 	 * Skip updating idle masks if the previous task is not the idle
3720 	 * thread, since set_next_task_idle() has already handled it when
3721 	 * transitioning from a task to the idle thread (calling this
3722 	 * function with do_notify == true).
3723 	 *
3724 	 * In this way we can avoid updating the idle masks twice,
3725 	 * unnecessarily.
3726 	 */
3727 	if (static_branch_likely(&scx_builtin_idle_enabled))
3728 		if (do_notify || is_idle_task(rq->curr))
3729 			update_builtin_idle(cpu, idle);
3730 }
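
/*
 * BPF-side sketch: an ops.update_idle() that maintains scheduler-private
 * idle state ("my_idle_mask" is hypothetical and assumes <= 64 CPUs); the
 * built-in masks above keep working alongside it when
 * %SCX_OPS_KEEP_BUILTIN_IDLE is set:
 *
 *	u64 my_idle_mask;
 *
 *	void BPF_STRUCT_OPS(my_update_idle, s32 cpu, bool idle)
 *	{
 *		if (idle)
 *			__sync_fetch_and_or(&my_idle_mask, 1LLU << cpu);
 *		else
 *			__sync_fetch_and_and(&my_idle_mask, ~(1LLU << cpu));
 *	}
 */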
3731 
3732 static void handle_hotplug(struct rq *rq, bool online)
3733 {
3734 	int cpu = cpu_of(rq);
3735 
3736 	atomic_long_inc(&scx_hotplug_seq);
3737 
3738 	if (scx_enabled())
3739 		update_selcpu_topology();
3740 
3741 	if (online && SCX_HAS_OP(cpu_online))
3742 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3743 	else if (!online && SCX_HAS_OP(cpu_offline))
3744 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3745 	else
3746 		scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3747 			     "cpu %d going %s, exiting scheduler", cpu,
3748 			     online ? "online" : "offline");
3749 }
3750 
3751 void scx_rq_activate(struct rq *rq)
3752 {
3753 	handle_hotplug(rq, true);
3754 }
3755 
3756 void scx_rq_deactivate(struct rq *rq)
3757 {
3758 	handle_hotplug(rq, false);
3759 }
3760 
3761 static void rq_online_scx(struct rq *rq)
3762 {
3763 	rq->scx.flags |= SCX_RQ_ONLINE;
3764 }
3765 
3766 static void rq_offline_scx(struct rq *rq)
3767 {
3768 	rq->scx.flags &= ~SCX_RQ_ONLINE;
3769 }
3770 
3771 #else	/* CONFIG_SMP */
3772 
3773 static bool test_and_clear_cpu_idle(int cpu) { return false; }
3774 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3775 static void reset_idle_masks(void) {}
3776 
3777 #endif	/* CONFIG_SMP */
3778 
3779 static bool check_rq_for_timeouts(struct rq *rq)
3780 {
3781 	struct task_struct *p;
3782 	struct rq_flags rf;
3783 	bool timed_out = false;
3784 
3785 	rq_lock_irqsave(rq, &rf);
3786 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3787 		unsigned long last_runnable = p->scx.runnable_at;
3788 
3789 		if (unlikely(time_after(jiffies,
3790 					last_runnable + scx_watchdog_timeout))) {
3791 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3792 
3793 			scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3794 					   "%s[%d] failed to run for %u.%03us",
3795 					   p->comm, p->pid,
3796 					   dur_ms / 1000, dur_ms % 1000);
3797 			timed_out = true;
3798 			break;
3799 		}
3800 	}
3801 	rq_unlock_irqrestore(rq, &rf);
3802 
3803 	return timed_out;
3804 }
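
/*
 * The stall threshold checked above, scx_watchdog_timeout, derives from
 * the loading scheduler's sched_ext_ops::timeout_ms, clamped to
 * %SCX_WATCHDOG_MAX_TIMEOUT. Loader-side sketch ("my_ops" is
 * hypothetical):
 *
 *	static struct sched_ext_ops my_ops = {
 *		.timeout_ms	= 5000,		// declare a stall after ~5s
 *		.name		= "my_sched",
 *	};
 */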
3805 
3806 static void scx_watchdog_workfn(struct work_struct *work)
3807 {
3808 	int cpu;
3809 
3810 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3811 
3812 	for_each_online_cpu(cpu) {
3813 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3814 			break;
3815 
3816 		cond_resched();
3817 	}
3818 	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3819 			   scx_watchdog_timeout / 2);
3820 }
3821 
3822 void scx_tick(struct rq *rq)
3823 {
3824 	unsigned long last_check;
3825 
3826 	if (!scx_enabled())
3827 		return;
3828 
3829 	last_check = READ_ONCE(scx_watchdog_timestamp);
3830 	if (unlikely(time_after(jiffies,
3831 				last_check + READ_ONCE(scx_watchdog_timeout)))) {
3832 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3833 
3834 		scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3835 				   "watchdog failed to check in for %u.%03us",
3836 				   dur_ms / 1000, dur_ms % 1000);
3837 	}
3838 
3839 	update_other_load_avgs(rq);
3840 }
3841 
3842 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3843 {
3844 	update_curr_scx(rq);
3845 
3846 	/*
3847 	 * While disabling, always resched and refresh core-sched timestamp as
3848 	 * we can't trust the slice management or ops.core_sched_before().
3849 	 */
3850 	if (scx_rq_bypassing(rq)) {
3851 		curr->scx.slice = 0;
3852 		touch_core_sched(rq, curr);
3853 	} else if (SCX_HAS_OP(tick)) {
3854 		SCX_CALL_OP(SCX_KF_REST, tick, curr);
3855 	}
3856 
3857 	if (!curr->scx.slice)
3858 		resched_curr(rq);
3859 }
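
/*
 * Sketch (assumed): ops.tick() can force preemption from the tick path by
 * zeroing the remaining slice, which the !curr->scx.slice check above
 * turns into resched_curr():
 *
 *	void BPF_STRUCT_OPS(my_tick, struct task_struct *p)
 *	{
 *		p->scx.slice = 0;	// hypothetical: yield every tick
 *	}
 */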
3860 
3861 #ifdef CONFIG_EXT_GROUP_SCHED
3862 static struct cgroup *tg_cgrp(struct task_group *tg)
3863 {
3864 	/*
3865 	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3866 	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3867 	 * root cgroup.
3868 	 */
3869 	if (tg && tg->css.cgroup)
3870 		return tg->css.cgroup;
3871 	else
3872 		return &cgrp_dfl_root.cgrp;
3873 }
3874 
3875 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
3876 
3877 #else	/* CONFIG_EXT_GROUP_SCHED */
3878 
3879 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3880 
3881 #endif	/* CONFIG_EXT_GROUP_SCHED */
3882 
3883 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3884 {
3885 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3886 }
3887 
3888 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3889 {
3890 	enum scx_task_state prev_state = scx_get_task_state(p);
3891 	bool warn = false;
3892 
3893 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3894 
3895 	switch (state) {
3896 	case SCX_TASK_NONE:
3897 		break;
3898 	case SCX_TASK_INIT:
3899 		warn = prev_state != SCX_TASK_NONE;
3900 		break;
3901 	case SCX_TASK_READY:
3902 		warn = prev_state == SCX_TASK_NONE;
3903 		break;
3904 	case SCX_TASK_ENABLED:
3905 		warn = prev_state != SCX_TASK_READY;
3906 		break;
3907 	default:
3908 		WARN_ONCE(true, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3909 			  prev_state, state, p->comm, p->pid);
3910 		return;
3910 	}
3911 
3912 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3913 		  prev_state, state, p->comm, p->pid);
3914 
3915 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
3916 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3917 }
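
/*
 * For reference, the transitions accepted above without warning form:
 *
 *	NONE -> INIT -> READY <-> ENABLED
 *
 * with scx_ops_exit_task() returning any state back to NONE.
 */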
3918 
3919 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3920 {
3921 	int ret;
3922 
3923 	p->scx.disallow = false;
3924 
3925 	if (SCX_HAS_OP(init_task)) {
3926 		struct scx_init_task_args args = {
3927 			SCX_INIT_TASK_ARGS_CGROUP(tg)
3928 			.fork = fork,
3929 		};
3930 
3931 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3932 		if (unlikely(ret)) {
3933 			ret = ops_sanitize_err("init_task", ret);
3934 			return ret;
3935 		}
3936 	}
3937 
3938 	scx_set_task_state(p, SCX_TASK_INIT);
3939 
3940 	if (p->scx.disallow) {
3941 		if (!fork) {
3942 			struct rq *rq;
3943 			struct rq_flags rf;
3944 
3945 			rq = task_rq_lock(p, &rf);
3946 
3947 			/*
3948 			 * We're in the load path and @p->policy will be applied
3949 			 * right after. Reverting @p->policy here and rejecting
3950 			 * %SCHED_EXT transitions from scx_check_setscheduler()
3951 			 * guarantees that if ops.init_task() sets @p->disallow,
3952 			 * @p can never be in SCX.
3953 			 */
3954 			if (p->policy == SCHED_EXT) {
3955 				p->policy = SCHED_NORMAL;
3956 				atomic_long_inc(&scx_nr_rejected);
3957 			}
3958 
3959 			task_rq_unlock(rq, p, &rf);
3960 		} else if (p->policy == SCHED_EXT) {
3961 			scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
3962 				      p->comm, p->pid);
3963 		}
3964 	}
3965 
3966 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3967 	return 0;
3968 }
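
/*
 * BPF-side sketch (assumed): an ops.init_task() opting tasks out of SCX
 * through the disallow mechanism handled above:
 *
 *	s32 BPF_STRUCT_OPS(my_init_task, struct task_struct *p,
 *			   struct scx_init_task_args *args)
 *	{
 *		if (p->flags & PF_KTHREAD)
 *			p->scx.disallow = true;	// keep kthreads off SCX
 *		return 0;
 *	}
 */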
3969 
3970 static void scx_ops_enable_task(struct task_struct *p)
3971 {
3972 	u32 weight;
3973 
3974 	lockdep_assert_rq_held(task_rq(p));
3975 
3976 	/*
3977 	 * Set the weight before calling ops.enable() so that the scheduler
3978 	 * doesn't see a stale value if they inspect the task struct.
3979 	 */
3980 	if (task_has_idle_policy(p))
3981 		weight = WEIGHT_IDLEPRIO;
3982 	else
3983 		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3984 
3985 	p->scx.weight = sched_weight_to_cgroup(weight);
3986 
3987 	if (SCX_HAS_OP(enable))
3988 		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
3989 	scx_set_task_state(p, SCX_TASK_ENABLED);
3990 
3991 	if (SCX_HAS_OP(set_weight))
3992 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3993 }
3994 
3995 static void scx_ops_disable_task(struct task_struct *p)
3996 {
3997 	lockdep_assert_rq_held(task_rq(p));
3998 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3999 
4000 	if (SCX_HAS_OP(disable))
4001 		SCX_CALL_OP(SCX_KF_REST, disable, p);
4002 	scx_set_task_state(p, SCX_TASK_READY);
4003 }
4004 
4005 static void scx_ops_exit_task(struct task_struct *p)
4006 {
4007 	struct scx_exit_task_args args = {
4008 		.cancelled = false,
4009 	};
4010 
4011 	lockdep_assert_rq_held(task_rq(p));
4012 
4013 	switch (scx_get_task_state(p)) {
4014 	case SCX_TASK_NONE:
4015 		return;
4016 	case SCX_TASK_INIT:
4017 		args.cancelled = true;
4018 		break;
4019 	case SCX_TASK_READY:
4020 		break;
4021 	case SCX_TASK_ENABLED:
4022 		scx_ops_disable_task(p);
4023 		break;
4024 	default:
4025 		WARN_ON_ONCE(true);
4026 		return;
4027 	}
4028 
4029 	if (SCX_HAS_OP(exit_task))
4030 		SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
4031 	scx_set_task_state(p, SCX_TASK_NONE);
4032 }
4033 
4034 void init_scx_entity(struct sched_ext_entity *scx)
4035 {
4036 	memset(scx, 0, sizeof(*scx));
4037 	INIT_LIST_HEAD(&scx->dsq_list.node);
4038 	RB_CLEAR_NODE(&scx->dsq_priq);
4039 	scx->sticky_cpu = -1;
4040 	scx->holding_cpu = -1;
4041 	INIT_LIST_HEAD(&scx->runnable_node);
4042 	scx->runnable_at = jiffies;
4043 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
4044 	scx->slice = SCX_SLICE_DFL;
4045 }
4046 
4047 void scx_pre_fork(struct task_struct *p)
4048 {
4049 	/*
4050 	 * BPF scheduler enable/disable paths want to be able to iterate and
4051 	 * update all tasks which can become complex when racing forks. As
4052 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
4053 	 * exclude forks.
4054 	 */
4055 	percpu_down_read(&scx_fork_rwsem);
4056 }
4057 
4058 int scx_fork(struct task_struct *p)
4059 {
4060 	percpu_rwsem_assert_held(&scx_fork_rwsem);
4061 
4062 	if (scx_ops_init_task_enabled)
4063 		return scx_ops_init_task(p, task_group(p), true);
4064 	else
4065 		return 0;
4066 }
4067 
4068 void scx_post_fork(struct task_struct *p)
4069 {
4070 	if (scx_ops_init_task_enabled) {
4071 		scx_set_task_state(p, SCX_TASK_READY);
4072 
4073 		/*
4074 		 * Enable the task immediately if it's running on sched_ext.
4075 		 * Otherwise, it'll be enabled in switching_to_scx() if and
4076 		 * when it's ever configured to run with a SCHED_EXT policy.
4077 		 */
4078 		if (p->sched_class == &ext_sched_class) {
4079 			struct rq_flags rf;
4080 			struct rq *rq;
4081 
4082 			rq = task_rq_lock(p, &rf);
4083 			scx_ops_enable_task(p);
4084 			task_rq_unlock(rq, p, &rf);
4085 		}
4086 	}
4087 
4088 	spin_lock_irq(&scx_tasks_lock);
4089 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
4090 	spin_unlock_irq(&scx_tasks_lock);
4091 
4092 	percpu_up_read(&scx_fork_rwsem);
4093 }
4094 
4095 void scx_cancel_fork(struct task_struct *p)
4096 {
4097 	if (scx_enabled()) {
4098 		struct rq *rq;
4099 		struct rq_flags rf;
4100 
4101 		rq = task_rq_lock(p, &rf);
4102 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
4103 		scx_ops_exit_task(p);
4104 		task_rq_unlock(rq, p, &rf);
4105 	}
4106 
4107 	percpu_up_read(&scx_fork_rwsem);
4108 }
4109 
4110 void sched_ext_free(struct task_struct *p)
4111 {
4112 	unsigned long flags;
4113 
4114 	spin_lock_irqsave(&scx_tasks_lock, flags);
4115 	list_del_init(&p->scx.tasks_node);
4116 	spin_unlock_irqrestore(&scx_tasks_lock, flags);
4117 
4118 	/*
4119 	 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
4120 	 * ENABLED transitions can't race us. Disable ops for @p.
4121 	 */
4122 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
4123 		struct rq_flags rf;
4124 		struct rq *rq;
4125 
4126 		rq = task_rq_lock(p, &rf);
4127 		scx_ops_exit_task(p);
4128 		task_rq_unlock(rq, p, &rf);
4129 	}
4130 }
4131 
4132 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
4133 			      const struct load_weight *lw)
4134 {
4135 	lockdep_assert_rq_held(task_rq(p));
4136 
4137 	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
4138 	if (SCX_HAS_OP(set_weight))
4139 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4140 }
4141 
4142 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
4143 {
4144 }
4145 
4146 static void switching_to_scx(struct rq *rq, struct task_struct *p)
4147 {
4148 	scx_ops_enable_task(p);
4149 
4150 	/*
4151 	 * set_cpus_allowed_scx() is not called while @p is associated with a
4152 	 * different scheduler class. Keep the BPF scheduler up-to-date.
4153 	 */
4154 	if (SCX_HAS_OP(set_cpumask))
4155 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
4156 				 (struct cpumask *)p->cpus_ptr);
4157 }
4158 
4159 static void switched_from_scx(struct rq *rq, struct task_struct *p)
4160 {
4161 	scx_ops_disable_task(p);
4162 }
4163 
4164 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
4165 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
4166 
4167 int scx_check_setscheduler(struct task_struct *p, int policy)
4168 {
4169 	lockdep_assert_rq_held(task_rq(p));
4170 
4171 	/* if disallow, reject transitioning into SCX */
4172 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
4173 	    p->policy != policy && policy == SCHED_EXT)
4174 		return -EACCES;
4175 
4176 	return 0;
4177 }
4178 
4179 #ifdef CONFIG_NO_HZ_FULL
4180 bool scx_can_stop_tick(struct rq *rq)
4181 {
4182 	struct task_struct *p = rq->curr;
4183 
4184 	if (scx_rq_bypassing(rq))
4185 		return false;
4186 
4187 	if (p->sched_class != &ext_sched_class)
4188 		return true;
4189 
4190 	/*
4191 	 * @rq can dispatch from different DSQs, so we can't tell whether it
4192 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
4193 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
4194 	 */
4195 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4196 }
4197 #endif
4198 
4199 #ifdef CONFIG_EXT_GROUP_SCHED
4200 
4201 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
4202 static bool scx_cgroup_enabled;
4203 static bool cgroup_warned_missing_weight;
4204 static bool cgroup_warned_missing_idle;
4205 
4206 static void scx_cgroup_warn_missing_weight(struct task_group *tg)
4207 {
4208 	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
4209 	    cgroup_warned_missing_weight)
4210 		return;
4211 
4212 	if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
4213 		return;
4214 
4215 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
4216 		scx_ops.name);
4217 	cgroup_warned_missing_weight = true;
4218 }
4219 
4220 static void scx_cgroup_warn_missing_idle(struct task_group *tg)
4221 {
4222 	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
4223 		return;
4224 
4225 	if (!tg->idle)
4226 		return;
4227 
4228 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
4229 		scx_ops.name);
4230 	cgroup_warned_missing_idle = true;
4231 }
4232 
4233 int scx_tg_online(struct task_group *tg)
4234 {
4235 	int ret = 0;
4236 
4237 	WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4238 
4239 	percpu_down_read(&scx_cgroup_rwsem);
4240 
4241 	scx_cgroup_warn_missing_weight(tg);
4242 
4243 	if (scx_cgroup_enabled) {
4244 		if (SCX_HAS_OP(cgroup_init)) {
4245 			struct scx_cgroup_init_args args =
4246 				{ .weight = tg->scx_weight };
4247 
4248 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4249 					      tg->css.cgroup, &args);
4250 			if (ret)
4251 				ret = ops_sanitize_err("cgroup_init", ret);
4252 		}
4253 		if (ret == 0)
4254 			tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4255 	} else {
4256 		tg->scx_flags |= SCX_TG_ONLINE;
4257 	}
4258 
4259 	percpu_up_read(&scx_cgroup_rwsem);
4260 	return ret;
4261 }
4262 
4263 void scx_tg_offline(struct task_group *tg)
4264 {
4265 	WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
4266 
4267 	percpu_down_read(&scx_cgroup_rwsem);
4268 
4269 	if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
4270 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
4271 	tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4272 
4273 	percpu_up_read(&scx_cgroup_rwsem);
4274 }
4275 
4276 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4277 {
4278 	struct cgroup_subsys_state *css;
4279 	struct task_struct *p;
4280 	int ret;
4281 
4282 	/* released in scx_finish/cancel_attach() */
4283 	percpu_down_read(&scx_cgroup_rwsem);
4284 
4285 	if (!scx_cgroup_enabled)
4286 		return 0;
4287 
4288 	cgroup_taskset_for_each(p, css, tset) {
4289 		struct cgroup *from = tg_cgrp(task_group(p));
4290 		struct cgroup *to = tg_cgrp(css_tg(css));
4291 
4292 		WARN_ON_ONCE(p->scx.cgrp_moving_from);
4293 
4294 		/*
4295 		 * sched_move_task() omits identity migrations. Let's match the
4296 		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4297 		 * always match one-to-one.
4298 		 */
4299 		if (from == to)
4300 			continue;
4301 
4302 		if (SCX_HAS_OP(cgroup_prep_move)) {
4303 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
4304 					      p, from, css->cgroup);
4305 			if (ret)
4306 				goto err;
4307 		}
4308 
4309 		p->scx.cgrp_moving_from = from;
4310 	}
4311 
4312 	return 0;
4313 
4314 err:
4315 	cgroup_taskset_for_each(p, css, tset) {
4316 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4317 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4318 				    p->scx.cgrp_moving_from, css->cgroup);
4319 		p->scx.cgrp_moving_from = NULL;
4320 	}
4321 
4322 	percpu_up_read(&scx_cgroup_rwsem);
4323 	return ops_sanitize_err("cgroup_prep_move", ret);
4324 }
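
/*
 * To summarize the pairing: each task that actually changes cgroups gets
 * ops.cgroup_prep_move() here, followed by exactly one of
 * ops.cgroup_move() (from scx_move_task()) or ops.cgroup_cancel_move(),
 * with p->scx.cgrp_moving_from carrying the source cgroup between the two
 * steps.
 */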
4325 
4326 void scx_move_task(struct task_struct *p)
4327 {
4328 	if (!scx_cgroup_enabled)
4329 		return;
4330 
4331 	/*
4332 	 * We're called from sched_move_task() which handles both cgroup and
4333 	 * autogroup moves. Ignore the latter.
4334 	 *
4335 	 * Also ignore exiting tasks, because in the exit path tasks transition
4336 	 * from the autogroup to the root group, so task_group_is_autogroup()
4337 	 * alone isn't able to catch exiting autogroup tasks. This is safe for
4338 	 * cgroup_move(), because cgroup migrations never happen for PF_EXITING
4339 	 * tasks.
4340 	 */
4341 	if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
4342 		return;
4343 
4344 	/*
4345 	 * @p must have ops.cgroup_prep_move() called on it and thus
4346 	 * cgrp_moving_from set.
4347 	 */
4348 	if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4349 		SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
4350 			p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
4351 	p->scx.cgrp_moving_from = NULL;
4352 }
4353 
4354 void scx_cgroup_finish_attach(void)
4355 {
4356 	percpu_up_read(&scx_cgroup_rwsem);
4357 }
4358 
4359 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4360 {
4361 	struct cgroup_subsys_state *css;
4362 	struct task_struct *p;
4363 
4364 	if (!scx_cgroup_enabled)
4365 		goto out_unlock;
4366 
4367 	cgroup_taskset_for_each(p, css, tset) {
4368 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4369 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4370 				    p->scx.cgrp_moving_from, css->cgroup);
4371 		p->scx.cgrp_moving_from = NULL;
4372 	}
4373 out_unlock:
4374 	percpu_up_read(&scx_cgroup_rwsem);
4375 }
4376 
4377 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4378 {
4379 	percpu_down_read(&scx_cgroup_rwsem);
4380 
4381 	if (scx_cgroup_enabled && tg->scx_weight != weight) {
4382 		if (SCX_HAS_OP(cgroup_set_weight))
4383 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
4384 				    tg_cgrp(tg), weight);
4385 		tg->scx_weight = weight;
4386 	}
4387 
4388 	percpu_up_read(&scx_cgroup_rwsem);
4389 }
4390 
4391 void scx_group_set_idle(struct task_group *tg, bool idle)
4392 {
4393 	percpu_down_read(&scx_cgroup_rwsem);
4394 	scx_cgroup_warn_missing_idle(tg);
4395 	percpu_up_read(&scx_cgroup_rwsem);
4396 }
4397 
4398 static void scx_cgroup_lock(void)
4399 {
4400 	percpu_down_write(&scx_cgroup_rwsem);
4401 }
4402 
4403 static void scx_cgroup_unlock(void)
4404 {
4405 	percpu_up_write(&scx_cgroup_rwsem);
4406 }
4407 
4408 #else	/* CONFIG_EXT_GROUP_SCHED */
4409 
4410 static inline void scx_cgroup_lock(void) {}
4411 static inline void scx_cgroup_unlock(void) {}
4412 
4413 #endif	/* CONFIG_EXT_GROUP_SCHED */
4414 
4415 /*
4416  * Omitted operations:
4417  *
4418  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4419  *   isn't tied to the CPU at that point. Preemption is implemented by resetting
4420  *   the victim task's slice to 0 and triggering reschedule on the target CPU.
4421  *
4422  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4423  *
4424  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4425  *   their current sched_class. Call them directly from sched core instead.
4426  */
4427 DEFINE_SCHED_CLASS(ext) = {
4428 	.enqueue_task		= enqueue_task_scx,
4429 	.dequeue_task		= dequeue_task_scx,
4430 	.yield_task		= yield_task_scx,
4431 	.yield_to_task		= yield_to_task_scx,
4432 
4433 	.wakeup_preempt		= wakeup_preempt_scx,
4434 
4435 	.balance		= balance_scx,
4436 	.pick_task		= pick_task_scx,
4437 
4438 	.put_prev_task		= put_prev_task_scx,
4439 	.set_next_task		= set_next_task_scx,
4440 
4441 #ifdef CONFIG_SMP
4442 	.select_task_rq		= select_task_rq_scx,
4443 	.task_woken		= task_woken_scx,
4444 	.set_cpus_allowed	= set_cpus_allowed_scx,
4445 
4446 	.rq_online		= rq_online_scx,
4447 	.rq_offline		= rq_offline_scx,
4448 #endif
4449 
4450 	.task_tick		= task_tick_scx,
4451 
4452 	.switching_to		= switching_to_scx,
4453 	.switched_from		= switched_from_scx,
4454 	.switched_to		= switched_to_scx,
4455 	.reweight_task		= reweight_task_scx,
4456 	.prio_changed		= prio_changed_scx,
4457 
4458 	.update_curr		= update_curr_scx,
4459 
4460 #ifdef CONFIG_UCLAMP_TASK
4461 	.uclamp_enabled		= 1,
4462 #endif
4463 };
4464 
4465 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4466 {
4467 	memset(dsq, 0, sizeof(*dsq));
4468 
4469 	raw_spin_lock_init(&dsq->lock);
4470 	INIT_LIST_HEAD(&dsq->list);
4471 	dsq->id = dsq_id;
4472 }
4473 
4474 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
4475 {
4476 	struct scx_dispatch_q *dsq;
4477 	int ret;
4478 
4479 	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
4480 		return ERR_PTR(-EINVAL);
4481 
4482 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4483 	if (!dsq)
4484 		return ERR_PTR(-ENOMEM);
4485 
4486 	init_dsq(dsq, dsq_id);
4487 
4488 	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
4489 				     dsq_hash_params);
4490 	if (ret) {
4491 		kfree(dsq);
4492 		return ERR_PTR(ret);
4493 	}
4494 	return dsq;
4495 }
4496 
4497 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4498 {
4499 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4500 	struct scx_dispatch_q *dsq, *tmp_dsq;
4501 
4502 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4503 		kfree_rcu(dsq, rcu);
4504 }
4505 
4506 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4507 
4508 static void destroy_dsq(u64 dsq_id)
4509 {
4510 	struct scx_dispatch_q *dsq;
4511 	unsigned long flags;
4512 
4513 	rcu_read_lock();
4514 
4515 	dsq = find_user_dsq(dsq_id);
4516 	if (!dsq)
4517 		goto out_unlock_rcu;
4518 
4519 	raw_spin_lock_irqsave(&dsq->lock, flags);
4520 
4521 	if (dsq->nr) {
4522 		scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4523 			      dsq->id, dsq->nr);
4524 		goto out_unlock_dsq;
4525 	}
4526 
4527 	if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4528 		goto out_unlock_dsq;
4529 
4530 	/*
4531 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4532 	 * queueing more tasks. As this function can be called from anywhere,
4533 	 * freeing is bounced through an irq work to avoid nesting RCU
4534 	 * operations inside scheduler locks.
4535 	 */
4536 	dsq->id = SCX_DSQ_INVALID;
4537 	llist_add(&dsq->free_node, &dsqs_to_free);
4538 	irq_work_queue(&free_dsq_irq_work);
4539 
4540 out_unlock_dsq:
4541 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
4542 out_unlock_rcu:
4543 	rcu_read_unlock();
4544 }
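
/*
 * BPF-side lifecycle sketch (assumed, per the scx_bpf_*() kfuncs): user
 * DSQs are typically created in ops.init() and destroyed either explicitly
 * or implicitly when the scheduler unloads ("MY_DSQ_ID" is a hypothetical
 * scheduler-defined ID):
 *
 *	#define MY_DSQ_ID	0
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(my_init)
 *	{
 *		return scx_bpf_create_dsq(MY_DSQ_ID, -1);	// any NUMA node
 *	}
 *
 *	void BPF_STRUCT_OPS(my_exit, struct scx_exit_info *ei)
 *	{
 *		scx_bpf_destroy_dsq(MY_DSQ_ID);
 *	}
 */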
4545 
4546 #ifdef CONFIG_EXT_GROUP_SCHED
4547 static void scx_cgroup_exit(void)
4548 {
4549 	struct cgroup_subsys_state *css;
4550 
4551 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4552 
4553 	scx_cgroup_enabled = false;
4554 
4555 	/*
4556 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4557 	 * cgroups and exit all the inited ones, all online cgroups are exited.
4558 	 */
4559 	rcu_read_lock();
4560 	css_for_each_descendant_post(css, &root_task_group.css) {
4561 		struct task_group *tg = css_tg(css);
4562 
4563 		if (!(tg->scx_flags & SCX_TG_INITED))
4564 			continue;
4565 		tg->scx_flags &= ~SCX_TG_INITED;
4566 
4567 		if (!scx_ops.cgroup_exit)
4568 			continue;
4569 
4570 		if (WARN_ON_ONCE(!css_tryget(css)))
4571 			continue;
4572 		rcu_read_unlock();
4573 
4574 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4575 
4576 		rcu_read_lock();
4577 		css_put(css);
4578 	}
4579 	rcu_read_unlock();
4580 }
4581 
4582 static int scx_cgroup_init(void)
4583 {
4584 	struct cgroup_subsys_state *css;
4585 	int ret;
4586 
4587 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4588 
4589 	cgroup_warned_missing_weight = false;
4590 	cgroup_warned_missing_idle = false;
4591 
4592 	/*
4593 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4594 	 * the cgroups and init each, all online cgroups are initialized.
4595 	 */
4596 	rcu_read_lock();
4597 	css_for_each_descendant_pre(css, &root_task_group.css) {
4598 		struct task_group *tg = css_tg(css);
4599 		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4600 
4601 		scx_cgroup_warn_missing_weight(tg);
4602 		scx_cgroup_warn_missing_idle(tg);
4603 
4604 		if ((tg->scx_flags &
4605 		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4606 			continue;
4607 
4608 		if (!scx_ops.cgroup_init) {
4609 			tg->scx_flags |= SCX_TG_INITED;
4610 			continue;
4611 		}
4612 
4613 		if (WARN_ON_ONCE(!css_tryget(css)))
4614 			continue;
4615 		rcu_read_unlock();
4616 
4617 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4618 				      css->cgroup, &args);
4619 		if (ret) {
4620 			css_put(css);
4621 			scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4622 			return ret;
4623 		}
4624 		tg->scx_flags |= SCX_TG_INITED;
4625 
4626 		rcu_read_lock();
4627 		css_put(css);
4628 	}
4629 	rcu_read_unlock();
4630 
4631 	WARN_ON_ONCE(scx_cgroup_enabled);
4632 	scx_cgroup_enabled = true;
4633 
4634 	return 0;
4635 }
4636 
4637 #else
4638 static void scx_cgroup_exit(void) {}
4639 static int scx_cgroup_init(void) { return 0; }
4640 #endif
4641 
4642 
4643 /********************************************************************************
4644  * Sysfs interface and ops enable/disable.
4645  */
4646 
4647 #define SCX_ATTR(_name)								\
4648 	static struct kobj_attribute scx_attr_##_name = {			\
4649 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
4650 		.show = scx_attr_##_name##_show,				\
4651 	}
4652 
4653 static ssize_t scx_attr_state_show(struct kobject *kobj,
4654 				   struct kobj_attribute *ka, char *buf)
4655 {
4656 	return sysfs_emit(buf, "%s\n",
4657 			  scx_ops_enable_state_str[scx_ops_enable_state()]);
4658 }
4659 SCX_ATTR(state);
4660 
4661 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4662 					struct kobj_attribute *ka, char *buf)
4663 {
4664 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4665 }
4666 SCX_ATTR(switch_all);
4667 
4668 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4669 					 struct kobj_attribute *ka, char *buf)
4670 {
4671 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4672 }
4673 SCX_ATTR(nr_rejected);
4674 
4675 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4676 					 struct kobj_attribute *ka, char *buf)
4677 {
4678 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4679 }
4680 SCX_ATTR(hotplug_seq);
4681 
4682 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4683 					struct kobj_attribute *ka, char *buf)
4684 {
4685 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4686 }
4687 SCX_ATTR(enable_seq);
4688 
4689 static struct attribute *scx_global_attrs[] = {
4690 	&scx_attr_state.attr,
4691 	&scx_attr_switch_all.attr,
4692 	&scx_attr_nr_rejected.attr,
4693 	&scx_attr_hotplug_seq.attr,
4694 	&scx_attr_enable_seq.attr,
4695 	NULL,
4696 };
4697 
4698 static const struct attribute_group scx_global_attr_group = {
4699 	.attrs = scx_global_attrs,
4700 };
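
/*
 * Per Documentation/scheduler/sched-ext.rst, the global attributes above
 * surface as /sys/kernel/sched_ext/{state,switch_all,nr_rejected,
 * hotplug_seq,enable_seq}, while the per-scheduler kobject below exposes
 * /sys/kernel/sched_ext/root/ops.
 */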
4701 
4702 static void scx_kobj_release(struct kobject *kobj)
4703 {
4704 	kfree(kobj);
4705 }
4706 
4707 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4708 				 struct kobj_attribute *ka, char *buf)
4709 {
4710 	return sysfs_emit(buf, "%s\n", scx_ops.name);
4711 }
4712 SCX_ATTR(ops);
4713 
4714 static struct attribute *scx_sched_attrs[] = {
4715 	&scx_attr_ops.attr,
4716 	NULL,
4717 };
4718 ATTRIBUTE_GROUPS(scx_sched);
4719 
4720 static const struct kobj_type scx_ktype = {
4721 	.release = scx_kobj_release,
4722 	.sysfs_ops = &kobj_sysfs_ops,
4723 	.default_groups = scx_sched_groups,
4724 };
4725 
4726 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4727 {
4728 	return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4729 }
4730 
4731 static const struct kset_uevent_ops scx_uevent_ops = {
4732 	.uevent = scx_uevent,
4733 };
4734 
4735 /*
4736  * Used by sched_fork() and __setscheduler_prio() to pick the matching
4737  * sched_class. dl/rt are already handled.
4738  */
4739 bool task_should_scx(int policy)
4740 {
4741 	if (!scx_enabled() ||
4742 	    unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4743 		return false;
4744 	if (READ_ONCE(scx_switching_all))
4745 		return true;
4746 	return policy == SCHED_EXT;
4747 }
4748 
4749 /**
4750  * scx_softlockup - sched_ext softlockup handler
4751  * @dur_s: number of seconds of CPU stuck due to soft lockup
4752  *
4753  * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4754  * live-lock the system by making many CPUs target the same DSQ to the point
4755  * where soft-lockup detection triggers. This function is called from
4756  * soft-lockup watchdog when the triggering point is close and tries to unjam
4757  * the system by enabling the breather and aborting the BPF scheduler.
4758  */
4759 void scx_softlockup(u32 dur_s)
4760 {
4761 	switch (scx_ops_enable_state()) {
4762 	case SCX_OPS_ENABLING:
4763 	case SCX_OPS_ENABLED:
4764 		break;
4765 	default:
4766 		return;
4767 	}
4768 
4769 	/* allow only one instance, cleared at the end of scx_ops_bypass() */
4770 	if (test_and_set_bit(0, &scx_in_softlockup))
4771 		return;
4772 
4773 	printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4774 			smp_processor_id(), dur_s, scx_ops.name);
4775 
4776 	/*
4777 	 * Some CPUs may be trapped in the dispatch paths. Enable the breather
4778 	 * immediately; otherwise, we might not even be able to get to
4779 	 * scx_ops_bypass().
4780 	 */
4781 	atomic_inc(&scx_ops_breather_depth);
4782 
4783 	scx_ops_error("soft lockup - CPU#%d stuck for %us",
4784 		      smp_processor_id(), dur_s);
4785 }
4786 
4787 static void scx_clear_softlockup(void)
4788 {
4789 	if (test_and_clear_bit(0, &scx_in_softlockup))
4790 		atomic_dec(&scx_ops_breather_depth);
4791 }
4792 
4793 /**
4794  * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4795  * @bypass: true for bypass, false for unbypass
4796  *
4797  * Bypassing guarantees that all runnable tasks make forward progress without
4798  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4799  * be held by tasks that the BPF scheduler is forgetting to run, which
4800  * unfortunately also excludes toggling the static branches.
4801  *
4802  * Let's work around this by overriding a couple of ops and modifying behaviors based on
4803  * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4804  * to force global FIFO scheduling.
4805  *
4806  * - ops.select_cpu() is ignored and the default select_cpu() is used.
4807  *
4808  * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4809  *   %SCX_OPS_ENQ_LAST is also ignored.
4810  *
4811  * - ops.dispatch() is ignored.
4812  *
4813  * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4814  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
4815  *   the tail of the queue with core_sched_at touched.
4816  *
4817  * - pick_next_task() suppresses zero slice warning.
4818  *
4819  * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4820  *   operations.
4821  *
4822  * - scx_prio_less() reverts to the default core_sched_at order.
4823  */
4824 static void scx_ops_bypass(bool bypass)
4825 {
4826 	static DEFINE_RAW_SPINLOCK(bypass_lock);
4827 	int cpu;
4828 	unsigned long flags;
4829 
4830 	raw_spin_lock_irqsave(&bypass_lock, flags);
4831 	if (bypass) {
4832 		scx_ops_bypass_depth++;
4833 		WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
4834 		if (scx_ops_bypass_depth != 1)
4835 			goto unlock;
4836 	} else {
4837 		scx_ops_bypass_depth--;
4838 		WARN_ON_ONCE(scx_ops_bypass_depth < 0);
4839 		if (scx_ops_bypass_depth != 0)
4840 			goto unlock;
4841 	}
4842 
4843 	atomic_inc(&scx_ops_breather_depth);
4844 
4845 	/*
4846 	 * No task property is changing. We just need to make sure all currently
4847 	 * queued tasks are re-queued according to the new scx_rq_bypassing()
4848 	 * state. As an optimization, walk each rq's runnable_list instead of
4849 	 * the scx_tasks list.
4850 	 *
4851 	 * This function can't trust the scheduler and thus can't use
4852 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
4853 	 */
4854 	for_each_possible_cpu(cpu) {
4855 		struct rq *rq = cpu_rq(cpu);
4856 		struct task_struct *p, *n;
4857 
4858 		raw_spin_rq_lock(rq);
4859 
4860 		if (bypass) {
4861 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4862 			rq->scx.flags |= SCX_RQ_BYPASSING;
4863 		} else {
4864 			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4865 			rq->scx.flags &= ~SCX_RQ_BYPASSING;
4866 		}
4867 
4868 		/*
4869 		 * We need to guarantee that no tasks are on the BPF scheduler
4870 		 * while bypassing. Either we see enabled or the enable path
4871 		 * sees scx_rq_bypassing() before moving tasks to SCX.
4872 		 */
4873 		if (!scx_enabled()) {
4874 			raw_spin_rq_unlock(rq);
4875 			continue;
4876 		}
4877 
4878 		/*
4879 		 * The use of list_for_each_entry_safe_reverse() is required
4880 		 * because each task is going to be removed from and added back
4881 		 * to the runnable_list during iteration. Because they're added
4882 		 * to the tail of the list, safe reverse iteration can still
4883 		 * visit all nodes.
4884 		 */
4885 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4886 						 scx.runnable_node) {
4887 			struct sched_enq_and_set_ctx ctx;
4888 
4889 			/* cycling deq/enq is enough, see the function comment */
4890 			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4891 			sched_enq_and_set_task(&ctx);
4892 		}
4893 
4894 		/* resched to restore ticks and idle state */
4895 		if (cpu_online(cpu) || cpu == smp_processor_id())
4896 			resched_curr(rq);
4897 
4898 		raw_spin_rq_unlock(rq);
4899 	}
4900 
4901 	atomic_dec(&scx_ops_breather_depth);
4902 unlock:
4903 	raw_spin_unlock_irqrestore(&bypass_lock, flags);
4904 	scx_clear_softlockup();
4905 }
4906 
4907 static void free_exit_info(struct scx_exit_info *ei)
4908 {
4909 	kfree(ei->dump);
4910 	kfree(ei->msg);
4911 	kfree(ei->bt);
4912 	kfree(ei);
4913 }
4914 
4915 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4916 {
4917 	struct scx_exit_info *ei;
4918 
4919 	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4920 	if (!ei)
4921 		return NULL;
4922 
4923 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4924 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4925 	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4926 
4927 	if (!ei->bt || !ei->msg || !ei->dump) {
4928 		free_exit_info(ei);
4929 		return NULL;
4930 	}
4931 
4932 	return ei;
4933 }
4934 
4935 static const char *scx_exit_reason(enum scx_exit_kind kind)
4936 {
4937 	switch (kind) {
4938 	case SCX_EXIT_UNREG:
4939 		return "unregistered from user space";
4940 	case SCX_EXIT_UNREG_BPF:
4941 		return "unregistered from BPF";
4942 	case SCX_EXIT_UNREG_KERN:
4943 		return "unregistered from the main kernel";
4944 	case SCX_EXIT_SYSRQ:
4945 		return "disabled by sysrq-S";
4946 	case SCX_EXIT_ERROR:
4947 		return "runtime error";
4948 	case SCX_EXIT_ERROR_BPF:
4949 		return "scx_bpf_error";
4950 	case SCX_EXIT_ERROR_STALL:
4951 		return "runnable task stall";
4952 	default:
4953 		return "<UNKNOWN>";
4954 	}
4955 }
4956 
4957 static void scx_ops_disable_workfn(struct kthread_work *work)
4958 {
4959 	struct scx_exit_info *ei = scx_exit_info;
4960 	struct scx_task_iter sti;
4961 	struct task_struct *p;
4962 	struct rhashtable_iter rht_iter;
4963 	struct scx_dispatch_q *dsq;
4964 	int i, kind, cpu;
4965 
4966 	kind = atomic_read(&scx_exit_kind);
4967 	while (true) {
4968 		/*
4969 		 * NONE indicates that a new scx_ops has been registered since
4970 		 * disable was scheduled - don't kill the new ops. DONE
4971 		 * indicates that the ops has already been disabled.
4972 		 */
4973 		if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
4974 			return;
4975 		if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
4976 			break;
4977 	}
4978 	ei->kind = kind;
4979 	ei->reason = scx_exit_reason(ei->kind);
4980 
4981 	/* guarantee forward progress by bypassing scx_ops */
4982 	scx_ops_bypass(true);
4983 
4984 	switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
4985 	case SCX_OPS_DISABLING:
4986 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4987 		break;
4988 	case SCX_OPS_DISABLED:
4989 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
4990 			scx_exit_info->msg);
4991 		WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4992 			     SCX_OPS_DISABLING);
4993 		goto done;
4994 	default:
4995 		break;
4996 	}
4997 
4998 	/*
4999 	 * Here, every runnable task is guaranteed to make forward progress and
5000 	 * we can safely use blocking synchronization constructs. Actually
5001 	 * disable ops.
5002 	 */
5003 	mutex_lock(&scx_ops_enable_mutex);
5004 
5005 	static_branch_disable(&__scx_switched_all);
5006 	WRITE_ONCE(scx_switching_all, false);
5007 
5008 	/*
5009 	 * Shut down cgroup support before tasks so that the cgroup attach path
5010 	 * doesn't race against scx_ops_exit_task().
5011 	 */
5012 	scx_cgroup_lock();
5013 	scx_cgroup_exit();
5014 	scx_cgroup_unlock();
5015 
5016 	/*
5017 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
5018 	 * must be switched out and exited synchronously.
5019 	 */
5020 	percpu_down_write(&scx_fork_rwsem);
5021 
5022 	scx_ops_init_task_enabled = false;
5023 
5024 	scx_task_iter_start(&sti);
5025 	while ((p = scx_task_iter_next_locked(&sti))) {
5026 		const struct sched_class *old_class = p->sched_class;
5027 		const struct sched_class *new_class =
5028 			__setscheduler_class(p->policy, p->prio);
5029 		struct sched_enq_and_set_ctx ctx;
5030 
5031 		if (old_class != new_class && p->se.sched_delayed)
5032 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5033 
5034 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5035 
5036 		p->sched_class = new_class;
5037 		check_class_changing(task_rq(p), p, old_class);
5038 
5039 		sched_enq_and_set_task(&ctx);
5040 
5041 		check_class_changed(task_rq(p), p, old_class, p->prio);
5042 		scx_ops_exit_task(p);
5043 	}
5044 	scx_task_iter_stop(&sti);
5045 	percpu_up_write(&scx_fork_rwsem);
5046 
5047 	/*
5048 	 * Invalidate all the rq clocks to prevent getting outdated
5049 	 * rq clocks from a previous scx scheduler.
5050 	 */
5051 	for_each_possible_cpu(cpu) {
5052 		struct rq *rq = cpu_rq(cpu);
5053 		scx_rq_clock_invalidate(rq);
5054 	}
5055 
5056 	/* no task is on scx, turn off all the switches and flush in-progress calls */
5057 	static_branch_disable(&__scx_ops_enabled);
5058 	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
5059 		static_branch_disable(&scx_has_op[i]);
5060 	static_branch_disable(&scx_ops_enq_last);
5061 	static_branch_disable(&scx_ops_enq_exiting);
5062 	static_branch_disable(&scx_ops_cpu_preempt);
5063 	static_branch_disable(&scx_builtin_idle_enabled);
5064 	synchronize_rcu();
5065 
5066 	if (ei->kind >= SCX_EXIT_ERROR) {
5067 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5068 		       scx_ops.name, ei->reason);
5069 
5070 		if (ei->msg[0] != '\0')
5071 			pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
5072 #ifdef CONFIG_STACKTRACE
5073 		stack_trace_print(ei->bt, ei->bt_len, 2);
5074 #endif
5075 	} else {
5076 		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5077 			scx_ops.name, ei->reason);
5078 	}
5079 
5080 	if (scx_ops.exit)
5081 		SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
5082 
5083 	cancel_delayed_work_sync(&scx_watchdog_work);
5084 
5085 	/*
5086 	 * Delete the kobject from the hierarchy eagerly in addition to just
5087 	 * dropping a reference. Otherwise, if the object is deleted
5088 	 * asynchronously, sysfs could observe an object of the same name still
5089 	 * in the hierarchy when another scheduler is loaded.
5090 	 */
5091 	kobject_del(scx_root_kobj);
5092 	kobject_put(scx_root_kobj);
5093 	scx_root_kobj = NULL;
5094 
5095 	memset(&scx_ops, 0, sizeof(scx_ops));
5096 
5097 	rhashtable_walk_enter(&dsq_hash, &rht_iter);
5098 	do {
5099 		rhashtable_walk_start(&rht_iter);
5100 
5101 		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
5102 			destroy_dsq(dsq->id);
5103 
5104 		rhashtable_walk_stop(&rht_iter);
5105 	} while (dsq == ERR_PTR(-EAGAIN));
5106 	rhashtable_walk_exit(&rht_iter);
5107 
5108 	free_percpu(scx_dsp_ctx);
5109 	scx_dsp_ctx = NULL;
5110 	scx_dsp_max_batch = 0;
5111 
5112 	free_exit_info(scx_exit_info);
5113 	scx_exit_info = NULL;
5114 
5115 	mutex_unlock(&scx_ops_enable_mutex);
5116 
5117 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5118 		     SCX_OPS_DISABLING);
5119 done:
5120 	scx_ops_bypass(false);
5121 }
5122 
5123 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
5124 
5125 static void schedule_scx_ops_disable_work(void)
5126 {
5127 	struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
5128 
5129 	/*
5130 	 * We may be called spuriously before the first bpf_sched_ext_reg(). If
5131 	 * scx_ops_helper isn't set up yet, there's nothing to do.
5132 	 */
5133 	if (helper)
5134 		kthread_queue_work(helper, &scx_ops_disable_work);
5135 }
5136 
5137 static void scx_ops_disable(enum scx_exit_kind kind)
5138 {
5139 	int none = SCX_EXIT_NONE;
5140 
5141 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5142 		kind = SCX_EXIT_ERROR;
5143 
5144 	atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
5145 
5146 	schedule_scx_ops_disable_work();
5147 }
5148 
5149 static void dump_newline(struct seq_buf *s)
5150 {
5151 	trace_sched_ext_dump("");
5152 
5153 	/* @s may be zero sized and seq_buf triggers WARN if so */
5154 	if (s->size)
5155 		seq_buf_putc(s, '\n');
5156 }
5157 
5158 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
5159 {
5160 	va_list args;
5161 
5162 #ifdef CONFIG_TRACEPOINTS
5163 	if (trace_sched_ext_dump_enabled()) {
5164 		/* protected by scx_dump_state()::dump_lock */
5165 		static char line_buf[SCX_EXIT_MSG_LEN];
5166 
5167 		va_start(args, fmt);
5168 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
5169 		va_end(args);
5170 
5171 		trace_sched_ext_dump(line_buf);
5172 	}
5173 #endif
5174 	/* @s may be zero sized and seq_buf triggers WARN if so */
5175 	if (s->size) {
5176 		va_start(args, fmt);
5177 		seq_buf_vprintf(s, fmt, args);
5178 		va_end(args);
5179 
5180 		seq_buf_putc(s, '\n');
5181 	}
5182 }
5183 
5184 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
5185 			     const unsigned long *bt, unsigned int len)
5186 {
5187 	unsigned int i;
5188 
5189 	for (i = 0; i < len; i++)
5190 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
5191 }
5192 
5193 static void ops_dump_init(struct seq_buf *s, const char *prefix)
5194 {
5195 	struct scx_dump_data *dd = &scx_dump_data;
5196 
5197 	lockdep_assert_irqs_disabled();
5198 
5199 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
5200 	dd->first = true;
5201 	dd->cursor = 0;
5202 	dd->s = s;
5203 	dd->prefix = prefix;
5204 }
5205 
5206 static void ops_dump_flush(void)
5207 {
5208 	struct scx_dump_data *dd = &scx_dump_data;
5209 	char *line = dd->buf.line;
5210 
5211 	if (!dd->cursor)
5212 		return;
5213 
5214 	/*
5215 	 * There's something to flush and this is the first line. Insert a blank
5216 	 * line to distinguish the ops dump.
5217 	 */
5218 	if (dd->first) {
5219 		dump_newline(dd->s);
5220 		dd->first = false;
5221 	}
5222 
5223 	/*
5224 	 * There may be multiple lines in $line. Scan and emit each line
5225 	 * separately.
5226 	 */
5227 	while (true) {
5228 		char *end = line;
5229 		char c;
5230 
5231 		while (*end != '\n' && *end != '\0')
5232 			end++;
5233 
5234 		/*
5235 		 * If $line overflowed, it may not have a newline at the end.
5236 		 * Always emit with a newline.
5237 		 */
5238 		c = *end;
5239 		*end = '\0';
5240 		dump_line(dd->s, "%s%s", dd->prefix, line);
5241 		if (c == '\0')
5242 			break;
5243 
5244 		/* move to the next line */
5245 		end++;
5246 		if (*end == '\0')
5247 			break;
5248 		line = end;
5249 	}
5250 
5251 	dd->cursor = 0;
5252 }
5253 
5254 static void ops_dump_exit(void)
5255 {
5256 	ops_dump_flush();
5257 	scx_dump_data.cpu = -1;
5258 }
5259 
5260 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
5261 			  struct task_struct *p, char marker)
5262 {
5263 	static unsigned long bt[SCX_EXIT_BT_LEN];
5264 	char dsq_id_buf[19] = "(n/a)";
5265 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
5266 	unsigned int bt_len = 0;
5267 
5268 	if (p->scx.dsq)
5269 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
5270 			  (unsigned long long)p->scx.dsq->id);
5271 
5272 	dump_newline(s);
5273 	dump_line(s, " %c%c %s[%d] %+ldms",
5274 		  marker, task_state_to_char(p), p->comm, p->pid,
5275 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
5276 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
5277 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
5278 		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
5279 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
5280 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu slice=%llu",
5281 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
5282 		  p->scx.dsq_vtime, p->scx.slice);
5283 	dump_line(s, "      cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5284 
5285 	if (SCX_HAS_OP(dump_task)) {
5286 		ops_dump_init(s, "    ");
5287 		SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
5288 		ops_dump_exit();
5289 	}
5290 
5291 #ifdef CONFIG_STACKTRACE
5292 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5293 #endif
5294 	if (bt_len) {
5295 		dump_newline(s);
5296 		dump_stack_trace(s, "    ", bt, bt_len);
5297 	}
5298 }
5299 
5300 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5301 {
5302 	static DEFINE_SPINLOCK(dump_lock);
5303 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5304 	struct scx_dump_ctx dctx = {
5305 		.kind = ei->kind,
5306 		.exit_code = ei->exit_code,
5307 		.reason = ei->reason,
5308 		.at_ns = ktime_get_ns(),
5309 		.at_jiffies = jiffies,
5310 	};
5311 	struct seq_buf s;
5312 	unsigned long flags;
5313 	char *buf;
5314 	int cpu;
5315 
5316 	spin_lock_irqsave(&dump_lock, flags);
5317 
5318 	seq_buf_init(&s, ei->dump, dump_len);
5319 
5320 	if (ei->kind == SCX_EXIT_NONE) {
5321 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
5322 	} else {
5323 		dump_line(&s, "%s[%d] triggered exit kind %d:",
5324 			  current->comm, current->pid, ei->kind);
5325 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
5326 		dump_newline(&s);
5327 		dump_line(&s, "Backtrace:");
5328 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
5329 	}
5330 
5331 	if (SCX_HAS_OP(dump)) {
5332 		ops_dump_init(&s, "");
5333 		SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
5334 		ops_dump_exit();
5335 	}
5336 
5337 	dump_newline(&s);
5338 	dump_line(&s, "CPU states");
5339 	dump_line(&s, "----------");
5340 
5341 	for_each_possible_cpu(cpu) {
5342 		struct rq *rq = cpu_rq(cpu);
5343 		struct rq_flags rf;
5344 		struct task_struct *p;
5345 		struct seq_buf ns;
5346 		size_t avail, used;
5347 		bool idle;
5348 
5349 		rq_lock(rq, &rf);
5350 
5351 		idle = list_empty(&rq->scx.runnable_list) &&
5352 			rq->curr->sched_class == &idle_sched_class;
5353 
5354 		if (idle && !SCX_HAS_OP(dump_cpu))
5355 			goto next;
5356 
5357 		/*
5358 		 * We don't yet know whether ops.dump_cpu() will produce output
5359 		 * and we may want to skip the default CPU dump if it doesn't.
5360 		 * Use a nested seq_buf to generate the standard dump so that we
5361 		 * can decide whether to commit later.
5362 		 */
5363 		avail = seq_buf_get_buf(&s, &buf);
5364 		seq_buf_init(&ns, buf, avail);
5365 
5366 		dump_newline(&ns);
5367 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5368 			  cpu, rq->scx.nr_running, rq->scx.flags,
5369 			  rq->scx.cpu_released, rq->scx.ops_qseq,
5370 			  rq->scx.pnt_seq);
5371 		dump_line(&ns, "          curr=%s[%d] class=%ps",
5372 			  rq->curr->comm, rq->curr->pid,
5373 			  rq->curr->sched_class);
5374 		if (!cpumask_empty(rq->scx.cpus_to_kick))
5375 			dump_line(&ns, "  cpus_to_kick   : %*pb",
5376 				  cpumask_pr_args(rq->scx.cpus_to_kick));
5377 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5378 			dump_line(&ns, "  idle_to_kick   : %*pb",
5379 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5380 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
5381 			dump_line(&ns, "  cpus_to_preempt: %*pb",
5382 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
5383 		if (!cpumask_empty(rq->scx.cpus_to_wait))
5384 			dump_line(&ns, "  cpus_to_wait   : %*pb",
5385 				  cpumask_pr_args(rq->scx.cpus_to_wait));
5386 
5387 		used = seq_buf_used(&ns);
5388 		if (SCX_HAS_OP(dump_cpu)) {
5389 			ops_dump_init(&ns, "  ");
5390 			SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
5391 			ops_dump_exit();
5392 		}
5393 
5394 		/*
5395 		 * If idle && nothing generated by ops.dump_cpu(), there's
5396 		 * nothing interesting. Skip.
5397 		 */
5398 		if (idle && used == seq_buf_used(&ns))
5399 			goto next;
5400 
5401 		/*
5402 		 * $s may already have overflowed when $ns was created. If so,
5403 		 * calling commit on it will trigger BUG.
5404 		 */
5405 		if (avail) {
5406 			seq_buf_commit(&s, seq_buf_used(&ns));
5407 			if (seq_buf_has_overflowed(&ns))
5408 				seq_buf_set_overflow(&s);
5409 		}
5410 
5411 		if (rq->curr->sched_class == &ext_sched_class)
5412 			scx_dump_task(&s, &dctx, rq->curr, '*');
5413 
5414 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5415 			scx_dump_task(&s, &dctx, p, ' ');
5416 	next:
5417 		rq_unlock(rq, &rf);
5418 	}
5419 
5420 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5421 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5422 		       trunc_marker, sizeof(trunc_marker));
5423 
5424 	spin_unlock_irqrestore(&dump_lock, flags);
5425 }
5426 
5427 static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
5428 {
5429 	struct scx_exit_info *ei = scx_exit_info;
5430 
5431 	if (ei->kind >= SCX_EXIT_ERROR)
5432 		scx_dump_state(ei, scx_ops.exit_dump_len);
5433 
5434 	schedule_scx_ops_disable_work();
5435 }
5436 
5437 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
5438 
5439 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
5440 					     s64 exit_code,
5441 					     const char *fmt, ...)
5442 {
5443 	struct scx_exit_info *ei = scx_exit_info;
5444 	int none = SCX_EXIT_NONE;
5445 	va_list args;
5446 
5447 	if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
5448 		return;
5449 
5450 	ei->exit_code = exit_code;
5451 #ifdef CONFIG_STACKTRACE
5452 	if (kind >= SCX_EXIT_ERROR)
5453 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5454 #endif
5455 	va_start(args, fmt);
5456 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5457 	va_end(args);
5458 
5459 	/*
5460 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5461 	 * in scx_ops_disable_workfn().
5462 	 */
5463 	ei->kind = kind;
5464 	ei->reason = scx_exit_reason(ei->kind);
5465 
5466 	irq_work_queue(&scx_ops_error_irq_work);
5467 }
5468 
5469 static struct kthread_worker *scx_create_rt_helper(const char *name)
5470 {
5471 	struct kthread_worker *helper;
5472 
5473 	helper = kthread_run_worker(0, name);
5474 	if (helper)
5475 		sched_set_fifo(helper->task);
5476 	return helper;
5477 }
5478 
5479 static void check_hotplug_seq(const struct sched_ext_ops *ops)
5480 {
5481 	unsigned long long global_hotplug_seq;
5482 
5483 	/*
5484 	 * If a hotplug event has occurred between when the scheduler was
5485 	 * initialized and when we were able to attach, exit and notify user
5486 	 * space about it.
5487 	 */
5488 	if (ops->hotplug_seq) {
5489 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5490 		if (ops->hotplug_seq != global_hotplug_seq) {
5491 			scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5492 				     "expected hotplug seq %llu did not match actual %llu",
5493 				     ops->hotplug_seq, global_hotplug_seq);
5494 		}
5495 	}
5496 }
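/*
 * Illustrative sketch, not part of this file: a user-space loader can
 * cooperate with the check above by sampling the current sequence number
 * before load and storing it in ops->hotplug_seq. The sysfs path and the
 * skeleton layout below are assumptions modeled on the in-tree scx tooling:
 *
 *	unsigned long long seq = 0;
 *	FILE *f = fopen("/sys/kernel/sched_ext/hotplug_seq", "r");
 *
 *	if (f) {
 *		if (fscanf(f, "%llu", &seq) == 1)
 *			skel->struct_ops.simple_ops->hotplug_seq = seq;
 *		fclose(f);
 *	}
 *
 * If a CPU is hotplugged between the read and attach, registration exits
 * with %SCX_ECODE_ACT_RESTART | %SCX_ECODE_RSN_HOTPLUG and the loader can
 * simply retry.
 */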
5497 
5498 static int validate_ops(const struct sched_ext_ops *ops)
5499 {
5500 	/*
5501 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5502 	 * ops.enqueue() callback isn't implemented.
5503 	 */
5504 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5505 		scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5506 		return -EINVAL;
5507 	}
5508 
5509 	return 0;
5510 }
5511 
5512 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5513 {
5514 	struct scx_task_iter sti;
5515 	struct task_struct *p;
5516 	unsigned long timeout;
5517 	int i, cpu, node, ret;
5518 
5519 	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5520 			   cpu_possible_mask)) {
5521 		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5522 		return -EINVAL;
5523 	}
5524 
5525 	mutex_lock(&scx_ops_enable_mutex);
5526 
5527 	if (!scx_ops_helper) {
5528 		WRITE_ONCE(scx_ops_helper,
5529 			   scx_create_rt_helper("sched_ext_ops_helper"));
5530 		if (!scx_ops_helper) {
5531 			ret = -ENOMEM;
5532 			goto err_unlock;
5533 		}
5534 	}
5535 
5536 	if (!global_dsqs) {
5537 		struct scx_dispatch_q **dsqs;
5538 
5539 		dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
5540 		if (!dsqs) {
5541 			ret = -ENOMEM;
5542 			goto err_unlock;
5543 		}
5544 
5545 		for_each_node_state(node, N_POSSIBLE) {
5546 			struct scx_dispatch_q *dsq;
5547 
5548 			dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5549 			if (!dsq) {
5550 				for_each_node_state(node, N_POSSIBLE)
5551 					kfree(dsqs[node]);
5552 				kfree(dsqs);
5553 				ret = -ENOMEM;
5554 				goto err_unlock;
5555 			}
5556 
5557 			init_dsq(dsq, SCX_DSQ_GLOBAL);
5558 			dsqs[node] = dsq;
5559 		}
5560 
5561 		global_dsqs = dsqs;
5562 	}
5563 
5564 	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5565 		ret = -EBUSY;
5566 		goto err_unlock;
5567 	}
5568 
5569 	scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5570 	if (!scx_root_kobj) {
5571 		ret = -ENOMEM;
5572 		goto err_unlock;
5573 	}
5574 
5575 	scx_root_kobj->kset = scx_kset;
5576 	ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5577 	if (ret < 0)
5578 		goto err;
5579 
5580 	scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5581 	if (!scx_exit_info) {
5582 		ret = -ENOMEM;
5583 		goto err_del;
5584 	}
5585 
5586 	/*
5587 	 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5588 	 * disable path. Failure triggers full disabling from here on.
5589 	 */
5590 	scx_ops = *ops;
5591 
5592 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5593 		     SCX_OPS_DISABLED);
5594 
5595 	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5596 	scx_warned_zero_slice = false;
5597 
5598 	atomic_long_set(&scx_nr_rejected, 0);
5599 
5600 	for_each_possible_cpu(cpu)
5601 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5602 
5603 	/*
5604 	 * Keep CPUs stable during enable so that the BPF scheduler can track
5605 	 * online CPUs by watching ->on/offline_cpu() after ->init().
5606 	 */
5607 	cpus_read_lock();
5608 
5609 	if (scx_ops.init) {
5610 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5611 		if (ret) {
5612 			ret = ops_sanitize_err("init", ret);
5613 			cpus_read_unlock();
5614 			scx_ops_error("ops.init() failed (%d)", ret);
5615 			goto err_disable;
5616 		}
5617 	}
5618 
5619 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5620 		if (((void (**)(void))ops)[i])
5621 			static_branch_enable_cpuslocked(&scx_has_op[i]);
5622 
5623 	check_hotplug_seq(ops);
5624 #ifdef CONFIG_SMP
5625 	update_selcpu_topology();
5626 #endif
5627 	cpus_read_unlock();
5628 
5629 	ret = validate_ops(ops);
5630 	if (ret)
5631 		goto err_disable;
5632 
5633 	WARN_ON_ONCE(scx_dsp_ctx);
5634 	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5635 	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5636 						   scx_dsp_max_batch),
5637 				     __alignof__(struct scx_dsp_ctx));
5638 	if (!scx_dsp_ctx) {
5639 		ret = -ENOMEM;
5640 		goto err_disable;
5641 	}
5642 
5643 	if (ops->timeout_ms)
5644 		timeout = msecs_to_jiffies(ops->timeout_ms);
5645 	else
5646 		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5647 
5648 	WRITE_ONCE(scx_watchdog_timeout, timeout);
5649 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5650 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5651 			   scx_watchdog_timeout / 2);
5652 
5653 	/*
5654 	 * Once __scx_ops_enabled is set, %current can be switched to SCX
5655 	 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5656 	 * userspace scheduling) may not function correctly before all tasks are
5657 	 * switched. Init in bypass mode to guarantee forward progress.
5658 	 */
5659 	scx_ops_bypass(true);
5660 
5661 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5662 		if (((void (**)(void))ops)[i])
5663 			static_branch_enable(&scx_has_op[i]);
5664 
5665 	if (ops->flags & SCX_OPS_ENQ_LAST)
5666 		static_branch_enable(&scx_ops_enq_last);
5667 
5668 	if (ops->flags & SCX_OPS_ENQ_EXITING)
5669 		static_branch_enable(&scx_ops_enq_exiting);
5670 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5671 		static_branch_enable(&scx_ops_cpu_preempt);
5672 
5673 	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
5674 		reset_idle_masks();
5675 		static_branch_enable(&scx_builtin_idle_enabled);
5676 	} else {
5677 		static_branch_disable(&scx_builtin_idle_enabled);
5678 	}
5679 
5680 	/*
5681 	 * Lock out forks, cgroup on/offlining and moves before opening the
5682 	 * floodgate so that they don't wander into the operations prematurely.
5683 	 */
5684 	percpu_down_write(&scx_fork_rwsem);
5685 
5686 	WARN_ON_ONCE(scx_ops_init_task_enabled);
5687 	scx_ops_init_task_enabled = true;
5688 
5689 	/*
5690 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5691 	 * preventing new tasks from being added. No need to exclude tasks
5692 	 * leaving as sched_ext_free() can handle both prepped and enabled
5693 	 * tasks. Prep all tasks first and then enable them with preemption
5694 	 * disabled.
5695 	 *
5696 	 * All cgroups should be initialized before scx_ops_init_task() so that
5697 	 * the BPF scheduler can reliably track each task's cgroup membership
5698 	 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5699 	 * migrations while tasks are being initialized so that
5700 	 * scx_cgroup_can_attach() never sees uninitialized tasks.
5701 	 */
5702 	scx_cgroup_lock();
5703 	ret = scx_cgroup_init();
5704 	if (ret)
5705 		goto err_disable_unlock_all;
5706 
5707 	scx_task_iter_start(&sti);
5708 	while ((p = scx_task_iter_next_locked(&sti))) {
5709 		/*
5710 		 * @p may already be dead, have lost all its usage counts and
5711 		 * be waiting for an RCU grace period before being freed. @p can't
5712 		 * be initialized for SCX in such cases and should be ignored.
5713 		 */
5714 		if (!tryget_task_struct(p))
5715 			continue;
5716 
5717 		scx_task_iter_unlock(&sti);
5718 
5719 		ret = scx_ops_init_task(p, task_group(p), false);
5720 		if (ret) {
5721 			put_task_struct(p);
5722 			scx_task_iter_relock(&sti);
5723 			scx_task_iter_stop(&sti);
5724 			scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5725 				      ret, p->comm, p->pid);
5726 			goto err_disable_unlock_all;
5727 		}
5728 
5729 		scx_set_task_state(p, SCX_TASK_READY);
5730 
5731 		put_task_struct(p);
5732 		scx_task_iter_relock(&sti);
5733 	}
5734 	scx_task_iter_stop(&sti);
5735 	scx_cgroup_unlock();
5736 	percpu_up_write(&scx_fork_rwsem);
5737 
5738 	/*
5739 	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5740 	 * all eligible tasks.
5741 	 */
5742 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5743 	static_branch_enable(&__scx_ops_enabled);
5744 
5745 	/*
5746 	 * We're fully committed and can't fail. The task READY -> ENABLED
5747 	 * transitions here are synchronized against sched_ext_free() through
5748 	 * scx_tasks_lock.
5749 	 */
5750 	percpu_down_write(&scx_fork_rwsem);
5751 	scx_task_iter_start(&sti);
5752 	while ((p = scx_task_iter_next_locked(&sti))) {
5753 		const struct sched_class *old_class = p->sched_class;
5754 		const struct sched_class *new_class =
5755 			__setscheduler_class(p->policy, p->prio);
5756 		struct sched_enq_and_set_ctx ctx;
5757 
5758 		if (old_class != new_class && p->se.sched_delayed)
5759 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5760 
5761 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5762 
5763 		p->scx.slice = SCX_SLICE_DFL;
5764 		p->sched_class = new_class;
5765 		check_class_changing(task_rq(p), p, old_class);
5766 
5767 		sched_enq_and_set_task(&ctx);
5768 
5769 		check_class_changed(task_rq(p), p, old_class, p->prio);
5770 	}
5771 	scx_task_iter_stop(&sti);
5772 	percpu_up_write(&scx_fork_rwsem);
5773 
5774 	scx_ops_bypass(false);
5775 
5776 	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5777 		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5778 		goto err_disable;
5779 	}
5780 
5781 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5782 		static_branch_enable(&__scx_switched_all);
5783 
5784 	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5785 		scx_ops.name, scx_switched_all() ? "" : " (partial)");
5786 	kobject_uevent(scx_root_kobj, KOBJ_ADD);
5787 	mutex_unlock(&scx_ops_enable_mutex);
5788 
5789 	atomic_long_inc(&scx_enable_seq);
5790 
5791 	return 0;
5792 
5793 err_del:
5794 	kobject_del(scx_root_kobj);
5795 err:
5796 	kobject_put(scx_root_kobj);
5797 	scx_root_kobj = NULL;
5798 	if (scx_exit_info) {
5799 		free_exit_info(scx_exit_info);
5800 		scx_exit_info = NULL;
5801 	}
5802 err_unlock:
5803 	mutex_unlock(&scx_ops_enable_mutex);
5804 	return ret;
5805 
5806 err_disable_unlock_all:
5807 	scx_cgroup_unlock();
5808 	percpu_up_write(&scx_fork_rwsem);
5809 	scx_ops_bypass(false);
5810 err_disable:
5811 	mutex_unlock(&scx_ops_enable_mutex);
5812 	/*
5813 	 * Returning an error code here would not pass all the error information
5814 	 * to userspace. Record errno using scx_ops_error() for cases where
5815 	 * scx_ops_error() wasn't already invoked, and exit indicating success so
5816 	 * that the error is reported through ops.exit() with all the details.
5817 	 *
5818 	 * Flush scx_ops_disable_work to ensure that error is reported before
5819 	 * init completion.
5820 	 */
5821 	scx_ops_error("scx_ops_enable() failed (%d)", ret);
5822 	kthread_flush_work(&scx_ops_disable_work);
5823 	return 0;
5824 }
5825 
5826 
5827 /********************************************************************************
5828  * bpf_struct_ops plumbing.
5829  */
5830 #include <linux/bpf_verifier.h>
5831 #include <linux/bpf.h>
5832 #include <linux/btf.h>
5833 
5834 static const struct btf_type *task_struct_type;
5835 
5836 static bool bpf_scx_is_valid_access(int off, int size,
5837 				    enum bpf_access_type type,
5838 				    const struct bpf_prog *prog,
5839 				    struct bpf_insn_access_aux *info)
5840 {
5841 	if (type != BPF_READ)
5842 		return false;
5843 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5844 		return false;
5845 	if (off % size != 0)
5846 		return false;
5847 
5848 	return btf_ctx_access(off, size, type, prog, info);
5849 }
5850 
5851 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5852 				     const struct bpf_reg_state *reg, int off,
5853 				     int size)
5854 {
5855 	const struct btf_type *t;
5856 
5857 	t = btf_type_by_id(reg->btf, reg->btf_id);
5858 	if (t == task_struct_type) {
5859 		if (off >= offsetof(struct task_struct, scx.slice) &&
5860 		    off + size <= offsetofend(struct task_struct, scx.slice))
5861 			return SCALAR_VALUE;
5862 		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5863 		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5864 			return SCALAR_VALUE;
5865 		if (off >= offsetof(struct task_struct, scx.disallow) &&
5866 		    off + size <= offsetofend(struct task_struct, scx.disallow))
5867 			return SCALAR_VALUE;
5868 	}
5869 
5870 	return -EACCES;
5871 }
5872 
5873 static const struct bpf_func_proto *
5874 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5875 {
5876 	switch (func_id) {
5877 	case BPF_FUNC_task_storage_get:
5878 		return &bpf_task_storage_get_proto;
5879 	case BPF_FUNC_task_storage_delete:
5880 		return &bpf_task_storage_delete_proto;
5881 	default:
5882 		return bpf_base_func_proto(func_id, prog);
5883 	}
5884 }
5885 
5886 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5887 	.get_func_proto = bpf_scx_get_func_proto,
5888 	.is_valid_access = bpf_scx_is_valid_access,
5889 	.btf_struct_access = bpf_scx_btf_struct_access,
5890 };
5891 
5892 static int bpf_scx_init_member(const struct btf_type *t,
5893 			       const struct btf_member *member,
5894 			       void *kdata, const void *udata)
5895 {
5896 	const struct sched_ext_ops *uops = udata;
5897 	struct sched_ext_ops *ops = kdata;
5898 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5899 	int ret;
5900 
5901 	switch (moff) {
5902 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
5903 		if (*(u32 *)(udata + moff) > INT_MAX)
5904 			return -E2BIG;
5905 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
5906 		return 1;
5907 	case offsetof(struct sched_ext_ops, flags):
5908 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5909 			return -EINVAL;
5910 		ops->flags = *(u64 *)(udata + moff);
5911 		return 1;
5912 	case offsetof(struct sched_ext_ops, name):
5913 		ret = bpf_obj_name_cpy(ops->name, uops->name,
5914 				       sizeof(ops->name));
5915 		if (ret < 0)
5916 			return ret;
5917 		if (ret == 0)
5918 			return -EINVAL;
5919 		return 1;
5920 	case offsetof(struct sched_ext_ops, timeout_ms):
5921 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5922 		    SCX_WATCHDOG_MAX_TIMEOUT)
5923 			return -E2BIG;
5924 		ops->timeout_ms = *(u32 *)(udata + moff);
5925 		return 1;
5926 	case offsetof(struct sched_ext_ops, exit_dump_len):
5927 		ops->exit_dump_len =
5928 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5929 		return 1;
5930 	case offsetof(struct sched_ext_ops, hotplug_seq):
5931 		ops->hotplug_seq = *(u64 *)(udata + moff);
5932 		return 1;
5933 	}
5934 
5935 	return 0;
5936 }
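/*
 * The members handled above are the plain data fields of struct
 * sched_ext_ops; everything else is treated as a callback. A BPF scheduler
 * typically sets them alongside its callbacks. A minimal sketch following
 * the conventions of the in-tree scx tooling (all names illustrative):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops simple_ops = {
 *		.enqueue	= (void *)simple_enqueue,
 *		.init		= (void *)simple_init,
 *		.timeout_ms	= 5000,
 *		.name		= "simple",
 *	};
 *
 * Per the checks above, timeout_ms is rejected if it exceeds
 * SCX_WATCHDOG_MAX_TIMEOUT, a zero exit_dump_len falls back to
 * SCX_EXIT_DUMP_DFL_LEN, and name must be a non-empty valid BPF object name.
 */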
5937 
5938 static int bpf_scx_check_member(const struct btf_type *t,
5939 				const struct btf_member *member,
5940 				const struct bpf_prog *prog)
5941 {
5942 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5943 
5944 	switch (moff) {
5945 	case offsetof(struct sched_ext_ops, init_task):
5946 #ifdef CONFIG_EXT_GROUP_SCHED
5947 	case offsetof(struct sched_ext_ops, cgroup_init):
5948 	case offsetof(struct sched_ext_ops, cgroup_exit):
5949 	case offsetof(struct sched_ext_ops, cgroup_prep_move):
5950 #endif
5951 	case offsetof(struct sched_ext_ops, cpu_online):
5952 	case offsetof(struct sched_ext_ops, cpu_offline):
5953 	case offsetof(struct sched_ext_ops, init):
5954 	case offsetof(struct sched_ext_ops, exit):
5955 		break;
5956 	default:
5957 		if (prog->sleepable)
5958 			return -EINVAL;
5959 	}
5960 
5961 	return 0;
5962 }
5963 
5964 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5965 {
5966 	return scx_ops_enable(kdata, link);
5967 }
5968 
5969 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5970 {
5971 	scx_ops_disable(SCX_EXIT_UNREG);
5972 	kthread_flush_work(&scx_ops_disable_work);
5973 }
5974 
5975 static int bpf_scx_init(struct btf *btf)
5976 {
5977 	task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5978 
5979 	return 0;
5980 }
5981 
5982 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5983 {
5984 	/*
5985 	 * sched_ext does not support updating the actively-loaded BPF
5986 	 * scheduler, as registering a BPF scheduler can always fail if the
5987 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5988 	 * etc. Similarly, we can always race with unregistration happening
5989 	 * elsewhere, such as with sysrq.
5990 	 */
5991 	return -EOPNOTSUPP;
5992 }
5993 
5994 static int bpf_scx_validate(void *kdata)
5995 {
5996 	return 0;
5997 }
5998 
5999 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
6000 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
6001 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
6002 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
6003 static void sched_ext_ops__tick(struct task_struct *p) {}
6004 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
6005 static void sched_ext_ops__running(struct task_struct *p) {}
6006 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
6007 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
6008 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
6009 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
6010 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
6011 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
6012 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
6013 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
6014 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
6015 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
6016 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
6017 static void sched_ext_ops__enable(struct task_struct *p) {}
6018 static void sched_ext_ops__disable(struct task_struct *p) {}
6019 #ifdef CONFIG_EXT_GROUP_SCHED
6020 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
6021 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
6022 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
6023 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6024 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
6025 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
6026 #endif
6027 static void sched_ext_ops__cpu_online(s32 cpu) {}
6028 static void sched_ext_ops__cpu_offline(s32 cpu) {}
6029 static s32 sched_ext_ops__init(void) { return -EINVAL; }
6030 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
6031 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
6032 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
6033 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
6034 
6035 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
6036 	.select_cpu		= sched_ext_ops__select_cpu,
6037 	.enqueue		= sched_ext_ops__enqueue,
6038 	.dequeue		= sched_ext_ops__dequeue,
6039 	.dispatch		= sched_ext_ops__dispatch,
6040 	.tick			= sched_ext_ops__tick,
6041 	.runnable		= sched_ext_ops__runnable,
6042 	.running		= sched_ext_ops__running,
6043 	.stopping		= sched_ext_ops__stopping,
6044 	.quiescent		= sched_ext_ops__quiescent,
6045 	.yield			= sched_ext_ops__yield,
6046 	.core_sched_before	= sched_ext_ops__core_sched_before,
6047 	.set_weight		= sched_ext_ops__set_weight,
6048 	.set_cpumask		= sched_ext_ops__set_cpumask,
6049 	.update_idle		= sched_ext_ops__update_idle,
6050 	.cpu_acquire		= sched_ext_ops__cpu_acquire,
6051 	.cpu_release		= sched_ext_ops__cpu_release,
6052 	.init_task		= sched_ext_ops__init_task,
6053 	.exit_task		= sched_ext_ops__exit_task,
6054 	.enable			= sched_ext_ops__enable,
6055 	.disable		= sched_ext_ops__disable,
6056 #ifdef CONFIG_EXT_GROUP_SCHED
6057 	.cgroup_init		= sched_ext_ops__cgroup_init,
6058 	.cgroup_exit		= sched_ext_ops__cgroup_exit,
6059 	.cgroup_prep_move	= sched_ext_ops__cgroup_prep_move,
6060 	.cgroup_move		= sched_ext_ops__cgroup_move,
6061 	.cgroup_cancel_move	= sched_ext_ops__cgroup_cancel_move,
6062 	.cgroup_set_weight	= sched_ext_ops__cgroup_set_weight,
6063 #endif
6064 	.cpu_online		= sched_ext_ops__cpu_online,
6065 	.cpu_offline		= sched_ext_ops__cpu_offline,
6066 	.init			= sched_ext_ops__init,
6067 	.exit			= sched_ext_ops__exit,
6068 	.dump			= sched_ext_ops__dump,
6069 	.dump_cpu		= sched_ext_ops__dump_cpu,
6070 	.dump_task		= sched_ext_ops__dump_task,
6071 };
6072 
6073 static struct bpf_struct_ops bpf_sched_ext_ops = {
6074 	.verifier_ops = &bpf_scx_verifier_ops,
6075 	.reg = bpf_scx_reg,
6076 	.unreg = bpf_scx_unreg,
6077 	.check_member = bpf_scx_check_member,
6078 	.init_member = bpf_scx_init_member,
6079 	.init = bpf_scx_init,
6080 	.update = bpf_scx_update,
6081 	.validate = bpf_scx_validate,
6082 	.name = "sched_ext_ops",
6083 	.owner = THIS_MODULE,
6084 	.cfi_stubs = &__bpf_ops_sched_ext_ops
6085 };
6086 
6087 
6088 /********************************************************************************
6089  * System integration and init.
6090  */
6091 
6092 static void sysrq_handle_sched_ext_reset(u8 key)
6093 {
6094 	if (scx_ops_helper)
6095 		scx_ops_disable(SCX_EXIT_SYSRQ);
6096 	else
6097 		pr_info("sched_ext: BPF scheduler not yet used\n");
6098 }
6099 
6100 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
6101 	.handler	= sysrq_handle_sched_ext_reset,
6102 	.help_msg	= "reset-sched-ext(S)",
6103 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
6104 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
6105 };
6106 
6107 static void sysrq_handle_sched_ext_dump(u8 key)
6108 {
6109 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
6110 
6111 	if (scx_enabled())
6112 		scx_dump_state(&ei, 0);
6113 }
6114 
6115 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
6116 	.handler	= sysrq_handle_sched_ext_dump,
6117 	.help_msg	= "dump-sched-ext(D)",
6118 	.action_msg	= "Trigger sched_ext debug dump",
6119 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
6120 };
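/*
 * With sysrq available, both handlers can also be invoked from user space,
 * e.g.:
 *
 *	# echo S > /proc/sysrq-trigger		(disable sched_ext)
 *	# echo D > /proc/sysrq-trigger		(trigger a debug dump)
 */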
6121 
6122 static bool can_skip_idle_kick(struct rq *rq)
6123 {
6124 	lockdep_assert_rq_held(rq);
6125 
6126 	/*
6127 	 * We can skip idle kicking if @rq is going to go through at least one
6128 	 * full SCX scheduling cycle before going idle. Just checking whether
6129 	 * curr is not idle is insufficient because we could be racing
6130 	 * balance_one() trying to pull the next task from a remote rq, which
6131 	 * may fail, and @rq may become idle afterwards.
6132 	 *
6133 	 * The race window is small and we don't and can't guarantee that @rq is
6134 	 * only kicked while idle anyway. Skip only when sure.
6135 	 */
6136 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
6137 }
6138 
6139 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
6140 {
6141 	struct rq *rq = cpu_rq(cpu);
6142 	struct scx_rq *this_scx = &this_rq->scx;
6143 	bool should_wait = false;
6144 	unsigned long flags;
6145 
6146 	raw_spin_rq_lock_irqsave(rq, flags);
6147 
6148 	/*
6149 	 * During CPU hotplug, a CPU may depend on kicking itself to make
6150 	 * forward progress. Allow kicking self regardless of online state.
6151 	 */
6152 	if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
6153 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
6154 			if (rq->curr->sched_class == &ext_sched_class)
6155 				rq->curr->scx.slice = 0;
6156 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6157 		}
6158 
6159 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
6160 			pseqs[cpu] = rq->scx.pnt_seq;
6161 			should_wait = true;
6162 		}
6163 
6164 		resched_curr(rq);
6165 	} else {
6166 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6167 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6168 	}
6169 
6170 	raw_spin_rq_unlock_irqrestore(rq, flags);
6171 
6172 	return should_wait;
6173 }
6174 
6175 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
6176 {
6177 	struct rq *rq = cpu_rq(cpu);
6178 	unsigned long flags;
6179 
6180 	raw_spin_rq_lock_irqsave(rq, flags);
6181 
6182 	if (!can_skip_idle_kick(rq) &&
6183 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
6184 		resched_curr(rq);
6185 
6186 	raw_spin_rq_unlock_irqrestore(rq, flags);
6187 }
6188 
6189 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
6190 {
6191 	struct rq *this_rq = this_rq();
6192 	struct scx_rq *this_scx = &this_rq->scx;
6193 	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
6194 	bool should_wait = false;
6195 	s32 cpu;
6196 
6197 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
6198 		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
6199 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
6200 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6201 	}
6202 
6203 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
6204 		kick_one_cpu_if_idle(cpu, this_rq);
6205 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6206 	}
6207 
6208 	if (!should_wait)
6209 		return;
6210 
6211 	for_each_cpu(cpu, this_scx->cpus_to_wait) {
6212 		unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
6213 
6214 		if (cpu != cpu_of(this_rq)) {
6215 			/*
6216 			 * Pairs with smp_store_release() issued by this CPU in
6217 			 * switch_class() on the resched path.
6218 			 *
6219 			 * We busy-wait here to guarantee that no other task can
6220 			 * be scheduled on our core before the target CPU has
6221 			 * entered the resched path.
6222 			 */
6223 			while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
6224 				cpu_relax();
6225 		}
6226 
6227 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6228 	}
6229 }
6230 
6231 /**
6232  * print_scx_info - print out sched_ext scheduler state
6233  * @log_lvl: the log level to use when printing
6234  * @p: target task
6235  *
6236  * If a sched_ext scheduler is enabled, print the name and state of the
6237  * scheduler. If @p is on sched_ext, print further information about the task.
6238  *
6239  * This function can be safely called on any task as long as the task_struct
6240  * itself is accessible. While safe, this function isn't synchronized and may
6241  * print mixed-up or garbled output of limited length.
6242  */
6243 void print_scx_info(const char *log_lvl, struct task_struct *p)
6244 {
6245 	enum scx_ops_enable_state state = scx_ops_enable_state();
6246 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
6247 	char runnable_at_buf[22] = "?";
6248 	struct sched_class *class;
6249 	unsigned long runnable_at;
6250 
6251 	if (state == SCX_OPS_DISABLED)
6252 		return;
6253 
6254 	/*
6255 	 * Carefully check if the task was running on sched_ext, and then
6256 	 * carefully copy the time it's been runnable, and its state.
6257 	 */
6258 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6259 	    class != &ext_sched_class) {
6260 		printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
6261 		       scx_ops_enable_state_str[state], all);
6262 		return;
6263 	}
6264 
6265 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6266 				      sizeof(runnable_at)))
6267 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6268 			  jiffies_delta_msecs(runnable_at, jiffies));
6269 
6270 	/* print everything onto one line to conserve console space */
6271 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6272 	       log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
6273 	       runnable_at_buf);
6274 }
6275 
6276 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6277 {
6278 	/*
6279 	 * SCX schedulers often have userspace components which are sometimes
6280 	 * involved in critical scheduling paths. PM operations involve freezing
6281 	 * userspace which can lead to scheduling misbehaviors including stalls.
6282 	 * Let's bypass while PM operations are in progress.
6283 	 */
6284 	switch (event) {
6285 	case PM_HIBERNATION_PREPARE:
6286 	case PM_SUSPEND_PREPARE:
6287 	case PM_RESTORE_PREPARE:
6288 		scx_ops_bypass(true);
6289 		break;
6290 	case PM_POST_HIBERNATION:
6291 	case PM_POST_SUSPEND:
6292 	case PM_POST_RESTORE:
6293 		scx_ops_bypass(false);
6294 		break;
6295 	}
6296 
6297 	return NOTIFY_OK;
6298 }
6299 
6300 static struct notifier_block scx_pm_notifier = {
6301 	.notifier_call = scx_pm_handler,
6302 };
6303 
6304 void __init init_sched_ext_class(void)
6305 {
6306 	s32 cpu, v;
6307 
6308 	/*
6309 	 * The following is to prevent the compiler from optimizing out the enum
6310 	 * definitions so that BPF scheduler implementations can use them
6311 	 * through the generated vmlinux.h.
6312 	 */
6313 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6314 		   SCX_TG_ONLINE);
6315 
6316 	BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
6317 #ifdef CONFIG_SMP
6318 	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
6319 	BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
6320 #endif
6321 	scx_kick_cpus_pnt_seqs =
6322 		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6323 			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
6324 	BUG_ON(!scx_kick_cpus_pnt_seqs);
6325 
6326 	for_each_possible_cpu(cpu) {
6327 		struct rq *rq = cpu_rq(cpu);
6328 
6329 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6330 		INIT_LIST_HEAD(&rq->scx.runnable_list);
6331 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6332 
6333 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
6334 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
6335 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
6336 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
6337 		init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6338 		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6339 
6340 		if (cpu_online(cpu))
6341 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6342 	}
6343 
6344 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6345 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6346 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6347 }
6348 
6349 
6350 /********************************************************************************
6351  * Helpers that can be called from the BPF scheduler.
6352  */
6353 #include <linux/btf_ids.h>
6354 
6355 __bpf_kfunc_start_defs();
6356 
6357 static bool check_builtin_idle_enabled(void)
6358 {
6359 	if (static_branch_likely(&scx_builtin_idle_enabled))
6360 		return true;
6361 
6362 	scx_ops_error("built-in idle tracking is disabled");
6363 	return false;
6364 }
6365 
6366 /**
6367  * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
6368  * @p: task_struct to select a CPU for
6369  * @prev_cpu: CPU @p was on previously
6370  * @wake_flags: %SCX_WAKE_* flags
6371  * @is_idle: out parameter indicating whether the returned CPU is idle
6372  *
6373  * Can only be called from ops.select_cpu() if the built-in CPU selection is
6374  * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
6375  * @p, @prev_cpu and @wake_flags match ops.select_cpu().
6376  *
6377  * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
6378  * currently idle and thus a good candidate for direct dispatching.
6379  */
6380 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
6381 				       u64 wake_flags, bool *is_idle)
6382 {
6383 	if (!check_builtin_idle_enabled())
6384 		goto prev_cpu;
6385 
6386 	if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
6387 		goto prev_cpu;
6388 
6389 #ifdef CONFIG_SMP
6390 	return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
6391 #endif
6392 
6393 prev_cpu:
6394 	*is_idle = false;
6395 	return prev_cpu;
6396 }
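/*
 * Illustrative BPF-side usage, not part of this file, in the spirit of the
 * in-tree scx_simple scheduler. Assumes the scx common BPF headers which
 * provide BPF_STRUCT_OPS and SCX_SLICE_DFL:
 *
 *	s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */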
6397 
6398 __bpf_kfunc_end_defs();
6399 
6400 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
6401 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
6402 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
6403 
6404 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
6405 	.owner			= THIS_MODULE,
6406 	.set			= &scx_kfunc_ids_select_cpu,
6407 };
6408 
6409 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6410 {
6411 	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6412 		return false;
6413 
6414 	lockdep_assert_irqs_disabled();
6415 
6416 	if (unlikely(!p)) {
6417 		scx_ops_error("called with NULL task");
6418 		return false;
6419 	}
6420 
6421 	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6422 		scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
6423 		return false;
6424 	}
6425 
6426 	return true;
6427 }
6428 
6429 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6430 				  u64 enq_flags)
6431 {
6432 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6433 	struct task_struct *ddsp_task;
6434 
6435 	ddsp_task = __this_cpu_read(direct_dispatch_task);
6436 	if (ddsp_task) {
6437 		mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6438 		return;
6439 	}
6440 
6441 	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6442 		scx_ops_error("dispatch buffer overflow");
6443 		return;
6444 	}
6445 
6446 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6447 		.task = p,
6448 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6449 		.dsq_id = dsq_id,
6450 		.enq_flags = enq_flags,
6451 	};
6452 }
6453 
6454 __bpf_kfunc_start_defs();
6455 
6456 /**
6457  * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6458  * @p: task_struct to insert
6459  * @dsq_id: DSQ to insert into
6460  * @slice: duration @p can run for in nsecs, 0 to keep the current value
6461  * @enq_flags: SCX_ENQ_*
6462  *
6463  * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6464  * call this function spuriously. Can be called from ops.enqueue(),
6465  * ops.select_cpu(), and ops.dispatch().
6466  *
6467  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6468  * and @p must match the task being enqueued.
6469  *
6470  * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6471  * will be directly inserted into the corresponding dispatch queue after
6472  * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6473  * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6474  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6475  * task is inserted.
6476  *
6477  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6478  * and this function can be called up to ops.dispatch_max_batch times to insert
6479  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of remaining
6480  * slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the counter.
6481  *
6482  * This function doesn't have any locking restrictions and may be called under
6483  * BPF locks (in the future when BPF introduces more flexible locking).
6484  *
6485  * @p is allowed to run for @slice. The scheduling path is triggered on slice
6486  * exhaustion. If zero, the current residual slice is maintained. If
6487  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6488  * scx_bpf_kick_cpu() to trigger scheduling.
6489  */
6490 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6491 				    u64 enq_flags)
6492 {
6493 	if (!scx_dsq_insert_preamble(p, enq_flags))
6494 		return;
6495 
6496 	if (slice)
6497 		p->scx.slice = slice;
6498 	else
6499 		p->scx.slice = p->scx.slice ?: 1;
6500 
6501 	scx_dsq_insert_commit(p, dsq_id, enq_flags);
6502 }
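/*
 * Illustrative BPF-side usage, not part of this file: the simplest possible
 * ops.enqueue() forwards every task to the global FIFO:
 *
 *	void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */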
6503 
6504 /* for backward compatibility, will be removed in v6.15 */
6505 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6506 				  u64 enq_flags)
6507 {
6508 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6509 	scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6510 }
6511 
6512 /**
6513  * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6514  * @p: task_struct to insert
6515  * @dsq_id: DSQ to insert into
6516  * @slice: duration @p can run for in nsecs, 0 to keep the current value
6517  * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6518  * @enq_flags: SCX_ENQ_*
6519  *
6520  * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6521  * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6522  * are identical to scx_bpf_dsq_insert().
6523  *
6524  * @vtime ordering follows time_before64(), which considers wrapping. A
6525  * numerically larger vtime may indicate an earlier position in the ordering and
6526  * vice-versa.
6527  *
6528  * A DSQ can only be used as a FIFO or priority queue at any given time and this
6529  * function must not be called on a DSQ which already has one or more FIFO tasks
6530  * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6531  * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6532  */
6533 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6534 					  u64 slice, u64 vtime, u64 enq_flags)
6535 {
6536 	if (!scx_dsq_insert_preamble(p, enq_flags))
6537 		return;
6538 
6539 	if (slice)
6540 		p->scx.slice = slice;
6541 	else
6542 		p->scx.slice = p->scx.slice ?: 1;
6543 
6544 	p->scx.dsq_vtime = vtime;
6545 
6546 	scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6547 }
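/*
 * Illustrative BPF-side usage, not part of this file: vtime-ordered
 * insertion into a user DSQ. SHARED_DSQ is an assumed DSQ ID created with
 * scx_bpf_create_dsq() in ops.init(), and p->scx.dsq_vtime is assumed to be
 * maintained by the scheduler's ops.running()/ops.stopping() callbacks:
 *
 *	void BPF_STRUCT_OPS(vtime_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 */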
6548 
6549 /* for backward compatibility, will be removed in v6.15 */
6550 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6551 					u64 slice, u64 vtime, u64 enq_flags)
6552 {
6553 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6554 	scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6555 }
6556 
6557 __bpf_kfunc_end_defs();
6558 
6559 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6560 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6561 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6562 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6563 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6564 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6565 
6566 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6567 	.owner			= THIS_MODULE,
6568 	.set			= &scx_kfunc_ids_enqueue_dispatch,
6569 };
6570 
6571 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6572 			 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6573 {
6574 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6575 	struct rq *this_rq, *src_rq, *locked_rq;
6576 	bool dispatched = false;
6577 	bool in_balance;
6578 	unsigned long flags;
6579 
6580 	if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6581 		return false;
6582 
6583 	/*
6584 	 * Can be called from either ops.dispatch() locking this_rq() or any
6585 	 * context where no rq lock is held. If the latter, lock @p's task_rq, which
6586 	 * we'll likely need anyway.
6587 	 */
6588 	src_rq = task_rq(p);
6589 
6590 	local_irq_save(flags);
6591 	this_rq = this_rq();
6592 	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6593 
6594 	if (in_balance) {
6595 		if (this_rq != src_rq) {
6596 			raw_spin_rq_unlock(this_rq);
6597 			raw_spin_rq_lock(src_rq);
6598 		}
6599 	} else {
6600 		raw_spin_rq_lock(src_rq);
6601 	}
6602 
6603 	/*
6604 	 * If the BPF scheduler keeps calling this function repeatedly, it can
6605 	 * cause live-lock conditions similar to consume_dispatch_q(). Insert a
6606 	 * breather if necessary.
6607 	 */
6608 	scx_ops_breather(src_rq);
6609 
6610 	locked_rq = src_rq;
6611 	raw_spin_lock(&src_dsq->lock);
6612 
6613 	/*
6614 	 * Did someone else get to it? @p could have already left $src_dsq, been
6615 	 * re-enqueued, or be in the process of being consumed by someone else.
6616 	 */
6617 	if (unlikely(p->scx.dsq != src_dsq ||
6618 		     u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6619 		     p->scx.holding_cpu >= 0) ||
6620 	    WARN_ON_ONCE(src_rq != task_rq(p))) {
6621 		raw_spin_unlock(&src_dsq->lock);
6622 		goto out;
6623 	}
6624 
6625 	/* @p is still on $src_dsq and stable, determine the destination */
6626 	dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6627 
6628 	/*
6629 	 * Apply vtime and slice updates before moving so that the new time is
6630 	 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6631 	 * this is safe as we're locking it.
6632 	 */
6633 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6634 		p->scx.dsq_vtime = kit->vtime;
6635 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6636 		p->scx.slice = kit->slice;
6637 
6638 	/* execute move */
6639 	locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
6640 	dispatched = true;
6641 out:
6642 	if (in_balance) {
6643 		if (this_rq != locked_rq) {
6644 			raw_spin_rq_unlock(locked_rq);
6645 			raw_spin_rq_lock(this_rq);
6646 		}
6647 	} else {
6648 		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6649 	}
6650 
6651 	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6652 			       __SCX_DSQ_ITER_HAS_VTIME);
6653 	return dispatched;
6654 }
6655 
6656 __bpf_kfunc_start_defs();
6657 
6658 /**
6659  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6660  *
6661  * Can only be called from ops.dispatch().
6662  */
6663 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6664 {
6665 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6666 		return 0;
6667 
6668 	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6669 }
6670 
6671 /**
6672  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6673  *
6674  * Cancel the latest dispatch. Can be called multiple times to cancel further
6675  * dispatches. Can only be called from ops.dispatch().
6676  */
6677 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6678 {
6679 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6680 
6681 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6682 		return;
6683 
6684 	if (dspc->cursor > 0)
6685 		dspc->cursor--;
6686 	else
6687 		scx_ops_error("dispatch buffer underflow");
6688 }
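/*
 * Together, the two kfuncs above allow ops.dispatch() to fill the batch
 * incrementally and undo a misdispatch. A hypothetical sketch where
 * pick_next_from_user_queue() is an assumed helper:
 *
 *	void BPF_STRUCT_OPS(batch_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		struct task_struct *p;
 *
 *		while (scx_bpf_dispatch_nr_slots()) {
 *			p = pick_next_from_user_queue();
 *			if (!p)
 *				break;
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
 *				scx_bpf_dispatch_cancel();
 *				scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL,
 *						   SCX_SLICE_DFL, 0);
 *			}
 *		}
 *	}
 */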
6689 
6690 /**
6691  * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6692  * @dsq_id: DSQ to move task from
6693  *
6694  * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6695  * local DSQ for execution. Can only be called from ops.dispatch().
6696  *
6697  * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6698  * before trying to move from the specified DSQ. It may also grab rq locks and
6699  * thus can't be called under any BPF locks.
6700  *
6701  * Returns %true if a task has been moved, %false if there isn't any task to
6702  * move.
6703  */
6704 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6705 {
6706 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6707 	struct scx_dispatch_q *dsq;
6708 
6709 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6710 		return false;
6711 
6712 	flush_dispatch_buf(dspc->rq);
6713 
6714 	dsq = find_user_dsq(dsq_id);
6715 	if (unlikely(!dsq)) {
6716 		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6717 		return false;
6718 	}
6719 
6720 	if (consume_dispatch_q(dspc->rq, dsq)) {
6721 		/*
6722 		 * A successfully consumed task can be dequeued before it starts
6723 		 * running while the CPU is trying to migrate other dispatched
6724 		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6725 		 * local DSQ.
6726 		 */
6727 		dspc->nr_tasks++;
6728 		return true;
6729 	} else {
6730 		return false;
6731 	}
6732 }
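/*
 * Illustrative BPF-side usage, not part of this file: a minimal
 * ops.dispatch() that refills the local DSQ from a shared user DSQ
 * (SHARED_DSQ is an assumed DSQ ID, as above):
 *
 *	void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */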
6733 
6734 /* for backward compatibility, will be removed in v6.15 */
6735 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6736 {
6737 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6738 	return scx_bpf_dsq_move_to_local(dsq_id);
6739 }
6740 
6741 /**
6742  * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6743  * @it__iter: DSQ iterator in progress
6744  * @slice: duration the moved task can run for in nsecs
6745  *
6746  * Override the slice of the next task that will be moved from @it__iter using
6747  * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6748  * slice duration is kept.
6749  */
6750 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6751 					    u64 slice)
6752 {
6753 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6754 
6755 	kit->slice = slice;
6756 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6757 }
6758 
6759 /* for backward compatibility, will be removed in v6.15 */
6760 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6761 			struct bpf_iter_scx_dsq *it__iter, u64 slice)
6762 {
6763 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6764 	scx_bpf_dsq_move_set_slice(it__iter, slice);
6765 }
6766 
6767 /**
6768  * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6769  * @it__iter: DSQ iterator in progress
6770  * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6771  *
6772  * Override the vtime of the next task that will be moved from @it__iter using
6773  * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
6774  * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6775  * override is ignored and cleared.
6776  */
6777 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6778 					    u64 vtime)
6779 {
6780 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6781 
6782 	kit->vtime = vtime;
6783 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6784 }
6785 
6786 /* for backward compatibility, will be removed in v6.15 */
6787 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6788 			struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6789 {
6790 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6791 	scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6792 }
6793 
6794 /**
6795  * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6796  * @it__iter: DSQ iterator in progress
6797  * @p: task to transfer
6798  * @dsq_id: DSQ to move @p to
6799  * @enq_flags: SCX_ENQ_*
6800  *
6801  * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6802  * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6803  * be the destination.
6804  *
6805  * For the transfer to be successful, @p must still be on the DSQ and have been
6806  * queued before the DSQ iteration started. It doesn't matter whether @p was
6807  * obtained from the DSQ iteration itself or from elsewhere; only those two
6808  * conditions need to hold.
6809  *
6810  * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6811  *
6812  * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6813  * lock (e.g. BPF timers or SYSCALL programs).
6814  *
6815  * Returns %true if @p has been moved, %false if @p had already been consumed
6816  * or dequeued.
6817  */
6818 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6819 				  struct task_struct *p, u64 dsq_id,
6820 				  u64 enq_flags)
6821 {
6822 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6823 			    p, dsq_id, enq_flags);
6824 }
6825 
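/*
 * Example (sketch, not part of the kernel source): scan a user DSQ and pull
 * the first task allowed on this CPU into the CPU's local DSQ, overriding
 * its slice. SHARED_DSQ and slice_ns are hypothetical; BPF_FOR_EACH_ITER is
 * the loop-iterator convenience macro from the scx BPF headers.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
 *		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr))
 *			continue;
 *		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, slice_ns);
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
 *				     SCX_DSQ_LOCAL_ON | cpu, 0))
 *			break;
 *	}
 */
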
6826 /* for backward compatibility, will be removed in v6.15 */
6827 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6828 					   struct task_struct *p, u64 dsq_id,
6829 					   u64 enq_flags)
6830 {
6831 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6832 	return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6833 }
6834 
6835 /**
6836  * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6837  * @it__iter: DSQ iterator in progress
6838  * @p: task to transfer
6839  * @dsq_id: DSQ to move @p to
6840  * @enq_flags: SCX_ENQ_*
6841  *
6842  * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6843  * priority queue of the DSQ specified by @dsq_id. The destination must be a
6844  * user DSQ as only user DSQs support priority queue.
6845  *
6846  * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6847  * and scx_bpf_dsq_move_set_vtime() to update.
6848  *
6849  * All other aspects are identical to scx_bpf_dsq_move(). See
6850  * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6851  */
6852 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6853 					struct task_struct *p, u64 dsq_id,
6854 					u64 enq_flags)
6855 {
6856 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6857 			    p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6858 }
6859 
6860 /* for backward compatibility, will be removed in v6.15 */
6861 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6862 						 struct task_struct *p, u64 dsq_id,
6863 						 u64 enq_flags)
6864 {
6865 	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6866 	return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6867 }
6868 
6869 __bpf_kfunc_end_defs();
6870 
6871 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6872 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6873 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6874 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6875 BTF_ID_FLAGS(func, scx_bpf_consume)
6876 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6877 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6878 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6879 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6880 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6881 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6882 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6883 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6884 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6885 
6886 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6887 	.owner			= THIS_MODULE,
6888 	.set			= &scx_kfunc_ids_dispatch,
6889 };
6890 
6891 __bpf_kfunc_start_defs();
6892 
6893 /**
6894  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6895  *
6896  * Iterate over all of the tasks currently enqueued on the local DSQ of the
6897  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6898  * processed tasks. Can only be called from ops.cpu_release().
6899  */
6900 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6901 {
6902 	LIST_HEAD(tasks);
6903 	u32 nr_enqueued = 0;
6904 	struct rq *rq;
6905 	struct task_struct *p, *n;
6906 
6907 	if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6908 		return 0;
6909 
6910 	rq = cpu_rq(smp_processor_id());
6911 	lockdep_assert_rq_held(rq);
6912 
6913 	/*
6914 	 * The BPF scheduler may choose to dispatch tasks back to
6915 	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6916 	 * first to avoid processing the same tasks repeatedly.
6917 	 */
6918 	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6919 				 scx.dsq_list.node) {
6920 		/*
6921 		 * If @p is being migrated, @p's current CPU may not agree with
6922 		 * its allowed CPUs and the migration_cpu_stop is about to
6923 		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6924 		 *
6925 		 * While racing sched property changes may also dequeue and
6926 		 * re-enqueue a migrating task while its current CPU and allowed
6927 		 * CPUs disagree, they use %ENQUEUE_RESTORE, which for running
6928 		 * tasks goes straight back to the current local DSQ and is
6929 		 * thus not visible to the BPF scheduler.
6930 		 */
6931 		if (p->migration_pending)
6932 			continue;
6933 
6934 		dispatch_dequeue(rq, p);
6935 		list_add_tail(&p->scx.dsq_list.node, &tasks);
6936 	}
6937 
6938 	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6939 		list_del_init(&p->scx.dsq_list.node);
6940 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6941 		nr_enqueued++;
6942 	}
6943 
6944 	return nr_enqueued;
6945 }
6946 
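/*
 * Example (sketch, not part of the kernel source): a typical
 * ops.cpu_release() hands the tasks queued on the released CPU's local DSQ
 * back to the BPF scheduler so they can be placed elsewhere while the CPU
 * runs a higher priority sched class.
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */
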
6947 __bpf_kfunc_end_defs();
6948 
6949 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6950 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6951 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6952 
6953 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6954 	.owner			= THIS_MODULE,
6955 	.set			= &scx_kfunc_ids_cpu_release,
6956 };
6957 
6958 __bpf_kfunc_start_defs();
6959 
6960 /**
6961  * scx_bpf_create_dsq - Create a custom DSQ
6962  * @dsq_id: DSQ to create
6963  * @node: NUMA node to allocate from
6964  *
6965  * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6966  * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6967  */
6968 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6969 {
6970 	if (unlikely(node >= (int)nr_node_ids ||
6971 		     (node < 0 && node != NUMA_NO_NODE)))
6972 		return -EINVAL;
6973 	return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
6974 }
6975 
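/*
 * Example (sketch, not part of the kernel source): DSQs are typically
 * created from ops.init(), which is sleepable. SHARED_DSQ is a hypothetical
 * ID chosen by the scheduler; -1 corresponds to NUMA_NO_NODE.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	}
 */
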
6976 __bpf_kfunc_end_defs();
6977 
6978 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6979 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6980 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6981 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6982 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6983 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6984 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6985 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6986 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6987 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6988 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6989 
6990 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6991 	.owner			= THIS_MODULE,
6992 	.set			= &scx_kfunc_ids_unlocked,
6993 };
6994 
6995 __bpf_kfunc_start_defs();
6996 
6997 /**
6998  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6999  * @cpu: cpu to kick
7000  * @flags: %SCX_KICK_* flags
7001  *
7002  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
7003  * trigger rescheduling on a busy CPU. This can be called from any online
7004  * scx_ops operation and the actual kicking is performed asynchronously through
7005  * an irq work.
7006  */
7007 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
7008 {
7009 	struct rq *this_rq;
7010 	unsigned long irq_flags;
7011 
7012 	if (!ops_cpu_valid(cpu, NULL))
7013 		return;
7014 
7015 	local_irq_save(irq_flags);
7016 
7017 	this_rq = this_rq();
7018 
7019 	/*
7020 	 * While bypassing for PM ops, IRQ handling may not be online, which can
7021 	 * lead to irq_work_queue() malfunctions such as an infinite busy wait
7022 	 * for an IRQ status update. Suppress kicking.
7023 	 */
7024 	if (scx_rq_bypassing(this_rq))
7025 		goto out;
7026 
7027 	/*
7028 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
7029 	 * rq locks. We can probably be smarter and avoid bouncing if called
7030 	 * from ops which don't hold a rq lock.
7031 	 */
7032 	if (flags & SCX_KICK_IDLE) {
7033 		struct rq *target_rq = cpu_rq(cpu);
7034 
7035 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
7036 			scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
7037 
7038 		if (raw_spin_rq_trylock(target_rq)) {
7039 			if (can_skip_idle_kick(target_rq)) {
7040 				raw_spin_rq_unlock(target_rq);
7041 				goto out;
7042 			}
7043 			raw_spin_rq_unlock(target_rq);
7044 		}
7045 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
7046 	} else {
7047 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
7048 
7049 		if (flags & SCX_KICK_PREEMPT)
7050 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
7051 		if (flags & SCX_KICK_WAIT)
7052 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
7053 	}
7054 
7055 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
7056 out:
7057 	local_irq_restore(irq_flags);
7058 }
7059 
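/*
 * Example (sketch, not part of the kernel source): after queueing a task on
 * a specific CPU's local DSQ, wake that CPU up if it is idle so the new work
 * is noticed promptly. target_cpu is hypothetical.
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target_cpu, SCX_SLICE_DFL, 0);
 *	scx_bpf_kick_cpu(target_cpu, SCX_KICK_IDLE);
 */
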
7060 /**
7061  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
7062  * @dsq_id: id of the DSQ
7063  *
7064  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
7065  * -%ENOENT is returned.
7066  */
7067 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
7068 {
7069 	struct scx_dispatch_q *dsq;
7070 	s32 ret;
7071 
7072 	preempt_disable();
7073 
7074 	if (dsq_id == SCX_DSQ_LOCAL) {
7075 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
7076 		goto out;
7077 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
7078 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
7079 
7080 		if (ops_cpu_valid(cpu, NULL)) {
7081 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
7082 			goto out;
7083 		}
7084 	} else {
7085 		dsq = find_user_dsq(dsq_id);
7086 		if (dsq) {
7087 			ret = READ_ONCE(dsq->nr);
7088 			goto out;
7089 		}
7090 	}
7091 	ret = -ENOENT;
7092 out:
7093 	preempt_enable();
7094 	return ret;
7095 }
7096 
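/*
 * Example (sketch, not part of the kernel source): queue depths can drive
 * placement decisions, e.g. enqueueing to the shallower of two hypothetical
 * per-node DSQs, both assumed to have been created already.
 *
 *	s32 n0 = scx_bpf_dsq_nr_queued(DSQ_NODE0);
 *	s32 n1 = scx_bpf_dsq_nr_queued(DSQ_NODE1);
 *	u64 target = n0 <= n1 ? DSQ_NODE0 : DSQ_NODE1;
 *
 *	scx_bpf_dsq_insert(p, target, SCX_SLICE_DFL, 0);
 */
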
7097 /**
7098  * scx_bpf_destroy_dsq - Destroy a custom DSQ
7099  * @dsq_id: DSQ to destroy
7100  *
7101  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
7102  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
7103  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
7104  * which doesn't exist. Can be called from any online scx_ops operation.
7105  */
7106 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
7107 {
7108 	destroy_dsq(dsq_id);
7109 }
7110 
7111 /**
7112  * bpf_iter_scx_dsq_new - Create a DSQ iterator
7113  * @it: iterator to initialize
7114  * @dsq_id: DSQ to iterate
7115  * @flags: %SCX_DSQ_ITER_*
7116  *
7117  * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
7118  * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
7119  * tasks which are already queued when this function is invoked.
7120  */
7121 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
7122 				     u64 flags)
7123 {
7124 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7125 
7126 	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
7127 		     sizeof(struct bpf_iter_scx_dsq));
7128 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
7129 		     __alignof__(struct bpf_iter_scx_dsq));
7130 
7131 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
7132 		return -EINVAL;
7133 
7134 	kit->dsq = find_user_dsq(dsq_id);
7135 	if (!kit->dsq)
7136 		return -ENOENT;
7137 
7138 	INIT_LIST_HEAD(&kit->cursor.node);
7139 	kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
7140 	kit->cursor.priv = READ_ONCE(kit->dsq->seq);
7141 
7142 	return 0;
7143 }
7144 
7145 /**
7146  * bpf_iter_scx_dsq_next - Progress a DSQ iterator
7147  * @it: iterator to progress
7148  *
7149  * Return the next task. See bpf_iter_scx_dsq_new().
7150  */
7151 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
7152 {
7153 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7154 	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
7155 	struct task_struct *p;
7156 	unsigned long flags;
7157 
7158 	if (!kit->dsq)
7159 		return NULL;
7160 
7161 	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7162 
7163 	if (list_empty(&kit->cursor.node))
7164 		p = NULL;
7165 	else
7166 		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
7167 
7168 	/*
7169 	 * Only tasks which were queued before the iteration started are
7170 	 * visible. This bounds BPF iterations and guarantees that vtime never
7171 	 * jumps in the other direction while iterating.
7172 	 */
7173 	do {
7174 		p = nldsq_next_task(kit->dsq, p, rev);
7175 	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
7176 
7177 	if (p) {
7178 		if (rev)
7179 			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
7180 		else
7181 			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
7182 	} else {
7183 		list_del_init(&kit->cursor.node);
7184 	}
7185 
7186 	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7187 
7188 	return p;
7189 }
7190 
7191 /**
7192  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
7193  * @it: iterator to destroy
7194  *
7195  * Undo bpf_iter_scx_dsq_new().
7196  */
7197 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
7198 {
7199 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7200 
7201 	if (!kit->dsq)
7202 		return;
7203 
7204 	if (!list_empty(&kit->cursor.node)) {
7205 		unsigned long flags;
7206 
7207 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7208 		list_del_init(&kit->cursor.node);
7209 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7210 	}
7211 	kit->dsq = NULL;
7212 }
7213 
7214 __bpf_kfunc_end_defs();
7215 
7216 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
7217 			 char *fmt, unsigned long long *data, u32 data__sz)
7218 {
7219 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
7220 	s32 ret;
7221 
7222 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
7223 	    (data__sz && !data)) {
7224 		scx_ops_error("invalid data=%p and data__sz=%u",
7225 			      (void *)data, data__sz);
7226 		return -EINVAL;
7227 	}
7228 
7229 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
7230 	if (ret < 0) {
7231 		scx_ops_error("failed to read data fields (%d)", ret);
7232 		return ret;
7233 	}
7234 
7235 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
7236 				  &bprintf_data);
7237 	if (ret < 0) {
7238 		scx_ops_error("format preparation failed (%d)", ret);
7239 		return ret;
7240 	}
7241 
7242 	ret = bstr_printf(line_buf, line_size, fmt,
7243 			  bprintf_data.bin_args);
7244 	bpf_bprintf_cleanup(&bprintf_data);
7245 	if (ret < 0) {
7246 		scx_ops_error("(\"%s\", %p, %u) failed to format",
7247 			      fmt, data, data__sz);
7248 		return ret;
7249 	}
7250 
7251 	return ret;
7252 }
7253 
7254 static s32 bstr_format(struct scx_bstr_buf *buf,
7255 		       char *fmt, unsigned long long *data, u32 data__sz)
7256 {
7257 	return __bstr_format(buf->data, buf->line, sizeof(buf->line),
7258 			     fmt, data, data__sz);
7259 }
7260 
7261 __bpf_kfunc_start_defs();
7262 
7263 /**
7264  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
7265  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
7266  * @fmt: error message format string
7267  * @data: format string parameters packaged using ___bpf_fill() macro
7268  * @data__sz: @data len, must end in '__sz' for the verifier
7269  *
7270  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
7271  * disabling.
7272  */
7273 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
7274 				   unsigned long long *data, u32 data__sz)
7275 {
7276 	unsigned long flags;
7277 
7278 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7279 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7280 		scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
7281 				  scx_exit_bstr_buf.line);
7282 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7283 }
7284 
7285 /**
7286  * scx_bpf_error_bstr - Indicate fatal error
7287  * @fmt: error message format string
7288  * @data: format string parameters packaged using ___bpf_fill() macro
7289  * @data__sz: @data len, must end in '__sz' for the verifier
7290  *
7291  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
7292  * disabling.
7293  */
7294 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
7295 				    u32 data__sz)
7296 {
7297 	unsigned long flags;
7298 
7299 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7300 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7301 		scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
7302 				  scx_exit_bstr_buf.line);
7303 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7304 }
7305 
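/*
 * Example (sketch, not part of the kernel source): BPF schedulers normally
 * reach this kfunc through the scx_bpf_error() wrapper macro from the scx
 * BPF headers, which packs the format arguments into @data for the verifier.
 *
 *	struct task_struct *p = bpf_task_from_pid(pid);
 *
 *	if (!p) {
 *		scx_bpf_error("failed to look up pid %d", pid);
 *		return;
 *	}
 *	bpf_task_release(p);
 */
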
7306 /**
7307  * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7308  * @fmt: format string
7309  * @data: format string parameters packaged using ___bpf_fill() macro
7310  * @data__sz: @data len, must end in '__sz' for the verifier
7311  *
7312  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7313  * dump_task() to generate extra debug dump specific to the BPF scheduler.
7314  *
7315  * The extra dump may be multiple lines. A single line may be split over
7316  * multiple calls. The last line is automatically terminated.
7317  */
7318 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7319 				   u32 data__sz)
7320 {
7321 	struct scx_dump_data *dd = &scx_dump_data;
7322 	struct scx_bstr_buf *buf = &dd->buf;
7323 	s32 ret;
7324 
7325 	if (raw_smp_processor_id() != dd->cpu) {
7326 		scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7327 		return;
7328 	}
7329 
7330 	/* append the formatted string to the line buf */
7331 	ret = __bstr_format(buf->data, buf->line + dd->cursor,
7332 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7333 	if (ret < 0) {
7334 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7335 			  dd->prefix, fmt, data, data__sz, ret);
7336 		return;
7337 	}
7338 
7339 	dd->cursor += ret;
7340 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7341 
7342 	if (!dd->cursor)
7343 		return;
7344 
7345 	/*
7346 	 * If the line buf overflowed or ends in a newline, flush it into the
7347 	 * dump. This is to allow the caller to generate a single line over
7348 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7349 	 * the line buf, the only case which can lead to unexpected truncation
7350 	 * is when the caller repeatedly emits newlines in the middle of a line
7351 	 * rather than at the end. Don't do that.
7352 	 */
7353 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7354 		ops_dump_flush();
7355 }
7356 
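/*
 * Example (sketch, not part of the kernel source): from ops.dump_task(),
 * emit scheduler-specific state through the scx_bpf_dump() wrapper macro
 * from the scx BPF headers. taskc is a hypothetical per-task context.
 *
 *	void BPF_STRUCT_OPS(example_dump_task, struct scx_dump_ctx *dctx,
 *			    struct task_struct *p)
 *	{
 *		scx_bpf_dump("deadline=%llu runtime=%llu\n",
 *			     taskc->deadline, taskc->runtime);
 *	}
 */
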
7357 /**
7358  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7359  * @cpu: CPU of interest
7360  *
7361  * Return the maximum relative capacity of @cpu in relation to the most
7362  * performant CPU in the system. The return value is in the range [1,
7363  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7364  */
7365 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7366 {
7367 	if (ops_cpu_valid(cpu, NULL))
7368 		return arch_scale_cpu_capacity(cpu);
7369 	else
7370 		return SCX_CPUPERF_ONE;
7371 }
7372 
7373 /**
7374  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7375  * @cpu: CPU of interest
7376  *
7377  * Return the current relative performance of @cpu in relation to its maximum.
7378  * The return value is in the range [1, %SCX_CPUPERF_ONE].
7379  *
7380  * The current performance level of a CPU in relation to the maximum performance
7381  * available in the system can be calculated as follows:
7382  *
7383  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7384  *
7385  * The result is in the range [1, %SCX_CPUPERF_ONE].
7386  */
7387 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7388 {
7389 	if (ops_cpu_valid(cpu, NULL))
7390 		return arch_scale_freq_capacity(cpu);
7391 	else
7392 		return SCX_CPUPERF_ONE;
7393 }
7394 
7395 /**
7396  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7397  * @cpu: CPU of interest
7398  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7399  *
7400  * Set the target performance level of @cpu to @perf. @perf is in linear
7401  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7402  * schedutil cpufreq governor chooses the target frequency.
7403  *
7404  * The actual performance level chosen, CPU grouping, and the overhead and
7405  * latency of the operations are dependent on the hardware and cpufreq driver in
7406  * use. Consult hardware and cpufreq documentation for more information. The
7407  * current performance level can be monitored using scx_bpf_cpuperf_cur().
7408  */
7409 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7410 {
7411 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
7412 		scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7413 		return;
7414 	}
7415 
7416 	if (ops_cpu_valid(cpu, NULL)) {
7417 		struct rq *rq = cpu_rq(cpu);
7418 
7419 		rq->scx.cpuperf_target = perf;
7420 
7421 		rcu_read_lock_sched_notrace();
7422 		cpufreq_update_util(cpu_rq(cpu), 0);
7423 		rcu_read_unlock_sched_notrace();
7424 	}
7425 }
7426 
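/*
 * Example (sketch, not part of the kernel source): a power-aware scheduler
 * might run a CPU at half speed unless it is considered busy. cpu_is_busy()
 * is a hypothetical helper.
 *
 *	u32 perf = cpu_is_busy(cpu) ? SCX_CPUPERF_ONE : SCX_CPUPERF_ONE / 2;
 *
 *	scx_bpf_cpuperf_set(cpu, perf);
 */
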
7427 /**
7428  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7429  *
7430  * All valid CPU IDs in the system are smaller than the returned value.
7431  */
7432 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7433 {
7434 	return nr_cpu_ids;
7435 }
7436 
7437 /**
7438  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7439  */
7440 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7441 {
7442 	return cpu_possible_mask;
7443 }
7444 
7445 /**
7446  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7447  */
7448 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7449 {
7450 	return cpu_online_mask;
7451 }
7452 
7453 /**
7454  * scx_bpf_put_cpumask - Release a possible/online cpumask
7455  * @cpumask: cpumask to release
7456  */
7457 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7458 {
7459 	/*
7460 	 * Empty function body because we aren't actually acquiring or releasing
7461 	 * a reference to a global cpumask, which is read-only in the caller and
7462 	 * is never released. The acquire / release semantics here are just used
7463 	 * to make the cpumask a trusted pointer in the caller.
7464 	 */
7465 }
7466 
7467 /**
7468  * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
7469  * per-CPU cpumask.
7470  *
7471  * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7472  */
7473 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
7474 {
7475 	if (!check_builtin_idle_enabled())
7476 		return cpu_none_mask;
7477 
7478 #ifdef CONFIG_SMP
7479 	return idle_masks.cpu;
7480 #else
7481 	return cpu_none_mask;
7482 #endif
7483 }
7484 
7485 /**
7486  * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
7487  * per-physical-core cpumask. Can be used to determine if an entire physical
7488  * core is free.
7489  *
7490  * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7491  */
7492 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
7493 {
7494 	if (!check_builtin_idle_enabled())
7495 		return cpu_none_mask;
7496 
7497 #ifdef CONFIG_SMP
7498 	if (sched_smt_active())
7499 		return idle_masks.smt;
7500 	else
7501 		return idle_masks.cpu;
7502 #else
7503 	return cpu_none_mask;
7504 #endif
7505 }
7506 
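/*
 * Example (sketch, not part of the kernel source): in ops.select_cpu(),
 * prefer a fully idle physical core for waking task @p. The mask must be
 * released with scx_bpf_put_idle_cpumask() once done.
 *
 *	const struct cpumask *smt = scx_bpf_get_idle_smtmask();
 *	s32 cpu = bpf_cpumask_any_and_distribute(p->cpus_ptr, smt);
 *
 *	scx_bpf_put_idle_cpumask(smt);
 *	if (cpu < scx_bpf_nr_cpu_ids() &&
 *	    scx_bpf_test_and_clear_cpu_idle(cpu))
 *		return cpu;
 */
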
7507 /**
7508  * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
7509  * either the percpu, or SMT idle-tracking cpumask.
7510  * @idle_mask: &cpumask to use
7511  */
7512 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
7513 {
7514 	/*
7515 	 * Empty function body because we aren't actually acquiring or releasing
7516 	 * a reference to a global idle cpumask, which is read-only in the
7517 	 * caller and is never released. The acquire / release semantics here
7518 	 * are just used to make the cpumask a trusted pointer in the caller.
7519 	 */
7520 }
7521 
7522 /**
7523  * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
7524  * @cpu: cpu to test and clear idle for
7525  *
7526  * Returns %true if @cpu was idle and its idle state was successfully cleared.
7527  * %false otherwise.
7528  *
7529  * Unavailable if ops.update_idle() is implemented and
7530  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7531  */
7532 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
7533 {
7534 	if (!check_builtin_idle_enabled())
7535 		return false;
7536 
7537 	if (ops_cpu_valid(cpu, NULL))
7538 		return test_and_clear_cpu_idle(cpu);
7539 	else
7540 		return false;
7541 }
7542 
7543 /**
7544  * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
7545  * @cpus_allowed: Allowed cpumask
7546  * @flags: %SCX_PICK_IDLE_CPU_* flags
7547  *
7548  * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
7549  * number on success. -%EBUSY if no matching cpu was found.
7550  *
7551  * Idle CPU tracking may race against CPU scheduling state transitions. For
7552  * example, this function may return -%EBUSY as CPUs are transitioning into the
7553  * idle state. If the caller then assumes that there will be dispatch events on
7554  * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
7555  * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
7556  * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
7557  * event in the near future.
7558  *
7559  * Unavailable if ops.update_idle() is implemented and
7560  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7561  */
7562 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
7563 				      u64 flags)
7564 {
7565 	if (!check_builtin_idle_enabled())
7566 		return -EBUSY;
7567 
7568 	return scx_pick_idle_cpu(cpus_allowed, flags);
7569 }
7570 
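/*
 * Example (sketch, not part of the kernel source): a minimal
 * ops.select_cpu() that claims an idle CPU and dispatches directly to the
 * chosen CPU's local DSQ, falling back to the previous CPU.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *		if (cpu >= 0) {
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *		return prev_cpu;
 *	}
 */
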
7571 /**
7572  * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
7573  * @cpus_allowed: Allowed cpumask
7574  * @flags: %SCX_PICK_IDLE_CPU_* flags
7575  *
7576  * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
7577  * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
7578  * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
7579  * empty.
7580  *
7581  * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
7582  * set, this function can't tell which CPUs are idle and will always pick any
7583  * CPU.
7584  */
7585 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
7586 				     u64 flags)
7587 {
7588 	s32 cpu;
7589 
7590 	if (static_branch_likely(&scx_builtin_idle_enabled)) {
7591 		cpu = scx_pick_idle_cpu(cpus_allowed, flags);
7592 		if (cpu >= 0)
7593 			return cpu;
7594 	}
7595 
7596 	cpu = cpumask_any_distribute(cpus_allowed);
7597 	if (cpu < nr_cpu_ids)
7598 		return cpu;
7599 	else
7600 		return -EBUSY;
7601 }
7602 
7603 /**
7604  * scx_bpf_task_running - Is task currently running?
7605  * @p: task of interest
7606  */
7607 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7608 {
7609 	return task_rq(p)->curr == p;
7610 }
7611 
7612 /**
7613  * scx_bpf_task_cpu - CPU a task is currently associated with
7614  * @p: task of interest
7615  */
7616 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7617 {
7618 	return task_cpu(p);
7619 }
7620 
7621 /**
7622  * scx_bpf_cpu_rq - Fetch the rq of a CPU
7623  * @cpu: CPU of the rq
7624  */
7625 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7626 {
7627 	if (!ops_cpu_valid(cpu, NULL))
7628 		return NULL;
7629 
7630 	return cpu_rq(cpu);
7631 }
7632 
7633 /**
7634  * scx_bpf_task_cgroup - Return the sched cgroup of a task
7635  * @p: task of interest
7636  *
7637  * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7638  * from the scheduler's POV. SCX operations should use this function to
7639  * determine @p's current cgroup as, unlike following @p->cgroups,
7640  * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7641  * rq-locked operations. Can be called on the parameter tasks of rq-locked
7642  * operations. The restriction guarantees that @p's rq is locked by the caller.
7643  */
7644 #ifdef CONFIG_CGROUP_SCHED
7645 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7646 {
7647 	struct task_group *tg = p->sched_task_group;
7648 	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7649 
7650 	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7651 		goto out;
7652 
7653 	cgrp = tg_cgrp(tg);
7654 
7655 out:
7656 	cgroup_get(cgrp);
7657 	return cgrp;
7658 }
7659 #endif
7660 
7661 /**
7662  * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7663  * clock for the current CPU. The clock returned is in nanoseconds.
7664  *
7665  * It provides the following properties:
7666  *
7667  * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7668  *  to account for execution time and track tasks' runtime properties.
7669  *  Unfortunately, on some hardware platforms, bpf_ktime_get_ns() -- which
7670  *  eventually reads a hardware timestamp counter -- is neither performant nor
7671  *  scalable. scx_bpf_now() aims to provide a high-performance clock by
7672  *  using the rq clock in the scheduler core whenever possible.
7673  *
7674  * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7675  *  scheduler use cases, the required clock resolution is lower than the most
7676  *  accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7677  *  uses the rq clock in the scheduler core whenever it is valid. It considers
7678  *  that the rq clock is valid from the time the rq clock is updated
7679  *  (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7680  *
7681  * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7682  *  guarantees the clock never goes backward when compared on the same CPU.
7683  *  On the other hand, when comparing clocks across different CPUs, there is
7684  *  no such guarantee -- the clock can appear to go backward. Note that the
7685  *  clock is *non-decreasing* rather than strictly increasing: two
7686  *  scx_bpf_now() calls on the same CPU may return the same value while the
7687  *  rq clock remains valid.
7688  */
7689 __bpf_kfunc u64 scx_bpf_now(void)
7690 {
7691 	struct rq *rq;
7692 	u64 clock;
7693 
7694 	preempt_disable();
7695 
7696 	rq = this_rq();
7697 	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7698 		/*
7699 		 * If the rq clock is valid, use the cached rq clock.
7700 		 *
7701 		 * Note that scx_bpf_now() is re-entrant between a process
7702 		 * context and an interrupt context (e.g., timer interrupt).
7703 		 * However, we don't need to consider the race between them
7704 		 * However, we don't need to consider the race between them
7705 		 * because such a race is not observable by a caller.
7706 		clock = READ_ONCE(rq->scx.clock);
7707 	} else {
7708 		/*
7709 		 * Otherwise, return a fresh rq clock.
7710 		 *
7711 		 * The rq clock is updated outside of the rq lock.
7712 		 * In this case, keep the updated rq clock invalid so the next
7713 		 * kfunc call outside the rq lock gets a fresh rq clock.
7714 		 */
7715 		clock = sched_clock_cpu(cpu_of(rq));
7716 	}
7717 
7718 	preempt_enable();
7719 
7720 	return clock;
7721 }
7722 
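/*
 * Example (sketch, not part of the kernel source): account a task's
 * execution time in ops.stopping() using scx_bpf_now() deltas.
 * taskc->started_at would have been recorded with scx_bpf_now() in
 * ops.running(); both fields are hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_stopping, struct task_struct *p,
 *			    bool runnable)
 *	{
 *		taskc->runtime += scx_bpf_now() - taskc->started_at;
 *	}
 */
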
7723 __bpf_kfunc_end_defs();
7724 
7725 BTF_KFUNCS_START(scx_kfunc_ids_any)
7726 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7727 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7728 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7729 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7730 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7731 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7732 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7733 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7734 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7735 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7736 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7737 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7738 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7739 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7740 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7741 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7742 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7743 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7744 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7745 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7746 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7747 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7748 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7749 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7750 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7751 #ifdef CONFIG_CGROUP_SCHED
7752 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7753 #endif
7754 BTF_ID_FLAGS(func, scx_bpf_now)
7755 BTF_KFUNCS_END(scx_kfunc_ids_any)
7756 
7757 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7758 	.owner			= THIS_MODULE,
7759 	.set			= &scx_kfunc_ids_any,
7760 };
7761 
7762 static int __init scx_init(void)
7763 {
7764 	int ret;
7765 
7766 	/*
7767 	 * kfunc registration can't be done from init_sched_ext_class() as
7768 	 * register_btf_kfunc_id_set() needs most of the system to be up.
7769 	 *
7770 	 * Some kfuncs are context-sensitive and can only be called from
7771 	 * specific SCX ops. They are grouped into BTF sets accordingly.
7772 	 * Unfortunately, BPF currently doesn't have a way of enforcing such
7773 	 * restrictions. Eventually, the verifier should be able to enforce
7774 	 * them. For now, register them the same and make each kfunc explicitly
7775 	 * check using scx_kf_allowed().
7776 	 */
7777 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7778 					     &scx_kfunc_set_select_cpu)) ||
7779 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7780 					     &scx_kfunc_set_enqueue_dispatch)) ||
7781 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7782 					     &scx_kfunc_set_dispatch)) ||
7783 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7784 					     &scx_kfunc_set_cpu_release)) ||
7785 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7786 					     &scx_kfunc_set_unlocked)) ||
7787 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7788 					     &scx_kfunc_set_unlocked)) ||
7789 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7790 					     &scx_kfunc_set_any)) ||
7791 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7792 					     &scx_kfunc_set_any)) ||
7793 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7794 					     &scx_kfunc_set_any))) {
7795 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7796 		return ret;
7797 	}
7798 
7799 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7800 	if (ret) {
7801 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7802 		return ret;
7803 	}
7804 
7805 	ret = register_pm_notifier(&scx_pm_notifier);
7806 	if (ret) {
7807 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7808 		return ret;
7809 	}
7810 
7811 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7812 	if (!scx_kset) {
7813 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7814 		return -ENOMEM;
7815 	}
7816 
7817 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7818 	if (ret < 0) {
7819 		pr_err("sched_ext: Failed to add global attributes\n");
7820 		return ret;
7821 	}
7822 
7823 	return 0;
7824 }
7825 __initcall(scx_init);
7826