xref: /linux/kernel/sched/ext.c (revision 566ab427f827b0256d3e8ce0235d088e6a9c28bd)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
10 
11 enum scx_consts {
12 	SCX_SLICE_BYPASS		= SCX_SLICE_DFL / 4,
13 	SCX_DSP_DFL_MAX_BATCH		= 32,
14 	SCX_DSP_MAX_LOOPS		= 32,
15 	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,
16 
17 	SCX_EXIT_BT_LEN			= 64,
18 	SCX_EXIT_MSG_LEN		= 1024,
19 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
20 
21 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
22 };
23 
24 enum scx_exit_kind {
25 	SCX_EXIT_NONE,
26 	SCX_EXIT_DONE,
27 
28 	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
29 	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
30 	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
31 	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
32 
33 	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
34 	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
35 	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
36 };
37 
38 /*
39  * An exit code can be specified when exiting with scx_bpf_exit() or
40  * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
41  * respectively. The codes are 64bit of the format:
42  *
43  *   Bits: [63  ..  48 47   ..  32 31 .. 0]
44  *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
45  *
46  *   SYS ACT: System-defined exit actions
47  *   SYS RSN: System-defined exit reasons
48  *   USR    : User-defined exit codes and reasons
49  *
50  * Using the above, users may communicate intention and context by ORing system
51  * actions and/or system reasons with a user-defined exit code.
52  */
53 enum scx_exit_code {
54 	/* Reasons */
55 	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,
56 
57 	/* Actions */
58 	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
59 };
60 
61 /*
62  * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
63  * being disabled.
64  */
65 struct scx_exit_info {
66 	/* %SCX_EXIT_* - broad category of the exit reason */
67 	enum scx_exit_kind	kind;
68 
69 	/* exit code if gracefully exiting */
70 	s64			exit_code;
71 
72 	/* textual representation of the above */
73 	const char		*reason;
74 
75 	/* backtrace if exiting due to an error */
76 	unsigned long		*bt;
77 	u32			bt_len;
78 
79 	/* informational message */
80 	char			*msg;
81 
82 	/* debug dump */
83 	char			*dump;
84 };
85 
86 /* sched_ext_ops.flags */
87 enum scx_ops_flags {
88 	/*
89 	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
90 	 */
91 	SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
92 
93 	/*
94 	 * By default, if there are no other task to run on the CPU, ext core
95 	 * keeps running the current task even after its slice expires. If this
96 	 * flag is specified, such tasks are passed to ops.enqueue() with
97 	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
98 	 */
99 	SCX_OPS_ENQ_LAST	= 1LLU << 1,
100 
101 	/*
102 	 * An exiting task may schedule after PF_EXITING is set. In such cases,
103 	 * bpf_task_from_pid() may not be able to find the task and if the BPF
104 	 * scheduler depends on pid lookup for dispatching, the task will be
105 	 * lost leading to various issues including RCU grace period stalls.
106 	 *
107 	 * To mask this problem, by default, unhashed tasks are automatically
108 	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
109 	 * depend on pid lookups and wants to handle these tasks directly, the
110 	 * following flag can be used.
111 	 */
112 	SCX_OPS_ENQ_EXITING	= 1LLU << 2,
113 
114 	/*
115 	 * If set, only tasks with policy set to SCHED_EXT are attached to
116 	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
117 	 */
118 	SCX_OPS_SWITCH_PARTIAL	= 1LLU << 3,
119 
120 	/*
121 	 * CPU cgroup support flags
122 	 */
123 	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* cpu.weight */
124 
125 	SCX_OPS_ALL_FLAGS	= SCX_OPS_KEEP_BUILTIN_IDLE |
126 				  SCX_OPS_ENQ_LAST |
127 				  SCX_OPS_ENQ_EXITING |
128 				  SCX_OPS_SWITCH_PARTIAL |
129 				  SCX_OPS_HAS_CGROUP_WEIGHT,
130 };
131 
132 /* argument container for ops.init_task() */
133 struct scx_init_task_args {
134 	/*
135 	 * Set if ops.init_task() is being invoked on the fork path, as opposed
136 	 * to the scheduler transition path.
137 	 */
138 	bool			fork;
139 #ifdef CONFIG_EXT_GROUP_SCHED
140 	/* the cgroup the task is joining */
141 	struct cgroup		*cgroup;
142 #endif
143 };
144 
145 /* argument container for ops.exit_task() */
146 struct scx_exit_task_args {
147 	/* Whether the task exited before running on sched_ext. */
148 	bool cancelled;
149 };
150 
151 /* argument container for ops->cgroup_init() */
152 struct scx_cgroup_init_args {
153 	/* the weight of the cgroup [1..10000] */
154 	u32			weight;
155 };
156 
157 enum scx_cpu_preempt_reason {
158 	/* next task is being scheduled by &sched_class_rt */
159 	SCX_CPU_PREEMPT_RT,
160 	/* next task is being scheduled by &sched_class_dl */
161 	SCX_CPU_PREEMPT_DL,
162 	/* next task is being scheduled by &sched_class_stop */
163 	SCX_CPU_PREEMPT_STOP,
164 	/* unknown reason for SCX being preempted */
165 	SCX_CPU_PREEMPT_UNKNOWN,
166 };
167 
168 /*
169  * Argument container for ops->cpu_acquire(). Currently empty, but may be
170  * expanded in the future.
171  */
172 struct scx_cpu_acquire_args {};
173 
174 /* argument container for ops->cpu_release() */
175 struct scx_cpu_release_args {
176 	/* the reason the CPU was preempted */
177 	enum scx_cpu_preempt_reason reason;
178 
179 	/* the task that's going to be scheduled on the CPU */
180 	struct task_struct	*task;
181 };
182 
183 /*
184  * Informational context provided to dump operations.
185  */
186 struct scx_dump_ctx {
187 	enum scx_exit_kind	kind;
188 	s64			exit_code;
189 	const char		*reason;
190 	u64			at_ns;
191 	u64			at_jiffies;
192 };
193 
194 /**
195  * struct sched_ext_ops - Operation table for BPF scheduler implementation
196  *
197  * Userland can implement an arbitrary scheduling policy by implementing and
198  * loading operations in this table.
199  */
200 struct sched_ext_ops {
201 	/**
202 	 * select_cpu - Pick the target CPU for a task which is being woken up
203 	 * @p: task being woken up
204 	 * @prev_cpu: the cpu @p was on before sleeping
205 	 * @wake_flags: SCX_WAKE_*
206 	 *
207 	 * Decision made here isn't final. @p may be moved to any CPU while it
208 	 * is getting dispatched for execution later. However, as @p is not on
209 	 * the rq at this point, getting the eventual execution CPU right here
210 	 * saves a small bit of overhead down the line.
211 	 *
212 	 * If an idle CPU is returned, the CPU is kicked and will try to
213 	 * dispatch. While an explicit custom mechanism can be added,
214 	 * select_cpu() serves as the default way to wake up idle CPUs.
215 	 *
216 	 * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
217 	 * is dispatched, the ops.enqueue() callback will be skipped. Finally,
218 	 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
219 	 * local DSQ of whatever CPU is returned by this callback.
220 	 */
221 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
222 
223 	/**
224 	 * enqueue - Enqueue a task on the BPF scheduler
225 	 * @p: task being enqueued
226 	 * @enq_flags: %SCX_ENQ_*
227 	 *
228 	 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
229 	 * or enqueue on the BPF scheduler. If not directly dispatched, the bpf
230 	 * scheduler owns @p and if it fails to dispatch @p, the task will
231 	 * stall.
232 	 *
233 	 * If @p was dispatched from ops.select_cpu(), this callback is
234 	 * skipped.
235 	 */
236 	void (*enqueue)(struct task_struct *p, u64 enq_flags);
237 
238 	/**
239 	 * dequeue - Remove a task from the BPF scheduler
240 	 * @p: task being dequeued
241 	 * @deq_flags: %SCX_DEQ_*
242 	 *
243 	 * Remove @p from the BPF scheduler. This is usually called to isolate
244 	 * the task while updating its scheduling properties (e.g. priority).
245 	 *
246 	 * The ext core keeps track of whether the BPF side owns a given task or
247 	 * not and can gracefully ignore spurious dispatches from BPF side,
248 	 * which makes it safe to not implement this method. However, depending
249 	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
250 	 * scheduling position not being updated across a priority change.
251 	 */
252 	void (*dequeue)(struct task_struct *p, u64 deq_flags);
253 
254 	/**
255 	 * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs
256 	 * @cpu: CPU to dispatch tasks for
257 	 * @prev: previous task being switched out
258 	 *
259 	 * Called when a CPU's local dsq is empty. The operation should dispatch
260 	 * one or more tasks from the BPF scheduler into the DSQs using
261 	 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
262 	 * scx_bpf_consume().
263 	 *
264 	 * The maximum number of times scx_bpf_dispatch() can be called without
265 	 * an intervening scx_bpf_consume() is specified by
266 	 * ops.dispatch_max_batch. See the comments on top of the two functions
267 	 * for more details.
268 	 *
269 	 * When not %NULL, @prev is an SCX task with its slice depleted. If
270 	 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
271 	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
272 	 * ops.dispatch() returns. To keep executing @prev, return without
273 	 * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST.
274 	 */
275 	void (*dispatch)(s32 cpu, struct task_struct *prev);
276 
277 	/**
278 	 * tick - Periodic tick
279 	 * @p: task running currently
280 	 *
281 	 * This operation is called every 1/HZ seconds on CPUs which are
282 	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
283 	 * immediate dispatch cycle on the CPU.
284 	 */
285 	void (*tick)(struct task_struct *p);
286 
287 	/**
288 	 * runnable - A task is becoming runnable on its associated CPU
289 	 * @p: task becoming runnable
290 	 * @enq_flags: %SCX_ENQ_*
291 	 *
292 	 * This and the following three functions can be used to track a task's
293 	 * execution state transitions. A task becomes ->runnable() on a CPU,
294 	 * and then goes through one or more ->running() and ->stopping() pairs
295 	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
296 	 * done running on the CPU.
297 	 *
298 	 * @p is becoming runnable on the CPU because it's
299 	 *
300 	 * - waking up (%SCX_ENQ_WAKEUP)
301 	 * - being moved from another CPU
302 	 * - being restored after temporarily taken off the queue for an
303 	 *   attribute change.
304 	 *
305 	 * This and ->enqueue() are related but not coupled. This operation
306 	 * notifies @p's state transition and may not be followed by ->enqueue()
307 	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
308 	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
309 	 * task may be ->enqueue()'d without being preceded by this operation
310 	 * e.g. after exhausting its slice.
311 	 */
312 	void (*runnable)(struct task_struct *p, u64 enq_flags);
313 
314 	/**
315 	 * running - A task is starting to run on its associated CPU
316 	 * @p: task starting to run
317 	 *
318 	 * See ->runnable() for explanation on the task state notifiers.
319 	 */
320 	void (*running)(struct task_struct *p);
321 
322 	/**
323 	 * stopping - A task is stopping execution
324 	 * @p: task stopping to run
325 	 * @runnable: is task @p still runnable?
326 	 *
327 	 * See ->runnable() for explanation on the task state notifiers. If
328 	 * !@runnable, ->quiescent() will be invoked after this operation
329 	 * returns.
330 	 */
331 	void (*stopping)(struct task_struct *p, bool runnable);
332 
333 	/**
334 	 * quiescent - A task is becoming not runnable on its associated CPU
335 	 * @p: task becoming not runnable
336 	 * @deq_flags: %SCX_DEQ_*
337 	 *
338 	 * See ->runnable() for explanation on the task state notifiers.
339 	 *
340 	 * @p is becoming quiescent on the CPU because it's
341 	 *
342 	 * - sleeping (%SCX_DEQ_SLEEP)
343 	 * - being moved to another CPU
344 	 * - being temporarily taken off the queue for an attribute change
345 	 *   (%SCX_DEQ_SAVE)
346 	 *
347 	 * This and ->dequeue() are related but not coupled. This operation
348 	 * notifies @p's state transition and may not be preceded by ->dequeue()
349 	 * e.g. when @p is being dispatched to a remote CPU.
350 	 */
351 	void (*quiescent)(struct task_struct *p, u64 deq_flags);
352 
353 	/**
354 	 * yield - Yield CPU
355 	 * @from: yielding task
356 	 * @to: optional yield target task
357 	 *
358 	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
359 	 * The BPF scheduler should ensure that other available tasks are
360 	 * dispatched before the yielding task. Return value is ignored in this
361 	 * case.
362 	 *
363 	 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
364 	 * scheduler can implement the request, return %true; otherwise, %false.
365 	 */
366 	bool (*yield)(struct task_struct *from, struct task_struct *to);
367 
368 	/**
369 	 * core_sched_before - Task ordering for core-sched
370 	 * @a: task A
371 	 * @b: task B
372 	 *
373 	 * Used by core-sched to determine the ordering between two tasks. See
374 	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
375 	 * core-sched.
376 	 *
377 	 * Both @a and @b are runnable and may or may not currently be queued on
378 	 * the BPF scheduler. Should return %true if @a should run before @b.
379 	 * %false if there's no required ordering or @b should run before @a.
380 	 *
381 	 * If not specified, the default is ordering them according to when they
382 	 * became runnable.
383 	 */
384 	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
385 
386 	/**
387 	 * set_weight - Set task weight
388 	 * @p: task to set weight for
389 	 * @weight: new weight [1..10000]
390 	 *
391 	 * Update @p's weight to @weight.
392 	 */
393 	void (*set_weight)(struct task_struct *p, u32 weight);
394 
395 	/**
396 	 * set_cpumask - Set CPU affinity
397 	 * @p: task to set CPU affinity for
398 	 * @cpumask: cpumask of cpus that @p can run on
399 	 *
400 	 * Update @p's CPU affinity to @cpumask.
401 	 */
402 	void (*set_cpumask)(struct task_struct *p,
403 			    const struct cpumask *cpumask);
404 
405 	/**
406 	 * update_idle - Update the idle state of a CPU
407 	 * @cpu: CPU to udpate the idle state for
408 	 * @idle: whether entering or exiting the idle state
409 	 *
410 	 * This operation is called when @rq's CPU goes or leaves the idle
411 	 * state. By default, implementing this operation disables the built-in
412 	 * idle CPU tracking and the following helpers become unavailable:
413 	 *
414 	 * - scx_bpf_select_cpu_dfl()
415 	 * - scx_bpf_test_and_clear_cpu_idle()
416 	 * - scx_bpf_pick_idle_cpu()
417 	 *
418 	 * The user also must implement ops.select_cpu() as the default
419 	 * implementation relies on scx_bpf_select_cpu_dfl().
420 	 *
421 	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
422 	 * tracking.
423 	 */
424 	void (*update_idle)(s32 cpu, bool idle);
425 
426 	/**
427 	 * cpu_acquire - A CPU is becoming available to the BPF scheduler
428 	 * @cpu: The CPU being acquired by the BPF scheduler.
429 	 * @args: Acquire arguments, see the struct definition.
430 	 *
431 	 * A CPU that was previously released from the BPF scheduler is now once
432 	 * again under its control.
433 	 */
434 	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
435 
436 	/**
437 	 * cpu_release - A CPU is taken away from the BPF scheduler
438 	 * @cpu: The CPU being released by the BPF scheduler.
439 	 * @args: Release arguments, see the struct definition.
440 	 *
441 	 * The specified CPU is no longer under the control of the BPF
442 	 * scheduler. This could be because it was preempted by a higher
443 	 * priority sched_class, though there may be other reasons as well. The
444 	 * caller should consult @args->reason to determine the cause.
445 	 */
446 	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
447 
448 	/**
449 	 * init_task - Initialize a task to run in a BPF scheduler
450 	 * @p: task to initialize for BPF scheduling
451 	 * @args: init arguments, see the struct definition
452 	 *
453 	 * Either we're loading a BPF scheduler or a new task is being forked.
454 	 * Initialize @p for BPF scheduling. This operation may block and can
455 	 * be used for allocations, and is called exactly once for a task.
456 	 *
457 	 * Return 0 for success, -errno for failure. An error return while
458 	 * loading will abort loading of the BPF scheduler. During a fork, it
459 	 * will abort that specific fork.
460 	 */
461 	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
462 
463 	/**
464 	 * exit_task - Exit a previously-running task from the system
465 	 * @p: task to exit
466 	 *
467 	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
468 	 * necessary cleanup for @p.
469 	 */
470 	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
471 
472 	/**
473 	 * enable - Enable BPF scheduling for a task
474 	 * @p: task to enable BPF scheduling for
475 	 *
476 	 * Enable @p for BPF scheduling. enable() is called on @p any time it
477 	 * enters SCX, and is always paired with a matching disable().
478 	 */
479 	void (*enable)(struct task_struct *p);
480 
481 	/**
482 	 * disable - Disable BPF scheduling for a task
483 	 * @p: task to disable BPF scheduling for
484 	 *
485 	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
486 	 * Disable BPF scheduling for @p. A disable() call is always matched
487 	 * with a prior enable() call.
488 	 */
489 	void (*disable)(struct task_struct *p);
490 
491 	/**
492 	 * dump - Dump BPF scheduler state on error
493 	 * @ctx: debug dump context
494 	 *
495 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
496 	 */
497 	void (*dump)(struct scx_dump_ctx *ctx);
498 
499 	/**
500 	 * dump_cpu - Dump BPF scheduler state for a CPU on error
501 	 * @ctx: debug dump context
502 	 * @cpu: CPU to generate debug dump for
503 	 * @idle: @cpu is currently idle without any runnable tasks
504 	 *
505 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
506 	 * @cpu. If @idle is %true and this operation doesn't produce any
507 	 * output, @cpu is skipped for dump.
508 	 */
509 	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
510 
511 	/**
512 	 * dump_task - Dump BPF scheduler state for a runnable task on error
513 	 * @ctx: debug dump context
514 	 * @p: runnable task to generate debug dump for
515 	 *
516 	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
517 	 * @p.
518 	 */
519 	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
520 
521 #ifdef CONFIG_EXT_GROUP_SCHED
522 	/**
523 	 * cgroup_init - Initialize a cgroup
524 	 * @cgrp: cgroup being initialized
525 	 * @args: init arguments, see the struct definition
526 	 *
527 	 * Either the BPF scheduler is being loaded or @cgrp created, initialize
528 	 * @cgrp for sched_ext. This operation may block.
529 	 *
530 	 * Return 0 for success, -errno for failure. An error return while
531 	 * loading will abort loading of the BPF scheduler. During cgroup
532 	 * creation, it will abort the specific cgroup creation.
533 	 */
534 	s32 (*cgroup_init)(struct cgroup *cgrp,
535 			   struct scx_cgroup_init_args *args);
536 
537 	/**
538 	 * cgroup_exit - Exit a cgroup
539 	 * @cgrp: cgroup being exited
540 	 *
541 	 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
542 	 * @cgrp for sched_ext. This operation my block.
543 	 */
544 	void (*cgroup_exit)(struct cgroup *cgrp);
545 
546 	/**
547 	 * cgroup_prep_move - Prepare a task to be moved to a different cgroup
548 	 * @p: task being moved
549 	 * @from: cgroup @p is being moved from
550 	 * @to: cgroup @p is being moved to
551 	 *
552 	 * Prepare @p for move from cgroup @from to @to. This operation may
553 	 * block and can be used for allocations.
554 	 *
555 	 * Return 0 for success, -errno for failure. An error return aborts the
556 	 * migration.
557 	 */
558 	s32 (*cgroup_prep_move)(struct task_struct *p,
559 				struct cgroup *from, struct cgroup *to);
560 
561 	/**
562 	 * cgroup_move - Commit cgroup move
563 	 * @p: task being moved
564 	 * @from: cgroup @p is being moved from
565 	 * @to: cgroup @p is being moved to
566 	 *
567 	 * Commit the move. @p is dequeued during this operation.
568 	 */
569 	void (*cgroup_move)(struct task_struct *p,
570 			    struct cgroup *from, struct cgroup *to);
571 
572 	/**
573 	 * cgroup_cancel_move - Cancel cgroup move
574 	 * @p: task whose cgroup move is being canceled
575 	 * @from: cgroup @p was being moved from
576 	 * @to: cgroup @p was being moved to
577 	 *
578 	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
579 	 * Undo the preparation.
580 	 */
581 	void (*cgroup_cancel_move)(struct task_struct *p,
582 				   struct cgroup *from, struct cgroup *to);
583 
584 	/**
585 	 * cgroup_set_weight - A cgroup's weight is being changed
586 	 * @cgrp: cgroup whose weight is being updated
587 	 * @weight: new weight [1..10000]
588 	 *
589 	 * Update @tg's weight to @weight.
590 	 */
591 	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
592 #endif	/* CONFIG_CGROUPS */
593 
594 	/*
595 	 * All online ops must come before ops.cpu_online().
596 	 */
597 
598 	/**
599 	 * cpu_online - A CPU became online
600 	 * @cpu: CPU which just came up
601 	 *
602 	 * @cpu just came online. @cpu will not call ops.enqueue() or
603 	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
604 	 */
605 	void (*cpu_online)(s32 cpu);
606 
607 	/**
608 	 * cpu_offline - A CPU is going offline
609 	 * @cpu: CPU which is going offline
610 	 *
611 	 * @cpu is going offline. @cpu will not call ops.enqueue() or
612 	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
613 	 */
614 	void (*cpu_offline)(s32 cpu);
615 
616 	/*
617 	 * All CPU hotplug ops must come before ops.init().
618 	 */
619 
620 	/**
621 	 * init - Initialize the BPF scheduler
622 	 */
623 	s32 (*init)(void);
624 
625 	/**
626 	 * exit - Clean up after the BPF scheduler
627 	 * @info: Exit info
628 	 *
629 	 * ops.exit() is also called on ops.init() failure, which is a bit
630 	 * unusual. This is to allow rich reporting through @info on how
631 	 * ops.init() failed.
632 	 */
633 	void (*exit)(struct scx_exit_info *info);
634 
635 	/**
636 	 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
637 	 */
638 	u32 dispatch_max_batch;
639 
640 	/**
641 	 * flags - %SCX_OPS_* flags
642 	 */
643 	u64 flags;
644 
645 	/**
646 	 * timeout_ms - The maximum amount of time, in milliseconds, that a
647 	 * runnable task should be able to wait before being scheduled. The
648 	 * maximum timeout may not exceed the default timeout of 30 seconds.
649 	 *
650 	 * Defaults to the maximum allowed timeout value of 30 seconds.
651 	 */
652 	u32 timeout_ms;
653 
654 	/**
655 	 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
656 	 * value of 32768 is used.
657 	 */
658 	u32 exit_dump_len;
659 
660 	/**
661 	 * hotplug_seq - A sequence number that may be set by the scheduler to
662 	 * detect when a hotplug event has occurred during the loading process.
663 	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
664 	 * load if the sequence number does not match @scx_hotplug_seq on the
665 	 * enable path.
666 	 */
667 	u64 hotplug_seq;
668 
669 	/**
670 	 * name - BPF scheduler's name
671 	 *
672 	 * Must be a non-zero valid BPF object name including only isalnum(),
673 	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
674 	 * BPF scheduler is enabled.
675 	 */
676 	char name[SCX_OPS_NAME_LEN];
677 };
678 
679 enum scx_opi {
680 	SCX_OPI_BEGIN			= 0,
681 	SCX_OPI_NORMAL_BEGIN		= 0,
682 	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
683 	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
684 	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
685 	SCX_OPI_END			= SCX_OP_IDX(init),
686 };
687 
688 enum scx_wake_flags {
689 	/* expose select WF_* flags as enums */
690 	SCX_WAKE_FORK		= WF_FORK,
691 	SCX_WAKE_TTWU		= WF_TTWU,
692 	SCX_WAKE_SYNC		= WF_SYNC,
693 };
694 
695 enum scx_enq_flags {
696 	/* expose select ENQUEUE_* flags as enums */
697 	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
698 	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
699 	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,
700 
701 	/* high 32bits are SCX specific */
702 
703 	/*
704 	 * Set the following to trigger preemption when calling
705 	 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
706 	 * current task is cleared to zero and the CPU is kicked into the
707 	 * scheduling path. Implies %SCX_ENQ_HEAD.
708 	 */
709 	SCX_ENQ_PREEMPT		= 1LLU << 32,
710 
711 	/*
712 	 * The task being enqueued was previously enqueued on the current CPU's
713 	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
714 	 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
715 	 * invoked in a ->cpu_release() callback, and the task is again
716 	 * dispatched back to %SCX_LOCAL_DSQ by this current ->enqueue(), the
717 	 * task will not be scheduled on the CPU until at least the next invocation
718 	 * of the ->cpu_acquire() callback.
719 	 */
720 	SCX_ENQ_REENQ		= 1LLU << 40,
721 
722 	/*
723 	 * The task being enqueued is the only task available for the cpu. By
724 	 * default, ext core keeps executing such tasks but when
725 	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
726 	 * %SCX_ENQ_LAST flag set.
727 	 *
728 	 * The BPF scheduler is responsible for triggering a follow-up
729 	 * scheduling event. Otherwise, Execution may stall.
730 	 */
731 	SCX_ENQ_LAST		= 1LLU << 41,
732 
733 	/* high 8 bits are internal */
734 	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,
735 
736 	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
737 	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
738 };
739 
740 enum scx_deq_flags {
741 	/* expose select DEQUEUE_* flags as enums */
742 	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,
743 
744 	/* high 32bits are SCX specific */
745 
746 	/*
747 	 * The generic core-sched layer decided to execute the task even though
748 	 * it hasn't been dispatched yet. Dequeue from the BPF side.
749 	 */
750 	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
751 };
752 
753 enum scx_pick_idle_cpu_flags {
754 	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
755 };
756 
757 enum scx_kick_flags {
758 	/*
759 	 * Kick the target CPU if idle. Guarantees that the target CPU goes
760 	 * through at least one full scheduling cycle before going idle. If the
761 	 * target CPU can be determined to be currently not idle and going to go
762 	 * through a scheduling cycle before going idle, noop.
763 	 */
764 	SCX_KICK_IDLE		= 1LLU << 0,
765 
766 	/*
767 	 * Preempt the current task and execute the dispatch path. If the
768 	 * current task of the target CPU is an SCX task, its ->scx.slice is
769 	 * cleared to zero before the scheduling path is invoked so that the
770 	 * task expires and the dispatch path is invoked.
771 	 */
772 	SCX_KICK_PREEMPT	= 1LLU << 1,
773 
774 	/*
775 	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
776 	 * return after the target CPU finishes picking the next task.
777 	 */
778 	SCX_KICK_WAIT		= 1LLU << 2,
779 };
780 
781 enum scx_tg_flags {
782 	SCX_TG_ONLINE		= 1U << 0,
783 	SCX_TG_INITED		= 1U << 1,
784 };
785 
786 enum scx_ops_enable_state {
787 	SCX_OPS_ENABLING,
788 	SCX_OPS_ENABLED,
789 	SCX_OPS_DISABLING,
790 	SCX_OPS_DISABLED,
791 };
792 
793 static const char *scx_ops_enable_state_str[] = {
794 	[SCX_OPS_ENABLING]	= "enabling",
795 	[SCX_OPS_ENABLED]	= "enabled",
796 	[SCX_OPS_DISABLING]	= "disabling",
797 	[SCX_OPS_DISABLED]	= "disabled",
798 };
799 
800 /*
801  * sched_ext_entity->ops_state
802  *
803  * Used to track the task ownership between the SCX core and the BPF scheduler.
804  * State transitions look as follows:
805  *
806  * NONE -> QUEUEING -> QUEUED -> DISPATCHING
807  *   ^              |                 |
808  *   |              v                 v
809  *   \-------------------------------/
810  *
811  * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
812  * sites for explanations on the conditions being waited upon and why they are
813  * safe. Transitions out of them into NONE or QUEUED must store_release and the
814  * waiters should load_acquire.
815  *
816  * Tracking scx_ops_state enables sched_ext core to reliably determine whether
817  * any given task can be dispatched by the BPF scheduler at all times and thus
818  * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
819  * to try to dispatch any task anytime regardless of its state as the SCX core
820  * can safely reject invalid dispatches.
821  */
822 enum scx_ops_state {
823 	SCX_OPSS_NONE,		/* owned by the SCX core */
824 	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
825 	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
826 	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */
827 
828 	/*
829 	 * QSEQ brands each QUEUED instance so that, when dispatch races
830 	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
831 	 * on the task being dispatched.
832 	 *
833 	 * As some 32bit archs can't do 64bit store_release/load_acquire,
834 	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
835 	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
836 	 * and runs with IRQ disabled. 30 bits should be sufficient.
837 	 */
838 	SCX_OPSS_QSEQ_SHIFT	= 2,
839 };
840 
841 /* Use macros to ensure that the type is unsigned long for the masks */
842 #define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
843 #define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
844 
845 /*
846  * During exit, a task may schedule after losing its PIDs. When disabling the
847  * BPF scheduler, we need to be able to iterate tasks in every state to
848  * guarantee system safety. Maintain a dedicated task list which contains every
849  * task between its fork and eventual free.
850  */
851 static DEFINE_SPINLOCK(scx_tasks_lock);
852 static LIST_HEAD(scx_tasks);
853 
854 /* ops enable/disable */
855 static struct kthread_worker *scx_ops_helper;
856 static DEFINE_MUTEX(scx_ops_enable_mutex);
857 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
858 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
859 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
860 static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
861 static bool scx_ops_init_task_enabled;
862 static bool scx_switching_all;
863 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
864 
865 static struct sched_ext_ops scx_ops;
866 static bool scx_warned_zero_slice;
867 
868 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
869 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
870 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
871 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
872 
873 static struct static_key_false scx_has_op[SCX_OPI_END] =
874 	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
875 
876 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
877 static struct scx_exit_info *scx_exit_info;
878 
879 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
880 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
881 
882 /*
883  * A monotically increasing sequence number that is incremented every time a
884  * scheduler is enabled. This can be used by to check if any custom sched_ext
885  * scheduler has ever been used in the system.
886  */
887 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
888 
889 /*
890  * The maximum amount of time in jiffies that a task may be runnable without
891  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
892  * scx_ops_error().
893  */
894 static unsigned long scx_watchdog_timeout;
895 
896 /*
897  * The last time the delayed work was run. This delayed work relies on
898  * ksoftirqd being able to run to service timer interrupts, so it's possible
899  * that this work itself could get wedged. To account for this, we check that
900  * it's not stalled in the timer tick, and trigger an error if it is.
901  */
902 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
903 
904 static struct delayed_work scx_watchdog_work;
905 
906 /* idle tracking */
907 #ifdef CONFIG_SMP
908 #ifdef CONFIG_CPUMASK_OFFSTACK
909 #define CL_ALIGNED_IF_ONSTACK
910 #else
911 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
912 #endif
913 
914 static struct {
915 	cpumask_var_t cpu;
916 	cpumask_var_t smt;
917 } idle_masks CL_ALIGNED_IF_ONSTACK;
918 
919 #endif	/* CONFIG_SMP */
920 
921 /* for %SCX_KICK_WAIT */
922 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
923 
924 /*
925  * Direct dispatch marker.
926  *
927  * Non-NULL values are used for direct dispatch from enqueue path. A valid
928  * pointer points to the task currently being enqueued. An ERR_PTR value is used
929  * to indicate that direct dispatch has already happened.
930  */
931 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
932 
933 /*
934  * Dispatch queues.
935  *
936  * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
937  * to avoid live-locking in bypass mode where all tasks are dispatched to
938  * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
939  * sufficient, it can be further split.
940  */
941 static struct scx_dispatch_q **global_dsqs;
942 
943 static const struct rhashtable_params dsq_hash_params = {
944 	.key_len		= 8,
945 	.key_offset		= offsetof(struct scx_dispatch_q, id),
946 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
947 };
948 
949 static struct rhashtable dsq_hash;
950 static LLIST_HEAD(dsqs_to_free);
951 
952 /* dispatch buf */
953 struct scx_dsp_buf_ent {
954 	struct task_struct	*task;
955 	unsigned long		qseq;
956 	u64			dsq_id;
957 	u64			enq_flags;
958 };
959 
960 static u32 scx_dsp_max_batch;
961 
962 struct scx_dsp_ctx {
963 	struct rq		*rq;
964 	u32			cursor;
965 	u32			nr_tasks;
966 	struct scx_dsp_buf_ent	buf[];
967 };
968 
969 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
970 
971 /* string formatting from BPF */
972 struct scx_bstr_buf {
973 	u64			data[MAX_BPRINTF_VARARGS];
974 	char			line[SCX_EXIT_MSG_LEN];
975 };
976 
977 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
978 static struct scx_bstr_buf scx_exit_bstr_buf;
979 
980 /* ops debug dump */
981 struct scx_dump_data {
982 	s32			cpu;
983 	bool			first;
984 	s32			cursor;
985 	struct seq_buf		*s;
986 	const char		*prefix;
987 	struct scx_bstr_buf	buf;
988 };
989 
990 static struct scx_dump_data scx_dump_data = {
991 	.cpu			= -1,
992 };
993 
994 /* /sys/kernel/sched_ext interface */
995 static struct kset *scx_kset;
996 static struct kobject *scx_root_kobj;
997 
998 #define CREATE_TRACE_POINTS
999 #include <trace/events/sched_ext.h>
1000 
1001 static void process_ddsp_deferred_locals(struct rq *rq);
1002 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1003 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1004 					     s64 exit_code,
1005 					     const char *fmt, ...);
1006 
1007 #define scx_ops_error_kind(err, fmt, args...)					\
1008 	scx_ops_exit_kind((err), 0, fmt, ##args)
1009 
1010 #define scx_ops_exit(code, fmt, args...)					\
1011 	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1012 
1013 #define scx_ops_error(fmt, args...)						\
1014 	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1015 
1016 #define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1017 
1018 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1019 {
1020 	if (time_after(at, now))
1021 		return jiffies_to_msecs(at - now);
1022 	else
1023 		return -(long)jiffies_to_msecs(now - at);
1024 }
1025 
1026 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1027 static u32 higher_bits(u32 flags)
1028 {
1029 	return ~((1 << fls(flags)) - 1);
1030 }
1031 
1032 /* return the mask with only the highest bit set */
1033 static u32 highest_bit(u32 flags)
1034 {
1035 	int bit = fls(flags);
1036 	return ((u64)1 << bit) >> 1;
1037 }
1038 
1039 static bool u32_before(u32 a, u32 b)
1040 {
1041 	return (s32)(a - b) < 0;
1042 }
1043 
1044 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1045 {
1046 	return global_dsqs[cpu_to_node(task_cpu(p))];
1047 }
1048 
1049 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1050 {
1051 	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1052 }
1053 
1054 /*
1055  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1056  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1057  * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1058  * whether it's running from an allowed context.
1059  *
1060  * @mask is constant, always inline to cull the mask calculations.
1061  */
1062 static __always_inline void scx_kf_allow(u32 mask)
1063 {
1064 	/* nesting is allowed only in increasing scx_kf_mask order */
1065 	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1066 		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1067 		  current->scx.kf_mask, mask);
1068 	current->scx.kf_mask |= mask;
1069 	barrier();
1070 }
1071 
1072 static void scx_kf_disallow(u32 mask)
1073 {
1074 	barrier();
1075 	current->scx.kf_mask &= ~mask;
1076 }
1077 
1078 #define SCX_CALL_OP(mask, op, args...)						\
1079 do {										\
1080 	if (mask) {								\
1081 		scx_kf_allow(mask);						\
1082 		scx_ops.op(args);						\
1083 		scx_kf_disallow(mask);						\
1084 	} else {								\
1085 		scx_ops.op(args);						\
1086 	}									\
1087 } while (0)
1088 
1089 #define SCX_CALL_OP_RET(mask, op, args...)					\
1090 ({										\
1091 	__typeof__(scx_ops.op(args)) __ret;					\
1092 	if (mask) {								\
1093 		scx_kf_allow(mask);						\
1094 		__ret = scx_ops.op(args);					\
1095 		scx_kf_disallow(mask);						\
1096 	} else {								\
1097 		__ret = scx_ops.op(args);					\
1098 	}									\
1099 	__ret;									\
1100 })
1101 
1102 /*
1103  * Some kfuncs are allowed only on the tasks that are subjects of the
1104  * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1105  * restrictions, the following SCX_CALL_OP_*() variants should be used when
1106  * invoking scx_ops operations that take task arguments. These can only be used
1107  * for non-nesting operations due to the way the tasks are tracked.
1108  *
1109  * kfuncs which can only operate on such tasks can in turn use
1110  * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1111  * the specific task.
1112  */
1113 #define SCX_CALL_OP_TASK(mask, op, task, args...)				\
1114 do {										\
1115 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1116 	current->scx.kf_tasks[0] = task;					\
1117 	SCX_CALL_OP(mask, op, task, ##args);					\
1118 	current->scx.kf_tasks[0] = NULL;					\
1119 } while (0)
1120 
1121 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...)				\
1122 ({										\
1123 	__typeof__(scx_ops.op(task, ##args)) __ret;				\
1124 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1125 	current->scx.kf_tasks[0] = task;					\
1126 	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);			\
1127 	current->scx.kf_tasks[0] = NULL;					\
1128 	__ret;									\
1129 })
1130 
1131 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)			\
1132 ({										\
1133 	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
1134 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1135 	current->scx.kf_tasks[0] = task0;					\
1136 	current->scx.kf_tasks[1] = task1;					\
1137 	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);		\
1138 	current->scx.kf_tasks[0] = NULL;					\
1139 	current->scx.kf_tasks[1] = NULL;					\
1140 	__ret;									\
1141 })
1142 
1143 /* @mask is constant, always inline to cull unnecessary branches */
1144 static __always_inline bool scx_kf_allowed(u32 mask)
1145 {
1146 	if (unlikely(!(current->scx.kf_mask & mask))) {
1147 		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1148 			      mask, current->scx.kf_mask);
1149 		return false;
1150 	}
1151 
1152 	/*
1153 	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1154 	 * DISPATCH must not be called if we're running DEQUEUE which is nested
1155 	 * inside ops.dispatch(). We don't need to check boundaries for any
1156 	 * blocking kfuncs as the verifier ensures they're only called from
1157 	 * sleepable progs.
1158 	 */
1159 	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1160 		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1161 		scx_ops_error("cpu_release kfunc called from a nested operation");
1162 		return false;
1163 	}
1164 
1165 	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1166 		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1167 		scx_ops_error("dispatch kfunc called from a nested operation");
1168 		return false;
1169 	}
1170 
1171 	return true;
1172 }
1173 
1174 /* see SCX_CALL_OP_TASK() */
1175 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1176 							struct task_struct *p)
1177 {
1178 	if (!scx_kf_allowed(mask))
1179 		return false;
1180 
1181 	if (unlikely((p != current->scx.kf_tasks[0] &&
1182 		      p != current->scx.kf_tasks[1]))) {
1183 		scx_ops_error("called on a task not being operated on");
1184 		return false;
1185 	}
1186 
1187 	return true;
1188 }
1189 
1190 static bool scx_kf_allowed_if_unlocked(void)
1191 {
1192 	return !current->scx.kf_mask;
1193 }
1194 
1195 /**
1196  * nldsq_next_task - Iterate to the next task in a non-local DSQ
1197  * @dsq: user dsq being interated
1198  * @cur: current position, %NULL to start iteration
1199  * @rev: walk backwards
1200  *
1201  * Returns %NULL when iteration is finished.
1202  */
1203 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1204 					   struct task_struct *cur, bool rev)
1205 {
1206 	struct list_head *list_node;
1207 	struct scx_dsq_list_node *dsq_lnode;
1208 
1209 	lockdep_assert_held(&dsq->lock);
1210 
1211 	if (cur)
1212 		list_node = &cur->scx.dsq_list.node;
1213 	else
1214 		list_node = &dsq->list;
1215 
1216 	/* find the next task, need to skip BPF iteration cursors */
1217 	do {
1218 		if (rev)
1219 			list_node = list_node->prev;
1220 		else
1221 			list_node = list_node->next;
1222 
1223 		if (list_node == &dsq->list)
1224 			return NULL;
1225 
1226 		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1227 					 node);
1228 	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1229 
1230 	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1231 }
1232 
1233 #define nldsq_for_each_task(p, dsq)						\
1234 	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
1235 	     (p) = nldsq_next_task((dsq), (p), false))
1236 
1237 
1238 /*
1239  * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1240  * dispatch order. BPF-visible iterator is opaque and larger to allow future
1241  * changes without breaking backward compatibility. Can be used with
1242  * bpf_for_each(). See bpf_iter_scx_dsq_*().
1243  */
1244 enum scx_dsq_iter_flags {
1245 	/* iterate in the reverse dispatch order */
1246 	SCX_DSQ_ITER_REV		= 1U << 16,
1247 
1248 	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
1249 	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
1250 
1251 	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
1252 	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
1253 					  __SCX_DSQ_ITER_HAS_SLICE |
1254 					  __SCX_DSQ_ITER_HAS_VTIME,
1255 };
1256 
1257 struct bpf_iter_scx_dsq_kern {
1258 	struct scx_dsq_list_node	cursor;
1259 	struct scx_dispatch_q		*dsq;
1260 	u64				slice;
1261 	u64				vtime;
1262 } __attribute__((aligned(8)));
1263 
1264 struct bpf_iter_scx_dsq {
1265 	u64				__opaque[6];
1266 } __attribute__((aligned(8)));
1267 
1268 
1269 /*
1270  * SCX task iterator.
1271  */
1272 struct scx_task_iter {
1273 	struct sched_ext_entity		cursor;
1274 	struct task_struct		*locked;
1275 	struct rq			*rq;
1276 	struct rq_flags			rf;
1277 };
1278 
1279 /**
1280  * scx_task_iter_init - Initialize a task iterator
1281  * @iter: iterator to init
1282  *
1283  * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
1284  * @iter must eventually be exited with scx_task_iter_exit().
1285  *
1286  * scx_tasks_lock may be released between this and the first next() call or
1287  * between any two next() calls. If scx_tasks_lock is released between two
1288  * next() calls, the caller is responsible for ensuring that the task being
1289  * iterated remains accessible either through RCU read lock or obtaining a
1290  * reference count.
1291  *
1292  * All tasks which existed when the iteration started are guaranteed to be
1293  * visited as long as they still exist.
1294  */
1295 static void scx_task_iter_init(struct scx_task_iter *iter)
1296 {
1297 	lockdep_assert_held(&scx_tasks_lock);
1298 
1299 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1300 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1301 
1302 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1303 	list_add(&iter->cursor.tasks_node, &scx_tasks);
1304 	iter->locked = NULL;
1305 }
1306 
1307 /**
1308  * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
1309  * @iter: iterator to unlock rq for
1310  *
1311  * If @iter is in the middle of a locked iteration, it may be locking the rq of
1312  * the task currently being visited. Unlock the rq if so. This function can be
1313  * safely called anytime during an iteration.
1314  *
1315  * Returns %true if the rq @iter was locking is unlocked. %false if @iter was
1316  * not locking an rq.
1317  */
1318 static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1319 {
1320 	if (iter->locked) {
1321 		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1322 		iter->locked = NULL;
1323 		return true;
1324 	} else {
1325 		return false;
1326 	}
1327 }
1328 
1329 /**
1330  * scx_task_iter_exit - Exit a task iterator
1331  * @iter: iterator to exit
1332  *
1333  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
1334  * If the iterator holds a task's rq lock, that rq lock is released. See
1335  * scx_task_iter_init() for details.
1336  */
1337 static void scx_task_iter_exit(struct scx_task_iter *iter)
1338 {
1339 	lockdep_assert_held(&scx_tasks_lock);
1340 
1341 	scx_task_iter_rq_unlock(iter);
1342 	list_del_init(&iter->cursor.tasks_node);
1343 }
1344 
1345 /**
1346  * scx_task_iter_next - Next task
1347  * @iter: iterator to walk
1348  *
1349  * Visit the next task. See scx_task_iter_init() for details.
1350  */
1351 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1352 {
1353 	struct list_head *cursor = &iter->cursor.tasks_node;
1354 	struct sched_ext_entity *pos;
1355 
1356 	lockdep_assert_held(&scx_tasks_lock);
1357 
1358 	list_for_each_entry(pos, cursor, tasks_node) {
1359 		if (&pos->tasks_node == &scx_tasks)
1360 			return NULL;
1361 		if (!(pos->flags & SCX_TASK_CURSOR)) {
1362 			list_move(cursor, &pos->tasks_node);
1363 			return container_of(pos, struct task_struct, scx);
1364 		}
1365 	}
1366 
1367 	/* can't happen, should always terminate at scx_tasks above */
1368 	BUG();
1369 }
1370 
1371 /**
1372  * scx_task_iter_next_locked - Next non-idle task with its rq locked
1373  * @iter: iterator to walk
1374  * @include_dead: Whether we should include dead tasks in the iteration
1375  *
1376  * Visit the non-idle task with its rq lock held. Allows callers to specify
1377  * whether they would like to filter out dead tasks. See scx_task_iter_init()
1378  * for details.
1379  */
1380 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1381 {
1382 	struct task_struct *p;
1383 
1384 	scx_task_iter_rq_unlock(iter);
1385 
1386 	while ((p = scx_task_iter_next(iter))) {
1387 		/*
1388 		 * scx_task_iter is used to prepare and move tasks into SCX
1389 		 * while loading the BPF scheduler and vice-versa while
1390 		 * unloading. The init_tasks ("swappers") should be excluded
1391 		 * from the iteration because:
1392 		 *
1393 		 * - It's unsafe to use __setschduler_prio() on an init_task to
1394 		 *   determine the sched_class to use as it won't preserve its
1395 		 *   idle_sched_class.
1396 		 *
1397 		 * - ops.init/exit_task() can easily be confused if called with
1398 		 *   init_tasks as they, e.g., share PID 0.
1399 		 *
1400 		 * As init_tasks are never scheduled through SCX, they can be
1401 		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1402 		 * doesn't work here:
1403 		 *
1404 		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1405 		 *   yet been onlined.
1406 		 *
1407 		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1408 		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
1409 		 *
1410 		 * Test for idle_sched_class as only init_tasks are on it.
1411 		 */
1412 		if (p->sched_class != &idle_sched_class)
1413 			break;
1414 	}
1415 	if (!p)
1416 		return NULL;
1417 
1418 	iter->rq = task_rq_lock(p, &iter->rf);
1419 	iter->locked = p;
1420 
1421 	return p;
1422 }
1423 
1424 static enum scx_ops_enable_state scx_ops_enable_state(void)
1425 {
1426 	return atomic_read(&scx_ops_enable_state_var);
1427 }
1428 
1429 static enum scx_ops_enable_state
1430 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1431 {
1432 	return atomic_xchg(&scx_ops_enable_state_var, to);
1433 }
1434 
1435 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1436 					enum scx_ops_enable_state from)
1437 {
1438 	int from_v = from;
1439 
1440 	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1441 }
1442 
1443 static bool scx_rq_bypassing(struct rq *rq)
1444 {
1445 	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1446 }
1447 
1448 /**
1449  * wait_ops_state - Busy-wait the specified ops state to end
1450  * @p: target task
1451  * @opss: state to wait the end of
1452  *
1453  * Busy-wait for @p to transition out of @opss. This can only be used when the
1454  * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
1455  * has load_acquire semantics to ensure that the caller can see the updates made
1456  * in the enqueueing and dispatching paths.
1457  */
1458 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1459 {
1460 	do {
1461 		cpu_relax();
1462 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1463 }
1464 
1465 /**
1466  * ops_cpu_valid - Verify a cpu number
1467  * @cpu: cpu number which came from a BPF ops
1468  * @where: extra information reported on error
1469  *
1470  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1471  * Verify that it is in range and one of the possible cpus. If invalid, trigger
1472  * an ops error.
1473  */
1474 static bool ops_cpu_valid(s32 cpu, const char *where)
1475 {
1476 	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1477 		return true;
1478 	} else {
1479 		scx_ops_error("invalid CPU %d%s%s", cpu,
1480 			      where ? " " : "", where ?: "");
1481 		return false;
1482 	}
1483 }
1484 
1485 /**
1486  * ops_sanitize_err - Sanitize a -errno value
1487  * @ops_name: operation to blame on failure
1488  * @err: -errno value to sanitize
1489  *
1490  * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1491  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1492  * cause misbehaviors. For an example, a large negative return from
1493  * ops.init_task() triggers an oops when passed up the call chain because the
1494  * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1495  * handled as a pointer.
1496  */
1497 static int ops_sanitize_err(const char *ops_name, s32 err)
1498 {
1499 	if (err < 0 && err >= -MAX_ERRNO)
1500 		return err;
1501 
1502 	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1503 	return -EPROTO;
1504 }
1505 
1506 static void run_deferred(struct rq *rq)
1507 {
1508 	process_ddsp_deferred_locals(rq);
1509 }
1510 
1511 #ifdef CONFIG_SMP
1512 static void deferred_bal_cb_workfn(struct rq *rq)
1513 {
1514 	run_deferred(rq);
1515 }
1516 #endif
1517 
1518 static void deferred_irq_workfn(struct irq_work *irq_work)
1519 {
1520 	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1521 
1522 	raw_spin_rq_lock(rq);
1523 	run_deferred(rq);
1524 	raw_spin_rq_unlock(rq);
1525 }
1526 
1527 /**
1528  * schedule_deferred - Schedule execution of deferred actions on an rq
1529  * @rq: target rq
1530  *
1531  * Schedule execution of deferred actions on @rq. Must be called with @rq
1532  * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1533  * can unlock @rq to e.g. migrate tasks to other rqs.
1534  */
1535 static void schedule_deferred(struct rq *rq)
1536 {
1537 	lockdep_assert_rq_held(rq);
1538 
1539 #ifdef CONFIG_SMP
1540 	/*
1541 	 * If in the middle of waking up a task, task_woken_scx() will be called
1542 	 * afterwards which will then run the deferred actions, no need to
1543 	 * schedule anything.
1544 	 */
1545 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1546 		return;
1547 
1548 	/*
1549 	 * If in balance, the balance callbacks will be called before rq lock is
1550 	 * released. Schedule one.
1551 	 */
1552 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1553 		queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1554 				       deferred_bal_cb_workfn);
1555 		return;
1556 	}
1557 #endif
1558 	/*
1559 	 * No scheduler hooks available. Queue an irq work. They are executed on
1560 	 * IRQ re-enable which may take a bit longer than the scheduler hooks.
1561 	 * The above WAKEUP and BALANCE paths should cover most of the cases and
1562 	 * the time to IRQ re-enable shouldn't be long.
1563 	 */
1564 	irq_work_queue(&rq->scx.deferred_irq_work);
1565 }
1566 
1567 /**
1568  * touch_core_sched - Update timestamp used for core-sched task ordering
1569  * @rq: rq to read clock from, must be locked
1570  * @p: task to update the timestamp for
1571  *
1572  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1573  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1574  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1575  * exhaustion).
1576  */
1577 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1578 {
1579 	lockdep_assert_rq_held(rq);
1580 
1581 #ifdef CONFIG_SCHED_CORE
1582 	/*
1583 	 * It's okay to update the timestamp spuriously. Use
1584 	 * sched_core_disabled() which is cheaper than enabled().
1585 	 *
1586 	 * As this is used to determine ordering between tasks of sibling CPUs,
1587 	 * it may be better to use per-core dispatch sequence instead.
1588 	 */
1589 	if (!sched_core_disabled())
1590 		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1591 #endif
1592 }
1593 
1594 /**
1595  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1596  * @rq: rq to read clock from, must be locked
1597  * @p: task being dispatched
1598  *
1599  * If the BPF scheduler implements custom core-sched ordering via
1600  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1601  * ordering within each local DSQ. This function is called from dispatch paths
1602  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1603  */
1604 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1605 {
1606 	lockdep_assert_rq_held(rq);
1607 
1608 #ifdef CONFIG_SCHED_CORE
1609 	if (SCX_HAS_OP(core_sched_before))
1610 		touch_core_sched(rq, p);
1611 #endif
1612 }
1613 
1614 static void update_curr_scx(struct rq *rq)
1615 {
1616 	struct task_struct *curr = rq->curr;
1617 	s64 delta_exec;
1618 
1619 	delta_exec = update_curr_common(rq);
1620 	if (unlikely(delta_exec <= 0))
1621 		return;
1622 
1623 	if (curr->scx.slice != SCX_SLICE_INF) {
1624 		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1625 		if (!curr->scx.slice)
1626 			touch_core_sched(rq, curr);
1627 	}
1628 }
1629 
1630 static bool scx_dsq_priq_less(struct rb_node *node_a,
1631 			      const struct rb_node *node_b)
1632 {
1633 	const struct task_struct *a =
1634 		container_of(node_a, struct task_struct, scx.dsq_priq);
1635 	const struct task_struct *b =
1636 		container_of(node_b, struct task_struct, scx.dsq_priq);
1637 
1638 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1639 }
1640 
1641 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1642 {
1643 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1644 	WRITE_ONCE(dsq->nr, dsq->nr + delta);
1645 }
1646 
1647 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1648 			     u64 enq_flags)
1649 {
1650 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
1651 
1652 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1653 	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1654 		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
1655 
1656 	if (!is_local) {
1657 		raw_spin_lock(&dsq->lock);
1658 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1659 			scx_ops_error("attempting to dispatch to a destroyed dsq");
1660 			/* fall back to the global dsq */
1661 			raw_spin_unlock(&dsq->lock);
1662 			dsq = find_global_dsq(p);
1663 			raw_spin_lock(&dsq->lock);
1664 		}
1665 	}
1666 
1667 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1668 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1669 		/*
1670 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1671 		 * their FIFO queues. To avoid confusion and accidentally
1672 		 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1673 		 * disallow any internal DSQ from doing vtime ordering of
1674 		 * tasks.
1675 		 */
1676 		scx_ops_error("cannot use vtime ordering for built-in DSQs");
1677 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1678 	}
1679 
1680 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1681 		struct rb_node *rbp;
1682 
1683 		/*
1684 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1685 		 * linked to both the rbtree and list on PRIQs, this can only be
1686 		 * tested easily when adding the first task.
1687 		 */
1688 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1689 			     nldsq_next_task(dsq, NULL, false)))
1690 			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1691 				      dsq->id);
1692 
1693 		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1694 		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1695 
1696 		/*
1697 		 * Find the previous task and insert after it on the list so
1698 		 * that @dsq->list is vtime ordered.
1699 		 */
1700 		rbp = rb_prev(&p->scx.dsq_priq);
1701 		if (rbp) {
1702 			struct task_struct *prev =
1703 				container_of(rbp, struct task_struct,
1704 					     scx.dsq_priq);
1705 			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1706 		} else {
1707 			list_add(&p->scx.dsq_list.node, &dsq->list);
1708 		}
1709 	} else {
1710 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1711 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1712 			scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1713 				      dsq->id);
1714 
1715 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1716 			list_add(&p->scx.dsq_list.node, &dsq->list);
1717 		else
1718 			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1719 	}
1720 
1721 	/* seq records the order tasks are queued, used by BPF DSQ iterator */
1722 	dsq->seq++;
1723 	p->scx.dsq_seq = dsq->seq;
1724 
1725 	dsq_mod_nr(dsq, 1);
1726 	p->scx.dsq = dsq;
1727 
1728 	/*
1729 	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1730 	 * direct dispatch path, but we clear them here because the direct
1731 	 * dispatch verdict may be overridden on the enqueue path during e.g.
1732 	 * bypass.
1733 	 */
1734 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1735 	p->scx.ddsp_enq_flags = 0;
1736 
1737 	/*
1738 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1739 	 * match waiters' load_acquire.
1740 	 */
1741 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1742 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1743 
1744 	if (is_local) {
1745 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1746 		bool preempt = false;
1747 
1748 		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1749 		    rq->curr->sched_class == &ext_sched_class) {
1750 			rq->curr->scx.slice = 0;
1751 			preempt = true;
1752 		}
1753 
1754 		if (preempt || sched_class_above(&ext_sched_class,
1755 						 rq->curr->sched_class))
1756 			resched_curr(rq);
1757 	} else {
1758 		raw_spin_unlock(&dsq->lock);
1759 	}
1760 }
1761 
1762 static void task_unlink_from_dsq(struct task_struct *p,
1763 				 struct scx_dispatch_q *dsq)
1764 {
1765 	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1766 
1767 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1768 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
1769 		RB_CLEAR_NODE(&p->scx.dsq_priq);
1770 		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1771 	}
1772 
1773 	list_del_init(&p->scx.dsq_list.node);
1774 	dsq_mod_nr(dsq, -1);
1775 }
1776 
1777 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1778 {
1779 	struct scx_dispatch_q *dsq = p->scx.dsq;
1780 	bool is_local = dsq == &rq->scx.local_dsq;
1781 
1782 	if (!dsq) {
1783 		/*
1784 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1785 		 * Unlinking is all that's needed to cancel.
1786 		 */
1787 		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1788 			list_del_init(&p->scx.dsq_list.node);
1789 
1790 		/*
1791 		 * When dispatching directly from the BPF scheduler to a local
1792 		 * DSQ, the task isn't associated with any DSQ but
1793 		 * @p->scx.holding_cpu may be set under the protection of
1794 		 * %SCX_OPSS_DISPATCHING.
1795 		 */
1796 		if (p->scx.holding_cpu >= 0)
1797 			p->scx.holding_cpu = -1;
1798 
1799 		return;
1800 	}
1801 
1802 	if (!is_local)
1803 		raw_spin_lock(&dsq->lock);
1804 
1805 	/*
1806 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1807 	 * change underneath us.
1808 	*/
1809 	if (p->scx.holding_cpu < 0) {
1810 		/* @p must still be on @dsq, dequeue */
1811 		task_unlink_from_dsq(p, dsq);
1812 	} else {
1813 		/*
1814 		 * We're racing against dispatch_to_local_dsq() which already
1815 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1816 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1817 		 * the race.
1818 		 */
1819 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1820 		p->scx.holding_cpu = -1;
1821 	}
1822 	p->scx.dsq = NULL;
1823 
1824 	if (!is_local)
1825 		raw_spin_unlock(&dsq->lock);
1826 }
1827 
1828 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1829 						    struct task_struct *p)
1830 {
1831 	struct scx_dispatch_q *dsq;
1832 
1833 	if (dsq_id == SCX_DSQ_LOCAL)
1834 		return &rq->scx.local_dsq;
1835 
1836 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1837 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1838 
1839 		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1840 			return find_global_dsq(p);
1841 
1842 		return &cpu_rq(cpu)->scx.local_dsq;
1843 	}
1844 
1845 	if (dsq_id == SCX_DSQ_GLOBAL)
1846 		dsq = find_global_dsq(p);
1847 	else
1848 		dsq = find_user_dsq(dsq_id);
1849 
1850 	if (unlikely(!dsq)) {
1851 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1852 			      dsq_id, p->comm, p->pid);
1853 		return find_global_dsq(p);
1854 	}
1855 
1856 	return dsq;
1857 }
1858 
1859 static void mark_direct_dispatch(struct task_struct *ddsp_task,
1860 				 struct task_struct *p, u64 dsq_id,
1861 				 u64 enq_flags)
1862 {
1863 	/*
1864 	 * Mark that dispatch already happened from ops.select_cpu() or
1865 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1866 	 * which can never match a valid task pointer.
1867 	 */
1868 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1869 
1870 	/* @p must match the task on the enqueue path */
1871 	if (unlikely(p != ddsp_task)) {
1872 		if (IS_ERR(ddsp_task))
1873 			scx_ops_error("%s[%d] already direct-dispatched",
1874 				      p->comm, p->pid);
1875 		else
1876 			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1877 				      ddsp_task->comm, ddsp_task->pid,
1878 				      p->comm, p->pid);
1879 		return;
1880 	}
1881 
1882 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1883 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1884 
1885 	p->scx.ddsp_dsq_id = dsq_id;
1886 	p->scx.ddsp_enq_flags = enq_flags;
1887 }
1888 
1889 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1890 {
1891 	struct rq *rq = task_rq(p);
1892 	struct scx_dispatch_q *dsq =
1893 		find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
1894 
1895 	touch_core_sched_dispatch(rq, p);
1896 
1897 	p->scx.ddsp_enq_flags |= enq_flags;
1898 
1899 	/*
1900 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
1901 	 * double lock a remote rq and enqueue to its local DSQ. For
1902 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1903 	 * the enqueue so that it's executed when @rq can be unlocked.
1904 	 */
1905 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1906 		unsigned long opss;
1907 
1908 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1909 
1910 		switch (opss & SCX_OPSS_STATE_MASK) {
1911 		case SCX_OPSS_NONE:
1912 			break;
1913 		case SCX_OPSS_QUEUEING:
1914 			/*
1915 			 * As @p was never passed to the BPF side, _release is
1916 			 * not strictly necessary. Still do it for consistency.
1917 			 */
1918 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1919 			break;
1920 		default:
1921 			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1922 				  p->comm, p->pid, opss);
1923 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1924 			break;
1925 		}
1926 
1927 		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1928 		list_add_tail(&p->scx.dsq_list.node,
1929 			      &rq->scx.ddsp_deferred_locals);
1930 		schedule_deferred(rq);
1931 		return;
1932 	}
1933 
1934 	dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1935 }
1936 
1937 static bool scx_rq_online(struct rq *rq)
1938 {
1939 	/*
1940 	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1941 	 * the online state as seen from the BPF scheduler. cpu_active() test
1942 	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1943 	 * stay set until the current scheduling operation is complete even if
1944 	 * we aren't locking @rq.
1945 	 */
1946 	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1947 }
1948 
1949 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1950 			    int sticky_cpu)
1951 {
1952 	bool bypassing = scx_rq_bypassing(rq);
1953 	struct task_struct **ddsp_taskp;
1954 	unsigned long qseq;
1955 
1956 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1957 
1958 	/* rq migration */
1959 	if (sticky_cpu == cpu_of(rq))
1960 		goto local_norefill;
1961 
1962 	/*
1963 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1964 	 * is offline and are just running the hotplug path. Don't bother the
1965 	 * BPF scheduler.
1966 	 */
1967 	if (!scx_rq_online(rq))
1968 		goto local;
1969 
1970 	if (bypassing)
1971 		goto global;
1972 
1973 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1974 		goto direct;
1975 
1976 	/* see %SCX_OPS_ENQ_EXITING */
1977 	if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
1978 	    unlikely(p->flags & PF_EXITING))
1979 		goto local;
1980 
1981 	if (!SCX_HAS_OP(enqueue))
1982 		goto global;
1983 
1984 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
1985 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1986 
1987 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1988 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1989 
1990 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1991 	WARN_ON_ONCE(*ddsp_taskp);
1992 	*ddsp_taskp = p;
1993 
1994 	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
1995 
1996 	*ddsp_taskp = NULL;
1997 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1998 		goto direct;
1999 
2000 	/*
2001 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2002 	 * dequeue may be waiting. The store_release matches their load_acquire.
2003 	 */
2004 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2005 	return;
2006 
2007 direct:
2008 	direct_dispatch(p, enq_flags);
2009 	return;
2010 
2011 local:
2012 	/*
2013 	 * For task-ordering, slice refill must be treated as implying the end
2014 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2015 	 * higher priority it becomes from scx_prio_less()'s POV.
2016 	 */
2017 	touch_core_sched(rq, p);
2018 	p->scx.slice = SCX_SLICE_DFL;
2019 local_norefill:
2020 	dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2021 	return;
2022 
2023 global:
2024 	touch_core_sched(rq, p);	/* see the comment in local: */
2025 	p->scx.slice = bypassing ? SCX_SLICE_BYPASS : SCX_SLICE_DFL;
2026 	dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2027 }
2028 
2029 static bool task_runnable(const struct task_struct *p)
2030 {
2031 	return !list_empty(&p->scx.runnable_node);
2032 }
2033 
2034 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2035 {
2036 	lockdep_assert_rq_held(rq);
2037 
2038 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2039 		p->scx.runnable_at = jiffies;
2040 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2041 	}
2042 
2043 	/*
2044 	 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2045 	 * appened to the runnable_list.
2046 	 */
2047 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2048 }
2049 
2050 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2051 {
2052 	list_del_init(&p->scx.runnable_node);
2053 	if (reset_runnable_at)
2054 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2055 }
2056 
2057 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2058 {
2059 	int sticky_cpu = p->scx.sticky_cpu;
2060 
2061 	if (enq_flags & ENQUEUE_WAKEUP)
2062 		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2063 
2064 	enq_flags |= rq->scx.extra_enq_flags;
2065 
2066 	if (sticky_cpu >= 0)
2067 		p->scx.sticky_cpu = -1;
2068 
2069 	/*
2070 	 * Restoring a running task will be immediately followed by
2071 	 * set_next_task_scx() which expects the task to not be on the BPF
2072 	 * scheduler as tasks can only start running through local DSQs. Force
2073 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2074 	 */
2075 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2076 		sticky_cpu = cpu_of(rq);
2077 
2078 	if (p->scx.flags & SCX_TASK_QUEUED) {
2079 		WARN_ON_ONCE(!task_runnable(p));
2080 		goto out;
2081 	}
2082 
2083 	set_task_runnable(rq, p);
2084 	p->scx.flags |= SCX_TASK_QUEUED;
2085 	rq->scx.nr_running++;
2086 	add_nr_running(rq, 1);
2087 
2088 	if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2089 		SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2090 
2091 	if (enq_flags & SCX_ENQ_WAKEUP)
2092 		touch_core_sched(rq, p);
2093 
2094 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2095 out:
2096 	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2097 }
2098 
2099 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2100 {
2101 	unsigned long opss;
2102 
2103 	/* dequeue is always temporary, don't reset runnable_at */
2104 	clr_task_runnable(p, false);
2105 
2106 	/* acquire ensures that we see the preceding updates on QUEUED */
2107 	opss = atomic_long_read_acquire(&p->scx.ops_state);
2108 
2109 	switch (opss & SCX_OPSS_STATE_MASK) {
2110 	case SCX_OPSS_NONE:
2111 		break;
2112 	case SCX_OPSS_QUEUEING:
2113 		/*
2114 		 * QUEUEING is started and finished while holding @p's rq lock.
2115 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2116 		 */
2117 		BUG();
2118 	case SCX_OPSS_QUEUED:
2119 		if (SCX_HAS_OP(dequeue))
2120 			SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2121 
2122 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2123 					    SCX_OPSS_NONE))
2124 			break;
2125 		fallthrough;
2126 	case SCX_OPSS_DISPATCHING:
2127 		/*
2128 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
2129 		 * wait for the transfer to complete so that @p doesn't get
2130 		 * added to its DSQ after dequeueing is complete.
2131 		 *
2132 		 * As we're waiting on DISPATCHING with the rq locked, the
2133 		 * dispatching side shouldn't try to lock the rq while
2134 		 * DISPATCHING is set. See dispatch_to_local_dsq().
2135 		 *
2136 		 * DISPATCHING shouldn't have qseq set and control can reach
2137 		 * here with NONE @opss from the above QUEUED case block.
2138 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2139 		 */
2140 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
2141 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2142 		break;
2143 	}
2144 }
2145 
2146 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2147 {
2148 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2149 		WARN_ON_ONCE(task_runnable(p));
2150 		return true;
2151 	}
2152 
2153 	ops_dequeue(p, deq_flags);
2154 
2155 	/*
2156 	 * A currently running task which is going off @rq first gets dequeued
2157 	 * and then stops running. As we want running <-> stopping transitions
2158 	 * to be contained within runnable <-> quiescent transitions, trigger
2159 	 * ->stopping() early here instead of in put_prev_task_scx().
2160 	 *
2161 	 * @p may go through multiple stopping <-> running transitions between
2162 	 * here and put_prev_task_scx() if task attribute changes occur while
2163 	 * balance_scx() leaves @rq unlocked. However, they don't contain any
2164 	 * information meaningful to the BPF scheduler and can be suppressed by
2165 	 * skipping the callbacks if the task is !QUEUED.
2166 	 */
2167 	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2168 		update_curr_scx(rq);
2169 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2170 	}
2171 
2172 	if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2173 		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2174 
2175 	if (deq_flags & SCX_DEQ_SLEEP)
2176 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2177 	else
2178 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2179 
2180 	p->scx.flags &= ~SCX_TASK_QUEUED;
2181 	rq->scx.nr_running--;
2182 	sub_nr_running(rq, 1);
2183 
2184 	dispatch_dequeue(rq, p);
2185 	return true;
2186 }
2187 
2188 static void yield_task_scx(struct rq *rq)
2189 {
2190 	struct task_struct *p = rq->curr;
2191 
2192 	if (SCX_HAS_OP(yield))
2193 		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2194 	else
2195 		p->scx.slice = 0;
2196 }
2197 
2198 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2199 {
2200 	struct task_struct *from = rq->curr;
2201 
2202 	if (SCX_HAS_OP(yield))
2203 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2204 	else
2205 		return false;
2206 }
2207 
2208 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2209 					 struct scx_dispatch_q *src_dsq,
2210 					 struct rq *dst_rq)
2211 {
2212 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2213 
2214 	/* @dsq is locked and @p is on @dst_rq */
2215 	lockdep_assert_held(&src_dsq->lock);
2216 	lockdep_assert_rq_held(dst_rq);
2217 
2218 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2219 
2220 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2221 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2222 	else
2223 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2224 
2225 	dsq_mod_nr(dst_dsq, 1);
2226 	p->scx.dsq = dst_dsq;
2227 }
2228 
2229 #ifdef CONFIG_SMP
2230 /**
2231  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2232  * @p: task to move
2233  * @enq_flags: %SCX_ENQ_*
2234  * @src_rq: rq to move the task from, locked on entry, released on return
2235  * @dst_rq: rq to move the task into, locked on return
2236  *
2237  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2238  */
2239 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2240 					  struct rq *src_rq, struct rq *dst_rq)
2241 {
2242 	lockdep_assert_rq_held(src_rq);
2243 
2244 	/* the following marks @p MIGRATING which excludes dequeue */
2245 	deactivate_task(src_rq, p, 0);
2246 	set_task_cpu(p, cpu_of(dst_rq));
2247 	p->scx.sticky_cpu = cpu_of(dst_rq);
2248 
2249 	raw_spin_rq_unlock(src_rq);
2250 	raw_spin_rq_lock(dst_rq);
2251 
2252 	/*
2253 	 * We want to pass scx-specific enq_flags but activate_task() will
2254 	 * truncate the upper 32 bit. As we own @rq, we can pass them through
2255 	 * @rq->scx.extra_enq_flags instead.
2256 	 */
2257 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2258 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2259 	dst_rq->scx.extra_enq_flags = enq_flags;
2260 	activate_task(dst_rq, p, 0);
2261 	dst_rq->scx.extra_enq_flags = 0;
2262 }
2263 
2264 /*
2265  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2266  * differences:
2267  *
2268  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2269  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2270  *   this CPU?".
2271  *
2272  *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2273  *   must be allowed to finish on the CPU that it's currently on regardless of
2274  *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2275  *   BPF scheduler shouldn't attempt to migrate a task which has migration
2276  *   disabled.
2277  *
2278  * - The BPF scheduler is bypassed while the rq is offline and we can always say
2279  *   no to the BPF scheduler initiated migrations while offline.
2280  */
2281 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2282 				      bool trigger_error)
2283 {
2284 	int cpu = cpu_of(rq);
2285 
2286 	/*
2287 	 * We don't require the BPF scheduler to avoid dispatching to offline
2288 	 * CPUs mostly for convenience but also because CPUs can go offline
2289 	 * between scx_bpf_dispatch() calls and here. Trigger error iff the
2290 	 * picked CPU is outside the allowed mask.
2291 	 */
2292 	if (!task_allowed_on_cpu(p, cpu)) {
2293 		if (trigger_error)
2294 			scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
2295 				      cpu_of(rq), p->comm, p->pid);
2296 		return false;
2297 	}
2298 
2299 	if (unlikely(is_migration_disabled(p)))
2300 		return false;
2301 
2302 	if (!scx_rq_online(rq))
2303 		return false;
2304 
2305 	return true;
2306 }
2307 
2308 /**
2309  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2310  * @p: target task
2311  * @dsq: locked DSQ @p is currently on
2312  * @src_rq: rq @p is currently on, stable with @dsq locked
2313  *
2314  * Called with @dsq locked but no rq's locked. We want to move @p to a different
2315  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2316  * required when transferring into a local DSQ. Even when transferring into a
2317  * non-local DSQ, it's better to use the same mechanism to protect against
2318  * dequeues and maintain the invariant that @p->scx.dsq can only change while
2319  * @src_rq is locked, which e.g. scx_dump_task() depends on.
2320  *
2321  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2322  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2323  * this may race with dequeue, which can't drop the rq lock or fail, do a little
2324  * dancing from our side.
2325  *
2326  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2327  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2328  * would be cleared to -1. While other cpus may have updated it to different
2329  * values afterwards, as this operation can't be preempted or recurse, the
2330  * holding_cpu can never become this CPU again before we're done. Thus, we can
2331  * tell whether we lost to dequeue by testing whether the holding_cpu still
2332  * points to this CPU. See dispatch_dequeue() for the counterpart.
2333  *
2334  * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2335  * still valid. %false if lost to dequeue.
2336  */
2337 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2338 				       struct scx_dispatch_q *dsq,
2339 				       struct rq *src_rq)
2340 {
2341 	s32 cpu = raw_smp_processor_id();
2342 
2343 	lockdep_assert_held(&dsq->lock);
2344 
2345 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2346 	task_unlink_from_dsq(p, dsq);
2347 	p->scx.holding_cpu = cpu;
2348 
2349 	raw_spin_unlock(&dsq->lock);
2350 	raw_spin_rq_lock(src_rq);
2351 
2352 	/* task_rq couldn't have changed if we're still the holding cpu */
2353 	return likely(p->scx.holding_cpu == cpu) &&
2354 		!WARN_ON_ONCE(src_rq != task_rq(p));
2355 }
2356 
2357 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2358 				struct scx_dispatch_q *dsq, struct rq *src_rq)
2359 {
2360 	raw_spin_rq_unlock(this_rq);
2361 
2362 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2363 		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2364 		return true;
2365 	} else {
2366 		raw_spin_rq_unlock(src_rq);
2367 		raw_spin_rq_lock(this_rq);
2368 		return false;
2369 	}
2370 }
2371 #else	/* CONFIG_SMP */
2372 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2373 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
2374 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2375 #endif	/* CONFIG_SMP */
2376 
2377 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2378 {
2379 	struct task_struct *p;
2380 retry:
2381 	/*
2382 	 * The caller can't expect to successfully consume a task if the task's
2383 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
2384 	 * @dsq->list without locking and skip if it seems empty.
2385 	 */
2386 	if (list_empty(&dsq->list))
2387 		return false;
2388 
2389 	raw_spin_lock(&dsq->lock);
2390 
2391 	nldsq_for_each_task(p, dsq) {
2392 		struct rq *task_rq = task_rq(p);
2393 
2394 		if (rq == task_rq) {
2395 			task_unlink_from_dsq(p, dsq);
2396 			move_local_task_to_local_dsq(p, 0, dsq, rq);
2397 			raw_spin_unlock(&dsq->lock);
2398 			return true;
2399 		}
2400 
2401 		if (task_can_run_on_remote_rq(p, rq, false)) {
2402 			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2403 				return true;
2404 			goto retry;
2405 		}
2406 	}
2407 
2408 	raw_spin_unlock(&dsq->lock);
2409 	return false;
2410 }
2411 
2412 static bool consume_global_dsq(struct rq *rq)
2413 {
2414 	int node = cpu_to_node(cpu_of(rq));
2415 
2416 	return consume_dispatch_q(rq, global_dsqs[node]);
2417 }
2418 
2419 /**
2420  * dispatch_to_local_dsq - Dispatch a task to a local dsq
2421  * @rq: current rq which is locked
2422  * @dst_dsq: destination DSQ
2423  * @p: task to dispatch
2424  * @enq_flags: %SCX_ENQ_*
2425  *
2426  * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2427  * DSQ. This function performs all the synchronization dancing needed because
2428  * local DSQs are protected with rq locks.
2429  *
2430  * The caller must have exclusive ownership of @p (e.g. through
2431  * %SCX_OPSS_DISPATCHING).
2432  */
2433 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2434 				  struct task_struct *p, u64 enq_flags)
2435 {
2436 	struct rq *src_rq = task_rq(p);
2437 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2438 
2439 	/*
2440 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
2441 	 * be dequeued, its task_rq and cpus_allowed are stable too.
2442 	 *
2443 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
2444 	 */
2445 	if (rq == src_rq && rq == dst_rq) {
2446 		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2447 		return;
2448 	}
2449 
2450 #ifdef CONFIG_SMP
2451 	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2452 		dispatch_enqueue(find_global_dsq(p), p,
2453 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
2454 		return;
2455 	}
2456 
2457 	/*
2458 	 * @p is on a possibly remote @src_rq which we need to lock to move the
2459 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2460 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
2461 	 * DISPATCHING.
2462 	 *
2463 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2464 	 * we're moving from a DSQ and use the same mechanism - mark the task
2465 	 * under transfer with holding_cpu, release DISPATCHING and then follow
2466 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
2467 	 */
2468 	p->scx.holding_cpu = raw_smp_processor_id();
2469 
2470 	/* store_release ensures that dequeue sees the above */
2471 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2472 
2473 	/* switch to @src_rq lock */
2474 	if (rq != src_rq) {
2475 		raw_spin_rq_unlock(rq);
2476 		raw_spin_rq_lock(src_rq);
2477 	}
2478 
2479 	/* task_rq couldn't have changed if we're still the holding cpu */
2480 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2481 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
2482 		/*
2483 		 * If @p is staying on the same rq, there's no need to go
2484 		 * through the full deactivate/activate cycle. Optimize by
2485 		 * abbreviating move_remote_task_to_local_dsq().
2486 		 */
2487 		if (src_rq == dst_rq) {
2488 			p->scx.holding_cpu = -1;
2489 			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2490 		} else {
2491 			move_remote_task_to_local_dsq(p, enq_flags,
2492 						      src_rq, dst_rq);
2493 		}
2494 
2495 		/* if the destination CPU is idle, wake it up */
2496 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2497 			resched_curr(dst_rq);
2498 	}
2499 
2500 	/* switch back to @rq lock */
2501 	if (rq != dst_rq) {
2502 		raw_spin_rq_unlock(dst_rq);
2503 		raw_spin_rq_lock(rq);
2504 	}
2505 #else	/* CONFIG_SMP */
2506 	BUG();	/* control can not reach here on UP */
2507 #endif	/* CONFIG_SMP */
2508 }
2509 
2510 /**
2511  * finish_dispatch - Asynchronously finish dispatching a task
2512  * @rq: current rq which is locked
2513  * @p: task to finish dispatching
2514  * @qseq_at_dispatch: qseq when @p started getting dispatched
2515  * @dsq_id: destination DSQ ID
2516  * @enq_flags: %SCX_ENQ_*
2517  *
2518  * Dispatching to local DSQs may need to wait for queueing to complete or
2519  * require rq lock dancing. As we don't wanna do either while inside
2520  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2521  * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
2522  * task and its qseq. Once ops.dispatch() returns, this function is called to
2523  * finish up.
2524  *
2525  * There is no guarantee that @p is still valid for dispatching or even that it
2526  * was valid in the first place. Make sure that the task is still owned by the
2527  * BPF scheduler and claim the ownership before dispatching.
2528  */
2529 static void finish_dispatch(struct rq *rq, struct task_struct *p,
2530 			    unsigned long qseq_at_dispatch,
2531 			    u64 dsq_id, u64 enq_flags)
2532 {
2533 	struct scx_dispatch_q *dsq;
2534 	unsigned long opss;
2535 
2536 	touch_core_sched_dispatch(rq, p);
2537 retry:
2538 	/*
2539 	 * No need for _acquire here. @p is accessed only after a successful
2540 	 * try_cmpxchg to DISPATCHING.
2541 	 */
2542 	opss = atomic_long_read(&p->scx.ops_state);
2543 
2544 	switch (opss & SCX_OPSS_STATE_MASK) {
2545 	case SCX_OPSS_DISPATCHING:
2546 	case SCX_OPSS_NONE:
2547 		/* someone else already got to it */
2548 		return;
2549 	case SCX_OPSS_QUEUED:
2550 		/*
2551 		 * If qseq doesn't match, @p has gone through at least one
2552 		 * dispatch/dequeue and re-enqueue cycle between
2553 		 * scx_bpf_dispatch() and here and we have no claim on it.
2554 		 */
2555 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2556 			return;
2557 
2558 		/*
2559 		 * While we know @p is accessible, we don't yet have a claim on
2560 		 * it - the BPF scheduler is allowed to dispatch tasks
2561 		 * spuriously and there can be a racing dequeue attempt. Let's
2562 		 * claim @p by atomically transitioning it from QUEUED to
2563 		 * DISPATCHING.
2564 		 */
2565 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2566 						   SCX_OPSS_DISPATCHING)))
2567 			break;
2568 		goto retry;
2569 	case SCX_OPSS_QUEUEING:
2570 		/*
2571 		 * do_enqueue_task() is in the process of transferring the task
2572 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
2573 		 * holding any kernel or BPF resource that the enqueue path may
2574 		 * depend upon, it's safe to wait.
2575 		 */
2576 		wait_ops_state(p, opss);
2577 		goto retry;
2578 	}
2579 
2580 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2581 
2582 	dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2583 
2584 	if (dsq->id == SCX_DSQ_LOCAL)
2585 		dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2586 	else
2587 		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2588 }
2589 
2590 static void flush_dispatch_buf(struct rq *rq)
2591 {
2592 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2593 	u32 u;
2594 
2595 	for (u = 0; u < dspc->cursor; u++) {
2596 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2597 
2598 		finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2599 				ent->enq_flags);
2600 	}
2601 
2602 	dspc->nr_tasks += dspc->cursor;
2603 	dspc->cursor = 0;
2604 }
2605 
2606 static int balance_one(struct rq *rq, struct task_struct *prev)
2607 {
2608 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2609 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
2610 	int nr_loops = SCX_DSP_MAX_LOOPS;
2611 
2612 	lockdep_assert_rq_held(rq);
2613 	rq->scx.flags |= SCX_RQ_IN_BALANCE;
2614 	rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
2615 
2616 	if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2617 	    unlikely(rq->scx.cpu_released)) {
2618 		/*
2619 		 * If the previous sched_class for the current CPU was not SCX,
2620 		 * notify the BPF scheduler that it again has control of the
2621 		 * core. This callback complements ->cpu_release(), which is
2622 		 * emitted in scx_next_task_picked().
2623 		 */
2624 		if (SCX_HAS_OP(cpu_acquire))
2625 			SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
2626 		rq->scx.cpu_released = false;
2627 	}
2628 
2629 	if (prev_on_scx) {
2630 		update_curr_scx(rq);
2631 
2632 		/*
2633 		 * If @prev is runnable & has slice left, it has priority and
2634 		 * fetching more just increases latency for the fetched tasks.
2635 		 * Tell pick_task_scx() to keep running @prev. If the BPF
2636 		 * scheduler wants to handle this explicitly, it should
2637 		 * implement ->cpu_release().
2638 		 *
2639 		 * See scx_ops_disable_workfn() for the explanation on the
2640 		 * bypassing test.
2641 		 */
2642 		if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2643 		    prev->scx.slice && !scx_rq_bypassing(rq)) {
2644 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2645 			goto has_tasks;
2646 		}
2647 	}
2648 
2649 	/* if there already are tasks to run, nothing to do */
2650 	if (rq->scx.local_dsq.nr)
2651 		goto has_tasks;
2652 
2653 	if (consume_global_dsq(rq))
2654 		goto has_tasks;
2655 
2656 	if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2657 		goto no_tasks;
2658 
2659 	dspc->rq = rq;
2660 
2661 	/*
2662 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2663 	 * the local DSQ might still end up empty after a successful
2664 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2665 	 * produced some tasks, retry. The BPF scheduler may depend on this
2666 	 * looping behavior to simplify its implementation.
2667 	 */
2668 	do {
2669 		dspc->nr_tasks = 0;
2670 
2671 		SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2672 			    prev_on_scx ? prev : NULL);
2673 
2674 		flush_dispatch_buf(rq);
2675 
2676 		if (rq->scx.local_dsq.nr)
2677 			goto has_tasks;
2678 		if (consume_global_dsq(rq))
2679 			goto has_tasks;
2680 
2681 		/*
2682 		 * ops.dispatch() can trap us in this loop by repeatedly
2683 		 * dispatching ineligible tasks. Break out once in a while to
2684 		 * allow the watchdog to run. As IRQ can't be enabled in
2685 		 * balance(), we want to complete this scheduling cycle and then
2686 		 * start a new one. IOW, we want to call resched_curr() on the
2687 		 * next, most likely idle, task, not the current one. Use
2688 		 * scx_bpf_kick_cpu() for deferred kicking.
2689 		 */
2690 		if (unlikely(!--nr_loops)) {
2691 			scx_bpf_kick_cpu(cpu_of(rq), 0);
2692 			break;
2693 		}
2694 	} while (dspc->nr_tasks);
2695 
2696 no_tasks:
2697 	/*
2698 	 * Didn't find another task to run. Keep running @prev unless
2699 	 * %SCX_OPS_ENQ_LAST is in effect.
2700 	 */
2701 	if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2702 	    (!static_branch_unlikely(&scx_ops_enq_last) ||
2703 	     scx_rq_bypassing(rq))) {
2704 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
2705 		goto has_tasks;
2706 	}
2707 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2708 	return false;
2709 
2710 has_tasks:
2711 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2712 	return true;
2713 }
2714 
2715 static int balance_scx(struct rq *rq, struct task_struct *prev,
2716 		       struct rq_flags *rf)
2717 {
2718 	int ret;
2719 
2720 	rq_unpin_lock(rq, rf);
2721 
2722 	ret = balance_one(rq, prev);
2723 
2724 #ifdef CONFIG_SCHED_SMT
2725 	/*
2726 	 * When core-sched is enabled, this ops.balance() call will be followed
2727 	 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2728 	 * siblings too.
2729 	 */
2730 	if (sched_core_enabled(rq)) {
2731 		const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2732 		int scpu;
2733 
2734 		for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2735 			struct rq *srq = cpu_rq(scpu);
2736 			struct task_struct *sprev = srq->curr;
2737 
2738 			WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2739 			update_rq_clock(srq);
2740 			balance_one(srq, sprev);
2741 		}
2742 	}
2743 #endif
2744 	rq_repin_lock(rq, rf);
2745 
2746 	return ret;
2747 }
2748 
2749 static void process_ddsp_deferred_locals(struct rq *rq)
2750 {
2751 	struct task_struct *p;
2752 
2753 	lockdep_assert_rq_held(rq);
2754 
2755 	/*
2756 	 * Now that @rq can be unlocked, execute the deferred enqueueing of
2757 	 * tasks directly dispatched to the local DSQs of other CPUs. See
2758 	 * direct_dispatch(). Keep popping from the head instead of using
2759 	 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
2760 	 * temporarily.
2761 	 */
2762 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2763 				struct task_struct, scx.dsq_list.node))) {
2764 		struct scx_dispatch_q *dsq;
2765 
2766 		list_del_init(&p->scx.dsq_list.node);
2767 
2768 		dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2769 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2770 			dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2771 	}
2772 }
2773 
2774 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2775 {
2776 	if (p->scx.flags & SCX_TASK_QUEUED) {
2777 		/*
2778 		 * Core-sched might decide to execute @p before it is
2779 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2780 		 */
2781 		ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2782 		dispatch_dequeue(rq, p);
2783 	}
2784 
2785 	p->se.exec_start = rq_clock_task(rq);
2786 
2787 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2788 	if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2789 		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2790 
2791 	clr_task_runnable(p, true);
2792 
2793 	/*
2794 	 * @p is getting newly scheduled or got kicked after someone updated its
2795 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2796 	 */
2797 	if ((p->scx.slice == SCX_SLICE_INF) !=
2798 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2799 		if (p->scx.slice == SCX_SLICE_INF)
2800 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2801 		else
2802 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2803 
2804 		sched_update_tick_dependency(rq);
2805 
2806 		/*
2807 		 * For now, let's refresh the load_avgs just when transitioning
2808 		 * in and out of nohz. In the future, we might want to add a
2809 		 * mechanism which calls the following periodically on
2810 		 * tick-stopped CPUs.
2811 		 */
2812 		update_other_load_avgs(rq);
2813 	}
2814 }
2815 
2816 static enum scx_cpu_preempt_reason
2817 preempt_reason_from_class(const struct sched_class *class)
2818 {
2819 #ifdef CONFIG_SMP
2820 	if (class == &stop_sched_class)
2821 		return SCX_CPU_PREEMPT_STOP;
2822 #endif
2823 	if (class == &dl_sched_class)
2824 		return SCX_CPU_PREEMPT_DL;
2825 	if (class == &rt_sched_class)
2826 		return SCX_CPU_PREEMPT_RT;
2827 	return SCX_CPU_PREEMPT_UNKNOWN;
2828 }
2829 
2830 static void switch_class(struct rq *rq, struct task_struct *next)
2831 {
2832 	const struct sched_class *next_class = next->sched_class;
2833 
2834 #ifdef CONFIG_SMP
2835 	/*
2836 	 * Pairs with the smp_load_acquire() issued by a CPU in
2837 	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
2838 	 * resched.
2839 	 */
2840 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
2841 #endif
2842 	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
2843 		return;
2844 
2845 	/*
2846 	 * The callback is conceptually meant to convey that the CPU is no
2847 	 * longer under the control of SCX. Therefore, don't invoke the callback
2848 	 * if the next class is below SCX (in which case the BPF scheduler has
2849 	 * actively decided not to schedule any tasks on the CPU).
2850 	 */
2851 	if (sched_class_above(&ext_sched_class, next_class))
2852 		return;
2853 
2854 	/*
2855 	 * At this point we know that SCX was preempted by a higher priority
2856 	 * sched_class, so invoke the ->cpu_release() callback if we have not
2857 	 * done so already. We only send the callback once between SCX being
2858 	 * preempted, and it regaining control of the CPU.
2859 	 *
2860 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2861 	 *  next time that balance_scx() is invoked.
2862 	 */
2863 	if (!rq->scx.cpu_released) {
2864 		if (SCX_HAS_OP(cpu_release)) {
2865 			struct scx_cpu_release_args args = {
2866 				.reason = preempt_reason_from_class(next_class),
2867 				.task = next,
2868 			};
2869 
2870 			SCX_CALL_OP(SCX_KF_CPU_RELEASE,
2871 				    cpu_release, cpu_of(rq), &args);
2872 		}
2873 		rq->scx.cpu_released = true;
2874 	}
2875 }
2876 
2877 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
2878 			      struct task_struct *next)
2879 {
2880 	update_curr_scx(rq);
2881 
2882 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2883 	if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
2884 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
2885 
2886 	if (p->scx.flags & SCX_TASK_QUEUED) {
2887 		set_task_runnable(rq, p);
2888 
2889 		/*
2890 		 * If @p has slice left and is being put, @p is getting
2891 		 * preempted by a higher priority scheduler class or core-sched
2892 		 * forcing a different task. Leave it at the head of the local
2893 		 * DSQ.
2894 		 */
2895 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
2896 			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
2897 			return;
2898 		}
2899 
2900 		/*
2901 		 * If @p is runnable but we're about to enter a lower
2902 		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
2903 		 * ops.enqueue() that @p is the only one available for this cpu,
2904 		 * which should trigger an explicit follow-up scheduling event.
2905 		 */
2906 		if (sched_class_above(&ext_sched_class, next->sched_class)) {
2907 			WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
2908 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
2909 		} else {
2910 			do_enqueue_task(rq, p, 0, -1);
2911 		}
2912 	}
2913 
2914 	if (next && next->sched_class != &ext_sched_class)
2915 		switch_class(rq, next);
2916 }
2917 
2918 static struct task_struct *first_local_task(struct rq *rq)
2919 {
2920 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
2921 					struct task_struct, scx.dsq_list.node);
2922 }
2923 
2924 static struct task_struct *pick_task_scx(struct rq *rq)
2925 {
2926 	struct task_struct *prev = rq->curr;
2927 	struct task_struct *p;
2928 
2929 	/*
2930 	 * If balance_scx() is telling us to keep running @prev, replenish slice
2931 	 * if necessary and keep running @prev. Otherwise, pop the first one
2932 	 * from the local DSQ.
2933 	 *
2934 	 * WORKAROUND:
2935 	 *
2936 	 * %SCX_RQ_BAL_KEEP should be set iff $prev is on SCX as it must just
2937 	 * have gone through balance_scx(). Unfortunately, there currently is a
2938 	 * bug where fair could say yes on balance() but no on pick_task(),
2939 	 * which then ends up calling pick_task_scx() without preceding
2940 	 * balance_scx().
2941 	 *
2942 	 * For now, ignore cases where $prev is not on SCX. This isn't great and
2943 	 * can theoretically lead to stalls. However, for switch_all cases, this
2944 	 * happens only while a BPF scheduler is being loaded or unloaded, and,
2945 	 * for partial cases, fair will likely keep triggering this CPU.
2946 	 *
2947 	 * Once fair is fixed, restore WARN_ON_ONCE().
2948 	 */
2949 	if ((rq->scx.flags & SCX_RQ_BAL_KEEP) &&
2950 	    prev->sched_class == &ext_sched_class) {
2951 		p = prev;
2952 		if (!p->scx.slice)
2953 			p->scx.slice = SCX_SLICE_DFL;
2954 	} else {
2955 		p = first_local_task(rq);
2956 		if (!p)
2957 			return NULL;
2958 
2959 		if (unlikely(!p->scx.slice)) {
2960 			if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
2961 				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_next_task_scx()\n",
2962 						p->comm, p->pid);
2963 				scx_warned_zero_slice = true;
2964 			}
2965 			p->scx.slice = SCX_SLICE_DFL;
2966 		}
2967 	}
2968 
2969 	return p;
2970 }
2971 
2972 #ifdef CONFIG_SCHED_CORE
2973 /**
2974  * scx_prio_less - Task ordering for core-sched
2975  * @a: task A
2976  * @b: task B
2977  *
2978  * Core-sched is implemented as an additional scheduling layer on top of the
2979  * usual sched_class'es and needs to find out the expected task ordering. For
2980  * SCX, core-sched calls this function to interrogate the task ordering.
2981  *
2982  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
2983  * to implement the default task ordering. The older the timestamp, the higher
2984  * prority the task - the global FIFO ordering matching the default scheduling
2985  * behavior.
2986  *
2987  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
2988  * implement FIFO ordering within each local DSQ. See pick_task_scx().
2989  */
2990 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
2991 		   bool in_fi)
2992 {
2993 	/*
2994 	 * The const qualifiers are dropped from task_struct pointers when
2995 	 * calling ops.core_sched_before(). Accesses are controlled by the
2996 	 * verifier.
2997 	 */
2998 	if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
2999 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3000 					      (struct task_struct *)a,
3001 					      (struct task_struct *)b);
3002 	else
3003 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3004 }
3005 #endif	/* CONFIG_SCHED_CORE */
3006 
3007 #ifdef CONFIG_SMP
3008 
3009 static bool test_and_clear_cpu_idle(int cpu)
3010 {
3011 #ifdef CONFIG_SCHED_SMT
3012 	/*
3013 	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3014 	 * cluster is not wholly idle either way. This also prevents
3015 	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3016 	 */
3017 	if (sched_smt_active()) {
3018 		const struct cpumask *smt = cpu_smt_mask(cpu);
3019 
3020 		/*
3021 		 * If offline, @cpu is not its own sibling and
3022 		 * scx_pick_idle_cpu() can get caught in an infinite loop as
3023 		 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3024 		 * is eventually cleared.
3025 		 */
3026 		if (cpumask_intersects(smt, idle_masks.smt))
3027 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3028 		else if (cpumask_test_cpu(cpu, idle_masks.smt))
3029 			__cpumask_clear_cpu(cpu, idle_masks.smt);
3030 	}
3031 #endif
3032 	return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3033 }
3034 
3035 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3036 {
3037 	int cpu;
3038 
3039 retry:
3040 	if (sched_smt_active()) {
3041 		cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3042 		if (cpu < nr_cpu_ids)
3043 			goto found;
3044 
3045 		if (flags & SCX_PICK_IDLE_CORE)
3046 			return -EBUSY;
3047 	}
3048 
3049 	cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3050 	if (cpu >= nr_cpu_ids)
3051 		return -EBUSY;
3052 
3053 found:
3054 	if (test_and_clear_cpu_idle(cpu))
3055 		return cpu;
3056 	else
3057 		goto retry;
3058 }
3059 
3060 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3061 			      u64 wake_flags, bool *found)
3062 {
3063 	s32 cpu;
3064 
3065 	*found = false;
3066 
3067 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
3068 		scx_ops_error("built-in idle tracking is disabled");
3069 		return prev_cpu;
3070 	}
3071 
3072 	/*
3073 	 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
3074 	 * under utilized, wake up @p to the local DSQ of the waker. Checking
3075 	 * only for an empty local DSQ is insufficient as it could give the
3076 	 * wakee an unfair advantage when the system is oversaturated.
3077 	 * Checking only for the presence of idle CPUs is also insufficient as
3078 	 * the local DSQ of the waker could have tasks piled up on it even if
3079 	 * there is an idle core elsewhere on the system.
3080 	 */
3081 	cpu = smp_processor_id();
3082 	if ((wake_flags & SCX_WAKE_SYNC) &&
3083 	    !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
3084 	    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3085 		if (cpumask_test_cpu(cpu, p->cpus_ptr))
3086 			goto cpu_found;
3087 	}
3088 
3089 	/*
3090 	 * If CPU has SMT, any wholly idle CPU is likely a better pick than
3091 	 * partially idle @prev_cpu.
3092 	 */
3093 	if (sched_smt_active()) {
3094 		if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3095 		    test_and_clear_cpu_idle(prev_cpu)) {
3096 			cpu = prev_cpu;
3097 			goto cpu_found;
3098 		}
3099 
3100 		cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3101 		if (cpu >= 0)
3102 			goto cpu_found;
3103 	}
3104 
3105 	if (test_and_clear_cpu_idle(prev_cpu)) {
3106 		cpu = prev_cpu;
3107 		goto cpu_found;
3108 	}
3109 
3110 	cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3111 	if (cpu >= 0)
3112 		goto cpu_found;
3113 
3114 	return prev_cpu;
3115 
3116 cpu_found:
3117 	*found = true;
3118 	return cpu;
3119 }
3120 
3121 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3122 {
3123 	/*
3124 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3125 	 * can be a good migration opportunity with low cache and memory
3126 	 * footprint. Returning a CPU different than @prev_cpu triggers
3127 	 * immediate rq migration. However, for SCX, as the current rq
3128 	 * association doesn't dictate where the task is going to run, this
3129 	 * doesn't fit well. If necessary, we can later add a dedicated method
3130 	 * which can decide to preempt self to force it through the regular
3131 	 * scheduling path.
3132 	 */
3133 	if (unlikely(wake_flags & WF_EXEC))
3134 		return prev_cpu;
3135 
3136 	if (SCX_HAS_OP(select_cpu)) {
3137 		s32 cpu;
3138 		struct task_struct **ddsp_taskp;
3139 
3140 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3141 		WARN_ON_ONCE(*ddsp_taskp);
3142 		*ddsp_taskp = p;
3143 
3144 		cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3145 					   select_cpu, p, prev_cpu, wake_flags);
3146 		*ddsp_taskp = NULL;
3147 		if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3148 			return cpu;
3149 		else
3150 			return prev_cpu;
3151 	} else {
3152 		bool found;
3153 		s32 cpu;
3154 
3155 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3156 		if (found) {
3157 			p->scx.slice = SCX_SLICE_DFL;
3158 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3159 		}
3160 		return cpu;
3161 	}
3162 }
3163 
3164 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3165 {
3166 	run_deferred(rq);
3167 }
3168 
3169 static void set_cpus_allowed_scx(struct task_struct *p,
3170 				 struct affinity_context *ac)
3171 {
3172 	set_cpus_allowed_common(p, ac);
3173 
3174 	/*
3175 	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3176 	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3177 	 * scheduler the effective one.
3178 	 *
3179 	 * Fine-grained memory write control is enforced by BPF making the const
3180 	 * designation pointless. Cast it away when calling the operation.
3181 	 */
3182 	if (SCX_HAS_OP(set_cpumask))
3183 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3184 				 (struct cpumask *)p->cpus_ptr);
3185 }
3186 
3187 static void reset_idle_masks(void)
3188 {
3189 	/*
3190 	 * Consider all online cpus idle. Should converge to the actual state
3191 	 * quickly.
3192 	 */
3193 	cpumask_copy(idle_masks.cpu, cpu_online_mask);
3194 	cpumask_copy(idle_masks.smt, cpu_online_mask);
3195 }
3196 
3197 void __scx_update_idle(struct rq *rq, bool idle)
3198 {
3199 	int cpu = cpu_of(rq);
3200 
3201 	if (SCX_HAS_OP(update_idle)) {
3202 		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3203 		if (!static_branch_unlikely(&scx_builtin_idle_enabled))
3204 			return;
3205 	}
3206 
3207 	if (idle)
3208 		cpumask_set_cpu(cpu, idle_masks.cpu);
3209 	else
3210 		cpumask_clear_cpu(cpu, idle_masks.cpu);
3211 
3212 #ifdef CONFIG_SCHED_SMT
3213 	if (sched_smt_active()) {
3214 		const struct cpumask *smt = cpu_smt_mask(cpu);
3215 
3216 		if (idle) {
3217 			/*
3218 			 * idle_masks.smt handling is racy but that's fine as
3219 			 * it's only for optimization and self-correcting.
3220 			 */
3221 			for_each_cpu(cpu, smt) {
3222 				if (!cpumask_test_cpu(cpu, idle_masks.cpu))
3223 					return;
3224 			}
3225 			cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3226 		} else {
3227 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3228 		}
3229 	}
3230 #endif
3231 }
3232 
3233 static void handle_hotplug(struct rq *rq, bool online)
3234 {
3235 	int cpu = cpu_of(rq);
3236 
3237 	atomic_long_inc(&scx_hotplug_seq);
3238 
3239 	if (online && SCX_HAS_OP(cpu_online))
3240 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3241 	else if (!online && SCX_HAS_OP(cpu_offline))
3242 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3243 	else
3244 		scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3245 			     "cpu %d going %s, exiting scheduler", cpu,
3246 			     online ? "online" : "offline");
3247 }
3248 
3249 void scx_rq_activate(struct rq *rq)
3250 {
3251 	handle_hotplug(rq, true);
3252 }
3253 
3254 void scx_rq_deactivate(struct rq *rq)
3255 {
3256 	handle_hotplug(rq, false);
3257 }
3258 
3259 static void rq_online_scx(struct rq *rq)
3260 {
3261 	rq->scx.flags |= SCX_RQ_ONLINE;
3262 }
3263 
3264 static void rq_offline_scx(struct rq *rq)
3265 {
3266 	rq->scx.flags &= ~SCX_RQ_ONLINE;
3267 }
3268 
3269 #else	/* CONFIG_SMP */
3270 
3271 static bool test_and_clear_cpu_idle(int cpu) { return false; }
3272 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3273 static void reset_idle_masks(void) {}
3274 
3275 #endif	/* CONFIG_SMP */
3276 
3277 static bool check_rq_for_timeouts(struct rq *rq)
3278 {
3279 	struct task_struct *p;
3280 	struct rq_flags rf;
3281 	bool timed_out = false;
3282 
3283 	rq_lock_irqsave(rq, &rf);
3284 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3285 		unsigned long last_runnable = p->scx.runnable_at;
3286 
3287 		if (unlikely(time_after(jiffies,
3288 					last_runnable + scx_watchdog_timeout))) {
3289 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3290 
3291 			scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3292 					   "%s[%d] failed to run for %u.%03us",
3293 					   p->comm, p->pid,
3294 					   dur_ms / 1000, dur_ms % 1000);
3295 			timed_out = true;
3296 			break;
3297 		}
3298 	}
3299 	rq_unlock_irqrestore(rq, &rf);
3300 
3301 	return timed_out;
3302 }
3303 
3304 static void scx_watchdog_workfn(struct work_struct *work)
3305 {
3306 	int cpu;
3307 
3308 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3309 
3310 	for_each_online_cpu(cpu) {
3311 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3312 			break;
3313 
3314 		cond_resched();
3315 	}
3316 	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3317 			   scx_watchdog_timeout / 2);
3318 }
3319 
3320 void scx_tick(struct rq *rq)
3321 {
3322 	unsigned long last_check;
3323 
3324 	if (!scx_enabled())
3325 		return;
3326 
3327 	last_check = READ_ONCE(scx_watchdog_timestamp);
3328 	if (unlikely(time_after(jiffies,
3329 				last_check + READ_ONCE(scx_watchdog_timeout)))) {
3330 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3331 
3332 		scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3333 				   "watchdog failed to check in for %u.%03us",
3334 				   dur_ms / 1000, dur_ms % 1000);
3335 	}
3336 
3337 	update_other_load_avgs(rq);
3338 }
3339 
3340 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3341 {
3342 	update_curr_scx(rq);
3343 
3344 	/*
3345 	 * While disabling, always resched and refresh core-sched timestamp as
3346 	 * we can't trust the slice management or ops.core_sched_before().
3347 	 */
3348 	if (scx_rq_bypassing(rq)) {
3349 		curr->scx.slice = 0;
3350 		touch_core_sched(rq, curr);
3351 	} else if (SCX_HAS_OP(tick)) {
3352 		SCX_CALL_OP(SCX_KF_REST, tick, curr);
3353 	}
3354 
3355 	if (!curr->scx.slice)
3356 		resched_curr(rq);
3357 }
3358 
3359 #ifdef CONFIG_EXT_GROUP_SCHED
3360 static struct cgroup *tg_cgrp(struct task_group *tg)
3361 {
3362 	/*
3363 	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3364 	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3365 	 * root cgroup.
3366 	 */
3367 	if (tg && tg->css.cgroup)
3368 		return tg->css.cgroup;
3369 	else
3370 		return &cgrp_dfl_root.cgrp;
3371 }
3372 
3373 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
3374 
3375 #else	/* CONFIG_EXT_GROUP_SCHED */
3376 
3377 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3378 
3379 #endif	/* CONFIG_EXT_GROUP_SCHED */
3380 
3381 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3382 {
3383 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3384 }
3385 
3386 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3387 {
3388 	enum scx_task_state prev_state = scx_get_task_state(p);
3389 	bool warn = false;
3390 
3391 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3392 
3393 	switch (state) {
3394 	case SCX_TASK_NONE:
3395 		break;
3396 	case SCX_TASK_INIT:
3397 		warn = prev_state != SCX_TASK_NONE;
3398 		break;
3399 	case SCX_TASK_READY:
3400 		warn = prev_state == SCX_TASK_NONE;
3401 		break;
3402 	case SCX_TASK_ENABLED:
3403 		warn = prev_state != SCX_TASK_READY;
3404 		break;
3405 	default:
3406 		warn = true;
3407 		return;
3408 	}
3409 
3410 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3411 		  prev_state, state, p->comm, p->pid);
3412 
3413 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
3414 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3415 }
3416 
3417 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3418 {
3419 	int ret;
3420 
3421 	p->scx.disallow = false;
3422 
3423 	if (SCX_HAS_OP(init_task)) {
3424 		struct scx_init_task_args args = {
3425 			SCX_INIT_TASK_ARGS_CGROUP(tg)
3426 			.fork = fork,
3427 		};
3428 
3429 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3430 		if (unlikely(ret)) {
3431 			ret = ops_sanitize_err("init_task", ret);
3432 			return ret;
3433 		}
3434 	}
3435 
3436 	scx_set_task_state(p, SCX_TASK_INIT);
3437 
3438 	if (p->scx.disallow) {
3439 		if (!fork) {
3440 			struct rq *rq;
3441 			struct rq_flags rf;
3442 
3443 			rq = task_rq_lock(p, &rf);
3444 
3445 			/*
3446 			 * We're in the load path and @p->policy will be applied
3447 			 * right after. Reverting @p->policy here and rejecting
3448 			 * %SCHED_EXT transitions from scx_check_setscheduler()
3449 			 * guarantees that if ops.init_task() sets @p->disallow,
3450 			 * @p can never be in SCX.
3451 			 */
3452 			if (p->policy == SCHED_EXT) {
3453 				p->policy = SCHED_NORMAL;
3454 				atomic_long_inc(&scx_nr_rejected);
3455 			}
3456 
3457 			task_rq_unlock(rq, p, &rf);
3458 		} else if (p->policy == SCHED_EXT) {
3459 			scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
3460 				      p->comm, p->pid);
3461 		}
3462 	}
3463 
3464 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3465 	return 0;
3466 }
3467 
3468 static void scx_ops_enable_task(struct task_struct *p)
3469 {
3470 	u32 weight;
3471 
3472 	lockdep_assert_rq_held(task_rq(p));
3473 
3474 	/*
3475 	 * Set the weight before calling ops.enable() so that the scheduler
3476 	 * doesn't see a stale value if they inspect the task struct.
3477 	 */
3478 	if (task_has_idle_policy(p))
3479 		weight = WEIGHT_IDLEPRIO;
3480 	else
3481 		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3482 
3483 	p->scx.weight = sched_weight_to_cgroup(weight);
3484 
3485 	if (SCX_HAS_OP(enable))
3486 		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
3487 	scx_set_task_state(p, SCX_TASK_ENABLED);
3488 
3489 	if (SCX_HAS_OP(set_weight))
3490 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3491 }
3492 
3493 static void scx_ops_disable_task(struct task_struct *p)
3494 {
3495 	lockdep_assert_rq_held(task_rq(p));
3496 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3497 
3498 	if (SCX_HAS_OP(disable))
3499 		SCX_CALL_OP(SCX_KF_REST, disable, p);
3500 	scx_set_task_state(p, SCX_TASK_READY);
3501 }
3502 
3503 static void scx_ops_exit_task(struct task_struct *p)
3504 {
3505 	struct scx_exit_task_args args = {
3506 		.cancelled = false,
3507 	};
3508 
3509 	lockdep_assert_rq_held(task_rq(p));
3510 
3511 	switch (scx_get_task_state(p)) {
3512 	case SCX_TASK_NONE:
3513 		return;
3514 	case SCX_TASK_INIT:
3515 		args.cancelled = true;
3516 		break;
3517 	case SCX_TASK_READY:
3518 		break;
3519 	case SCX_TASK_ENABLED:
3520 		scx_ops_disable_task(p);
3521 		break;
3522 	default:
3523 		WARN_ON_ONCE(true);
3524 		return;
3525 	}
3526 
3527 	if (SCX_HAS_OP(exit_task))
3528 		SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
3529 	scx_set_task_state(p, SCX_TASK_NONE);
3530 }
3531 
3532 void init_scx_entity(struct sched_ext_entity *scx)
3533 {
3534 	/*
3535 	 * init_idle() calls this function again after fork sequence is
3536 	 * complete. Don't touch ->tasks_node as it's already linked.
3537 	 */
3538 	memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
3539 
3540 	INIT_LIST_HEAD(&scx->dsq_list.node);
3541 	RB_CLEAR_NODE(&scx->dsq_priq);
3542 	scx->sticky_cpu = -1;
3543 	scx->holding_cpu = -1;
3544 	INIT_LIST_HEAD(&scx->runnable_node);
3545 	scx->runnable_at = jiffies;
3546 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3547 	scx->slice = SCX_SLICE_DFL;
3548 }
3549 
3550 void scx_pre_fork(struct task_struct *p)
3551 {
3552 	/*
3553 	 * BPF scheduler enable/disable paths want to be able to iterate and
3554 	 * update all tasks which can become complex when racing forks. As
3555 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
3556 	 * exclude forks.
3557 	 */
3558 	percpu_down_read(&scx_fork_rwsem);
3559 }
3560 
3561 int scx_fork(struct task_struct *p)
3562 {
3563 	percpu_rwsem_assert_held(&scx_fork_rwsem);
3564 
3565 	if (scx_ops_init_task_enabled)
3566 		return scx_ops_init_task(p, task_group(p), true);
3567 	else
3568 		return 0;
3569 }
3570 
3571 void scx_post_fork(struct task_struct *p)
3572 {
3573 	if (scx_ops_init_task_enabled) {
3574 		scx_set_task_state(p, SCX_TASK_READY);
3575 
3576 		/*
3577 		 * Enable the task immediately if it's running on sched_ext.
3578 		 * Otherwise, it'll be enabled in switching_to_scx() if and
3579 		 * when it's ever configured to run with a SCHED_EXT policy.
3580 		 */
3581 		if (p->sched_class == &ext_sched_class) {
3582 			struct rq_flags rf;
3583 			struct rq *rq;
3584 
3585 			rq = task_rq_lock(p, &rf);
3586 			scx_ops_enable_task(p);
3587 			task_rq_unlock(rq, p, &rf);
3588 		}
3589 	}
3590 
3591 	spin_lock_irq(&scx_tasks_lock);
3592 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
3593 	spin_unlock_irq(&scx_tasks_lock);
3594 
3595 	percpu_up_read(&scx_fork_rwsem);
3596 }
3597 
3598 void scx_cancel_fork(struct task_struct *p)
3599 {
3600 	if (scx_enabled()) {
3601 		struct rq *rq;
3602 		struct rq_flags rf;
3603 
3604 		rq = task_rq_lock(p, &rf);
3605 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3606 		scx_ops_exit_task(p);
3607 		task_rq_unlock(rq, p, &rf);
3608 	}
3609 
3610 	percpu_up_read(&scx_fork_rwsem);
3611 }
3612 
3613 void sched_ext_free(struct task_struct *p)
3614 {
3615 	unsigned long flags;
3616 
3617 	spin_lock_irqsave(&scx_tasks_lock, flags);
3618 	list_del_init(&p->scx.tasks_node);
3619 	spin_unlock_irqrestore(&scx_tasks_lock, flags);
3620 
3621 	/*
3622 	 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
3623 	 * ENABLED transitions can't race us. Disable ops for @p.
3624 	 */
3625 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
3626 		struct rq_flags rf;
3627 		struct rq *rq;
3628 
3629 		rq = task_rq_lock(p, &rf);
3630 		scx_ops_exit_task(p);
3631 		task_rq_unlock(rq, p, &rf);
3632 	}
3633 }
3634 
3635 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3636 			      const struct load_weight *lw)
3637 {
3638 	lockdep_assert_rq_held(task_rq(p));
3639 
3640 	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3641 	if (SCX_HAS_OP(set_weight))
3642 		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3643 }
3644 
3645 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
3646 {
3647 }
3648 
3649 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3650 {
3651 	scx_ops_enable_task(p);
3652 
3653 	/*
3654 	 * set_cpus_allowed_scx() is not called while @p is associated with a
3655 	 * different scheduler class. Keep the BPF scheduler up-to-date.
3656 	 */
3657 	if (SCX_HAS_OP(set_cpumask))
3658 		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3659 				 (struct cpumask *)p->cpus_ptr);
3660 }
3661 
3662 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3663 {
3664 	scx_ops_disable_task(p);
3665 }
3666 
3667 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {}
3668 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3669 
3670 int scx_check_setscheduler(struct task_struct *p, int policy)
3671 {
3672 	lockdep_assert_rq_held(task_rq(p));
3673 
3674 	/* if disallow, reject transitioning into SCX */
3675 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3676 	    p->policy != policy && policy == SCHED_EXT)
3677 		return -EACCES;
3678 
3679 	return 0;
3680 }
3681 
3682 #ifdef CONFIG_NO_HZ_FULL
3683 bool scx_can_stop_tick(struct rq *rq)
3684 {
3685 	struct task_struct *p = rq->curr;
3686 
3687 	if (scx_rq_bypassing(rq))
3688 		return false;
3689 
3690 	if (p->sched_class != &ext_sched_class)
3691 		return true;
3692 
3693 	/*
3694 	 * @rq can dispatch from different DSQs, so we can't tell whether it
3695 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
3696 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
3697 	 */
3698 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
3699 }
3700 #endif
3701 
3702 #ifdef CONFIG_EXT_GROUP_SCHED
3703 
3704 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
3705 static bool scx_cgroup_enabled;
3706 static bool cgroup_warned_missing_weight;
3707 static bool cgroup_warned_missing_idle;
3708 
3709 static void scx_cgroup_warn_missing_weight(struct task_group *tg)
3710 {
3711 	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
3712 	    cgroup_warned_missing_weight)
3713 		return;
3714 
3715 	if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
3716 		return;
3717 
3718 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
3719 		scx_ops.name);
3720 	cgroup_warned_missing_weight = true;
3721 }
3722 
3723 static void scx_cgroup_warn_missing_idle(struct task_group *tg)
3724 {
3725 	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
3726 		return;
3727 
3728 	if (!tg->idle)
3729 		return;
3730 
3731 	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
3732 		scx_ops.name);
3733 	cgroup_warned_missing_idle = true;
3734 }
3735 
3736 int scx_tg_online(struct task_group *tg)
3737 {
3738 	int ret = 0;
3739 
3740 	WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
3741 
3742 	percpu_down_read(&scx_cgroup_rwsem);
3743 
3744 	scx_cgroup_warn_missing_weight(tg);
3745 
3746 	if (scx_cgroup_enabled) {
3747 		if (SCX_HAS_OP(cgroup_init)) {
3748 			struct scx_cgroup_init_args args =
3749 				{ .weight = tg->scx_weight };
3750 
3751 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
3752 					      tg->css.cgroup, &args);
3753 			if (ret)
3754 				ret = ops_sanitize_err("cgroup_init", ret);
3755 		}
3756 		if (ret == 0)
3757 			tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
3758 	} else {
3759 		tg->scx_flags |= SCX_TG_ONLINE;
3760 	}
3761 
3762 	percpu_up_read(&scx_cgroup_rwsem);
3763 	return ret;
3764 }
3765 
3766 void scx_tg_offline(struct task_group *tg)
3767 {
3768 	WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
3769 
3770 	percpu_down_read(&scx_cgroup_rwsem);
3771 
3772 	if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
3773 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
3774 	tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
3775 
3776 	percpu_up_read(&scx_cgroup_rwsem);
3777 }
3778 
3779 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
3780 {
3781 	struct cgroup_subsys_state *css;
3782 	struct task_struct *p;
3783 	int ret;
3784 
3785 	/* released in scx_finish/cancel_attach() */
3786 	percpu_down_read(&scx_cgroup_rwsem);
3787 
3788 	if (!scx_cgroup_enabled)
3789 		return 0;
3790 
3791 	cgroup_taskset_for_each(p, css, tset) {
3792 		struct cgroup *from = tg_cgrp(task_group(p));
3793 		struct cgroup *to = tg_cgrp(css_tg(css));
3794 
3795 		WARN_ON_ONCE(p->scx.cgrp_moving_from);
3796 
3797 		/*
3798 		 * sched_move_task() omits identity migrations. Let's match the
3799 		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
3800 		 * always match one-to-one.
3801 		 */
3802 		if (from == to)
3803 			continue;
3804 
3805 		if (SCX_HAS_OP(cgroup_prep_move)) {
3806 			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
3807 					      p, from, css->cgroup);
3808 			if (ret)
3809 				goto err;
3810 		}
3811 
3812 		p->scx.cgrp_moving_from = from;
3813 	}
3814 
3815 	return 0;
3816 
3817 err:
3818 	cgroup_taskset_for_each(p, css, tset) {
3819 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
3820 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
3821 				    p->scx.cgrp_moving_from, css->cgroup);
3822 		p->scx.cgrp_moving_from = NULL;
3823 	}
3824 
3825 	percpu_up_read(&scx_cgroup_rwsem);
3826 	return ops_sanitize_err("cgroup_prep_move", ret);
3827 }
3828 
3829 void scx_move_task(struct task_struct *p)
3830 {
3831 	if (!scx_cgroup_enabled)
3832 		return;
3833 
3834 	/*
3835 	 * We're called from sched_move_task() which handles both cgroup and
3836 	 * autogroup moves. Ignore the latter.
3837 	 *
3838 	 * Also ignore exiting tasks, because in the exit path tasks transition
3839 	 * from the autogroup to the root group, so task_group_is_autogroup()
3840 	 * alone isn't able to catch exiting autogroup tasks. This is safe for
3841 	 * cgroup_move(), because cgroup migrations never happen for PF_EXITING
3842 	 * tasks.
3843 	 */
3844 	if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
3845 		return;
3846 
3847 	/*
3848 	 * @p must have ops.cgroup_prep_move() called on it and thus
3849 	 * cgrp_moving_from set.
3850 	 */
3851 	if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
3852 		SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
3853 			p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
3854 	p->scx.cgrp_moving_from = NULL;
3855 }
3856 
3857 void scx_cgroup_finish_attach(void)
3858 {
3859 	percpu_up_read(&scx_cgroup_rwsem);
3860 }
3861 
3862 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
3863 {
3864 	struct cgroup_subsys_state *css;
3865 	struct task_struct *p;
3866 
3867 	if (!scx_cgroup_enabled)
3868 		goto out_unlock;
3869 
3870 	cgroup_taskset_for_each(p, css, tset) {
3871 		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
3872 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
3873 				    p->scx.cgrp_moving_from, css->cgroup);
3874 		p->scx.cgrp_moving_from = NULL;
3875 	}
3876 out_unlock:
3877 	percpu_up_read(&scx_cgroup_rwsem);
3878 }
3879 
3880 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
3881 {
3882 	percpu_down_read(&scx_cgroup_rwsem);
3883 
3884 	if (scx_cgroup_enabled && tg->scx_weight != weight) {
3885 		if (SCX_HAS_OP(cgroup_set_weight))
3886 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
3887 				    tg_cgrp(tg), weight);
3888 		tg->scx_weight = weight;
3889 	}
3890 
3891 	percpu_up_read(&scx_cgroup_rwsem);
3892 }
3893 
3894 void scx_group_set_idle(struct task_group *tg, bool idle)
3895 {
3896 	percpu_down_read(&scx_cgroup_rwsem);
3897 	scx_cgroup_warn_missing_idle(tg);
3898 	percpu_up_read(&scx_cgroup_rwsem);
3899 }
3900 
3901 static void scx_cgroup_lock(void)
3902 {
3903 	percpu_down_write(&scx_cgroup_rwsem);
3904 }
3905 
3906 static void scx_cgroup_unlock(void)
3907 {
3908 	percpu_up_write(&scx_cgroup_rwsem);
3909 }
3910 
3911 #else	/* CONFIG_EXT_GROUP_SCHED */
3912 
3913 static inline void scx_cgroup_lock(void) {}
3914 static inline void scx_cgroup_unlock(void) {}
3915 
3916 #endif	/* CONFIG_EXT_GROUP_SCHED */
3917 
3918 /*
3919  * Omitted operations:
3920  *
3921  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
3922  *   isn't tied to the CPU at that point. Preemption is implemented by resetting
3923  *   the victim task's slice to 0 and triggering reschedule on the target CPU.
3924  *
3925  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
3926  *
3927  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
3928  *   their current sched_class. Call them directly from sched core instead.
3929  */
3930 DEFINE_SCHED_CLASS(ext) = {
3931 	.enqueue_task		= enqueue_task_scx,
3932 	.dequeue_task		= dequeue_task_scx,
3933 	.yield_task		= yield_task_scx,
3934 	.yield_to_task		= yield_to_task_scx,
3935 
3936 	.wakeup_preempt		= wakeup_preempt_scx,
3937 
3938 	.balance		= balance_scx,
3939 	.pick_task		= pick_task_scx,
3940 
3941 	.put_prev_task		= put_prev_task_scx,
3942 	.set_next_task		= set_next_task_scx,
3943 
3944 #ifdef CONFIG_SMP
3945 	.select_task_rq		= select_task_rq_scx,
3946 	.task_woken		= task_woken_scx,
3947 	.set_cpus_allowed	= set_cpus_allowed_scx,
3948 
3949 	.rq_online		= rq_online_scx,
3950 	.rq_offline		= rq_offline_scx,
3951 #endif
3952 
3953 	.task_tick		= task_tick_scx,
3954 
3955 	.switching_to		= switching_to_scx,
3956 	.switched_from		= switched_from_scx,
3957 	.switched_to		= switched_to_scx,
3958 	.reweight_task		= reweight_task_scx,
3959 	.prio_changed		= prio_changed_scx,
3960 
3961 	.update_curr		= update_curr_scx,
3962 
3963 #ifdef CONFIG_UCLAMP_TASK
3964 	.uclamp_enabled		= 1,
3965 #endif
3966 };
3967 
3968 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
3969 {
3970 	memset(dsq, 0, sizeof(*dsq));
3971 
3972 	raw_spin_lock_init(&dsq->lock);
3973 	INIT_LIST_HEAD(&dsq->list);
3974 	dsq->id = dsq_id;
3975 }
3976 
3977 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
3978 {
3979 	struct scx_dispatch_q *dsq;
3980 	int ret;
3981 
3982 	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
3983 		return ERR_PTR(-EINVAL);
3984 
3985 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
3986 	if (!dsq)
3987 		return ERR_PTR(-ENOMEM);
3988 
3989 	init_dsq(dsq, dsq_id);
3990 
3991 	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
3992 				     dsq_hash_params);
3993 	if (ret) {
3994 		kfree(dsq);
3995 		return ERR_PTR(ret);
3996 	}
3997 	return dsq;
3998 }
3999 
4000 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4001 {
4002 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4003 	struct scx_dispatch_q *dsq, *tmp_dsq;
4004 
4005 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4006 		kfree_rcu(dsq, rcu);
4007 }
4008 
4009 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4010 
4011 static void destroy_dsq(u64 dsq_id)
4012 {
4013 	struct scx_dispatch_q *dsq;
4014 	unsigned long flags;
4015 
4016 	rcu_read_lock();
4017 
4018 	dsq = find_user_dsq(dsq_id);
4019 	if (!dsq)
4020 		goto out_unlock_rcu;
4021 
4022 	raw_spin_lock_irqsave(&dsq->lock, flags);
4023 
4024 	if (dsq->nr) {
4025 		scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4026 			      dsq->id, dsq->nr);
4027 		goto out_unlock_dsq;
4028 	}
4029 
4030 	if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4031 		goto out_unlock_dsq;
4032 
4033 	/*
4034 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4035 	 * queueing more tasks. As this function can be called from anywhere,
4036 	 * freeing is bounced through an irq work to avoid nesting RCU
4037 	 * operations inside scheduler locks.
4038 	 */
4039 	dsq->id = SCX_DSQ_INVALID;
4040 	llist_add(&dsq->free_node, &dsqs_to_free);
4041 	irq_work_queue(&free_dsq_irq_work);
4042 
4043 out_unlock_dsq:
4044 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
4045 out_unlock_rcu:
4046 	rcu_read_unlock();
4047 }
4048 
4049 #ifdef CONFIG_EXT_GROUP_SCHED
4050 static void scx_cgroup_exit(void)
4051 {
4052 	struct cgroup_subsys_state *css;
4053 
4054 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4055 
4056 	scx_cgroup_enabled = false;
4057 
4058 	/*
4059 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4060 	 * cgroups and exit all the inited ones, all online cgroups are exited.
4061 	 */
4062 	rcu_read_lock();
4063 	css_for_each_descendant_post(css, &root_task_group.css) {
4064 		struct task_group *tg = css_tg(css);
4065 
4066 		if (!(tg->scx_flags & SCX_TG_INITED))
4067 			continue;
4068 		tg->scx_flags &= ~SCX_TG_INITED;
4069 
4070 		if (!scx_ops.cgroup_exit)
4071 			continue;
4072 
4073 		if (WARN_ON_ONCE(!css_tryget(css)))
4074 			continue;
4075 		rcu_read_unlock();
4076 
4077 		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4078 
4079 		rcu_read_lock();
4080 		css_put(css);
4081 	}
4082 	rcu_read_unlock();
4083 }
4084 
4085 static int scx_cgroup_init(void)
4086 {
4087 	struct cgroup_subsys_state *css;
4088 	int ret;
4089 
4090 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4091 
4092 	cgroup_warned_missing_weight = false;
4093 	cgroup_warned_missing_idle = false;
4094 
4095 	/*
4096 	 * scx_tg_on/offline() are excluded thorugh scx_cgroup_rwsem. If we walk
4097 	 * cgroups and init, all online cgroups are initialized.
4098 	 */
4099 	rcu_read_lock();
4100 	css_for_each_descendant_pre(css, &root_task_group.css) {
4101 		struct task_group *tg = css_tg(css);
4102 		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4103 
4104 		scx_cgroup_warn_missing_weight(tg);
4105 		scx_cgroup_warn_missing_idle(tg);
4106 
4107 		if ((tg->scx_flags &
4108 		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4109 			continue;
4110 
4111 		if (!scx_ops.cgroup_init) {
4112 			tg->scx_flags |= SCX_TG_INITED;
4113 			continue;
4114 		}
4115 
4116 		if (WARN_ON_ONCE(!css_tryget(css)))
4117 			continue;
4118 		rcu_read_unlock();
4119 
4120 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4121 				      css->cgroup, &args);
4122 		if (ret) {
4123 			css_put(css);
4124 			scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4125 			return ret;
4126 		}
4127 		tg->scx_flags |= SCX_TG_INITED;
4128 
4129 		rcu_read_lock();
4130 		css_put(css);
4131 	}
4132 	rcu_read_unlock();
4133 
4134 	WARN_ON_ONCE(scx_cgroup_enabled);
4135 	scx_cgroup_enabled = true;
4136 
4137 	return 0;
4138 }
4139 
4140 #else
4141 static void scx_cgroup_exit(void) {}
4142 static int scx_cgroup_init(void) { return 0; }
4143 #endif
4144 
4145 
4146 /********************************************************************************
4147  * Sysfs interface and ops enable/disable.
4148  */
4149 
4150 #define SCX_ATTR(_name)								\
4151 	static struct kobj_attribute scx_attr_##_name = {			\
4152 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
4153 		.show = scx_attr_##_name##_show,				\
4154 	}
4155 
4156 static ssize_t scx_attr_state_show(struct kobject *kobj,
4157 				   struct kobj_attribute *ka, char *buf)
4158 {
4159 	return sysfs_emit(buf, "%s\n",
4160 			  scx_ops_enable_state_str[scx_ops_enable_state()]);
4161 }
4162 SCX_ATTR(state);
4163 
4164 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4165 					struct kobj_attribute *ka, char *buf)
4166 {
4167 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4168 }
4169 SCX_ATTR(switch_all);
4170 
4171 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4172 					 struct kobj_attribute *ka, char *buf)
4173 {
4174 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4175 }
4176 SCX_ATTR(nr_rejected);
4177 
4178 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4179 					 struct kobj_attribute *ka, char *buf)
4180 {
4181 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4182 }
4183 SCX_ATTR(hotplug_seq);
4184 
4185 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4186 					struct kobj_attribute *ka, char *buf)
4187 {
4188 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4189 }
4190 SCX_ATTR(enable_seq);
4191 
4192 static struct attribute *scx_global_attrs[] = {
4193 	&scx_attr_state.attr,
4194 	&scx_attr_switch_all.attr,
4195 	&scx_attr_nr_rejected.attr,
4196 	&scx_attr_hotplug_seq.attr,
4197 	&scx_attr_enable_seq.attr,
4198 	NULL,
4199 };
4200 
4201 static const struct attribute_group scx_global_attr_group = {
4202 	.attrs = scx_global_attrs,
4203 };
4204 
4205 static void scx_kobj_release(struct kobject *kobj)
4206 {
4207 	kfree(kobj);
4208 }
4209 
4210 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4211 				 struct kobj_attribute *ka, char *buf)
4212 {
4213 	return sysfs_emit(buf, "%s\n", scx_ops.name);
4214 }
4215 SCX_ATTR(ops);
4216 
4217 static struct attribute *scx_sched_attrs[] = {
4218 	&scx_attr_ops.attr,
4219 	NULL,
4220 };
4221 ATTRIBUTE_GROUPS(scx_sched);
4222 
4223 static const struct kobj_type scx_ktype = {
4224 	.release = scx_kobj_release,
4225 	.sysfs_ops = &kobj_sysfs_ops,
4226 	.default_groups = scx_sched_groups,
4227 };
4228 
4229 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4230 {
4231 	return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4232 }
4233 
4234 static const struct kset_uevent_ops scx_uevent_ops = {
4235 	.uevent = scx_uevent,
4236 };
4237 
4238 /*
4239  * Used by sched_fork() and __setscheduler_prio() to pick the matching
4240  * sched_class. dl/rt are already handled.
4241  */
4242 bool task_should_scx(struct task_struct *p)
4243 {
4244 	if (!scx_enabled() ||
4245 	    unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4246 		return false;
4247 	if (READ_ONCE(scx_switching_all))
4248 		return true;
4249 	return p->policy == SCHED_EXT;
4250 }
4251 
4252 /**
4253  * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4254  *
4255  * Bypassing guarantees that all runnable tasks make forward progress without
4256  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4257  * be held by tasks that the BPF scheduler is forgetting to run, which
4258  * unfortunately also excludes toggling the static branches.
4259  *
4260  * Let's work around by overriding a couple ops and modifying behaviors based on
4261  * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4262  * to force global FIFO scheduling.
4263  *
4264  * a. ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4265  *    %SCX_OPS_ENQ_LAST is also ignored.
4266  *
4267  * b. ops.dispatch() is ignored.
4268  *
4269  * c. balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4270  *    can't be trusted. Whenever a tick triggers, the running task is rotated to
4271  *    the tail of the queue with core_sched_at touched.
4272  *
4273  * d. pick_next_task() suppresses zero slice warning.
4274  *
4275  * e. scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4276  *    operations.
4277  *
4278  * f. scx_prio_less() reverts to the default core_sched_at order.
4279  */
4280 static void scx_ops_bypass(bool bypass)
4281 {
4282 	int depth, cpu;
4283 
4284 	if (bypass) {
4285 		depth = atomic_inc_return(&scx_ops_bypass_depth);
4286 		WARN_ON_ONCE(depth <= 0);
4287 		if (depth != 1)
4288 			return;
4289 	} else {
4290 		depth = atomic_dec_return(&scx_ops_bypass_depth);
4291 		WARN_ON_ONCE(depth < 0);
4292 		if (depth != 0)
4293 			return;
4294 	}
4295 
4296 	/*
4297 	 * No task property is changing. We just need to make sure all currently
4298 	 * queued tasks are re-queued according to the new scx_rq_bypassing()
4299 	 * state. As an optimization, walk each rq's runnable_list instead of
4300 	 * the scx_tasks list.
4301 	 *
4302 	 * This function can't trust the scheduler and thus can't use
4303 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
4304 	 */
4305 	for_each_possible_cpu(cpu) {
4306 		struct rq *rq = cpu_rq(cpu);
4307 		struct rq_flags rf;
4308 		struct task_struct *p, *n;
4309 
4310 		rq_lock_irqsave(rq, &rf);
4311 
4312 		if (bypass) {
4313 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4314 			rq->scx.flags |= SCX_RQ_BYPASSING;
4315 		} else {
4316 			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4317 			rq->scx.flags &= ~SCX_RQ_BYPASSING;
4318 		}
4319 
4320 		/*
4321 		 * We need to guarantee that no tasks are on the BPF scheduler
4322 		 * while bypassing. Either we see enabled or the enable path
4323 		 * sees scx_rq_bypassing() before moving tasks to SCX.
4324 		 */
4325 		if (!scx_enabled()) {
4326 			rq_unlock_irqrestore(rq, &rf);
4327 			continue;
4328 		}
4329 
4330 		/*
4331 		 * The use of list_for_each_entry_safe_reverse() is required
4332 		 * because each task is going to be removed from and added back
4333 		 * to the runnable_list during iteration. Because they're added
4334 		 * to the tail of the list, safe reverse iteration can still
4335 		 * visit all nodes.
4336 		 */
4337 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4338 						 scx.runnable_node) {
4339 			struct sched_enq_and_set_ctx ctx;
4340 
4341 			/* cycling deq/enq is enough, see the function comment */
4342 			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4343 			sched_enq_and_set_task(&ctx);
4344 		}
4345 
4346 		rq_unlock_irqrestore(rq, &rf);
4347 
4348 		/* kick to restore ticks */
4349 		resched_cpu(cpu);
4350 	}
4351 }
4352 
4353 static void free_exit_info(struct scx_exit_info *ei)
4354 {
4355 	kfree(ei->dump);
4356 	kfree(ei->msg);
4357 	kfree(ei->bt);
4358 	kfree(ei);
4359 }
4360 
4361 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4362 {
4363 	struct scx_exit_info *ei;
4364 
4365 	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4366 	if (!ei)
4367 		return NULL;
4368 
4369 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4370 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4371 	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4372 
4373 	if (!ei->bt || !ei->msg || !ei->dump) {
4374 		free_exit_info(ei);
4375 		return NULL;
4376 	}
4377 
4378 	return ei;
4379 }
4380 
4381 static const char *scx_exit_reason(enum scx_exit_kind kind)
4382 {
4383 	switch (kind) {
4384 	case SCX_EXIT_UNREG:
4385 		return "unregistered from user space";
4386 	case SCX_EXIT_UNREG_BPF:
4387 		return "unregistered from BPF";
4388 	case SCX_EXIT_UNREG_KERN:
4389 		return "unregistered from the main kernel";
4390 	case SCX_EXIT_SYSRQ:
4391 		return "disabled by sysrq-S";
4392 	case SCX_EXIT_ERROR:
4393 		return "runtime error";
4394 	case SCX_EXIT_ERROR_BPF:
4395 		return "scx_bpf_error";
4396 	case SCX_EXIT_ERROR_STALL:
4397 		return "runnable task stall";
4398 	default:
4399 		return "<UNKNOWN>";
4400 	}
4401 }
4402 
4403 static void scx_ops_disable_workfn(struct kthread_work *work)
4404 {
4405 	struct scx_exit_info *ei = scx_exit_info;
4406 	struct scx_task_iter sti;
4407 	struct task_struct *p;
4408 	struct rhashtable_iter rht_iter;
4409 	struct scx_dispatch_q *dsq;
4410 	int i, kind;
4411 
4412 	kind = atomic_read(&scx_exit_kind);
4413 	while (true) {
4414 		/*
4415 		 * NONE indicates that a new scx_ops has been registered since
4416 		 * disable was scheduled - don't kill the new ops. DONE
4417 		 * indicates that the ops has already been disabled.
4418 		 */
4419 		if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
4420 			return;
4421 		if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
4422 			break;
4423 	}
4424 	ei->kind = kind;
4425 	ei->reason = scx_exit_reason(ei->kind);
4426 
4427 	/* guarantee forward progress by bypassing scx_ops */
4428 	scx_ops_bypass(true);
4429 
4430 	switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
4431 	case SCX_OPS_DISABLING:
4432 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4433 		break;
4434 	case SCX_OPS_DISABLED:
4435 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
4436 			scx_exit_info->msg);
4437 		WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4438 			     SCX_OPS_DISABLING);
4439 		goto done;
4440 	default:
4441 		break;
4442 	}
4443 
4444 	/*
4445 	 * Here, every runnable task is guaranteed to make forward progress and
4446 	 * we can safely use blocking synchronization constructs. Actually
4447 	 * disable ops.
4448 	 */
4449 	mutex_lock(&scx_ops_enable_mutex);
4450 
4451 	static_branch_disable(&__scx_switched_all);
4452 	WRITE_ONCE(scx_switching_all, false);
4453 
4454 	/*
4455 	 * Shut down cgroup support before tasks so that the cgroup attach path
4456 	 * doesn't race against scx_ops_exit_task().
4457 	 */
4458 	scx_cgroup_lock();
4459 	scx_cgroup_exit();
4460 	scx_cgroup_unlock();
4461 
4462 	/*
4463 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4464 	 * must be switched out and exited synchronously.
4465 	 */
4466 	percpu_down_write(&scx_fork_rwsem);
4467 
4468 	scx_ops_init_task_enabled = false;
4469 
4470 	spin_lock_irq(&scx_tasks_lock);
4471 	scx_task_iter_init(&sti);
4472 	while ((p = scx_task_iter_next_locked(&sti))) {
4473 		const struct sched_class *old_class = p->sched_class;
4474 		struct sched_enq_and_set_ctx ctx;
4475 
4476 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4477 
4478 		p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL);
4479 		__setscheduler_prio(p, p->prio);
4480 		check_class_changing(task_rq(p), p, old_class);
4481 
4482 		sched_enq_and_set_task(&ctx);
4483 
4484 		check_class_changed(task_rq(p), p, old_class, p->prio);
4485 		scx_ops_exit_task(p);
4486 	}
4487 	scx_task_iter_exit(&sti);
4488 	spin_unlock_irq(&scx_tasks_lock);
4489 	percpu_up_write(&scx_fork_rwsem);
4490 
4491 	/* no task is on scx, turn off all the switches and flush in-progress calls */
4492 	static_branch_disable(&__scx_ops_enabled);
4493 	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
4494 		static_branch_disable(&scx_has_op[i]);
4495 	static_branch_disable(&scx_ops_enq_last);
4496 	static_branch_disable(&scx_ops_enq_exiting);
4497 	static_branch_disable(&scx_ops_cpu_preempt);
4498 	static_branch_disable(&scx_builtin_idle_enabled);
4499 	synchronize_rcu();
4500 
4501 	if (ei->kind >= SCX_EXIT_ERROR) {
4502 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4503 		       scx_ops.name, ei->reason);
4504 
4505 		if (ei->msg[0] != '\0')
4506 			pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
4507 #ifdef CONFIG_STACKTRACE
4508 		stack_trace_print(ei->bt, ei->bt_len, 2);
4509 #endif
4510 	} else {
4511 		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4512 			scx_ops.name, ei->reason);
4513 	}
4514 
4515 	if (scx_ops.exit)
4516 		SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
4517 
4518 	cancel_delayed_work_sync(&scx_watchdog_work);
4519 
4520 	/*
4521 	 * Delete the kobject from the hierarchy eagerly in addition to just
4522 	 * dropping a reference. Otherwise, if the object is deleted
4523 	 * asynchronously, sysfs could observe an object of the same name still
4524 	 * in the hierarchy when another scheduler is loaded.
4525 	 */
4526 	kobject_del(scx_root_kobj);
4527 	kobject_put(scx_root_kobj);
4528 	scx_root_kobj = NULL;
4529 
4530 	memset(&scx_ops, 0, sizeof(scx_ops));
4531 
4532 	rhashtable_walk_enter(&dsq_hash, &rht_iter);
4533 	do {
4534 		rhashtable_walk_start(&rht_iter);
4535 
4536 		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
4537 			destroy_dsq(dsq->id);
4538 
4539 		rhashtable_walk_stop(&rht_iter);
4540 	} while (dsq == ERR_PTR(-EAGAIN));
4541 	rhashtable_walk_exit(&rht_iter);
4542 
4543 	free_percpu(scx_dsp_ctx);
4544 	scx_dsp_ctx = NULL;
4545 	scx_dsp_max_batch = 0;
4546 
4547 	free_exit_info(scx_exit_info);
4548 	scx_exit_info = NULL;
4549 
4550 	mutex_unlock(&scx_ops_enable_mutex);
4551 
4552 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4553 		     SCX_OPS_DISABLING);
4554 done:
4555 	scx_ops_bypass(false);
4556 }
4557 
4558 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
4559 
4560 static void schedule_scx_ops_disable_work(void)
4561 {
4562 	struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
4563 
4564 	/*
4565 	 * We may be called spuriously before the first bpf_sched_ext_reg(). If
4566 	 * scx_ops_helper isn't set up yet, there's nothing to do.
4567 	 */
4568 	if (helper)
4569 		kthread_queue_work(helper, &scx_ops_disable_work);
4570 }
4571 
4572 static void scx_ops_disable(enum scx_exit_kind kind)
4573 {
4574 	int none = SCX_EXIT_NONE;
4575 
4576 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
4577 		kind = SCX_EXIT_ERROR;
4578 
4579 	atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
4580 
4581 	schedule_scx_ops_disable_work();
4582 }
4583 
4584 static void dump_newline(struct seq_buf *s)
4585 {
4586 	trace_sched_ext_dump("");
4587 
4588 	/* @s may be zero sized and seq_buf triggers WARN if so */
4589 	if (s->size)
4590 		seq_buf_putc(s, '\n');
4591 }
4592 
4593 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
4594 {
4595 	va_list args;
4596 
4597 #ifdef CONFIG_TRACEPOINTS
4598 	if (trace_sched_ext_dump_enabled()) {
4599 		/* protected by scx_dump_state()::dump_lock */
4600 		static char line_buf[SCX_EXIT_MSG_LEN];
4601 
4602 		va_start(args, fmt);
4603 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
4604 		va_end(args);
4605 
4606 		trace_sched_ext_dump(line_buf);
4607 	}
4608 #endif
4609 	/* @s may be zero sized and seq_buf triggers WARN if so */
4610 	if (s->size) {
4611 		va_start(args, fmt);
4612 		seq_buf_vprintf(s, fmt, args);
4613 		va_end(args);
4614 
4615 		seq_buf_putc(s, '\n');
4616 	}
4617 }
4618 
4619 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
4620 			     const unsigned long *bt, unsigned int len)
4621 {
4622 	unsigned int i;
4623 
4624 	for (i = 0; i < len; i++)
4625 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
4626 }
4627 
4628 static void ops_dump_init(struct seq_buf *s, const char *prefix)
4629 {
4630 	struct scx_dump_data *dd = &scx_dump_data;
4631 
4632 	lockdep_assert_irqs_disabled();
4633 
4634 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
4635 	dd->first = true;
4636 	dd->cursor = 0;
4637 	dd->s = s;
4638 	dd->prefix = prefix;
4639 }
4640 
4641 static void ops_dump_flush(void)
4642 {
4643 	struct scx_dump_data *dd = &scx_dump_data;
4644 	char *line = dd->buf.line;
4645 
4646 	if (!dd->cursor)
4647 		return;
4648 
4649 	/*
4650 	 * There's something to flush and this is the first line. Insert a blank
4651 	 * line to distinguish ops dump.
4652 	 */
4653 	if (dd->first) {
4654 		dump_newline(dd->s);
4655 		dd->first = false;
4656 	}
4657 
4658 	/*
4659 	 * There may be multiple lines in $line. Scan and emit each line
4660 	 * separately.
4661 	 */
4662 	while (true) {
4663 		char *end = line;
4664 		char c;
4665 
4666 		while (*end != '\n' && *end != '\0')
4667 			end++;
4668 
4669 		/*
4670 		 * If $line overflowed, it may not have newline at the end.
4671 		 * Always emit with a newline.
4672 		 */
4673 		c = *end;
4674 		*end = '\0';
4675 		dump_line(dd->s, "%s%s", dd->prefix, line);
4676 		if (c == '\0')
4677 			break;
4678 
4679 		/* move to the next line */
4680 		end++;
4681 		if (*end == '\0')
4682 			break;
4683 		line = end;
4684 	}
4685 
4686 	dd->cursor = 0;
4687 }
4688 
4689 static void ops_dump_exit(void)
4690 {
4691 	ops_dump_flush();
4692 	scx_dump_data.cpu = -1;
4693 }
4694 
4695 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
4696 			  struct task_struct *p, char marker)
4697 {
4698 	static unsigned long bt[SCX_EXIT_BT_LEN];
4699 	char dsq_id_buf[19] = "(n/a)";
4700 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
4701 	unsigned int bt_len = 0;
4702 
4703 	if (p->scx.dsq)
4704 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
4705 			  (unsigned long long)p->scx.dsq->id);
4706 
4707 	dump_newline(s);
4708 	dump_line(s, " %c%c %s[%d] %+ldms",
4709 		  marker, task_state_to_char(p), p->comm, p->pid,
4710 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
4711 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
4712 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
4713 		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
4714 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
4715 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
4716 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
4717 		  p->scx.dsq_vtime);
4718 	dump_line(s, "      cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
4719 
4720 	if (SCX_HAS_OP(dump_task)) {
4721 		ops_dump_init(s, "    ");
4722 		SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
4723 		ops_dump_exit();
4724 	}
4725 
4726 #ifdef CONFIG_STACKTRACE
4727 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
4728 #endif
4729 	if (bt_len) {
4730 		dump_newline(s);
4731 		dump_stack_trace(s, "    ", bt, bt_len);
4732 	}
4733 }
4734 
4735 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
4736 {
4737 	static DEFINE_SPINLOCK(dump_lock);
4738 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
4739 	struct scx_dump_ctx dctx = {
4740 		.kind = ei->kind,
4741 		.exit_code = ei->exit_code,
4742 		.reason = ei->reason,
4743 		.at_ns = ktime_get_ns(),
4744 		.at_jiffies = jiffies,
4745 	};
4746 	struct seq_buf s;
4747 	unsigned long flags;
4748 	char *buf;
4749 	int cpu;
4750 
4751 	spin_lock_irqsave(&dump_lock, flags);
4752 
4753 	seq_buf_init(&s, ei->dump, dump_len);
4754 
4755 	if (ei->kind == SCX_EXIT_NONE) {
4756 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
4757 	} else {
4758 		dump_line(&s, "%s[%d] triggered exit kind %d:",
4759 			  current->comm, current->pid, ei->kind);
4760 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
4761 		dump_newline(&s);
4762 		dump_line(&s, "Backtrace:");
4763 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
4764 	}
4765 
4766 	if (SCX_HAS_OP(dump)) {
4767 		ops_dump_init(&s, "");
4768 		SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
4769 		ops_dump_exit();
4770 	}
4771 
4772 	dump_newline(&s);
4773 	dump_line(&s, "CPU states");
4774 	dump_line(&s, "----------");
4775 
4776 	for_each_possible_cpu(cpu) {
4777 		struct rq *rq = cpu_rq(cpu);
4778 		struct rq_flags rf;
4779 		struct task_struct *p;
4780 		struct seq_buf ns;
4781 		size_t avail, used;
4782 		bool idle;
4783 
4784 		rq_lock(rq, &rf);
4785 
4786 		idle = list_empty(&rq->scx.runnable_list) &&
4787 			rq->curr->sched_class == &idle_sched_class;
4788 
4789 		if (idle && !SCX_HAS_OP(dump_cpu))
4790 			goto next;
4791 
4792 		/*
4793 		 * We don't yet know whether ops.dump_cpu() will produce output
4794 		 * and we may want to skip the default CPU dump if it doesn't.
4795 		 * Use a nested seq_buf to generate the standard dump so that we
4796 		 * can decide whether to commit later.
4797 		 */
4798 		avail = seq_buf_get_buf(&s, &buf);
4799 		seq_buf_init(&ns, buf, avail);
4800 
4801 		dump_newline(&ns);
4802 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
4803 			  cpu, rq->scx.nr_running, rq->scx.flags,
4804 			  rq->scx.cpu_released, rq->scx.ops_qseq,
4805 			  rq->scx.pnt_seq);
4806 		dump_line(&ns, "          curr=%s[%d] class=%ps",
4807 			  rq->curr->comm, rq->curr->pid,
4808 			  rq->curr->sched_class);
4809 		if (!cpumask_empty(rq->scx.cpus_to_kick))
4810 			dump_line(&ns, "  cpus_to_kick   : %*pb",
4811 				  cpumask_pr_args(rq->scx.cpus_to_kick));
4812 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
4813 			dump_line(&ns, "  idle_to_kick   : %*pb",
4814 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
4815 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
4816 			dump_line(&ns, "  cpus_to_preempt: %*pb",
4817 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
4818 		if (!cpumask_empty(rq->scx.cpus_to_wait))
4819 			dump_line(&ns, "  cpus_to_wait   : %*pb",
4820 				  cpumask_pr_args(rq->scx.cpus_to_wait));
4821 
4822 		used = seq_buf_used(&ns);
4823 		if (SCX_HAS_OP(dump_cpu)) {
4824 			ops_dump_init(&ns, "  ");
4825 			SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
4826 			ops_dump_exit();
4827 		}
4828 
4829 		/*
4830 		 * If idle && nothing generated by ops.dump_cpu(), there's
4831 		 * nothing interesting. Skip.
4832 		 */
4833 		if (idle && used == seq_buf_used(&ns))
4834 			goto next;
4835 
4836 		/*
4837 		 * $s may already have overflowed when $ns was created. If so,
4838 		 * calling commit on it will trigger BUG.
4839 		 */
4840 		if (avail) {
4841 			seq_buf_commit(&s, seq_buf_used(&ns));
4842 			if (seq_buf_has_overflowed(&ns))
4843 				seq_buf_set_overflow(&s);
4844 		}
4845 
4846 		if (rq->curr->sched_class == &ext_sched_class)
4847 			scx_dump_task(&s, &dctx, rq->curr, '*');
4848 
4849 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
4850 			scx_dump_task(&s, &dctx, p, ' ');
4851 	next:
4852 		rq_unlock(rq, &rf);
4853 	}
4854 
4855 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
4856 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
4857 		       trunc_marker, sizeof(trunc_marker));
4858 
4859 	spin_unlock_irqrestore(&dump_lock, flags);
4860 }
4861 
4862 static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
4863 {
4864 	struct scx_exit_info *ei = scx_exit_info;
4865 
4866 	if (ei->kind >= SCX_EXIT_ERROR)
4867 		scx_dump_state(ei, scx_ops.exit_dump_len);
4868 
4869 	schedule_scx_ops_disable_work();
4870 }
4871 
4872 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
4873 
4874 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
4875 					     s64 exit_code,
4876 					     const char *fmt, ...)
4877 {
4878 	struct scx_exit_info *ei = scx_exit_info;
4879 	int none = SCX_EXIT_NONE;
4880 	va_list args;
4881 
4882 	if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
4883 		return;
4884 
4885 	ei->exit_code = exit_code;
4886 #ifdef CONFIG_STACKTRACE
4887 	if (kind >= SCX_EXIT_ERROR)
4888 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
4889 #endif
4890 	va_start(args, fmt);
4891 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
4892 	va_end(args);
4893 
4894 	/*
4895 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
4896 	 * in scx_ops_disable_workfn().
4897 	 */
4898 	ei->kind = kind;
4899 	ei->reason = scx_exit_reason(ei->kind);
4900 
4901 	irq_work_queue(&scx_ops_error_irq_work);
4902 }
4903 
4904 static struct kthread_worker *scx_create_rt_helper(const char *name)
4905 {
4906 	struct kthread_worker *helper;
4907 
4908 	helper = kthread_create_worker(0, name);
4909 	if (helper)
4910 		sched_set_fifo(helper->task);
4911 	return helper;
4912 }
4913 
4914 static void check_hotplug_seq(const struct sched_ext_ops *ops)
4915 {
4916 	unsigned long long global_hotplug_seq;
4917 
4918 	/*
4919 	 * If a hotplug event has occurred between when a scheduler was
4920 	 * initialized, and when we were able to attach, exit and notify user
4921 	 * space about it.
4922 	 */
4923 	if (ops->hotplug_seq) {
4924 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
4925 		if (ops->hotplug_seq != global_hotplug_seq) {
4926 			scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
4927 				     "expected hotplug seq %llu did not match actual %llu",
4928 				     ops->hotplug_seq, global_hotplug_seq);
4929 		}
4930 	}
4931 }
4932 
4933 static int validate_ops(const struct sched_ext_ops *ops)
4934 {
4935 	/*
4936 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
4937 	 * ops.enqueue() callback isn't implemented.
4938 	 */
4939 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
4940 		scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
4941 		return -EINVAL;
4942 	}
4943 
4944 	return 0;
4945 }
4946 
4947 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
4948 {
4949 	struct scx_task_iter sti;
4950 	struct task_struct *p;
4951 	unsigned long timeout;
4952 	int i, cpu, node, ret;
4953 
4954 	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
4955 			   cpu_possible_mask)) {
4956 		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation");
4957 		return -EINVAL;
4958 	}
4959 
4960 	mutex_lock(&scx_ops_enable_mutex);
4961 
4962 	if (!scx_ops_helper) {
4963 		WRITE_ONCE(scx_ops_helper,
4964 			   scx_create_rt_helper("sched_ext_ops_helper"));
4965 		if (!scx_ops_helper) {
4966 			ret = -ENOMEM;
4967 			goto err_unlock;
4968 		}
4969 	}
4970 
4971 	if (!global_dsqs) {
4972 		struct scx_dispatch_q **dsqs;
4973 
4974 		dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
4975 		if (!dsqs) {
4976 			ret = -ENOMEM;
4977 			goto err_unlock;
4978 		}
4979 
4980 		for_each_node_state(node, N_POSSIBLE) {
4981 			struct scx_dispatch_q *dsq;
4982 
4983 			dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4984 			if (!dsq) {
4985 				for_each_node_state(node, N_POSSIBLE)
4986 					kfree(dsqs[node]);
4987 				kfree(dsqs);
4988 				ret = -ENOMEM;
4989 				goto err_unlock;
4990 			}
4991 
4992 			init_dsq(dsq, SCX_DSQ_GLOBAL);
4993 			dsqs[node] = dsq;
4994 		}
4995 
4996 		global_dsqs = dsqs;
4997 	}
4998 
4999 	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5000 		ret = -EBUSY;
5001 		goto err_unlock;
5002 	}
5003 
5004 	scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5005 	if (!scx_root_kobj) {
5006 		ret = -ENOMEM;
5007 		goto err_unlock;
5008 	}
5009 
5010 	scx_root_kobj->kset = scx_kset;
5011 	ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5012 	if (ret < 0)
5013 		goto err;
5014 
5015 	scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5016 	if (!scx_exit_info) {
5017 		ret = -ENOMEM;
5018 		goto err_del;
5019 	}
5020 
5021 	/*
5022 	 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5023 	 * disable path. Failure triggers full disabling from here on.
5024 	 */
5025 	scx_ops = *ops;
5026 
5027 	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5028 		     SCX_OPS_DISABLED);
5029 
5030 	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5031 	scx_warned_zero_slice = false;
5032 
5033 	atomic_long_set(&scx_nr_rejected, 0);
5034 
5035 	for_each_possible_cpu(cpu)
5036 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5037 
5038 	/*
5039 	 * Keep CPUs stable during enable so that the BPF scheduler can track
5040 	 * online CPUs by watching ->on/offline_cpu() after ->init().
5041 	 */
5042 	cpus_read_lock();
5043 
5044 	if (scx_ops.init) {
5045 		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5046 		if (ret) {
5047 			ret = ops_sanitize_err("init", ret);
5048 			cpus_read_unlock();
5049 			scx_ops_error("ops.init() failed (%d)", ret);
5050 			goto err_disable;
5051 		}
5052 	}
5053 
5054 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5055 		if (((void (**)(void))ops)[i])
5056 			static_branch_enable_cpuslocked(&scx_has_op[i]);
5057 
5058 	check_hotplug_seq(ops);
5059 	cpus_read_unlock();
5060 
5061 	ret = validate_ops(ops);
5062 	if (ret)
5063 		goto err_disable;
5064 
5065 	WARN_ON_ONCE(scx_dsp_ctx);
5066 	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5067 	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5068 						   scx_dsp_max_batch),
5069 				     __alignof__(struct scx_dsp_ctx));
5070 	if (!scx_dsp_ctx) {
5071 		ret = -ENOMEM;
5072 		goto err_disable;
5073 	}
5074 
5075 	if (ops->timeout_ms)
5076 		timeout = msecs_to_jiffies(ops->timeout_ms);
5077 	else
5078 		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5079 
5080 	WRITE_ONCE(scx_watchdog_timeout, timeout);
5081 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5082 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5083 			   scx_watchdog_timeout / 2);
5084 
5085 	/*
5086 	 * Once __scx_ops_enabled is set, %current can be switched to SCX
5087 	 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5088 	 * userspace scheduling) may not function correctly before all tasks are
5089 	 * switched. Init in bypass mode to guarantee forward progress.
5090 	 */
5091 	scx_ops_bypass(true);
5092 
5093 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5094 		if (((void (**)(void))ops)[i])
5095 			static_branch_enable(&scx_has_op[i]);
5096 
5097 	if (ops->flags & SCX_OPS_ENQ_LAST)
5098 		static_branch_enable(&scx_ops_enq_last);
5099 
5100 	if (ops->flags & SCX_OPS_ENQ_EXITING)
5101 		static_branch_enable(&scx_ops_enq_exiting);
5102 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5103 		static_branch_enable(&scx_ops_cpu_preempt);
5104 
5105 	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
5106 		reset_idle_masks();
5107 		static_branch_enable(&scx_builtin_idle_enabled);
5108 	} else {
5109 		static_branch_disable(&scx_builtin_idle_enabled);
5110 	}
5111 
5112 	/*
5113 	 * Lock out forks, cgroup on/offlining and moves before opening the
5114 	 * floodgate so that they don't wander into the operations prematurely.
5115 	 */
5116 	percpu_down_write(&scx_fork_rwsem);
5117 
5118 	WARN_ON_ONCE(scx_ops_init_task_enabled);
5119 	scx_ops_init_task_enabled = true;
5120 
5121 	/*
5122 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5123 	 * preventing new tasks from being added. No need to exclude tasks
5124 	 * leaving as sched_ext_free() can handle both prepped and enabled
5125 	 * tasks. Prep all tasks first and then enable them with preemption
5126 	 * disabled.
5127 	 *
5128 	 * All cgroups should be initialized before scx_ops_init_task() so that
5129 	 * the BPF scheduler can reliably track each task's cgroup membership
5130 	 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5131 	 * migrations while tasks are being initialized so that
5132 	 * scx_cgroup_can_attach() never sees uninitialized tasks.
5133 	 */
5134 	scx_cgroup_lock();
5135 	ret = scx_cgroup_init();
5136 	if (ret)
5137 		goto err_disable_unlock_all;
5138 
5139 	spin_lock_irq(&scx_tasks_lock);
5140 	scx_task_iter_init(&sti);
5141 	while ((p = scx_task_iter_next_locked(&sti))) {
5142 		/*
5143 		 * @p may already be dead, have lost all its usages counts and
5144 		 * be waiting for RCU grace period before being freed. @p can't
5145 		 * be initialized for SCX in such cases and should be ignored.
5146 		 */
5147 		if (!tryget_task_struct(p))
5148 			continue;
5149 
5150 		scx_task_iter_rq_unlock(&sti);
5151 		spin_unlock_irq(&scx_tasks_lock);
5152 
5153 		ret = scx_ops_init_task(p, task_group(p), false);
5154 		if (ret) {
5155 			put_task_struct(p);
5156 			spin_lock_irq(&scx_tasks_lock);
5157 			scx_task_iter_exit(&sti);
5158 			spin_unlock_irq(&scx_tasks_lock);
5159 			scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5160 				      ret, p->comm, p->pid);
5161 			goto err_disable_unlock_all;
5162 		}
5163 
5164 		scx_set_task_state(p, SCX_TASK_READY);
5165 
5166 		put_task_struct(p);
5167 		spin_lock_irq(&scx_tasks_lock);
5168 	}
5169 	scx_task_iter_exit(&sti);
5170 	spin_unlock_irq(&scx_tasks_lock);
5171 	scx_cgroup_unlock();
5172 	percpu_up_write(&scx_fork_rwsem);
5173 
5174 	/*
5175 	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5176 	 * all eligible tasks.
5177 	 */
5178 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5179 	static_branch_enable(&__scx_ops_enabled);
5180 
5181 	/*
5182 	 * We're fully committed and can't fail. The task READY -> ENABLED
5183 	 * transitions here are synchronized against sched_ext_free() through
5184 	 * scx_tasks_lock.
5185 	 */
5186 	percpu_down_write(&scx_fork_rwsem);
5187 	spin_lock_irq(&scx_tasks_lock);
5188 	scx_task_iter_init(&sti);
5189 	while ((p = scx_task_iter_next_locked(&sti))) {
5190 		const struct sched_class *old_class = p->sched_class;
5191 		struct sched_enq_and_set_ctx ctx;
5192 
5193 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5194 
5195 		__setscheduler_prio(p, p->prio);
5196 		check_class_changing(task_rq(p), p, old_class);
5197 
5198 		sched_enq_and_set_task(&ctx);
5199 
5200 		check_class_changed(task_rq(p), p, old_class, p->prio);
5201 	}
5202 	scx_task_iter_exit(&sti);
5203 	spin_unlock_irq(&scx_tasks_lock);
5204 	percpu_up_write(&scx_fork_rwsem);
5205 
5206 	scx_ops_bypass(false);
5207 
5208 	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5209 		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5210 		goto err_disable;
5211 	}
5212 
5213 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5214 		static_branch_enable(&__scx_switched_all);
5215 
5216 	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5217 		scx_ops.name, scx_switched_all() ? "" : " (partial)");
5218 	kobject_uevent(scx_root_kobj, KOBJ_ADD);
5219 	mutex_unlock(&scx_ops_enable_mutex);
5220 
5221 	atomic_long_inc(&scx_enable_seq);
5222 
5223 	return 0;
5224 
5225 err_del:
5226 	kobject_del(scx_root_kobj);
5227 err:
5228 	kobject_put(scx_root_kobj);
5229 	scx_root_kobj = NULL;
5230 	if (scx_exit_info) {
5231 		free_exit_info(scx_exit_info);
5232 		scx_exit_info = NULL;
5233 	}
5234 err_unlock:
5235 	mutex_unlock(&scx_ops_enable_mutex);
5236 	return ret;
5237 
5238 err_disable_unlock_all:
5239 	scx_cgroup_unlock();
5240 	percpu_up_write(&scx_fork_rwsem);
5241 	scx_ops_bypass(false);
5242 err_disable:
5243 	mutex_unlock(&scx_ops_enable_mutex);
5244 	/*
5245 	 * Returning an error code here would not pass all the error information
5246 	 * to userspace. Record errno using scx_ops_error() for cases
5247 	 * scx_ops_error() wasn't already invoked and exit indicating success so
5248 	 * that the error is notified through ops.exit() with all the details.
5249 	 *
5250 	 * Flush scx_ops_disable_work to ensure that error is reported before
5251 	 * init completion.
5252 	 */
5253 	scx_ops_error("scx_ops_enable() failed (%d)", ret);
5254 	kthread_flush_work(&scx_ops_disable_work);
5255 	return 0;
5256 }
5257 
5258 
5259 /********************************************************************************
5260  * bpf_struct_ops plumbing.
5261  */
5262 #include <linux/bpf_verifier.h>
5263 #include <linux/bpf.h>
5264 #include <linux/btf.h>
5265 
5266 extern struct btf *btf_vmlinux;
5267 static const struct btf_type *task_struct_type;
5268 static u32 task_struct_type_id;
5269 
5270 static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size,
5271 			       enum bpf_access_type type,
5272 			       const struct bpf_prog *prog,
5273 			       struct bpf_insn_access_aux *info)
5274 {
5275 	struct btf *btf = bpf_get_btf_vmlinux();
5276 	const struct bpf_struct_ops_desc *st_ops_desc;
5277 	const struct btf_member *member;
5278 	const struct btf_type *t;
5279 	u32 btf_id, member_idx;
5280 	const char *mname;
5281 
5282 	/* struct_ops op args are all sequential, 64-bit numbers */
5283 	if (off != arg_n * sizeof(__u64))
5284 		return false;
5285 
5286 	/* btf_id should be the type id of struct sched_ext_ops */
5287 	btf_id = prog->aux->attach_btf_id;
5288 	st_ops_desc = bpf_struct_ops_find(btf, btf_id);
5289 	if (!st_ops_desc)
5290 		return false;
5291 
5292 	/* BTF type of struct sched_ext_ops */
5293 	t = st_ops_desc->type;
5294 
5295 	member_idx = prog->expected_attach_type;
5296 	if (member_idx >= btf_type_vlen(t))
5297 		return false;
5298 
5299 	/*
5300 	 * Get the member name of this struct_ops program, which corresponds to
5301 	 * a field in struct sched_ext_ops. For example, the member name of the
5302 	 * dispatch struct_ops program (callback) is "dispatch".
5303 	 */
5304 	member = &btf_type_member(t)[member_idx];
5305 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
5306 
5307 	if (!strcmp(mname, op)) {
5308 		/*
5309 		 * The value is a pointer to a type (struct task_struct) given
5310 		 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED),
5311 		 * however, can be a NULL (PTR_MAYBE_NULL). The BPF program
5312 		 * should check the pointer to make sure it is not NULL before
5313 		 * using it, or the verifier will reject the program.
5314 		 *
5315 		 * Longer term, this is something that should be addressed by
5316 		 * BTF, and be fully contained within the verifier.
5317 		 */
5318 		info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED;
5319 		info->btf = btf_vmlinux;
5320 		info->btf_id = task_struct_type_id;
5321 
5322 		return true;
5323 	}
5324 
5325 	return false;
5326 }
5327 
5328 static bool bpf_scx_is_valid_access(int off, int size,
5329 				    enum bpf_access_type type,
5330 				    const struct bpf_prog *prog,
5331 				    struct bpf_insn_access_aux *info)
5332 {
5333 	if (type != BPF_READ)
5334 		return false;
5335 	if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) ||
5336 	    set_arg_maybe_null("yield", 1, off, size, type, prog, info))
5337 		return true;
5338 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5339 		return false;
5340 	if (off % size != 0)
5341 		return false;
5342 
5343 	return btf_ctx_access(off, size, type, prog, info);
5344 }
5345 
5346 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5347 				     const struct bpf_reg_state *reg, int off,
5348 				     int size)
5349 {
5350 	const struct btf_type *t;
5351 
5352 	t = btf_type_by_id(reg->btf, reg->btf_id);
5353 	if (t == task_struct_type) {
5354 		if (off >= offsetof(struct task_struct, scx.slice) &&
5355 		    off + size <= offsetofend(struct task_struct, scx.slice))
5356 			return SCALAR_VALUE;
5357 		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5358 		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5359 			return SCALAR_VALUE;
5360 		if (off >= offsetof(struct task_struct, scx.disallow) &&
5361 		    off + size <= offsetofend(struct task_struct, scx.disallow))
5362 			return SCALAR_VALUE;
5363 	}
5364 
5365 	return -EACCES;
5366 }
5367 
5368 static const struct bpf_func_proto *
5369 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5370 {
5371 	switch (func_id) {
5372 	case BPF_FUNC_task_storage_get:
5373 		return &bpf_task_storage_get_proto;
5374 	case BPF_FUNC_task_storage_delete:
5375 		return &bpf_task_storage_delete_proto;
5376 	default:
5377 		return bpf_base_func_proto(func_id, prog);
5378 	}
5379 }
5380 
5381 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5382 	.get_func_proto = bpf_scx_get_func_proto,
5383 	.is_valid_access = bpf_scx_is_valid_access,
5384 	.btf_struct_access = bpf_scx_btf_struct_access,
5385 };
5386 
5387 static int bpf_scx_init_member(const struct btf_type *t,
5388 			       const struct btf_member *member,
5389 			       void *kdata, const void *udata)
5390 {
5391 	const struct sched_ext_ops *uops = udata;
5392 	struct sched_ext_ops *ops = kdata;
5393 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5394 	int ret;
5395 
5396 	switch (moff) {
5397 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
5398 		if (*(u32 *)(udata + moff) > INT_MAX)
5399 			return -E2BIG;
5400 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
5401 		return 1;
5402 	case offsetof(struct sched_ext_ops, flags):
5403 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5404 			return -EINVAL;
5405 		ops->flags = *(u64 *)(udata + moff);
5406 		return 1;
5407 	case offsetof(struct sched_ext_ops, name):
5408 		ret = bpf_obj_name_cpy(ops->name, uops->name,
5409 				       sizeof(ops->name));
5410 		if (ret < 0)
5411 			return ret;
5412 		if (ret == 0)
5413 			return -EINVAL;
5414 		return 1;
5415 	case offsetof(struct sched_ext_ops, timeout_ms):
5416 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5417 		    SCX_WATCHDOG_MAX_TIMEOUT)
5418 			return -E2BIG;
5419 		ops->timeout_ms = *(u32 *)(udata + moff);
5420 		return 1;
5421 	case offsetof(struct sched_ext_ops, exit_dump_len):
5422 		ops->exit_dump_len =
5423 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5424 		return 1;
5425 	case offsetof(struct sched_ext_ops, hotplug_seq):
5426 		ops->hotplug_seq = *(u64 *)(udata + moff);
5427 		return 1;
5428 	}
5429 
5430 	return 0;
5431 }
5432 
5433 static int bpf_scx_check_member(const struct btf_type *t,
5434 				const struct btf_member *member,
5435 				const struct bpf_prog *prog)
5436 {
5437 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5438 
5439 	switch (moff) {
5440 	case offsetof(struct sched_ext_ops, init_task):
5441 #ifdef CONFIG_EXT_GROUP_SCHED
5442 	case offsetof(struct sched_ext_ops, cgroup_init):
5443 	case offsetof(struct sched_ext_ops, cgroup_exit):
5444 	case offsetof(struct sched_ext_ops, cgroup_prep_move):
5445 #endif
5446 	case offsetof(struct sched_ext_ops, cpu_online):
5447 	case offsetof(struct sched_ext_ops, cpu_offline):
5448 	case offsetof(struct sched_ext_ops, init):
5449 	case offsetof(struct sched_ext_ops, exit):
5450 		break;
5451 	default:
5452 		if (prog->sleepable)
5453 			return -EINVAL;
5454 	}
5455 
5456 	return 0;
5457 }
5458 
5459 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5460 {
5461 	return scx_ops_enable(kdata, link);
5462 }
5463 
5464 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5465 {
5466 	scx_ops_disable(SCX_EXIT_UNREG);
5467 	kthread_flush_work(&scx_ops_disable_work);
5468 }
5469 
5470 static int bpf_scx_init(struct btf *btf)
5471 {
5472 	s32 type_id;
5473 
5474 	type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
5475 	if (type_id < 0)
5476 		return -EINVAL;
5477 	task_struct_type = btf_type_by_id(btf, type_id);
5478 	task_struct_type_id = type_id;
5479 
5480 	return 0;
5481 }
5482 
5483 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5484 {
5485 	/*
5486 	 * sched_ext does not support updating the actively-loaded BPF
5487 	 * scheduler, as registering a BPF scheduler can always fail if the
5488 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5489 	 * etc. Similarly, we can always race with unregistration happening
5490 	 * elsewhere, such as with sysrq.
5491 	 */
5492 	return -EOPNOTSUPP;
5493 }
5494 
5495 static int bpf_scx_validate(void *kdata)
5496 {
5497 	return 0;
5498 }
5499 
5500 static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5501 static void enqueue_stub(struct task_struct *p, u64 enq_flags) {}
5502 static void dequeue_stub(struct task_struct *p, u64 enq_flags) {}
5503 static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {}
5504 static void tick_stub(struct task_struct *p) {}
5505 static void runnable_stub(struct task_struct *p, u64 enq_flags) {}
5506 static void running_stub(struct task_struct *p) {}
5507 static void stopping_stub(struct task_struct *p, bool runnable) {}
5508 static void quiescent_stub(struct task_struct *p, u64 deq_flags) {}
5509 static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; }
5510 static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; }
5511 static void set_weight_stub(struct task_struct *p, u32 weight) {}
5512 static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {}
5513 static void update_idle_stub(s32 cpu, bool idle) {}
5514 static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {}
5515 static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {}
5516 static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5517 static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
5518 static void enable_stub(struct task_struct *p) {}
5519 static void disable_stub(struct task_struct *p) {}
5520 #ifdef CONFIG_EXT_GROUP_SCHED
5521 static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5522 static void cgroup_exit_stub(struct cgroup *cgrp) {}
5523 static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5524 static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5525 static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5526 static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {}
5527 #endif
5528 static void cpu_online_stub(s32 cpu) {}
5529 static void cpu_offline_stub(s32 cpu) {}
5530 static s32 init_stub(void) { return -EINVAL; }
5531 static void exit_stub(struct scx_exit_info *info) {}
5532 static void dump_stub(struct scx_dump_ctx *ctx) {}
5533 static void dump_cpu_stub(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5534 static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5535 
5536 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5537 	.select_cpu = select_cpu_stub,
5538 	.enqueue = enqueue_stub,
5539 	.dequeue = dequeue_stub,
5540 	.dispatch = dispatch_stub,
5541 	.tick = tick_stub,
5542 	.runnable = runnable_stub,
5543 	.running = running_stub,
5544 	.stopping = stopping_stub,
5545 	.quiescent = quiescent_stub,
5546 	.yield = yield_stub,
5547 	.core_sched_before = core_sched_before_stub,
5548 	.set_weight = set_weight_stub,
5549 	.set_cpumask = set_cpumask_stub,
5550 	.update_idle = update_idle_stub,
5551 	.cpu_acquire = cpu_acquire_stub,
5552 	.cpu_release = cpu_release_stub,
5553 	.init_task = init_task_stub,
5554 	.exit_task = exit_task_stub,
5555 	.enable = enable_stub,
5556 	.disable = disable_stub,
5557 #ifdef CONFIG_EXT_GROUP_SCHED
5558 	.cgroup_init = cgroup_init_stub,
5559 	.cgroup_exit = cgroup_exit_stub,
5560 	.cgroup_prep_move = cgroup_prep_move_stub,
5561 	.cgroup_move = cgroup_move_stub,
5562 	.cgroup_cancel_move = cgroup_cancel_move_stub,
5563 	.cgroup_set_weight = cgroup_set_weight_stub,
5564 #endif
5565 	.cpu_online = cpu_online_stub,
5566 	.cpu_offline = cpu_offline_stub,
5567 	.init = init_stub,
5568 	.exit = exit_stub,
5569 	.dump = dump_stub,
5570 	.dump_cpu = dump_cpu_stub,
5571 	.dump_task = dump_task_stub,
5572 };
5573 
5574 static struct bpf_struct_ops bpf_sched_ext_ops = {
5575 	.verifier_ops = &bpf_scx_verifier_ops,
5576 	.reg = bpf_scx_reg,
5577 	.unreg = bpf_scx_unreg,
5578 	.check_member = bpf_scx_check_member,
5579 	.init_member = bpf_scx_init_member,
5580 	.init = bpf_scx_init,
5581 	.update = bpf_scx_update,
5582 	.validate = bpf_scx_validate,
5583 	.name = "sched_ext_ops",
5584 	.owner = THIS_MODULE,
5585 	.cfi_stubs = &__bpf_ops_sched_ext_ops
5586 };
5587 
5588 
5589 /********************************************************************************
5590  * System integration and init.
5591  */
5592 
5593 static void sysrq_handle_sched_ext_reset(u8 key)
5594 {
5595 	if (scx_ops_helper)
5596 		scx_ops_disable(SCX_EXIT_SYSRQ);
5597 	else
5598 		pr_info("sched_ext: BPF scheduler not yet used\n");
5599 }
5600 
5601 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
5602 	.handler	= sysrq_handle_sched_ext_reset,
5603 	.help_msg	= "reset-sched-ext(S)",
5604 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
5605 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5606 };
5607 
5608 static void sysrq_handle_sched_ext_dump(u8 key)
5609 {
5610 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
5611 
5612 	if (scx_enabled())
5613 		scx_dump_state(&ei, 0);
5614 }
5615 
5616 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
5617 	.handler	= sysrq_handle_sched_ext_dump,
5618 	.help_msg	= "dump-sched-ext(D)",
5619 	.action_msg	= "Trigger sched_ext debug dump",
5620 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5621 };
5622 
5623 static bool can_skip_idle_kick(struct rq *rq)
5624 {
5625 	lockdep_assert_rq_held(rq);
5626 
5627 	/*
5628 	 * We can skip idle kicking if @rq is going to go through at least one
5629 	 * full SCX scheduling cycle before going idle. Just checking whether
5630 	 * curr is not idle is insufficient because we could be racing
5631 	 * balance_one() trying to pull the next task from a remote rq, which
5632 	 * may fail, and @rq may become idle afterwards.
5633 	 *
5634 	 * The race window is small and we don't and can't guarantee that @rq is
5635 	 * only kicked while idle anyway. Skip only when sure.
5636 	 */
5637 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
5638 }
5639 
5640 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
5641 {
5642 	struct rq *rq = cpu_rq(cpu);
5643 	struct scx_rq *this_scx = &this_rq->scx;
5644 	bool should_wait = false;
5645 	unsigned long flags;
5646 
5647 	raw_spin_rq_lock_irqsave(rq, flags);
5648 
5649 	/*
5650 	 * During CPU hotplug, a CPU may depend on kicking itself to make
5651 	 * forward progress. Allow kicking self regardless of online state.
5652 	 */
5653 	if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
5654 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5655 			if (rq->curr->sched_class == &ext_sched_class)
5656 				rq->curr->scx.slice = 0;
5657 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5658 		}
5659 
5660 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5661 			pseqs[cpu] = rq->scx.pnt_seq;
5662 			should_wait = true;
5663 		}
5664 
5665 		resched_curr(rq);
5666 	} else {
5667 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5668 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5669 	}
5670 
5671 	raw_spin_rq_unlock_irqrestore(rq, flags);
5672 
5673 	return should_wait;
5674 }
5675 
5676 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5677 {
5678 	struct rq *rq = cpu_rq(cpu);
5679 	unsigned long flags;
5680 
5681 	raw_spin_rq_lock_irqsave(rq, flags);
5682 
5683 	if (!can_skip_idle_kick(rq) &&
5684 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5685 		resched_curr(rq);
5686 
5687 	raw_spin_rq_unlock_irqrestore(rq, flags);
5688 }
5689 
5690 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5691 {
5692 	struct rq *this_rq = this_rq();
5693 	struct scx_rq *this_scx = &this_rq->scx;
5694 	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
5695 	bool should_wait = false;
5696 	s32 cpu;
5697 
5698 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
5699 		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
5700 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5701 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5702 	}
5703 
5704 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5705 		kick_one_cpu_if_idle(cpu, this_rq);
5706 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5707 	}
5708 
5709 	if (!should_wait)
5710 		return;
5711 
5712 	for_each_cpu(cpu, this_scx->cpus_to_wait) {
5713 		unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
5714 
5715 		if (cpu != cpu_of(this_rq)) {
5716 			/*
5717 			 * Pairs with smp_store_release() issued by this CPU in
5718 			 * scx_next_task_picked() on the resched path.
5719 			 *
5720 			 * We busy-wait here to guarantee that no other task can
5721 			 * be scheduled on our core before the target CPU has
5722 			 * entered the resched path.
5723 			 */
5724 			while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
5725 				cpu_relax();
5726 		}
5727 
5728 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5729 	}
5730 }
5731 
5732 /**
5733  * print_scx_info - print out sched_ext scheduler state
5734  * @log_lvl: the log level to use when printing
5735  * @p: target task
5736  *
5737  * If a sched_ext scheduler is enabled, print the name and state of the
5738  * scheduler. If @p is on sched_ext, print further information about the task.
5739  *
5740  * This function can be safely called on any task as long as the task_struct
5741  * itself is accessible. While safe, this function isn't synchronized and may
5742  * print out mixups or garbages of limited length.
5743  */
5744 void print_scx_info(const char *log_lvl, struct task_struct *p)
5745 {
5746 	enum scx_ops_enable_state state = scx_ops_enable_state();
5747 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5748 	char runnable_at_buf[22] = "?";
5749 	struct sched_class *class;
5750 	unsigned long runnable_at;
5751 
5752 	if (state == SCX_OPS_DISABLED)
5753 		return;
5754 
5755 	/*
5756 	 * Carefully check if the task was running on sched_ext, and then
5757 	 * carefully copy the time it's been runnable, and its state.
5758 	 */
5759 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5760 	    class != &ext_sched_class) {
5761 		printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
5762 		       scx_ops_enable_state_str[state], all);
5763 		return;
5764 	}
5765 
5766 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5767 				      sizeof(runnable_at)))
5768 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5769 			  jiffies_delta_msecs(runnable_at, jiffies));
5770 
5771 	/* print everything onto one line to conserve console space */
5772 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5773 	       log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
5774 	       runnable_at_buf);
5775 }
5776 
5777 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5778 {
5779 	/*
5780 	 * SCX schedulers often have userspace components which are sometimes
5781 	 * involved in critial scheduling paths. PM operations involve freezing
5782 	 * userspace which can lead to scheduling misbehaviors including stalls.
5783 	 * Let's bypass while PM operations are in progress.
5784 	 */
5785 	switch (event) {
5786 	case PM_HIBERNATION_PREPARE:
5787 	case PM_SUSPEND_PREPARE:
5788 	case PM_RESTORE_PREPARE:
5789 		scx_ops_bypass(true);
5790 		break;
5791 	case PM_POST_HIBERNATION:
5792 	case PM_POST_SUSPEND:
5793 	case PM_POST_RESTORE:
5794 		scx_ops_bypass(false);
5795 		break;
5796 	}
5797 
5798 	return NOTIFY_OK;
5799 }
5800 
5801 static struct notifier_block scx_pm_notifier = {
5802 	.notifier_call = scx_pm_handler,
5803 };
5804 
5805 void __init init_sched_ext_class(void)
5806 {
5807 	s32 cpu, v;
5808 
5809 	/*
5810 	 * The following is to prevent the compiler from optimizing out the enum
5811 	 * definitions so that BPF scheduler implementations can use them
5812 	 * through the generated vmlinux.h.
5813 	 */
5814 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
5815 		   SCX_TG_ONLINE);
5816 
5817 	BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
5818 #ifdef CONFIG_SMP
5819 	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
5820 	BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
5821 #endif
5822 	scx_kick_cpus_pnt_seqs =
5823 		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
5824 			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
5825 	BUG_ON(!scx_kick_cpus_pnt_seqs);
5826 
5827 	for_each_possible_cpu(cpu) {
5828 		struct rq *rq = cpu_rq(cpu);
5829 
5830 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5831 		INIT_LIST_HEAD(&rq->scx.runnable_list);
5832 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
5833 
5834 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
5835 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
5836 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
5837 		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
5838 		init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
5839 		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
5840 
5841 		if (cpu_online(cpu))
5842 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5843 	}
5844 
5845 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5846 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5847 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5848 }
5849 
5850 
5851 /********************************************************************************
5852  * Helpers that can be called from the BPF scheduler.
5853  */
5854 #include <linux/btf_ids.h>
5855 
5856 __bpf_kfunc_start_defs();
5857 
5858 /**
5859  * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
5860  * @p: task_struct to select a CPU for
5861  * @prev_cpu: CPU @p was on previously
5862  * @wake_flags: %SCX_WAKE_* flags
5863  * @is_idle: out parameter indicating whether the returned CPU is idle
5864  *
5865  * Can only be called from ops.select_cpu() if the built-in CPU selection is
5866  * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
5867  * @p, @prev_cpu and @wake_flags match ops.select_cpu().
5868  *
5869  * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
5870  * currently idle and thus a good candidate for direct dispatching.
5871  */
5872 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
5873 				       u64 wake_flags, bool *is_idle)
5874 {
5875 	if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) {
5876 		*is_idle = false;
5877 		return prev_cpu;
5878 	}
5879 #ifdef CONFIG_SMP
5880 	return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
5881 #else
5882 	*is_idle = false;
5883 	return prev_cpu;
5884 #endif
5885 }
5886 
5887 __bpf_kfunc_end_defs();
5888 
5889 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
5890 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
5891 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
5892 
5893 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
5894 	.owner			= THIS_MODULE,
5895 	.set			= &scx_kfunc_ids_select_cpu,
5896 };
5897 
5898 static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
5899 {
5900 	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
5901 		return false;
5902 
5903 	lockdep_assert_irqs_disabled();
5904 
5905 	if (unlikely(!p)) {
5906 		scx_ops_error("called with NULL task");
5907 		return false;
5908 	}
5909 
5910 	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
5911 		scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
5912 		return false;
5913 	}
5914 
5915 	return true;
5916 }
5917 
5918 static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
5919 {
5920 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5921 	struct task_struct *ddsp_task;
5922 
5923 	ddsp_task = __this_cpu_read(direct_dispatch_task);
5924 	if (ddsp_task) {
5925 		mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
5926 		return;
5927 	}
5928 
5929 	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
5930 		scx_ops_error("dispatch buffer overflow");
5931 		return;
5932 	}
5933 
5934 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
5935 		.task = p,
5936 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
5937 		.dsq_id = dsq_id,
5938 		.enq_flags = enq_flags,
5939 	};
5940 }
5941 
5942 __bpf_kfunc_start_defs();
5943 
5944 /**
5945  * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
5946  * @p: task_struct to dispatch
5947  * @dsq_id: DSQ to dispatch to
5948  * @slice: duration @p can run for in nsecs, 0 to keep the current value
5949  * @enq_flags: SCX_ENQ_*
5950  *
5951  * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
5952  * to call this function spuriously. Can be called from ops.enqueue(),
5953  * ops.select_cpu(), and ops.dispatch().
5954  *
5955  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
5956  * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
5957  * used to target the local DSQ of a CPU other than the enqueueing one. Use
5958  * ops.select_cpu() to be on the target CPU in the first place.
5959  *
5960  * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p
5961  * will be directly dispatched to the corresponding dispatch queue after
5962  * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
5963  * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
5964  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
5965  * task is dispatched.
5966  *
5967  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
5968  * and this function can be called upto ops.dispatch_max_batch times to dispatch
5969  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
5970  * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
5971  *
5972  * This function doesn't have any locking restrictions and may be called under
5973  * BPF locks (in the future when BPF introduces more flexible locking).
5974  *
5975  * @p is allowed to run for @slice. The scheduling path is triggered on slice
5976  * exhaustion. If zero, the current residual slice is maintained. If
5977  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
5978  * scx_bpf_kick_cpu() to trigger scheduling.
5979  */
5980 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
5981 				  u64 enq_flags)
5982 {
5983 	if (!scx_dispatch_preamble(p, enq_flags))
5984 		return;
5985 
5986 	if (slice)
5987 		p->scx.slice = slice;
5988 	else
5989 		p->scx.slice = p->scx.slice ?: 1;
5990 
5991 	scx_dispatch_commit(p, dsq_id, enq_flags);
5992 }
5993 
5994 /**
5995  * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
5996  * @p: task_struct to dispatch
5997  * @dsq_id: DSQ to dispatch to
5998  * @slice: duration @p can run for in nsecs, 0 to keep the current value
5999  * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6000  * @enq_flags: SCX_ENQ_*
6001  *
6002  * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
6003  * Tasks queued into the priority queue are ordered by @vtime and always
6004  * consumed after the tasks in the FIFO queue. All other aspects are identical
6005  * to scx_bpf_dispatch().
6006  *
6007  * @vtime ordering is according to time_before64() which considers wrapping. A
6008  * numerically larger vtime may indicate an earlier position in the ordering and
6009  * vice-versa.
6010  */
6011 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6012 					u64 slice, u64 vtime, u64 enq_flags)
6013 {
6014 	if (!scx_dispatch_preamble(p, enq_flags))
6015 		return;
6016 
6017 	if (slice)
6018 		p->scx.slice = slice;
6019 	else
6020 		p->scx.slice = p->scx.slice ?: 1;
6021 
6022 	p->scx.dsq_vtime = vtime;
6023 
6024 	scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6025 }
6026 
6027 __bpf_kfunc_end_defs();
6028 
6029 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6030 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6031 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6032 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6033 
6034 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6035 	.owner			= THIS_MODULE,
6036 	.set			= &scx_kfunc_ids_enqueue_dispatch,
6037 };
6038 
6039 static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
6040 				  struct task_struct *p, u64 dsq_id,
6041 				  u64 enq_flags)
6042 {
6043 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6044 	struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
6045 	bool dispatched = false;
6046 	bool in_balance;
6047 	unsigned long flags;
6048 
6049 	if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6050 		return false;
6051 
6052 	/*
6053 	 * Can be called from either ops.dispatch() locking this_rq() or any
6054 	 * context where no rq lock is held. If latter, lock @p's task_rq which
6055 	 * we'll likely need anyway.
6056 	 */
6057 	src_rq = task_rq(p);
6058 
6059 	local_irq_save(flags);
6060 	this_rq = this_rq();
6061 	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6062 
6063 	if (in_balance) {
6064 		if (this_rq != src_rq) {
6065 			raw_spin_rq_unlock(this_rq);
6066 			raw_spin_rq_lock(src_rq);
6067 		}
6068 	} else {
6069 		raw_spin_rq_lock(src_rq);
6070 	}
6071 
6072 	locked_rq = src_rq;
6073 	raw_spin_lock(&src_dsq->lock);
6074 
6075 	/*
6076 	 * Did someone else get to it? @p could have already left $src_dsq, got
6077 	 * re-enqueud, or be in the process of being consumed by someone else.
6078 	 */
6079 	if (unlikely(p->scx.dsq != src_dsq ||
6080 		     u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6081 		     p->scx.holding_cpu >= 0) ||
6082 	    WARN_ON_ONCE(src_rq != task_rq(p))) {
6083 		raw_spin_unlock(&src_dsq->lock);
6084 		goto out;
6085 	}
6086 
6087 	/* @p is still on $src_dsq and stable, determine the destination */
6088 	dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6089 
6090 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
6091 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
6092 		if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
6093 			dst_dsq = find_global_dsq(p);
6094 			dst_rq = src_rq;
6095 		}
6096 	} else {
6097 		/* no need to migrate if destination is a non-local DSQ */
6098 		dst_rq = src_rq;
6099 	}
6100 
6101 	/*
6102 	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
6103 	 * CPU, @p will be migrated.
6104 	 */
6105 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
6106 		/* @p is going from a non-local DSQ to a local DSQ */
6107 		if (src_rq == dst_rq) {
6108 			task_unlink_from_dsq(p, src_dsq);
6109 			move_local_task_to_local_dsq(p, enq_flags,
6110 						     src_dsq, dst_rq);
6111 			raw_spin_unlock(&src_dsq->lock);
6112 		} else {
6113 			raw_spin_unlock(&src_dsq->lock);
6114 			move_remote_task_to_local_dsq(p, enq_flags,
6115 						      src_rq, dst_rq);
6116 			locked_rq = dst_rq;
6117 		}
6118 	} else {
6119 		/*
6120 		 * @p is going from a non-local DSQ to a non-local DSQ. As
6121 		 * $src_dsq is already locked, do an abbreviated dequeue.
6122 		 */
6123 		task_unlink_from_dsq(p, src_dsq);
6124 		p->scx.dsq = NULL;
6125 		raw_spin_unlock(&src_dsq->lock);
6126 
6127 		if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6128 			p->scx.dsq_vtime = kit->vtime;
6129 		dispatch_enqueue(dst_dsq, p, enq_flags);
6130 	}
6131 
6132 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6133 		p->scx.slice = kit->slice;
6134 
6135 	dispatched = true;
6136 out:
6137 	if (in_balance) {
6138 		if (this_rq != locked_rq) {
6139 			raw_spin_rq_unlock(locked_rq);
6140 			raw_spin_rq_lock(this_rq);
6141 		}
6142 	} else {
6143 		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6144 	}
6145 
6146 	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6147 			       __SCX_DSQ_ITER_HAS_VTIME);
6148 	return dispatched;
6149 }
6150 
6151 __bpf_kfunc_start_defs();
6152 
6153 /**
6154  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6155  *
6156  * Can only be called from ops.dispatch().
6157  */
6158 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6159 {
6160 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6161 		return 0;
6162 
6163 	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6164 }
6165 
6166 /**
6167  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6168  *
6169  * Cancel the latest dispatch. Can be called multiple times to cancel further
6170  * dispatches. Can only be called from ops.dispatch().
6171  */
6172 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6173 {
6174 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6175 
6176 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6177 		return;
6178 
6179 	if (dspc->cursor > 0)
6180 		dspc->cursor--;
6181 	else
6182 		scx_ops_error("dispatch buffer underflow");
6183 }
6184 
6185 /**
6186  * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ
6187  * @dsq_id: DSQ to consume
6188  *
6189  * Consume a task from the non-local DSQ identified by @dsq_id and transfer it
6190  * to the current CPU's local DSQ for execution. Can only be called from
6191  * ops.dispatch().
6192  *
6193  * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
6194  * trying to consume the specified DSQ. It may also grab rq locks and thus can't
6195  * be called under any BPF locks.
6196  *
6197  * Returns %true if a task has been consumed, %false if there isn't any task to
6198  * consume.
6199  */
6200 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6201 {
6202 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6203 	struct scx_dispatch_q *dsq;
6204 
6205 	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6206 		return false;
6207 
6208 	flush_dispatch_buf(dspc->rq);
6209 
6210 	dsq = find_user_dsq(dsq_id);
6211 	if (unlikely(!dsq)) {
6212 		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6213 		return false;
6214 	}
6215 
6216 	if (consume_dispatch_q(dspc->rq, dsq)) {
6217 		/*
6218 		 * A successfully consumed task can be dequeued before it starts
6219 		 * running while the CPU is trying to migrate other dispatched
6220 		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6221 		 * local DSQ.
6222 		 */
6223 		dspc->nr_tasks++;
6224 		return true;
6225 	} else {
6226 		return false;
6227 	}
6228 }
6229 
6230 /**
6231  * scx_bpf_dispatch_from_dsq_set_slice - Override slice when dispatching from DSQ
6232  * @it__iter: DSQ iterator in progress
6233  * @slice: duration the dispatched task can run for in nsecs
6234  *
6235  * Override the slice of the next task that will be dispatched from @it__iter
6236  * using scx_bpf_dispatch_from_dsq[_vtime](). If this function is not called,
6237  * the previous slice duration is kept.
6238  */
6239 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6240 				struct bpf_iter_scx_dsq *it__iter, u64 slice)
6241 {
6242 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6243 
6244 	kit->slice = slice;
6245 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6246 }
6247 
6248 /**
6249  * scx_bpf_dispatch_from_dsq_set_vtime - Override vtime when dispatching from DSQ
6250  * @it__iter: DSQ iterator in progress
6251  * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6252  *
6253  * Override the vtime of the next task that will be dispatched from @it__iter
6254  * using scx_bpf_dispatch_from_dsq_vtime(). If this function is not called, the
6255  * previous slice vtime is kept. If scx_bpf_dispatch_from_dsq() is used to
6256  * dispatch the next task, the override is ignored and cleared.
6257  */
6258 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6259 				struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6260 {
6261 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6262 
6263 	kit->vtime = vtime;
6264 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6265 }
6266 
6267 /**
6268  * scx_bpf_dispatch_from_dsq - Move a task from DSQ iteration to a DSQ
6269  * @it__iter: DSQ iterator in progress
6270  * @p: task to transfer
6271  * @dsq_id: DSQ to move @p to
6272  * @enq_flags: SCX_ENQ_*
6273  *
6274  * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6275  * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6276  * be the destination.
6277  *
6278  * For the transfer to be successful, @p must still be on the DSQ and have been
6279  * queued before the DSQ iteration started. This function doesn't care whether
6280  * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6281  * been queued before the iteration started.
6282  *
6283  * @p's slice is kept by default. Use scx_bpf_dispatch_from_dsq_set_slice() to
6284  * update.
6285  *
6286  * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6287  * lock (e.g. BPF timers or SYSCALL programs).
6288  *
6289  * Returns %true if @p has been consumed, %false if @p had already been consumed
6290  * or dequeued.
6291  */
6292 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6293 					   struct task_struct *p, u64 dsq_id,
6294 					   u64 enq_flags)
6295 {
6296 	return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6297 				     p, dsq_id, enq_flags);
6298 }
6299 
6300 /**
6301  * scx_bpf_dispatch_vtime_from_dsq - Move a task from DSQ iteration to a PRIQ DSQ
6302  * @it__iter: DSQ iterator in progress
6303  * @p: task to transfer
6304  * @dsq_id: DSQ to move @p to
6305  * @enq_flags: SCX_ENQ_*
6306  *
6307  * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6308  * priority queue of the DSQ specified by @dsq_id. The destination must be a
6309  * user DSQ as only user DSQs support priority queue.
6310  *
6311  * @p's slice and vtime are kept by default. Use
6312  * scx_bpf_dispatch_from_dsq_set_slice() and
6313  * scx_bpf_dispatch_from_dsq_set_vtime() to update.
6314  *
6315  * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
6316  * scx_bpf_dispatch_vtime() for more information on @vtime.
6317  */
6318 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6319 						 struct task_struct *p, u64 dsq_id,
6320 						 u64 enq_flags)
6321 {
6322 	return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6323 				     p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6324 }
6325 
6326 __bpf_kfunc_end_defs();
6327 
6328 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6329 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6330 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6331 BTF_ID_FLAGS(func, scx_bpf_consume)
6332 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6333 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6334 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6335 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6336 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6337 
6338 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6339 	.owner			= THIS_MODULE,
6340 	.set			= &scx_kfunc_ids_dispatch,
6341 };
6342 
6343 __bpf_kfunc_start_defs();
6344 
6345 /**
6346  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6347  *
6348  * Iterate over all of the tasks currently enqueued on the local DSQ of the
6349  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6350  * processed tasks. Can only be called from ops.cpu_release().
6351  */
6352 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6353 {
6354 	LIST_HEAD(tasks);
6355 	u32 nr_enqueued = 0;
6356 	struct rq *rq;
6357 	struct task_struct *p, *n;
6358 
6359 	if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6360 		return 0;
6361 
6362 	rq = cpu_rq(smp_processor_id());
6363 	lockdep_assert_rq_held(rq);
6364 
6365 	/*
6366 	 * The BPF scheduler may choose to dispatch tasks back to
6367 	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6368 	 * first to avoid processing the same tasks repeatedly.
6369 	 */
6370 	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6371 				 scx.dsq_list.node) {
6372 		/*
6373 		 * If @p is being migrated, @p's current CPU may not agree with
6374 		 * its allowed CPUs and the migration_cpu_stop is about to
6375 		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6376 		 *
6377 		 * While racing sched property changes may also dequeue and
6378 		 * re-enqueue a migrating task while its current CPU and allowed
6379 		 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6380 		 * the current local DSQ for running tasks and thus are not
6381 		 * visible to the BPF scheduler.
6382 		 */
6383 		if (p->migration_pending)
6384 			continue;
6385 
6386 		dispatch_dequeue(rq, p);
6387 		list_add_tail(&p->scx.dsq_list.node, &tasks);
6388 	}
6389 
6390 	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6391 		list_del_init(&p->scx.dsq_list.node);
6392 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6393 		nr_enqueued++;
6394 	}
6395 
6396 	return nr_enqueued;
6397 }
6398 
6399 __bpf_kfunc_end_defs();
6400 
6401 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6402 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6403 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6404 
6405 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6406 	.owner			= THIS_MODULE,
6407 	.set			= &scx_kfunc_ids_cpu_release,
6408 };
6409 
6410 __bpf_kfunc_start_defs();
6411 
6412 /**
6413  * scx_bpf_create_dsq - Create a custom DSQ
6414  * @dsq_id: DSQ to create
6415  * @node: NUMA node to allocate from
6416  *
6417  * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6418  * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6419  */
6420 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6421 {
6422 	if (unlikely(node >= (int)nr_node_ids ||
6423 		     (node < 0 && node != NUMA_NO_NODE)))
6424 		return -EINVAL;
6425 	return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
6426 }
6427 
6428 __bpf_kfunc_end_defs();
6429 
6430 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6431 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6432 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6433 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6434 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6435 
6436 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6437 	.owner			= THIS_MODULE,
6438 	.set			= &scx_kfunc_ids_unlocked,
6439 };
6440 
6441 __bpf_kfunc_start_defs();
6442 
6443 /**
6444  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6445  * @cpu: cpu to kick
6446  * @flags: %SCX_KICK_* flags
6447  *
6448  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6449  * trigger rescheduling on a busy CPU. This can be called from any online
6450  * scx_ops operation and the actual kicking is performed asynchronously through
6451  * an irq work.
6452  */
6453 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6454 {
6455 	struct rq *this_rq;
6456 	unsigned long irq_flags;
6457 
6458 	if (!ops_cpu_valid(cpu, NULL))
6459 		return;
6460 
6461 	local_irq_save(irq_flags);
6462 
6463 	this_rq = this_rq();
6464 
6465 	/*
6466 	 * While bypassing for PM ops, IRQ handling may not be online which can
6467 	 * lead to irq_work_queue() malfunction such as infinite busy wait for
6468 	 * IRQ status update. Suppress kicking.
6469 	 */
6470 	if (scx_rq_bypassing(this_rq))
6471 		goto out;
6472 
6473 	/*
6474 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6475 	 * rq locks. We can probably be smarter and avoid bouncing if called
6476 	 * from ops which don't hold a rq lock.
6477 	 */
6478 	if (flags & SCX_KICK_IDLE) {
6479 		struct rq *target_rq = cpu_rq(cpu);
6480 
6481 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6482 			scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6483 
6484 		if (raw_spin_rq_trylock(target_rq)) {
6485 			if (can_skip_idle_kick(target_rq)) {
6486 				raw_spin_rq_unlock(target_rq);
6487 				goto out;
6488 			}
6489 			raw_spin_rq_unlock(target_rq);
6490 		}
6491 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6492 	} else {
6493 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6494 
6495 		if (flags & SCX_KICK_PREEMPT)
6496 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6497 		if (flags & SCX_KICK_WAIT)
6498 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6499 	}
6500 
6501 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6502 out:
6503 	local_irq_restore(irq_flags);
6504 }
6505 
6506 /**
6507  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6508  * @dsq_id: id of the DSQ
6509  *
6510  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6511  * -%ENOENT is returned.
6512  */
6513 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6514 {
6515 	struct scx_dispatch_q *dsq;
6516 	s32 ret;
6517 
6518 	preempt_disable();
6519 
6520 	if (dsq_id == SCX_DSQ_LOCAL) {
6521 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6522 		goto out;
6523 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6524 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6525 
6526 		if (ops_cpu_valid(cpu, NULL)) {
6527 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6528 			goto out;
6529 		}
6530 	} else {
6531 		dsq = find_user_dsq(dsq_id);
6532 		if (dsq) {
6533 			ret = READ_ONCE(dsq->nr);
6534 			goto out;
6535 		}
6536 	}
6537 	ret = -ENOENT;
6538 out:
6539 	preempt_enable();
6540 	return ret;
6541 }
6542 
6543 /**
6544  * scx_bpf_destroy_dsq - Destroy a custom DSQ
6545  * @dsq_id: DSQ to destroy
6546  *
6547  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
6548  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
6549  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
6550  * which doesn't exist. Can be called from any online scx_ops operations.
6551  */
6552 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
6553 {
6554 	destroy_dsq(dsq_id);
6555 }
6556 
6557 /**
6558  * bpf_iter_scx_dsq_new - Create a DSQ iterator
6559  * @it: iterator to initialize
6560  * @dsq_id: DSQ to iterate
6561  * @flags: %SCX_DSQ_ITER_*
6562  *
6563  * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
6564  * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
6565  * tasks which are already queued when this function is invoked.
6566  */
6567 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
6568 				     u64 flags)
6569 {
6570 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6571 
6572 	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
6573 		     sizeof(struct bpf_iter_scx_dsq));
6574 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
6575 		     __alignof__(struct bpf_iter_scx_dsq));
6576 
6577 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
6578 		return -EINVAL;
6579 
6580 	kit->dsq = find_user_dsq(dsq_id);
6581 	if (!kit->dsq)
6582 		return -ENOENT;
6583 
6584 	INIT_LIST_HEAD(&kit->cursor.node);
6585 	kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
6586 	kit->cursor.priv = READ_ONCE(kit->dsq->seq);
6587 
6588 	return 0;
6589 }
6590 
6591 /**
6592  * bpf_iter_scx_dsq_next - Progress a DSQ iterator
6593  * @it: iterator to progress
6594  *
6595  * Return the next task. See bpf_iter_scx_dsq_new().
6596  */
6597 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6598 {
6599 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6600 	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6601 	struct task_struct *p;
6602 	unsigned long flags;
6603 
6604 	if (!kit->dsq)
6605 		return NULL;
6606 
6607 	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6608 
6609 	if (list_empty(&kit->cursor.node))
6610 		p = NULL;
6611 	else
6612 		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6613 
6614 	/*
6615 	 * Only tasks which were queued before the iteration started are
6616 	 * visible. This bounds BPF iterations and guarantees that vtime never
6617 	 * jumps in the other direction while iterating.
6618 	 */
6619 	do {
6620 		p = nldsq_next_task(kit->dsq, p, rev);
6621 	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6622 
6623 	if (p) {
6624 		if (rev)
6625 			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6626 		else
6627 			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6628 	} else {
6629 		list_del_init(&kit->cursor.node);
6630 	}
6631 
6632 	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6633 
6634 	return p;
6635 }
6636 
6637 /**
6638  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6639  * @it: iterator to destroy
6640  *
6641  * Undo scx_iter_scx_dsq_new().
6642  */
6643 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6644 {
6645 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6646 
6647 	if (!kit->dsq)
6648 		return;
6649 
6650 	if (!list_empty(&kit->cursor.node)) {
6651 		unsigned long flags;
6652 
6653 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6654 		list_del_init(&kit->cursor.node);
6655 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6656 	}
6657 	kit->dsq = NULL;
6658 }
6659 
6660 __bpf_kfunc_end_defs();
6661 
6662 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
6663 			 char *fmt, unsigned long long *data, u32 data__sz)
6664 {
6665 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
6666 	s32 ret;
6667 
6668 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
6669 	    (data__sz && !data)) {
6670 		scx_ops_error("invalid data=%p and data__sz=%u",
6671 			      (void *)data, data__sz);
6672 		return -EINVAL;
6673 	}
6674 
6675 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
6676 	if (ret < 0) {
6677 		scx_ops_error("failed to read data fields (%d)", ret);
6678 		return ret;
6679 	}
6680 
6681 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
6682 				  &bprintf_data);
6683 	if (ret < 0) {
6684 		scx_ops_error("format preparation failed (%d)", ret);
6685 		return ret;
6686 	}
6687 
6688 	ret = bstr_printf(line_buf, line_size, fmt,
6689 			  bprintf_data.bin_args);
6690 	bpf_bprintf_cleanup(&bprintf_data);
6691 	if (ret < 0) {
6692 		scx_ops_error("(\"%s\", %p, %u) failed to format",
6693 			      fmt, data, data__sz);
6694 		return ret;
6695 	}
6696 
6697 	return ret;
6698 }
6699 
6700 static s32 bstr_format(struct scx_bstr_buf *buf,
6701 		       char *fmt, unsigned long long *data, u32 data__sz)
6702 {
6703 	return __bstr_format(buf->data, buf->line, sizeof(buf->line),
6704 			     fmt, data, data__sz);
6705 }
6706 
6707 __bpf_kfunc_start_defs();
6708 
6709 /**
6710  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
6711  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
6712  * @fmt: error message format string
6713  * @data: format string parameters packaged using ___bpf_fill() macro
6714  * @data__sz: @data len, must end in '__sz' for the verifier
6715  *
6716  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
6717  * disabling.
6718  */
6719 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
6720 				   unsigned long long *data, u32 data__sz)
6721 {
6722 	unsigned long flags;
6723 
6724 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6725 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6726 		scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
6727 				  scx_exit_bstr_buf.line);
6728 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6729 }
6730 
6731 /**
6732  * scx_bpf_error_bstr - Indicate fatal error
6733  * @fmt: error message format string
6734  * @data: format string parameters packaged using ___bpf_fill() macro
6735  * @data__sz: @data len, must end in '__sz' for the verifier
6736  *
6737  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
6738  * disabling.
6739  */
6740 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
6741 				    u32 data__sz)
6742 {
6743 	unsigned long flags;
6744 
6745 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6746 	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6747 		scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
6748 				  scx_exit_bstr_buf.line);
6749 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6750 }
6751 
6752 /**
6753  * scx_bpf_dump - Generate extra debug dump specific to the BPF scheduler
6754  * @fmt: format string
6755  * @data: format string parameters packaged using ___bpf_fill() macro
6756  * @data__sz: @data len, must end in '__sz' for the verifier
6757  *
6758  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
6759  * dump_task() to generate extra debug dump specific to the BPF scheduler.
6760  *
6761  * The extra dump may be multiple lines. A single line may be split over
6762  * multiple calls. The last line is automatically terminated.
6763  */
6764 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
6765 				   u32 data__sz)
6766 {
6767 	struct scx_dump_data *dd = &scx_dump_data;
6768 	struct scx_bstr_buf *buf = &dd->buf;
6769 	s32 ret;
6770 
6771 	if (raw_smp_processor_id() != dd->cpu) {
6772 		scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
6773 		return;
6774 	}
6775 
6776 	/* append the formatted string to the line buf */
6777 	ret = __bstr_format(buf->data, buf->line + dd->cursor,
6778 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
6779 	if (ret < 0) {
6780 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
6781 			  dd->prefix, fmt, data, data__sz, ret);
6782 		return;
6783 	}
6784 
6785 	dd->cursor += ret;
6786 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
6787 
6788 	if (!dd->cursor)
6789 		return;
6790 
6791 	/*
6792 	 * If the line buf overflowed or ends in a newline, flush it into the
6793 	 * dump. This is to allow the caller to generate a single line over
6794 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
6795 	 * the line buf, the only case which can lead to an unexpected
6796 	 * truncation is when the caller keeps generating newlines in the middle
6797 	 * instead of the end consecutively. Don't do that.
6798 	 */
6799 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
6800 		ops_dump_flush();
6801 }
6802 
6803 /**
6804  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
6805  * @cpu: CPU of interest
6806  *
6807  * Return the maximum relative capacity of @cpu in relation to the most
6808  * performant CPU in the system. The return value is in the range [1,
6809  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
6810  */
6811 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
6812 {
6813 	if (ops_cpu_valid(cpu, NULL))
6814 		return arch_scale_cpu_capacity(cpu);
6815 	else
6816 		return SCX_CPUPERF_ONE;
6817 }
6818 
6819 /**
6820  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
6821  * @cpu: CPU of interest
6822  *
6823  * Return the current relative performance of @cpu in relation to its maximum.
6824  * The return value is in the range [1, %SCX_CPUPERF_ONE].
6825  *
6826  * The current performance level of a CPU in relation to the maximum performance
6827  * available in the system can be calculated as follows:
6828  *
6829  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
6830  *
6831  * The result is in the range [1, %SCX_CPUPERF_ONE].
6832  */
6833 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
6834 {
6835 	if (ops_cpu_valid(cpu, NULL))
6836 		return arch_scale_freq_capacity(cpu);
6837 	else
6838 		return SCX_CPUPERF_ONE;
6839 }
6840 
6841 /**
6842  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
6843  * @cpu: CPU of interest
6844  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
6845  * @flags: %SCX_CPUPERF_* flags
6846  *
6847  * Set the target performance level of @cpu to @perf. @perf is in linear
6848  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
6849  * schedutil cpufreq governor chooses the target frequency.
6850  *
6851  * The actual performance level chosen, CPU grouping, and the overhead and
6852  * latency of the operations are dependent on the hardware and cpufreq driver in
6853  * use. Consult hardware and cpufreq documentation for more information. The
6854  * current performance level can be monitored using scx_bpf_cpuperf_cur().
6855  */
6856 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
6857 {
6858 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
6859 		scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
6860 		return;
6861 	}
6862 
6863 	if (ops_cpu_valid(cpu, NULL)) {
6864 		struct rq *rq = cpu_rq(cpu);
6865 
6866 		rq->scx.cpuperf_target = perf;
6867 
6868 		rcu_read_lock_sched_notrace();
6869 		cpufreq_update_util(cpu_rq(cpu), 0);
6870 		rcu_read_unlock_sched_notrace();
6871 	}
6872 }
6873 
6874 /**
6875  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
6876  *
6877  * All valid CPU IDs in the system are smaller than the returned value.
6878  */
6879 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
6880 {
6881 	return nr_cpu_ids;
6882 }
6883 
6884 /**
6885  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
6886  */
6887 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
6888 {
6889 	return cpu_possible_mask;
6890 }
6891 
6892 /**
6893  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
6894  */
6895 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
6896 {
6897 	return cpu_online_mask;
6898 }
6899 
6900 /**
6901  * scx_bpf_put_cpumask - Release a possible/online cpumask
6902  * @cpumask: cpumask to release
6903  */
6904 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
6905 {
6906 	/*
6907 	 * Empty function body because we aren't actually acquiring or releasing
6908 	 * a reference to a global cpumask, which is read-only in the caller and
6909 	 * is never released. The acquire / release semantics here are just used
6910 	 * to make the cpumask is a trusted pointer in the caller.
6911 	 */
6912 }
6913 
6914 /**
6915  * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
6916  * per-CPU cpumask.
6917  *
6918  * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
6919  */
6920 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
6921 {
6922 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6923 		scx_ops_error("built-in idle tracking is disabled");
6924 		return cpu_none_mask;
6925 	}
6926 
6927 #ifdef CONFIG_SMP
6928 	return idle_masks.cpu;
6929 #else
6930 	return cpu_none_mask;
6931 #endif
6932 }
6933 
6934 /**
6935  * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
6936  * per-physical-core cpumask. Can be used to determine if an entire physical
6937  * core is free.
6938  *
6939  * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
6940  */
6941 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
6942 {
6943 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6944 		scx_ops_error("built-in idle tracking is disabled");
6945 		return cpu_none_mask;
6946 	}
6947 
6948 #ifdef CONFIG_SMP
6949 	if (sched_smt_active())
6950 		return idle_masks.smt;
6951 	else
6952 		return idle_masks.cpu;
6953 #else
6954 	return cpu_none_mask;
6955 #endif
6956 }
6957 
6958 /**
6959  * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
6960  * either the percpu, or SMT idle-tracking cpumask.
6961  */
6962 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
6963 {
6964 	/*
6965 	 * Empty function body because we aren't actually acquiring or releasing
6966 	 * a reference to a global idle cpumask, which is read-only in the
6967 	 * caller and is never released. The acquire / release semantics here
6968 	 * are just used to make the cpumask a trusted pointer in the caller.
6969 	 */
6970 }
6971 
6972 /**
6973  * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
6974  * @cpu: cpu to test and clear idle for
6975  *
6976  * Returns %true if @cpu was idle and its idle state was successfully cleared.
6977  * %false otherwise.
6978  *
6979  * Unavailable if ops.update_idle() is implemented and
6980  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
6981  */
6982 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
6983 {
6984 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6985 		scx_ops_error("built-in idle tracking is disabled");
6986 		return false;
6987 	}
6988 
6989 	if (ops_cpu_valid(cpu, NULL))
6990 		return test_and_clear_cpu_idle(cpu);
6991 	else
6992 		return false;
6993 }
6994 
6995 /**
6996  * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
6997  * @cpus_allowed: Allowed cpumask
6998  * @flags: %SCX_PICK_IDLE_CPU_* flags
6999  *
7000  * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
7001  * number on success. -%EBUSY if no matching cpu was found.
7002  *
7003  * Idle CPU tracking may race against CPU scheduling state transitions. For
7004  * example, this function may return -%EBUSY as CPUs are transitioning into the
7005  * idle state. If the caller then assumes that there will be dispatch events on
7006  * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
7007  * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
7008  * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
7009  * event in the near future.
7010  *
7011  * Unavailable if ops.update_idle() is implemented and
7012  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7013  */
7014 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
7015 				      u64 flags)
7016 {
7017 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7018 		scx_ops_error("built-in idle tracking is disabled");
7019 		return -EBUSY;
7020 	}
7021 
7022 	return scx_pick_idle_cpu(cpus_allowed, flags);
7023 }
7024 
7025 /**
7026  * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
7027  * @cpus_allowed: Allowed cpumask
7028  * @flags: %SCX_PICK_IDLE_CPU_* flags
7029  *
7030  * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
7031  * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
7032  * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
7033  * empty.
7034  *
7035  * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
7036  * set, this function can't tell which CPUs are idle and will always pick any
7037  * CPU.
7038  */
7039 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
7040 				     u64 flags)
7041 {
7042 	s32 cpu;
7043 
7044 	if (static_branch_likely(&scx_builtin_idle_enabled)) {
7045 		cpu = scx_pick_idle_cpu(cpus_allowed, flags);
7046 		if (cpu >= 0)
7047 			return cpu;
7048 	}
7049 
7050 	cpu = cpumask_any_distribute(cpus_allowed);
7051 	if (cpu < nr_cpu_ids)
7052 		return cpu;
7053 	else
7054 		return -EBUSY;
7055 }
7056 
7057 /**
7058  * scx_bpf_task_running - Is task currently running?
7059  * @p: task of interest
7060  */
7061 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7062 {
7063 	return task_rq(p)->curr == p;
7064 }
7065 
7066 /**
7067  * scx_bpf_task_cpu - CPU a task is currently associated with
7068  * @p: task of interest
7069  */
7070 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7071 {
7072 	return task_cpu(p);
7073 }
7074 
7075 /**
7076  * scx_bpf_cpu_rq - Fetch the rq of a CPU
7077  * @cpu: CPU of the rq
7078  */
7079 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7080 {
7081 	if (!ops_cpu_valid(cpu, NULL))
7082 		return NULL;
7083 
7084 	return cpu_rq(cpu);
7085 }
7086 
7087 /**
7088  * scx_bpf_task_cgroup - Return the sched cgroup of a task
7089  * @p: task of interest
7090  *
7091  * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7092  * from the scheduler's POV. SCX operations should use this function to
7093  * determine @p's current cgroup as, unlike following @p->cgroups,
7094  * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7095  * rq-locked operations. Can be called on the parameter tasks of rq-locked
7096  * operations. The restriction guarantees that @p's rq is locked by the caller.
7097  */
7098 #ifdef CONFIG_CGROUP_SCHED
7099 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7100 {
7101 	struct task_group *tg = p->sched_task_group;
7102 	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7103 
7104 	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7105 		goto out;
7106 
7107 	/*
7108 	 * A task_group may either be a cgroup or an autogroup. In the latter
7109 	 * case, @tg->css.cgroup is %NULL. A task_group can't become the other
7110 	 * kind once created.
7111 	 */
7112 	if (tg && tg->css.cgroup)
7113 		cgrp = tg->css.cgroup;
7114 	else
7115 		cgrp = &cgrp_dfl_root.cgrp;
7116 out:
7117 	cgroup_get(cgrp);
7118 	return cgrp;
7119 }
7120 #endif
7121 
7122 __bpf_kfunc_end_defs();
7123 
7124 BTF_KFUNCS_START(scx_kfunc_ids_any)
7125 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7126 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7127 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7128 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7129 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7130 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7131 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7132 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7133 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7134 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7135 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7136 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7137 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7138 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7139 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7140 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7141 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7142 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7143 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7144 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7145 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7146 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7147 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7148 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7149 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7150 #ifdef CONFIG_CGROUP_SCHED
7151 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7152 #endif
7153 BTF_KFUNCS_END(scx_kfunc_ids_any)
7154 
7155 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7156 	.owner			= THIS_MODULE,
7157 	.set			= &scx_kfunc_ids_any,
7158 };
7159 
7160 static int __init scx_init(void)
7161 {
7162 	int ret;
7163 
7164 	/*
7165 	 * kfunc registration can't be done from init_sched_ext_class() as
7166 	 * register_btf_kfunc_id_set() needs most of the system to be up.
7167 	 *
7168 	 * Some kfuncs are context-sensitive and can only be called from
7169 	 * specific SCX ops. They are grouped into BTF sets accordingly.
7170 	 * Unfortunately, BPF currently doesn't have a way of enforcing such
7171 	 * restrictions. Eventually, the verifier should be able to enforce
7172 	 * them. For now, register them the same and make each kfunc explicitly
7173 	 * check using scx_kf_allowed().
7174 	 */
7175 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7176 					     &scx_kfunc_set_select_cpu)) ||
7177 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7178 					     &scx_kfunc_set_enqueue_dispatch)) ||
7179 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7180 					     &scx_kfunc_set_dispatch)) ||
7181 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7182 					     &scx_kfunc_set_cpu_release)) ||
7183 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7184 					     &scx_kfunc_set_unlocked)) ||
7185 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7186 					     &scx_kfunc_set_unlocked)) ||
7187 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7188 					     &scx_kfunc_set_any)) ||
7189 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7190 					     &scx_kfunc_set_any)) ||
7191 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7192 					     &scx_kfunc_set_any))) {
7193 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7194 		return ret;
7195 	}
7196 
7197 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7198 	if (ret) {
7199 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7200 		return ret;
7201 	}
7202 
7203 	ret = register_pm_notifier(&scx_pm_notifier);
7204 	if (ret) {
7205 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7206 		return ret;
7207 	}
7208 
7209 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7210 	if (!scx_kset) {
7211 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7212 		return -ENOMEM;
7213 	}
7214 
7215 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7216 	if (ret < 0) {
7217 		pr_err("sched_ext: Failed to add global attributes\n");
7218 		return ret;
7219 	}
7220 
7221 	return 0;
7222 }
7223 __initcall(scx_init);
7224