1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
10
11 enum scx_consts {
12 SCX_DSP_DFL_MAX_BATCH = 32,
13 SCX_DSP_MAX_LOOPS = 32,
14 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
15
16 SCX_EXIT_BT_LEN = 64,
17 SCX_EXIT_MSG_LEN = 1024,
18 SCX_EXIT_DUMP_DFL_LEN = 32768,
19
20 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
21
22 /*
23 * Iterating all tasks may take a while. Periodically drop
24 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
25 */
26 SCX_OPS_TASK_ITER_BATCH = 32,
27 };
28
29 enum scx_exit_kind {
30 SCX_EXIT_NONE,
31 SCX_EXIT_DONE,
32
33 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
34 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
35 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
36 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
37
38 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
39 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
40 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
41 };
42
43 /*
44 * An exit code can be specified when exiting with scx_bpf_exit() or
45 * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
46 * respectively. The codes are 64bit values of the format:
47 *
48 * Bits: [63 .. 48 47 .. 32 31 .. 0]
49 * [ SYS ACT ] [ SYS RSN ] [ USR ]
50 *
51 * SYS ACT: System-defined exit actions
52 * SYS RSN: System-defined exit reasons
53 * USR : User-defined exit codes and reasons
54 *
55 * Using the above, users may communicate intention and context by ORing system
56 * actions and/or system reasons with a user-defined exit code.
57 */
58 enum scx_exit_code {
59 /* Reasons */
60 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
61
62 /* Actions */
63 SCX_ECODE_ACT_RESTART = 1LLU << 48,
64 };
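
/*
 * For illustration only: a BPF scheduler that wants to be restarted after a
 * hotplug event might combine the reason and action above with a scheduler
 * specific code in the USR bits when exiting, e.g. via the scx_bpf_exit()
 * convenience wrapper provided by the BPF-side scx headers:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 1,
 *		     "cpu %d going offline, exiting to restart", cpu);
 *
 * The loader can then inspect scx_exit_info.exit_code and re-attach the
 * scheduler. This is a sketch, not a requirement of the interface.
 */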
65
66 /*
67 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
68 * being disabled.
69 */
70 struct scx_exit_info {
71 /* %SCX_EXIT_* - broad category of the exit reason */
72 enum scx_exit_kind kind;
73
74 /* exit code if gracefully exiting */
75 s64 exit_code;
76
77 /* textual representation of the above */
78 const char *reason;
79
80 /* backtrace if exiting due to an error */
81 unsigned long *bt;
82 u32 bt_len;
83
84 /* informational message */
85 char *msg;
86
87 /* debug dump */
88 char *dump;
89 };
90
91 /* sched_ext_ops.flags */
92 enum scx_ops_flags {
93 /*
94 * Keep built-in idle tracking even if ops.update_idle() is implemented.
95 */
96 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
97
98 /*
99 * By default, if there are no other tasks to run on the CPU, ext core
100 * keeps running the current task even after its slice expires. If this
101 * flag is specified, such tasks are passed to ops.enqueue() with
102 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
103 */
104 SCX_OPS_ENQ_LAST = 1LLU << 1,
105
106 /*
107 * An exiting task may schedule after PF_EXITING is set. In such cases,
108 * bpf_task_from_pid() may not be able to find the task and if the BPF
109 * scheduler depends on pid lookup for dispatching, the task will be
110 * lost leading to various issues including RCU grace period stalls.
111 *
112 * To mask this problem, by default, unhashed tasks are automatically
113 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
114 * depend on pid lookups and wants to handle these tasks directly, the
115 * following flag can be used.
116 */
117 SCX_OPS_ENQ_EXITING = 1LLU << 2,
118
119 /*
120 * If set, only tasks with policy set to SCHED_EXT are attached to
121 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
122 */
123 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
124
125 /*
126 * CPU cgroup support flags
127 */
128 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
129
130 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
131 SCX_OPS_ENQ_LAST |
132 SCX_OPS_ENQ_EXITING |
133 SCX_OPS_SWITCH_PARTIAL |
134 SCX_OPS_HAS_CGROUP_WEIGHT,
135 };
136
137 /* argument container for ops.init_task() */
138 struct scx_init_task_args {
139 /*
140 * Set if ops.init_task() is being invoked on the fork path, as opposed
141 * to the scheduler transition path.
142 */
143 bool fork;
144 #ifdef CONFIG_EXT_GROUP_SCHED
145 /* the cgroup the task is joining */
146 struct cgroup *cgroup;
147 #endif
148 };
149
150 /* argument container for ops.exit_task() */
151 struct scx_exit_task_args {
152 /* Whether the task exited before running on sched_ext. */
153 bool cancelled;
154 };
155
156 /* argument container for ops->cgroup_init() */
157 struct scx_cgroup_init_args {
158 /* the weight of the cgroup [1..10000] */
159 u32 weight;
160 };
161
162 enum scx_cpu_preempt_reason {
163 /* next task is being scheduled by &rt_sched_class */
164 SCX_CPU_PREEMPT_RT,
165 /* next task is being scheduled by &dl_sched_class */
166 SCX_CPU_PREEMPT_DL,
167 /* next task is being scheduled by &stop_sched_class */
168 SCX_CPU_PREEMPT_STOP,
169 /* unknown reason for SCX being preempted */
170 SCX_CPU_PREEMPT_UNKNOWN,
171 };
172
173 /*
174 * Argument container for ops->cpu_acquire(). Currently empty, but may be
175 * expanded in the future.
176 */
177 struct scx_cpu_acquire_args {};
178
179 /* argument container for ops->cpu_release() */
180 struct scx_cpu_release_args {
181 /* the reason the CPU was preempted */
182 enum scx_cpu_preempt_reason reason;
183
184 /* the task that's going to be scheduled on the CPU */
185 struct task_struct *task;
186 };
187
188 /*
189 * Informational context provided to dump operations.
190 */
191 struct scx_dump_ctx {
192 enum scx_exit_kind kind;
193 s64 exit_code;
194 const char *reason;
195 u64 at_ns;
196 u64 at_jiffies;
197 };
198
199 /**
200 * struct sched_ext_ops - Operation table for BPF scheduler implementation
201 *
202 * A BPF scheduler can implement an arbitrary scheduling policy by
203 * implementing and loading operations in this table. Note that a userland
204 * scheduling policy can also be implemented using the BPF scheduler
205 * as a shim layer.
206 */
207 struct sched_ext_ops {
208 /**
209 * select_cpu - Pick the target CPU for a task which is being woken up
210 * @p: task being woken up
211 * @prev_cpu: the cpu @p was on before sleeping
212 * @wake_flags: SCX_WAKE_*
213 *
214 * Decision made here isn't final. @p may be moved to any CPU while it
215 * is getting dispatched for execution later. However, as @p is not on
216 * the rq at this point, getting the eventual execution CPU right here
217 * saves a small bit of overhead down the line.
218 *
219 * If an idle CPU is returned, the CPU is kicked and will try to
220 * dispatch. While an explicit custom mechanism can be added,
221 * select_cpu() serves as the default way to wake up idle CPUs.
222 *
223 * @p may be inserted into a DSQ directly by calling
224 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
225 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
226 * of the CPU returned by this operation.
227 *
228 * Note that select_cpu() is never called for tasks that can only run
229 * on a single CPU or tasks with migration disabled, as they don't have
230 * the option to select a different CPU. See select_task_rq() for
231 * details.
232 */
233 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
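
	/*
	 * Illustrative sketch of a BPF-side implementation using the kfuncs
	 * mentioned above (BPF_STRUCT_OPS() comes from the scx BPF headers;
	 * this is an example, not a canonical implementation):
	 *
	 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *					     &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL,
	 *					   SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */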
234
235 /**
236 * enqueue - Enqueue a task on the BPF scheduler
237 * @p: task being enqueued
238 * @enq_flags: %SCX_ENQ_*
239 *
240 * @p is ready to run. Insert directly into a DSQ by calling
241 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
242 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
243 * the task will stall.
244 *
245 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
246 * skipped.
247 */
248 void (*enqueue)(struct task_struct *p, u64 enq_flags);
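
	/*
	 * Illustrative sketch, assuming a custom DSQ was created with
	 * scx_bpf_create_dsq() in ops.init() (EXAMPLE_DSQ is hypothetical):
	 *
	 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p,
	 *			    u64 enq_flags)
	 *	{
	 *		scx_bpf_dsq_insert(p, EXAMPLE_DSQ, SCX_SLICE_DFL,
	 *				   enq_flags);
	 *	}
	 */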
249
250 /**
251 * dequeue - Remove a task from the BPF scheduler
252 * @p: task being dequeued
253 * @deq_flags: %SCX_DEQ_*
254 *
255 * Remove @p from the BPF scheduler. This is usually called to isolate
256 * the task while updating its scheduling properties (e.g. priority).
257 *
258 * The ext core keeps track of whether the BPF side owns a given task or
259 * not and can gracefully ignore spurious dispatches from BPF side,
260 * which makes it safe to not implement this method. However, depending
261 * on the scheduling logic, this can lead to confusing behaviors - e.g.
262 * scheduling position not being updated across a priority change.
263 */
264 void (*dequeue)(struct task_struct *p, u64 deq_flags);
265
266 /**
267 * dispatch - Dispatch tasks from the BPF scheduler and/or user DSQs
268 * @cpu: CPU to dispatch tasks for
269 * @prev: previous task being switched out
270 *
271 * Called when a CPU's local dsq is empty. The operation should dispatch
272 * one or more tasks from the BPF scheduler into the DSQs using
273 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
274 * using scx_bpf_dsq_move_to_local().
275 *
276 * The maximum number of times scx_bpf_dsq_insert() can be called
277 * without an intervening scx_bpf_dsq_move_to_local() is specified by
278 * ops.dispatch_max_batch. See the comments on top of the two functions
279 * for more details.
280 *
281 * When not %NULL, @prev is an SCX task with its slice depleted. If
282 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
283 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
284 * ops.dispatch() returns. To keep executing @prev, return without
285 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
286 */
287 void (*dispatch)(s32 cpu, struct task_struct *prev);
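
	/*
	 * Illustrative sketch pairing with the enqueue example above
	 * (EXAMPLE_DSQ is hypothetical):
	 *
	 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(EXAMPLE_DSQ);
	 *	}
	 */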
288
289 /**
290 * tick - Periodic tick
291 * @p: task running currently
292 *
293 * This operation is called every 1/HZ seconds on CPUs which are
294 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
295 * immediate dispatch cycle on the CPU.
296 */
297 void (*tick)(struct task_struct *p);
298
299 /**
300 * runnable - A task is becoming runnable on its associated CPU
301 * @p: task becoming runnable
302 * @enq_flags: %SCX_ENQ_*
303 *
304 * This and the following three functions can be used to track a task's
305 * execution state transitions. A task becomes ->runnable() on a CPU,
306 * and then goes through one or more ->running() and ->stopping() pairs
307 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
308 * done running on the CPU.
309 *
310 * @p is becoming runnable on the CPU because it's
311 *
312 * - waking up (%SCX_ENQ_WAKEUP)
313 * - being moved from another CPU
314 * - being restored after temporarily taken off the queue for an
315 * attribute change.
316 *
317 * This and ->enqueue() are related but not coupled. This operation
318 * notifies @p's state transition and may not be followed by ->enqueue()
319 * e.g. when @p is being dispatched to a remote CPU, or when @p is
320 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
321 * task may be ->enqueue()'d without being preceded by this operation
322 * e.g. after exhausting its slice.
323 */
324 void (*runnable)(struct task_struct *p, u64 enq_flags);
325
326 /**
327 * running - A task is starting to run on its associated CPU
328 * @p: task starting to run
329 *
330 * See ->runnable() for explanation on the task state notifiers.
331 */
332 void (*running)(struct task_struct *p);
333
334 /**
335 * stopping - A task is stopping execution
336 * @p: task stopping to run
337 * @runnable: is task @p still runnable?
338 *
339 * See ->runnable() for explanation on the task state notifiers. If
340 * !@runnable, ->quiescent() will be invoked after this operation
341 * returns.
342 */
343 void (*stopping)(struct task_struct *p, bool runnable);
344
345 /**
346 * quiescent - A task is becoming not runnable on its associated CPU
347 * @p: task becoming not runnable
348 * @deq_flags: %SCX_DEQ_*
349 *
350 * See ->runnable() for explanation on the task state notifiers.
351 *
352 * @p is becoming quiescent on the CPU because it's
353 *
354 * - sleeping (%SCX_DEQ_SLEEP)
355 * - being moved to another CPU
356 * - being temporarily taken off the queue for an attribute change
357 * (%SCX_DEQ_SAVE)
358 *
359 * This and ->dequeue() are related but not coupled. This operation
360 * notifies @p's state transition and may not be preceded by ->dequeue()
361 * e.g. when @p is being dispatched to a remote CPU.
362 */
363 void (*quiescent)(struct task_struct *p, u64 deq_flags);
364
365 /**
366 * yield - Yield CPU
367 * @from: yielding task
368 * @to: optional yield target task
369 *
370 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
371 * The BPF scheduler should ensure that other available tasks are
372 * dispatched before the yielding task. Return value is ignored in this
373 * case.
374 *
375 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
376 * scheduler can implement the request, return %true; otherwise, %false.
377 */
378 bool (*yield)(struct task_struct *from, struct task_struct *to);
379
380 /**
381 * core_sched_before - Task ordering for core-sched
382 * @a: task A
383 * @b: task B
384 *
385 * Used by core-sched to determine the ordering between two tasks. See
386 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
387 * core-sched.
388 *
389 * Both @a and @b are runnable and may or may not currently be queued on
390 * the BPF scheduler. Should return %true if @a should run before @b.
391 * %false if there's no required ordering or @b should run before @a.
392 *
393 * If not specified, the default is ordering them according to when they
394 * became runnable.
395 */
396 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
397
398 /**
399 * set_weight - Set task weight
400 * @p: task to set weight for
401 * @weight: new weight [1..10000]
402 *
403 * Update @p's weight to @weight.
404 */
405 void (*set_weight)(struct task_struct *p, u32 weight);
406
407 /**
408 * set_cpumask - Set CPU affinity
409 * @p: task to set CPU affinity for
410 * @cpumask: cpumask of cpus that @p can run on
411 *
412 * Update @p's CPU affinity to @cpumask.
413 */
414 void (*set_cpumask)(struct task_struct *p,
415 const struct cpumask *cpumask);
416
417 /**
418 * update_idle - Update the idle state of a CPU
419 * @cpu: CPU to update the idle state for
420 * @idle: whether entering or exiting the idle state
421 *
422 * This operation is called when @cpu enters or leaves the idle
423 * state. By default, implementing this operation disables the built-in
424 * idle CPU tracking and the following helpers become unavailable:
425 *
426 * - scx_bpf_select_cpu_dfl()
427 * - scx_bpf_test_and_clear_cpu_idle()
428 * - scx_bpf_pick_idle_cpu()
429 *
430 * The user also must implement ops.select_cpu() as the default
431 * implementation relies on scx_bpf_select_cpu_dfl().
432 *
433 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
434 * tracking.
435 */
436 void (*update_idle)(s32 cpu, bool idle);
437
438 /**
439 * cpu_acquire - A CPU is becoming available to the BPF scheduler
440 * @cpu: The CPU being acquired by the BPF scheduler.
441 * @args: Acquire arguments, see the struct definition.
442 *
443 * A CPU that was previously released from the BPF scheduler is now once
444 * again under its control.
445 */
446 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
447
448 /**
449 * cpu_release - A CPU is taken away from the BPF scheduler
450 * @cpu: The CPU being released by the BPF scheduler.
451 * @args: Release arguments, see the struct definition.
452 *
453 * The specified CPU is no longer under the control of the BPF
454 * scheduler. This could be because it was preempted by a higher
455 * priority sched_class, though there may be other reasons as well. The
456 * caller should consult @args->reason to determine the cause.
457 */
458 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
459
460 /**
461 * init_task - Initialize a task to run in a BPF scheduler
462 * @p: task to initialize for BPF scheduling
463 * @args: init arguments, see the struct definition
464 *
465 * Either we're loading a BPF scheduler or a new task is being forked.
466 * Initialize @p for BPF scheduling. This operation may block and can
467 * be used for allocations, and is called exactly once for a task.
468 *
469 * Return 0 for success, -errno for failure. An error return while
470 * loading will abort loading of the BPF scheduler. During a fork, it
471 * will abort that specific fork.
472 */
473 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
474
475 /**
476 * exit_task - Exit a previously-running task from the system
477 * @p: task to exit
478 *
479 * @p is exiting or the BPF scheduler is being unloaded. Perform any
480 * necessary cleanup for @p.
481 */
482 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
483
484 /**
485 * enable - Enable BPF scheduling for a task
486 * @p: task to enable BPF scheduling for
487 *
488 * Enable @p for BPF scheduling. enable() is called on @p any time it
489 * enters SCX, and is always paired with a matching disable().
490 */
491 void (*enable)(struct task_struct *p);
492
493 /**
494 * disable - Disable BPF scheduling for a task
495 * @p: task to disable BPF scheduling for
496 *
497 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
498 * Disable BPF scheduling for @p. A disable() call is always matched
499 * with a prior enable() call.
500 */
501 void (*disable)(struct task_struct *p);
502
503 /**
504 * dump - Dump BPF scheduler state on error
505 * @ctx: debug dump context
506 *
507 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
508 */
509 void (*dump)(struct scx_dump_ctx *ctx);
510
511 /**
512 * dump_cpu - Dump BPF scheduler state for a CPU on error
513 * @ctx: debug dump context
514 * @cpu: CPU to generate debug dump for
515 * @idle: @cpu is currently idle without any runnable tasks
516 *
517 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
518 * @cpu. If @idle is %true and this operation doesn't produce any
519 * output, @cpu is skipped for dump.
520 */
521 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
522
523 /**
524 * dump_task - Dump BPF scheduler state for a runnable task on error
525 * @ctx: debug dump context
526 * @p: runnable task to generate debug dump for
527 *
528 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
529 * @p.
530 */
531 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
532
533 #ifdef CONFIG_EXT_GROUP_SCHED
534 /**
535 * cgroup_init - Initialize a cgroup
536 * @cgrp: cgroup being initialized
537 * @args: init arguments, see the struct definition
538 *
539 * Either the BPF scheduler is being loaded or @cgrp is being created. Initialize
540 * @cgrp for sched_ext. This operation may block.
541 *
542 * Return 0 for success, -errno for failure. An error return while
543 * loading will abort loading of the BPF scheduler. During cgroup
544 * creation, it will abort the specific cgroup creation.
545 */
546 s32 (*cgroup_init)(struct cgroup *cgrp,
547 struct scx_cgroup_init_args *args);
548
549 /**
550 * cgroup_exit - Exit a cgroup
551 * @cgrp: cgroup being exited
552 *
553 * Either the BPF scheduler is being unloaded or @cgrp is being destroyed.
554 * Exit @cgrp for sched_ext. This operation may block.
555 */
556 void (*cgroup_exit)(struct cgroup *cgrp);
557
558 /**
559 * cgroup_prep_move - Prepare a task to be moved to a different cgroup
560 * @p: task being moved
561 * @from: cgroup @p is being moved from
562 * @to: cgroup @p is being moved to
563 *
564 * Prepare @p for move from cgroup @from to @to. This operation may
565 * block and can be used for allocations.
566 *
567 * Return 0 for success, -errno for failure. An error return aborts the
568 * migration.
569 */
570 s32 (*cgroup_prep_move)(struct task_struct *p,
571 struct cgroup *from, struct cgroup *to);
572
573 /**
574 * cgroup_move - Commit cgroup move
575 * @p: task being moved
576 * @from: cgroup @p is being moved from
577 * @to: cgroup @p is being moved to
578 *
579 * Commit the move. @p is dequeued during this operation.
580 */
581 void (*cgroup_move)(struct task_struct *p,
582 struct cgroup *from, struct cgroup *to);
583
584 /**
585 * cgroup_cancel_move - Cancel cgroup move
586 * @p: task whose cgroup move is being canceled
587 * @from: cgroup @p was being moved from
588 * @to: cgroup @p was being moved to
589 *
590 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
591 * Undo the preparation.
592 */
593 void (*cgroup_cancel_move)(struct task_struct *p,
594 struct cgroup *from, struct cgroup *to);
595
596 /**
597 * cgroup_set_weight - A cgroup's weight is being changed
598 * @cgrp: cgroup whose weight is being updated
599 * @weight: new weight [1..10000]
600 *
601 * Update @cgrp's weight to @weight.
602 */
603 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
604 #endif /* CONFIG_EXT_GROUP_SCHED */
605
606 /*
607 * All online ops must come before ops.cpu_online().
608 */
609
610 /**
611 * cpu_online - A CPU became online
612 * @cpu: CPU which just came up
613 *
614 * @cpu just came online. @cpu will not call ops.enqueue() or
615 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
616 */
617 void (*cpu_online)(s32 cpu);
618
619 /**
620 * cpu_offline - A CPU is going offline
621 * @cpu: CPU which is going offline
622 *
623 * @cpu is going offline. @cpu will not call ops.enqueue() or
624 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
625 */
626 void (*cpu_offline)(s32 cpu);
627
628 /*
629 * All CPU hotplug ops must come before ops.init().
630 */
631
632 /**
633 * init - Initialize the BPF scheduler
634 */
635 s32 (*init)(void);
636
637 /**
638 * exit - Clean up after the BPF scheduler
639 * @info: Exit info
640 *
641 * ops.exit() is also called on ops.init() failure, which is a bit
642 * unusual. This is to allow rich reporting through @info on how
643 * ops.init() failed.
644 */
645 void (*exit)(struct scx_exit_info *info);
646
647 /**
648 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
649 */
650 u32 dispatch_max_batch;
651
652 /**
653 * flags - %SCX_OPS_* flags
654 */
655 u64 flags;
656
657 /**
658 * timeout_ms - The maximum amount of time, in milliseconds, that a
659 * runnable task should be able to wait before being scheduled. The
660 * maximum timeout may not exceed the default timeout of 30 seconds.
661 *
662 * Defaults to the maximum allowed timeout value of 30 seconds.
663 */
664 u32 timeout_ms;
665
666 /**
667 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
668 * value of 32768 is used.
669 */
670 u32 exit_dump_len;
671
672 /**
673 * hotplug_seq - A sequence number that may be set by the scheduler to
674 * detect when a hotplug event has occurred during the loading process.
675 * If 0, no detection occurs. Otherwise, the scheduler will fail to
676 * load if the sequence number does not match @scx_hotplug_seq on the
677 * enable path.
678 */
679 u64 hotplug_seq;
680
681 /**
682 * name - BPF scheduler's name
683 *
684 * Must be a non-zero valid BPF object name including only isalnum(),
685 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
686 * BPF scheduler is enabled.
687 */
688 char name[SCX_OPS_NAME_LEN];
689 };
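
/*
 * For illustration only: a BPF scheduler provides an instance of the above
 * table through a struct_ops map, roughly:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.select_cpu	= (void *)example_select_cpu,
 *		.enqueue	= (void *)example_enqueue,
 *		.dispatch	= (void *)example_dispatch,
 *		.name		= "example",
 *	};
 *
 * The callback names are hypothetical. Attaching such a map drives the enable
 * state machine below from SCX_OPS_ENABLING to SCX_OPS_ENABLED.
 */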
690
691 enum scx_opi {
692 SCX_OPI_BEGIN = 0,
693 SCX_OPI_NORMAL_BEGIN = 0,
694 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
695 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
696 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
697 SCX_OPI_END = SCX_OP_IDX(init),
698 };
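
/*
 * Given the member order in struct sched_ext_ops above, the NORMAL range
 * covers every callback declared before ops.cpu_online() and the CPU_HOTPLUG
 * range covers only ops.cpu_online() and ops.cpu_offline(). scx_has_op[] and
 * the SCX_HAS_OP() macro below are indexed with these values, e.g.:
 *
 *	if (static_branch_likely(&scx_has_op[SCX_OP_IDX(enqueue)]))
 *		...;
 */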
699
700 enum scx_wake_flags {
701 /* expose select WF_* flags as enums */
702 SCX_WAKE_FORK = WF_FORK,
703 SCX_WAKE_TTWU = WF_TTWU,
704 SCX_WAKE_SYNC = WF_SYNC,
705 };
706
707 enum scx_enq_flags {
708 /* expose select ENQUEUE_* flags as enums */
709 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
710 SCX_ENQ_HEAD = ENQUEUE_HEAD,
711 SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED,
712
713 /* high 32bits are SCX specific */
714
715 /*
716 * Set the following to trigger preemption when calling
717 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
718 * current task is cleared to zero and the CPU is kicked into the
719 * scheduling path. Implies %SCX_ENQ_HEAD.
720 */
721 SCX_ENQ_PREEMPT = 1LLU << 32,
722
723 /*
724 * The task being enqueued was previously enqueued on the current CPU's
725 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
726 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
727 * invoked in a ->cpu_release() callback, and the task is again
728 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
729 * task will not be scheduled on the CPU until at least the next invocation
730 * of the ->cpu_acquire() callback.
731 */
732 SCX_ENQ_REENQ = 1LLU << 40,
733
734 /*
735 * The task being enqueued is the only task available for the cpu. By
736 * default, ext core keeps executing such tasks but when
737 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
738 * %SCX_ENQ_LAST flag set.
739 *
740 * The BPF scheduler is responsible for triggering a follow-up
741 * scheduling event. Otherwise, execution may stall.
742 */
743 SCX_ENQ_LAST = 1LLU << 41,
744
745 /* high 8 bits are internal */
746 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
747
748 SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
749 SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
750 };
751
752 enum scx_deq_flags {
753 /* expose select DEQUEUE_* flags as enums */
754 SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
755
756 /* high 32bits are SCX specific */
757
758 /*
759 * The generic core-sched layer decided to execute the task even though
760 * it hasn't been dispatched yet. Dequeue from the BPF side.
761 */
762 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
763 };
764
765 enum scx_pick_idle_cpu_flags {
766 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
767 };
768
769 enum scx_kick_flags {
770 /*
771 * Kick the target CPU if idle. Guarantees that the target CPU goes
772 * through at least one full scheduling cycle before going idle. If the
773 * target CPU can be determined to be currently not idle and going to go
774 * through a scheduling cycle before going idle, noop.
775 */
776 SCX_KICK_IDLE = 1LLU << 0,
777
778 /*
779 * Preempt the current task and execute the dispatch path. If the
780 * current task of the target CPU is an SCX task, its ->scx.slice is
781 * cleared to zero before the scheduling path is invoked so that the
782 * task expires and the dispatch path is invoked.
783 */
784 SCX_KICK_PREEMPT = 1LLU << 1,
785
786 /*
787 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
788 * return after the target CPU finishes picking the next task.
789 */
790 SCX_KICK_WAIT = 1LLU << 2,
791 };
792
793 enum scx_tg_flags {
794 SCX_TG_ONLINE = 1U << 0,
795 SCX_TG_INITED = 1U << 1,
796 };
797
798 enum scx_ops_enable_state {
799 SCX_OPS_ENABLING,
800 SCX_OPS_ENABLED,
801 SCX_OPS_DISABLING,
802 SCX_OPS_DISABLED,
803 };
804
805 static const char *scx_ops_enable_state_str[] = {
806 [SCX_OPS_ENABLING] = "enabling",
807 [SCX_OPS_ENABLED] = "enabled",
808 [SCX_OPS_DISABLING] = "disabling",
809 [SCX_OPS_DISABLED] = "disabled",
810 };
811
812 /*
813 * sched_ext_entity->ops_state
814 *
815 * Used to track the task ownership between the SCX core and the BPF scheduler.
816 * State transitions look as follows:
817 *
818 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
819 * ^ | |
820 * | v v
821 * \-------------------------------/
822 *
823 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
824 * sites for explanations on the conditions being waited upon and why they are
825 * safe. Transitions out of them into NONE or QUEUED must store_release and the
826 * waiters should load_acquire.
827 *
828 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
829 * any given task can be dispatched by the BPF scheduler at all times and thus
830 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
831 * to try to dispatch any task anytime regardless of its state as the SCX core
832 * can safely reject invalid dispatches.
833 */
834 enum scx_ops_state {
835 SCX_OPSS_NONE, /* owned by the SCX core */
836 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
837 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
838 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
839
840 /*
841 * QSEQ brands each QUEUED instance so that, when dispatch races
842 * dequeue/requeue, the dispatcher can tell whether it still has a claim
843 * on the task being dispatched.
844 *
845 * As some 32bit archs can't do 64bit store_release/load_acquire,
846 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
847 * 32bit machines. The dispatch race window QSEQ protects is very narrow
848 * and runs with IRQ disabled. 30 bits should be sufficient.
849 */
850 SCX_OPSS_QSEQ_SHIFT = 2,
851 };
852
853 /* Use macros to ensure that the type is unsigned long for the masks */
854 #define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
855 #define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
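
/*
 * For example (illustrative only), a task queued with queue sequence 3 has
 * its ops_state set to:
 *
 *	(3UL << SCX_OPSS_QSEQ_SHIFT) | SCX_OPSS_QUEUED
 *
 * and the two components are recovered with:
 *
 *	state = opss & SCX_OPSS_STATE_MASK;
 *	qseq  = opss & SCX_OPSS_QSEQ_MASK;
 */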
856
857 /*
858 * During exit, a task may schedule after losing its PIDs. When disabling the
859 * BPF scheduler, we need to be able to iterate tasks in every state to
860 * guarantee system safety. Maintain a dedicated task list which contains every
861 * task between its fork and eventual free.
862 */
863 static DEFINE_SPINLOCK(scx_tasks_lock);
864 static LIST_HEAD(scx_tasks);
865
866 /* ops enable/disable */
867 static struct kthread_worker *scx_ops_helper;
868 static DEFINE_MUTEX(scx_ops_enable_mutex);
869 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
870 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
871 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
872 static unsigned long scx_in_softlockup;
873 static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
874 static int scx_ops_bypass_depth;
875 static bool scx_ops_init_task_enabled;
876 static bool scx_switching_all;
877 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
878
879 static struct sched_ext_ops scx_ops;
880 static bool scx_warned_zero_slice;
881
882 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
883 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
884 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
885 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
886
887 #ifdef CONFIG_SMP
888 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
889 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
890 #endif
891
892 static struct static_key_false scx_has_op[SCX_OPI_END] =
893 { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
894
895 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
896 static struct scx_exit_info *scx_exit_info;
897
898 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
899 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
900
901 /*
902 * A monotonically increasing sequence number that is incremented every time a
903 * scheduler is enabled. This can be used to check if any custom sched_ext
904 * scheduler has ever been used in the system.
905 */
906 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
907
908 /*
909 * The maximum amount of time in jiffies that a task may be runnable without
910 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
911 * scx_ops_error().
912 */
913 static unsigned long scx_watchdog_timeout;
914
915 /*
916 * The last time the delayed work was run. This delayed work relies on
917 * ksoftirqd being able to run to service timer interrupts, so it's possible
918 * that this work itself could get wedged. To account for this, we check that
919 * it's not stalled in the timer tick, and trigger an error if it is.
920 */
921 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
922
923 static struct delayed_work scx_watchdog_work;
924
925 /* idle tracking */
926 #ifdef CONFIG_SMP
927 #ifdef CONFIG_CPUMASK_OFFSTACK
928 #define CL_ALIGNED_IF_ONSTACK
929 #else
930 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
931 #endif
932
933 static struct {
934 cpumask_var_t cpu;
935 cpumask_var_t smt;
936 } idle_masks CL_ALIGNED_IF_ONSTACK;
937
938 #endif /* CONFIG_SMP */
939
940 /* for %SCX_KICK_WAIT */
941 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
942
943 /*
944 * Direct dispatch marker.
945 *
946 * Non-NULL values are used for direct dispatch from enqueue path. A valid
947 * pointer points to the task currently being enqueued. An ERR_PTR value is used
948 * to indicate that direct dispatch has already happened.
949 */
950 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
951
952 /*
953 * Dispatch queues.
954 *
955 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
956 * to avoid live-locking in bypass mode where all tasks are dispatched to
957 * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
958 * sufficient, it can be further split.
959 */
960 static struct scx_dispatch_q **global_dsqs;
961
962 static const struct rhashtable_params dsq_hash_params = {
963 .key_len = 8,
964 .key_offset = offsetof(struct scx_dispatch_q, id),
965 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
966 };
967
968 static struct rhashtable dsq_hash;
969 static LLIST_HEAD(dsqs_to_free);
970
971 /* dispatch buf */
972 struct scx_dsp_buf_ent {
973 struct task_struct *task;
974 unsigned long qseq;
975 u64 dsq_id;
976 u64 enq_flags;
977 };
978
979 static u32 scx_dsp_max_batch;
980
981 struct scx_dsp_ctx {
982 struct rq *rq;
983 u32 cursor;
984 u32 nr_tasks;
985 struct scx_dsp_buf_ent buf[];
986 };
987
988 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
989
990 /* string formatting from BPF */
991 struct scx_bstr_buf {
992 u64 data[MAX_BPRINTF_VARARGS];
993 char line[SCX_EXIT_MSG_LEN];
994 };
995
996 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
997 static struct scx_bstr_buf scx_exit_bstr_buf;
998
999 /* ops debug dump */
1000 struct scx_dump_data {
1001 s32 cpu;
1002 bool first;
1003 s32 cursor;
1004 struct seq_buf *s;
1005 const char *prefix;
1006 struct scx_bstr_buf buf;
1007 };
1008
1009 static struct scx_dump_data scx_dump_data = {
1010 .cpu = -1,
1011 };
1012
1013 /* /sys/kernel/sched_ext interface */
1014 static struct kset *scx_kset;
1015 static struct kobject *scx_root_kobj;
1016
1017 #define CREATE_TRACE_POINTS
1018 #include <trace/events/sched_ext.h>
1019
1020 static void process_ddsp_deferred_locals(struct rq *rq);
1021 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1022 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1023 s64 exit_code,
1024 const char *fmt, ...);
1025
1026 #define scx_ops_error_kind(err, fmt, args...) \
1027 scx_ops_exit_kind((err), 0, fmt, ##args)
1028
1029 #define scx_ops_exit(code, fmt, args...) \
1030 scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1031
1032 #define scx_ops_error(fmt, args...) \
1033 scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1034
1035 #define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1036
1037 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1038 {
1039 if (time_after(at, now))
1040 return jiffies_to_msecs(at - now);
1041 else
1042 return -(long)jiffies_to_msecs(now - at);
1043 }
1044
1045 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1046 static u32 higher_bits(u32 flags)
1047 {
1048 return ~((1 << fls(flags)) - 1);
1049 }
1050
1051 /* return the mask with only the highest bit set */
1052 static u32 highest_bit(u32 flags)
1053 {
1054 int bit = fls(flags);
1055 return ((u64)1 << bit) >> 1;
1056 }
1057
1058 static bool u32_before(u32 a, u32 b)
1059 {
1060 return (s32)(a - b) < 0;
1061 }
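
/*
 * Worked examples (illustrative): higher_bits(0x09) == 0xfffffff0,
 * highest_bit(0x09) == 0x08 and highest_bit(0) == 0. u32_before(5, 10) and
 * u32_before(0xfffffff0, 5) are both true as the comparison is done in
 * wrapping 32bit arithmetic, which is what makes it suitable for comparing
 * free-running sequence counters.
 */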
1062
1063 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1064 {
1065 return global_dsqs[cpu_to_node(task_cpu(p))];
1066 }
1067
1068 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1069 {
1070 return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1071 }
1072
1073 /*
1074 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1075 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1076 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1077 * whether it's running from an allowed context.
1078 *
1079 * @mask is constant, always inline to cull the mask calculations.
1080 */
1081 static __always_inline void scx_kf_allow(u32 mask)
1082 {
1083 /* nesting is allowed only in increasing scx_kf_mask order */
1084 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1085 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1086 current->scx.kf_mask, mask);
1087 current->scx.kf_mask |= mask;
1088 barrier();
1089 }
1090
1091 static void scx_kf_disallow(u32 mask)
1092 {
1093 barrier();
1094 current->scx.kf_mask &= ~mask;
1095 }
1096
1097 #define SCX_CALL_OP(mask, op, args...) \
1098 do { \
1099 if (mask) { \
1100 scx_kf_allow(mask); \
1101 scx_ops.op(args); \
1102 scx_kf_disallow(mask); \
1103 } else { \
1104 scx_ops.op(args); \
1105 } \
1106 } while (0)
1107
1108 #define SCX_CALL_OP_RET(mask, op, args...) \
1109 ({ \
1110 __typeof__(scx_ops.op(args)) __ret; \
1111 if (mask) { \
1112 scx_kf_allow(mask); \
1113 __ret = scx_ops.op(args); \
1114 scx_kf_disallow(mask); \
1115 } else { \
1116 __ret = scx_ops.op(args); \
1117 } \
1118 __ret; \
1119 })
1120
1121 /*
1122 * Some kfuncs are allowed only on the tasks that are subjects of the
1123 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1124 * restrictions, the following SCX_CALL_OP_*() variants should be used when
1125 * invoking scx_ops operations that take task arguments. These can only be used
1126 * for non-nesting operations due to the way the tasks are tracked.
1127 *
1128 * kfuncs which can only operate on such tasks can in turn use
1129 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1130 * the specific task.
1131 */
1132 #define SCX_CALL_OP_TASK(mask, op, task, args...) \
1133 do { \
1134 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1135 current->scx.kf_tasks[0] = task; \
1136 SCX_CALL_OP(mask, op, task, ##args); \
1137 current->scx.kf_tasks[0] = NULL; \
1138 } while (0)
1139
1140 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...) \
1141 ({ \
1142 __typeof__(scx_ops.op(task, ##args)) __ret; \
1143 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1144 current->scx.kf_tasks[0] = task; \
1145 __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \
1146 current->scx.kf_tasks[0] = NULL; \
1147 __ret; \
1148 })
1149
1150 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) \
1151 ({ \
1152 __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \
1153 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1154 current->scx.kf_tasks[0] = task0; \
1155 current->scx.kf_tasks[1] = task1; \
1156 __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \
1157 current->scx.kf_tasks[0] = NULL; \
1158 current->scx.kf_tasks[1] = NULL; \
1159 __ret; \
1160 })
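
/*
 * Illustrative use (the actual call sites appear later in this file; shown
 * here only as an example of how the mask and task tracking fit together):
 *
 *	cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
 *				   select_cpu, p, prev_cpu, wake_flags);
 *
 * This sets current->scx.kf_tasks[0] to @p, allows the ENQUEUE and SELECT_CPU
 * kfunc masks around the call to scx_ops.select_cpu() and clears both
 * afterwards.
 */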
1161
1162 /* @mask is constant, always inline to cull unnecessary branches */
1163 static __always_inline bool scx_kf_allowed(u32 mask)
1164 {
1165 if (unlikely(!(current->scx.kf_mask & mask))) {
1166 scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1167 mask, current->scx.kf_mask);
1168 return false;
1169 }
1170
1171 /*
1172 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1173 * DISPATCH must not be called if we're running DEQUEUE which is nested
1174 * inside ops.dispatch(). We don't need to check boundaries for any
1175 * blocking kfuncs as the verifier ensures they're only called from
1176 * sleepable progs.
1177 */
1178 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1179 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1180 scx_ops_error("cpu_release kfunc called from a nested operation");
1181 return false;
1182 }
1183
1184 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1185 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1186 scx_ops_error("dispatch kfunc called from a nested operation");
1187 return false;
1188 }
1189
1190 return true;
1191 }
1192
1193 /* see SCX_CALL_OP_TASK() */
1194 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1195 struct task_struct *p)
1196 {
1197 if (!scx_kf_allowed(mask))
1198 return false;
1199
1200 if (unlikely((p != current->scx.kf_tasks[0] &&
1201 p != current->scx.kf_tasks[1]))) {
1202 scx_ops_error("called on a task not being operated on");
1203 return false;
1204 }
1205
1206 return true;
1207 }
1208
1209 static bool scx_kf_allowed_if_unlocked(void)
1210 {
1211 return !current->scx.kf_mask;
1212 }
1213
1214 /**
1215 * nldsq_next_task - Iterate to the next task in a non-local DSQ
1216 * @dsq: user dsq being iterated
1217 * @cur: current position, %NULL to start iteration
1218 * @rev: walk backwards
1219 *
1220 * Returns %NULL when iteration is finished.
1221 */
1222 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1223 struct task_struct *cur, bool rev)
1224 {
1225 struct list_head *list_node;
1226 struct scx_dsq_list_node *dsq_lnode;
1227
1228 lockdep_assert_held(&dsq->lock);
1229
1230 if (cur)
1231 list_node = &cur->scx.dsq_list.node;
1232 else
1233 list_node = &dsq->list;
1234
1235 /* find the next task, need to skip BPF iteration cursors */
1236 do {
1237 if (rev)
1238 list_node = list_node->prev;
1239 else
1240 list_node = list_node->next;
1241
1242 if (list_node == &dsq->list)
1243 return NULL;
1244
1245 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1246 node);
1247 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1248
1249 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1250 }
1251
1252 #define nldsq_for_each_task(p, dsq) \
1253 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1254 (p) = nldsq_next_task((dsq), (p), false))
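
/*
 * Usage sketch (illustrative only; @dsq->lock must be held as asserted in
 * nldsq_next_task()):
 *
 *	raw_spin_lock(&dsq->lock);
 *	nldsq_for_each_task(p, dsq) {
 *		pr_debug("%s[%d] on DSQ 0x%016llx\n", p->comm, p->pid, dsq->id);
 *	}
 *	raw_spin_unlock(&dsq->lock);
 */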
1255
1256
1257 /*
1258 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1259 * dispatch order. BPF-visible iterator is opaque and larger to allow future
1260 * changes without breaking backward compatibility. Can be used with
1261 * bpf_for_each(). See bpf_iter_scx_dsq_*().
1262 */
1263 enum scx_dsq_iter_flags {
1264 /* iterate in the reverse dispatch order */
1265 SCX_DSQ_ITER_REV = 1U << 16,
1266
1267 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
1268 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
1269
1270 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
1271 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
1272 __SCX_DSQ_ITER_HAS_SLICE |
1273 __SCX_DSQ_ITER_HAS_VTIME,
1274 };
1275
1276 struct bpf_iter_scx_dsq_kern {
1277 struct scx_dsq_list_node cursor;
1278 struct scx_dispatch_q *dsq;
1279 u64 slice;
1280 u64 vtime;
1281 } __attribute__((aligned(8)));
1282
1283 struct bpf_iter_scx_dsq {
1284 u64 __opaque[6];
1285 } __attribute__((aligned(8)));
1286
1287
1288 /*
1289 * SCX task iterator.
1290 */
1291 struct scx_task_iter {
1292 struct sched_ext_entity cursor;
1293 struct task_struct *locked;
1294 struct rq *rq;
1295 struct rq_flags rf;
1296 u32 cnt;
1297 };
1298
1299 /**
1300 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1301 * @iter: iterator to init
1302 *
1303 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1304 * must eventually be stopped with scx_task_iter_stop().
1305 *
1306 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1307 * between this and the first next() call or between any two next() calls. If
1308 * the locks are released between two next() calls, the caller is responsible
1309 * for ensuring that the task being iterated remains accessible either through
1310 * RCU read lock or obtaining a reference count.
1311 *
1312 * All tasks which existed when the iteration started are guaranteed to be
1313 * visited as long as they still exist.
1314 */
1315 static void scx_task_iter_start(struct scx_task_iter *iter)
1316 {
1317 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1318 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1319
1320 spin_lock_irq(&scx_tasks_lock);
1321
1322 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1323 list_add(&iter->cursor.tasks_node, &scx_tasks);
1324 iter->locked = NULL;
1325 iter->cnt = 0;
1326 }
1327
1328 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1329 {
1330 if (iter->locked) {
1331 task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1332 iter->locked = NULL;
1333 }
1334 }
1335
1336 /**
1337 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1338 * @iter: iterator to unlock
1339 *
1340 * If @iter is in the middle of a locked iteration, it may be locking the rq of
1341 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1342 * This function can be safely called anytime during an iteration.
1343 */
1344 static void scx_task_iter_unlock(struct scx_task_iter *iter)
1345 {
1346 __scx_task_iter_rq_unlock(iter);
1347 spin_unlock_irq(&scx_tasks_lock);
1348 }
1349
1350 /**
1351 * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1352 * @iter: iterator to re-lock
1353 *
1354 * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1355 * doesn't re-lock the rq lock. Must be called before other iterator operations.
1356 */
1357 static void scx_task_iter_relock(struct scx_task_iter *iter)
1358 {
1359 spin_lock_irq(&scx_tasks_lock);
1360 }
1361
1362 /**
1363 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1364 * @iter: iterator to exit
1365 *
1366 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1367 * which is released on return. If the iterator holds a task's rq lock, that rq
1368 * lock is also released. See scx_task_iter_start() for details.
1369 */
1370 static void scx_task_iter_stop(struct scx_task_iter *iter)
1371 {
1372 list_del_init(&iter->cursor.tasks_node);
1373 scx_task_iter_unlock(iter);
1374 }
1375
1376 /**
1377 * scx_task_iter_next - Next task
1378 * @iter: iterator to walk
1379 *
1380 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1381 * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
1382 * stalls by holding scx_tasks_lock for too long.
1383 */
1384 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1385 {
1386 struct list_head *cursor = &iter->cursor.tasks_node;
1387 struct sched_ext_entity *pos;
1388
1389 if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
1390 scx_task_iter_unlock(iter);
1391 cond_resched();
1392 scx_task_iter_relock(iter);
1393 }
1394
1395 list_for_each_entry(pos, cursor, tasks_node) {
1396 if (&pos->tasks_node == &scx_tasks)
1397 return NULL;
1398 if (!(pos->flags & SCX_TASK_CURSOR)) {
1399 list_move(cursor, &pos->tasks_node);
1400 return container_of(pos, struct task_struct, scx);
1401 }
1402 }
1403
1404 /* can't happen, should always terminate at scx_tasks above */
1405 BUG();
1406 }
1407
1408 /**
1409 * scx_task_iter_next_locked - Next non-idle task with its rq locked
1410 * @iter: iterator to walk
1411 *
1412 * Visit the next non-idle task with its rq lock held. Init tasks
1413 * ("swappers") are skipped as they are never scheduled through SCX; see the
1414 * comment in the function body below for why. Also see
1415 * scx_task_iter_start() for details.
1416 */
1417 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1418 {
1419 struct task_struct *p;
1420
1421 __scx_task_iter_rq_unlock(iter);
1422
1423 while ((p = scx_task_iter_next(iter))) {
1424 /*
1425 * scx_task_iter is used to prepare and move tasks into SCX
1426 * while loading the BPF scheduler and vice-versa while
1427 * unloading. The init_tasks ("swappers") should be excluded
1428 * from the iteration because:
1429 *
1430 * - It's unsafe to use __setscheduler_prio() on an init_task to
1431 * determine the sched_class to use as it won't preserve its
1432 * idle_sched_class.
1433 *
1434 * - ops.init/exit_task() can easily be confused if called with
1435 * init_tasks as they, e.g., share PID 0.
1436 *
1437 * As init_tasks are never scheduled through SCX, they can be
1438 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1439 * doesn't work here:
1440 *
1441 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1442 * yet been onlined.
1443 *
1444 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1445 * play_idle_precise() used by CONFIG_IDLE_INJECT.
1446 *
1447 * Test for idle_sched_class as only init_tasks are on it.
1448 */
1449 if (p->sched_class != &idle_sched_class)
1450 break;
1451 }
1452 if (!p)
1453 return NULL;
1454
1455 iter->rq = task_rq_lock(p, &iter->rf);
1456 iter->locked = p;
1457
1458 return p;
1459 }
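
/*
 * Typical iteration pattern (a sketch; the ops enable and disable paths in
 * this file follow this shape, do_something() is hypothetical):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti)))
 *		do_something(p);
 *	scx_task_iter_stop(&sti);
 *
 * @p's rq lock is held inside the loop body, and both scx_tasks_lock and the
 * rq lock are dropped and re-acquired every %SCX_OPS_TASK_ITER_BATCH tasks.
 */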
1460
1461 static enum scx_ops_enable_state scx_ops_enable_state(void)
1462 {
1463 return atomic_read(&scx_ops_enable_state_var);
1464 }
1465
1466 static enum scx_ops_enable_state
1467 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1468 {
1469 return atomic_xchg(&scx_ops_enable_state_var, to);
1470 }
1471
1472 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1473 enum scx_ops_enable_state from)
1474 {
1475 int from_v = from;
1476
1477 return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1478 }
1479
1480 static bool scx_rq_bypassing(struct rq *rq)
1481 {
1482 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1483 }
1484
1485 /**
1486 * wait_ops_state - Busy-wait the specified ops state to end
1487 * @p: target task
1488 * @opss: state to wait the end of
1489 *
1490 * Busy-wait for @p to transition out of @opss. This can only be used when the
1491 * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This function also
1492 * has load_acquire semantics to ensure that the caller can see the updates made
1493 * in the enqueueing and dispatching paths.
1494 */
1495 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1496 {
1497 do {
1498 cpu_relax();
1499 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1500 }
1501
1502 /**
1503 * ops_cpu_valid - Verify a cpu number
1504 * @cpu: cpu number which came from a BPF ops
1505 * @where: extra information reported on error
1506 *
1507 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1508 * Verify that it is in range and one of the possible cpus. If invalid, trigger
1509 * an ops error.
1510 */
1511 static bool ops_cpu_valid(s32 cpu, const char *where)
1512 {
1513 if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1514 return true;
1515 } else {
1516 scx_ops_error("invalid CPU %d%s%s", cpu,
1517 where ? " " : "", where ?: "");
1518 return false;
1519 }
1520 }
1521
1522 /**
1523 * ops_sanitize_err - Sanitize a -errno value
1524 * @ops_name: operation to blame on failure
1525 * @err: -errno value to sanitize
1526 *
1527 * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1528 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1529 * cause misbehaviors. For example, a large negative return from
1530 * ops.init_task() triggers an oops when passed up the call chain because the
1531 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1532 * handled as a pointer.
1533 */
1534 static int ops_sanitize_err(const char *ops_name, s32 err)
1535 {
1536 if (err < 0 && err >= -MAX_ERRNO)
1537 return err;
1538
1539 scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1540 return -EPROTO;
1541 }
1542
1543 static void run_deferred(struct rq *rq)
1544 {
1545 process_ddsp_deferred_locals(rq);
1546 }
1547
1548 #ifdef CONFIG_SMP
1549 static void deferred_bal_cb_workfn(struct rq *rq)
1550 {
1551 run_deferred(rq);
1552 }
1553 #endif
1554
1555 static void deferred_irq_workfn(struct irq_work *irq_work)
1556 {
1557 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1558
1559 raw_spin_rq_lock(rq);
1560 run_deferred(rq);
1561 raw_spin_rq_unlock(rq);
1562 }
1563
1564 /**
1565 * schedule_deferred - Schedule execution of deferred actions on an rq
1566 * @rq: target rq
1567 *
1568 * Schedule execution of deferred actions on @rq. Must be called with @rq
1569 * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1570 * can unlock @rq to e.g. migrate tasks to other rqs.
1571 */
1572 static void schedule_deferred(struct rq *rq)
1573 {
1574 lockdep_assert_rq_held(rq);
1575
1576 #ifdef CONFIG_SMP
1577 /*
1578 * If in the middle of waking up a task, task_woken_scx() will be called
1579 * afterwards which will then run the deferred actions, no need to
1580 * schedule anything.
1581 */
1582 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1583 return;
1584
1585 /*
1586 * If in balance, the balance callbacks will be called before rq lock is
1587 * released. Schedule one.
1588 */
1589 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1590 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1591 deferred_bal_cb_workfn);
1592 return;
1593 }
1594 #endif
1595 /*
1596 * No scheduler hooks available. Queue an irq work. It is executed on
1597 * IRQ re-enable which may take a bit longer than the scheduler hooks.
1598 * The above WAKEUP and BALANCE paths should cover most of the cases and
1599 * the time to IRQ re-enable shouldn't be long.
1600 */
1601 irq_work_queue(&rq->scx.deferred_irq_work);
1602 }
1603
1604 /**
1605 * touch_core_sched - Update timestamp used for core-sched task ordering
1606 * @rq: rq to read clock from, must be locked
1607 * @p: task to update the timestamp for
1608 *
1609 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1610 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1611 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1612 * exhaustion).
1613 */
1614 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1615 {
1616 lockdep_assert_rq_held(rq);
1617
1618 #ifdef CONFIG_SCHED_CORE
1619 /*
1620 * It's okay to update the timestamp spuriously. Use
1621 * sched_core_disabled() which is cheaper than enabled().
1622 *
1623 * As this is used to determine ordering between tasks of sibling CPUs,
1624 * it may be better to use per-core dispatch sequence instead.
1625 */
1626 if (!sched_core_disabled())
1627 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1628 #endif
1629 }
1630
1631 /**
1632 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1633 * @rq: rq to read clock from, must be locked
1634 * @p: task being dispatched
1635 *
1636 * If the BPF scheduler implements custom core-sched ordering via
1637 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1638 * ordering within each local DSQ. This function is called from dispatch paths
1639 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1640 */
1641 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1642 {
1643 lockdep_assert_rq_held(rq);
1644
1645 #ifdef CONFIG_SCHED_CORE
1646 if (SCX_HAS_OP(core_sched_before))
1647 touch_core_sched(rq, p);
1648 #endif
1649 }
1650
1651 static void update_curr_scx(struct rq *rq)
1652 {
1653 struct task_struct *curr = rq->curr;
1654 s64 delta_exec;
1655
1656 delta_exec = update_curr_common(rq);
1657 if (unlikely(delta_exec <= 0))
1658 return;
1659
1660 if (curr->scx.slice != SCX_SLICE_INF) {
1661 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1662 if (!curr->scx.slice)
1663 touch_core_sched(rq, curr);
1664 }
1665 }
1666
1667 static bool scx_dsq_priq_less(struct rb_node *node_a,
1668 const struct rb_node *node_b)
1669 {
1670 const struct task_struct *a =
1671 container_of(node_a, struct task_struct, scx.dsq_priq);
1672 const struct task_struct *b =
1673 container_of(node_b, struct task_struct, scx.dsq_priq);
1674
1675 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1676 }
1677
1678 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1679 {
1680 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1681 WRITE_ONCE(dsq->nr, dsq->nr + delta);
1682 }
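
/*
 * Illustrative reader side (a sketch, not the actual kfunc body): the
 * lockless read in scx_bpf_dsq_nr_queued() pairs with the WRITE_ONCE()
 * above along the lines of:
 *
 *	s32 example_dsq_nr_queued(struct scx_dispatch_q *dsq)
 *	{
 *		return READ_ONCE(dsq->nr);
 *	}
 */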
1683
1684 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1685 u64 enq_flags)
1686 {
1687 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1688
1689 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1690 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1691 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1692
1693 if (!is_local) {
1694 raw_spin_lock(&dsq->lock);
1695 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1696 scx_ops_error("attempting to dispatch to a destroyed dsq");
1697 /* fall back to the global dsq */
1698 raw_spin_unlock(&dsq->lock);
1699 dsq = find_global_dsq(p);
1700 raw_spin_lock(&dsq->lock);
1701 }
1702 }
1703
1704 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1705 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1706 /*
1707 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1708 * their FIFO queues. To avoid confusion and accidentally
1709 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1710 * disallow any internal DSQ from doing vtime ordering of
1711 * tasks.
1712 */
1713 scx_ops_error("cannot use vtime ordering for built-in DSQs");
1714 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1715 }
1716
1717 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1718 struct rb_node *rbp;
1719
1720 /*
1721 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1722 * linked to both the rbtree and list on PRIQs, this can only be
1723 * tested easily when adding the first task.
1724 */
1725 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1726 nldsq_next_task(dsq, NULL, false)))
1727 scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1728 dsq->id);
1729
1730 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1731 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1732
1733 /*
1734 * Find the previous task and insert after it on the list so
1735 * that @dsq->list is vtime ordered.
1736 */
1737 rbp = rb_prev(&p->scx.dsq_priq);
1738 if (rbp) {
1739 struct task_struct *prev =
1740 container_of(rbp, struct task_struct,
1741 scx.dsq_priq);
1742 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1743 } else {
1744 list_add(&p->scx.dsq_list.node, &dsq->list);
1745 }
1746 } else {
1747 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1748 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1749 scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1750 dsq->id);
1751
1752 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1753 list_add(&p->scx.dsq_list.node, &dsq->list);
1754 else
1755 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1756 }
1757
1758 /* seq records the order tasks are queued, used by BPF DSQ iterator */
1759 dsq->seq++;
1760 p->scx.dsq_seq = dsq->seq;
1761
1762 dsq_mod_nr(dsq, 1);
1763 p->scx.dsq = dsq;
1764
1765 /*
1766 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1767 * direct dispatch path, but we clear them here because the direct
1768 * dispatch verdict may be overridden on the enqueue path during e.g.
1769 * bypass.
1770 */
1771 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1772 p->scx.ddsp_enq_flags = 0;
1773
1774 /*
1775 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1776 * match waiters' load_acquire.
1777 */
1778 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1779 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1780
1781 if (is_local) {
1782 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1783 bool preempt = false;
1784
1785 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1786 rq->curr->sched_class == &ext_sched_class) {
1787 rq->curr->scx.slice = 0;
1788 preempt = true;
1789 }
1790
1791 if (preempt || sched_class_above(&ext_sched_class,
1792 rq->curr->sched_class))
1793 resched_curr(rq);
1794 } else {
1795 raw_spin_unlock(&dsq->lock);
1796 }
1797 }
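
/*
 * Illustrative BPF-side usage (a sketch, assuming the scx_bpf_dsq_insert*()
 * kfuncs and the BPF_STRUCT_OPS() helper used by the in-tree example
 * schedulers; MY_DSQ is a hypothetical user DSQ created with
 * scx_bpf_create_dsq()): vtime (PRIQ) insertion must target a user DSQ -
 * inserting with a vtime into SCX_DSQ_LOCAL or SCX_DSQ_GLOBAL trips the
 * built-in DSQ check above.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert_vtime(p, MY_DSQ, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 */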
1798
1799 static void task_unlink_from_dsq(struct task_struct *p,
1800 struct scx_dispatch_q *dsq)
1801 {
1802 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1803
1804 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1805 rb_erase(&p->scx.dsq_priq, &dsq->priq);
1806 RB_CLEAR_NODE(&p->scx.dsq_priq);
1807 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1808 }
1809
1810 list_del_init(&p->scx.dsq_list.node);
1811 dsq_mod_nr(dsq, -1);
1812 }
1813
1814 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1815 {
1816 struct scx_dispatch_q *dsq = p->scx.dsq;
1817 bool is_local = dsq == &rq->scx.local_dsq;
1818
1819 if (!dsq) {
1820 /*
1821 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1822 * Unlinking is all that's needed to cancel.
1823 */
1824 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1825 list_del_init(&p->scx.dsq_list.node);
1826
1827 /*
1828 * When dispatching directly from the BPF scheduler to a local
1829 * DSQ, the task isn't associated with any DSQ but
1830 * @p->scx.holding_cpu may be set under the protection of
1831 * %SCX_OPSS_DISPATCHING.
1832 */
1833 if (p->scx.holding_cpu >= 0)
1834 p->scx.holding_cpu = -1;
1835
1836 return;
1837 }
1838
1839 if (!is_local)
1840 raw_spin_lock(&dsq->lock);
1841
1842 /*
1843 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1844 * change underneath us.
1845 */
1846 if (p->scx.holding_cpu < 0) {
1847 /* @p must still be on @dsq, dequeue */
1848 task_unlink_from_dsq(p, dsq);
1849 } else {
1850 /*
1851 * We're racing against dispatch_to_local_dsq() which already
1852 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1853 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1854 * the race.
1855 */
1856 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1857 p->scx.holding_cpu = -1;
1858 }
1859 p->scx.dsq = NULL;
1860
1861 if (!is_local)
1862 raw_spin_unlock(&dsq->lock);
1863 }
1864
1865 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1866 struct task_struct *p)
1867 {
1868 struct scx_dispatch_q *dsq;
1869
1870 if (dsq_id == SCX_DSQ_LOCAL)
1871 return &rq->scx.local_dsq;
1872
1873 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1874 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1875
1876 if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1877 return find_global_dsq(p);
1878
1879 return &cpu_rq(cpu)->scx.local_dsq;
1880 }
1881
1882 if (dsq_id == SCX_DSQ_GLOBAL)
1883 dsq = find_global_dsq(p);
1884 else
1885 dsq = find_user_dsq(dsq_id);
1886
1887 if (unlikely(!dsq)) {
1888 scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1889 dsq_id, p->comm, p->pid);
1890 return find_global_dsq(p);
1891 }
1892
1893 return dsq;
1894 }
1895
1896 static void mark_direct_dispatch(struct task_struct *ddsp_task,
1897 struct task_struct *p, u64 dsq_id,
1898 u64 enq_flags)
1899 {
1900 /*
1901 * Mark that dispatch already happened from ops.select_cpu() or
1902 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1903 * which can never match a valid task pointer.
1904 */
1905 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1906
1907 /* @p must match the task on the enqueue path */
1908 if (unlikely(p != ddsp_task)) {
1909 if (IS_ERR(ddsp_task))
1910 scx_ops_error("%s[%d] already direct-dispatched",
1911 p->comm, p->pid);
1912 else
1913 scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1914 ddsp_task->comm, ddsp_task->pid,
1915 p->comm, p->pid);
1916 return;
1917 }
1918
1919 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1920 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1921
1922 p->scx.ddsp_dsq_id = dsq_id;
1923 p->scx.ddsp_enq_flags = enq_flags;
1924 }
1925
1926 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1927 {
1928 struct rq *rq = task_rq(p);
1929 struct scx_dispatch_q *dsq =
1930 find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
1931
1932 touch_core_sched_dispatch(rq, p);
1933
1934 p->scx.ddsp_enq_flags |= enq_flags;
1935
1936 /*
1937 * We are in the enqueue path with @rq locked and pinned, and thus can't
1938 * double lock a remote rq and enqueue to its local DSQ. For
1939 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1940 * the enqueue so that it's executed when @rq can be unlocked.
1941 */
1942 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1943 unsigned long opss;
1944
1945 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1946
1947 switch (opss & SCX_OPSS_STATE_MASK) {
1948 case SCX_OPSS_NONE:
1949 break;
1950 case SCX_OPSS_QUEUEING:
1951 /*
1952 * As @p was never passed to the BPF side, _release is
1953 * not strictly necessary. Still do it for consistency.
1954 */
1955 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1956 break;
1957 default:
1958 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1959 p->comm, p->pid, opss);
1960 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1961 break;
1962 }
1963
1964 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1965 list_add_tail(&p->scx.dsq_list.node,
1966 &rq->scx.ddsp_deferred_locals);
1967 schedule_deferred(rq);
1968 return;
1969 }
1970
1971 dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1972 }
1973
1974 static bool scx_rq_online(struct rq *rq)
1975 {
1976 /*
1977 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1978 * the online state as seen from the BPF scheduler. cpu_active() test
1979 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1980 * stay set until the current scheduling operation is complete even if
1981 * we aren't locking @rq.
1982 */
1983 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1984 }
1985
1986 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1987 int sticky_cpu)
1988 {
1989 struct task_struct **ddsp_taskp;
1990 unsigned long qseq;
1991
1992 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1993
1994 /* rq migration */
1995 if (sticky_cpu == cpu_of(rq))
1996 goto local_norefill;
1997
1998 /*
1999 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2000 * is offline and are just running the hotplug path. Don't bother the
2001 * BPF scheduler.
2002 */
2003 if (!scx_rq_online(rq))
2004 goto local;
2005
2006 if (scx_rq_bypassing(rq))
2007 goto global;
2008
2009 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2010 goto direct;
2011
2012 /* see %SCX_OPS_ENQ_EXITING */
2013 if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
2014 unlikely(p->flags & PF_EXITING))
2015 goto local;
2016
2017 if (!SCX_HAS_OP(enqueue))
2018 goto global;
2019
2020 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2021 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2022
2023 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2024 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2025
2026 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2027 WARN_ON_ONCE(*ddsp_taskp);
2028 *ddsp_taskp = p;
2029
2030 SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
2031
2032 *ddsp_taskp = NULL;
2033 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2034 goto direct;
2035
2036 /*
2037 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2038 * dequeue may be waiting. The store_release matches their load_acquire.
2039 */
2040 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2041 return;
2042
2043 direct:
2044 direct_dispatch(p, enq_flags);
2045 return;
2046
2047 local:
2048 /*
2049 * For task-ordering, slice refill must be treated as implying the end
2050 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2051 * higher priority it becomes from scx_prio_less()'s POV.
2052 */
2053 touch_core_sched(rq, p);
2054 p->scx.slice = SCX_SLICE_DFL;
2055 local_norefill:
2056 dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2057 return;
2058
2059 global:
2060 touch_core_sched(rq, p); /* see the comment in local: */
2061 p->scx.slice = SCX_SLICE_DFL;
2062 dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2063 }
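
/*
 * Illustrative BPF-side counterpart (a sketch, assuming the kfuncs used by
 * the in-tree example schedulers): the ddsp_dsq_id test above is what lets
 * a direct dispatch issued from ops.select_cpu() bypass ops.enqueue()
 * entirely, e.g.:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */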
2064
2065 static bool task_runnable(const struct task_struct *p)
2066 {
2067 return !list_empty(&p->scx.runnable_node);
2068 }
2069
2070 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2071 {
2072 lockdep_assert_rq_held(rq);
2073
2074 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2075 p->scx.runnable_at = jiffies;
2076 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2077 }
2078
2079 /*
2080 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2081	 * appended to the runnable_list.
2082 */
2083 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2084 }
2085
2086 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2087 {
2088 list_del_init(&p->scx.runnable_node);
2089 if (reset_runnable_at)
2090 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2091 }
2092
2093 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2094 {
2095 int sticky_cpu = p->scx.sticky_cpu;
2096
2097 if (enq_flags & ENQUEUE_WAKEUP)
2098 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2099
2100 enq_flags |= rq->scx.extra_enq_flags;
2101
2102 if (sticky_cpu >= 0)
2103 p->scx.sticky_cpu = -1;
2104
2105 /*
2106 * Restoring a running task will be immediately followed by
2107 * set_next_task_scx() which expects the task to not be on the BPF
2108 * scheduler as tasks can only start running through local DSQs. Force
2109 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2110 */
2111 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2112 sticky_cpu = cpu_of(rq);
2113
2114 if (p->scx.flags & SCX_TASK_QUEUED) {
2115 WARN_ON_ONCE(!task_runnable(p));
2116 goto out;
2117 }
2118
2119 set_task_runnable(rq, p);
2120 p->scx.flags |= SCX_TASK_QUEUED;
2121 rq->scx.nr_running++;
2122 add_nr_running(rq, 1);
2123
2124 if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2125 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2126
2127 if (enq_flags & SCX_ENQ_WAKEUP)
2128 touch_core_sched(rq, p);
2129
2130 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2131 out:
2132 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2133 }
2134
2135 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2136 {
2137 unsigned long opss;
2138
2139 /* dequeue is always temporary, don't reset runnable_at */
2140 clr_task_runnable(p, false);
2141
2142 /* acquire ensures that we see the preceding updates on QUEUED */
2143 opss = atomic_long_read_acquire(&p->scx.ops_state);
2144
2145 switch (opss & SCX_OPSS_STATE_MASK) {
2146 case SCX_OPSS_NONE:
2147 break;
2148 case SCX_OPSS_QUEUEING:
2149 /*
2150 * QUEUEING is started and finished while holding @p's rq lock.
2151 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2152 */
2153 BUG();
2154 case SCX_OPSS_QUEUED:
2155 if (SCX_HAS_OP(dequeue))
2156 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2157
2158 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2159 SCX_OPSS_NONE))
2160 break;
2161 fallthrough;
2162 case SCX_OPSS_DISPATCHING:
2163 /*
2164 * If @p is being dispatched from the BPF scheduler to a DSQ,
2165 * wait for the transfer to complete so that @p doesn't get
2166 * added to its DSQ after dequeueing is complete.
2167 *
2168 * As we're waiting on DISPATCHING with the rq locked, the
2169 * dispatching side shouldn't try to lock the rq while
2170 * DISPATCHING is set. See dispatch_to_local_dsq().
2171 *
2172 * DISPATCHING shouldn't have qseq set and control can reach
2173 * here with NONE @opss from the above QUEUED case block.
2174 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2175 */
2176 wait_ops_state(p, SCX_OPSS_DISPATCHING);
2177 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2178 break;
2179 }
2180 }
2181
2182 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2183 {
2184 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2185 WARN_ON_ONCE(task_runnable(p));
2186 return true;
2187 }
2188
2189 ops_dequeue(p, deq_flags);
2190
2191 /*
2192 * A currently running task which is going off @rq first gets dequeued
2193 * and then stops running. As we want running <-> stopping transitions
2194 * to be contained within runnable <-> quiescent transitions, trigger
2195 * ->stopping() early here instead of in put_prev_task_scx().
2196 *
2197 * @p may go through multiple stopping <-> running transitions between
2198 * here and put_prev_task_scx() if task attribute changes occur while
2199 * balance_scx() leaves @rq unlocked. However, they don't contain any
2200 * information meaningful to the BPF scheduler and can be suppressed by
2201 * skipping the callbacks if the task is !QUEUED.
2202 */
2203 if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2204 update_curr_scx(rq);
2205 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2206 }
2207
2208 if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2209 SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2210
2211 if (deq_flags & SCX_DEQ_SLEEP)
2212 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2213 else
2214 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2215
2216 p->scx.flags &= ~SCX_TASK_QUEUED;
2217 rq->scx.nr_running--;
2218 sub_nr_running(rq, 1);
2219
2220 dispatch_dequeue(rq, p);
2221 return true;
2222 }
2223
2224 static void yield_task_scx(struct rq *rq)
2225 {
2226 struct task_struct *p = rq->curr;
2227
2228 if (SCX_HAS_OP(yield))
2229 SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2230 else
2231 p->scx.slice = 0;
2232 }
2233
2234 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2235 {
2236 struct task_struct *from = rq->curr;
2237
2238 if (SCX_HAS_OP(yield))
2239 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2240 else
2241 return false;
2242 }
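
/*
 * Illustrative ops.yield() (a sketch): the simplest implementation mirrors
 * the default behavior above for sched_yield() by giving up the remaining
 * slice, and declines yield_to() requests (@to != NULL) by returning false.
 *
 *	bool BPF_STRUCT_OPS(example_yield, struct task_struct *from,
 *			    struct task_struct *to)
 *	{
 *		if (to)
 *			return false;
 *		from->scx.slice = 0;
 *		return true;
 *	}
 */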
2243
2244 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2245 struct scx_dispatch_q *src_dsq,
2246 struct rq *dst_rq)
2247 {
2248 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2249
2250 /* @dsq is locked and @p is on @dst_rq */
2251 lockdep_assert_held(&src_dsq->lock);
2252 lockdep_assert_rq_held(dst_rq);
2253
2254 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2255
2256 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2257 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2258 else
2259 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2260
2261 dsq_mod_nr(dst_dsq, 1);
2262 p->scx.dsq = dst_dsq;
2263 }
2264
2265 #ifdef CONFIG_SMP
2266 /**
2267 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2268 * @p: task to move
2269 * @enq_flags: %SCX_ENQ_*
2270 * @src_rq: rq to move the task from, locked on entry, released on return
2271 * @dst_rq: rq to move the task into, locked on return
2272 *
2273 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2274 */
2275 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2276 struct rq *src_rq, struct rq *dst_rq)
2277 {
2278 lockdep_assert_rq_held(src_rq);
2279
2280 /* the following marks @p MIGRATING which excludes dequeue */
2281 deactivate_task(src_rq, p, 0);
2282 set_task_cpu(p, cpu_of(dst_rq));
2283 p->scx.sticky_cpu = cpu_of(dst_rq);
2284
2285 raw_spin_rq_unlock(src_rq);
2286 raw_spin_rq_lock(dst_rq);
2287
2288 /*
2289 * We want to pass scx-specific enq_flags but activate_task() will
2290	 * truncate the upper 32 bits. As we own @dst_rq, we can pass them through
2291	 * @dst_rq->scx.extra_enq_flags instead.
2292 */
2293 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2294 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2295 dst_rq->scx.extra_enq_flags = enq_flags;
2296 activate_task(dst_rq, p, 0);
2297 dst_rq->scx.extra_enq_flags = 0;
2298 }
2299
2300 /*
2301 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2302 * differences:
2303 *
2304 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2305 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2306 * this CPU?".
2307 *
2308 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2309 * must be allowed to finish on the CPU that it's currently on regardless of
2310 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2311 * BPF scheduler shouldn't attempt to migrate a task which has migration
2312 * disabled.
2313 *
2314 * - The BPF scheduler is bypassed while the rq is offline and we can always say
2315 *   no to BPF-scheduler-initiated migrations while offline.
2316 */
2317 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2318 bool trigger_error)
2319 {
2320 int cpu = cpu_of(rq);
2321
2322 /*
2323 * We don't require the BPF scheduler to avoid dispatching to offline
2324 * CPUs mostly for convenience but also because CPUs can go offline
2325 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2326 * picked CPU is outside the allowed mask.
2327 */
2328 if (!task_allowed_on_cpu(p, cpu)) {
2329 if (trigger_error)
2330 scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
2331 cpu_of(rq), p->comm, p->pid);
2332 return false;
2333 }
2334
2335 if (unlikely(is_migration_disabled(p)))
2336 return false;
2337
2338 if (!scx_rq_online(rq))
2339 return false;
2340
2341 return true;
2342 }
2343
2344 /**
2345 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2346 * @p: target task
2347 * @dsq: locked DSQ @p is currently on
2348 * @src_rq: rq @p is currently on, stable with @dsq locked
2349 *
2350 * Called with @dsq locked but no rq's locked. We want to move @p to a different
2351 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2352 * required when transferring into a local DSQ. Even when transferring into a
2353 * non-local DSQ, it's better to use the same mechanism to protect against
2354 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2355 * @src_rq is locked, which e.g. scx_dump_task() depends on.
2356 *
2357 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2358 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2359 * this may race with dequeue, which can't drop the rq lock or fail, do a little
2360 * dancing from our side.
2361 *
2362 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2363 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2364 * would be cleared to -1. While other cpus may have updated it to different
2365 * values afterwards, as this operation can't be preempted or recurse, the
2366 * holding_cpu can never become this CPU again before we're done. Thus, we can
2367 * tell whether we lost to dequeue by testing whether the holding_cpu still
2368 * points to this CPU. See dispatch_dequeue() for the counterpart.
2369 *
2370 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2371 * still valid. %false if lost to dequeue.
2372 */
2373 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2374 struct scx_dispatch_q *dsq,
2375 struct rq *src_rq)
2376 {
2377 s32 cpu = raw_smp_processor_id();
2378
2379 lockdep_assert_held(&dsq->lock);
2380
2381 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2382 task_unlink_from_dsq(p, dsq);
2383 p->scx.holding_cpu = cpu;
2384
2385 raw_spin_unlock(&dsq->lock);
2386 raw_spin_rq_lock(src_rq);
2387
2388 /* task_rq couldn't have changed if we're still the holding cpu */
2389 return likely(p->scx.holding_cpu == cpu) &&
2390 !WARN_ON_ONCE(src_rq != task_rq(p));
2391 }
2392
2393 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2394 struct scx_dispatch_q *dsq, struct rq *src_rq)
2395 {
2396 raw_spin_rq_unlock(this_rq);
2397
2398 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2399 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2400 return true;
2401 } else {
2402 raw_spin_rq_unlock(src_rq);
2403 raw_spin_rq_lock(this_rq);
2404 return false;
2405 }
2406 }
2407 #else /* CONFIG_SMP */
2408 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2409 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
2410 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2411 #endif /* CONFIG_SMP */
2412
2413 /**
2414 * move_task_between_dsqs() - Move a task from one DSQ to another
2415 * @p: target task
2416 * @enq_flags: %SCX_ENQ_*
2417 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2418 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2419 *
2420 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2421 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2422 * will change. As @p's task_rq is locked, this function doesn't need to use the
2423 * holding_cpu mechanism.
2424 *
2425 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2426 * return value, is locked.
2427 */
2428 static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
2429 struct scx_dispatch_q *src_dsq,
2430 struct scx_dispatch_q *dst_dsq)
2431 {
2432 struct rq *src_rq = task_rq(p), *dst_rq;
2433
2434 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2435 lockdep_assert_held(&src_dsq->lock);
2436 lockdep_assert_rq_held(src_rq);
2437
2438 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2439 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2440 if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
2441 dst_dsq = find_global_dsq(p);
2442 dst_rq = src_rq;
2443 }
2444 } else {
2445 /* no need to migrate if destination is a non-local DSQ */
2446 dst_rq = src_rq;
2447 }
2448
2449 /*
2450	 * Move @p into @dst_dsq. If @dst_dsq is the local DSQ of a different
2451 * CPU, @p will be migrated.
2452 */
2453 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2454 /* @p is going from a non-local DSQ to a local DSQ */
2455 if (src_rq == dst_rq) {
2456 task_unlink_from_dsq(p, src_dsq);
2457 move_local_task_to_local_dsq(p, enq_flags,
2458 src_dsq, dst_rq);
2459 raw_spin_unlock(&src_dsq->lock);
2460 } else {
2461 raw_spin_unlock(&src_dsq->lock);
2462 move_remote_task_to_local_dsq(p, enq_flags,
2463 src_rq, dst_rq);
2464 }
2465 } else {
2466 /*
2467 * @p is going from a non-local DSQ to a non-local DSQ. As
2468		 * @src_dsq is already locked, do an abbreviated dequeue.
2469 */
2470 task_unlink_from_dsq(p, src_dsq);
2471 p->scx.dsq = NULL;
2472 raw_spin_unlock(&src_dsq->lock);
2473
2474 dispatch_enqueue(dst_dsq, p, enq_flags);
2475 }
2476
2477 return dst_rq;
2478 }
2479
2480 /*
2481 * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2482 * banging on the same DSQ on a large NUMA system to the point where switching
2483 * to the bypass mode can take a long time. Inject artificial delays while the
2484 * bypass mode is switching to guarantee timely completion.
2485 */
2486 static void scx_ops_breather(struct rq *rq)
2487 {
2488 u64 until;
2489
2490 lockdep_assert_rq_held(rq);
2491
2492 if (likely(!atomic_read(&scx_ops_breather_depth)))
2493 return;
2494
2495 raw_spin_rq_unlock(rq);
2496
2497 until = ktime_get_ns() + NSEC_PER_MSEC;
2498
2499 do {
2500 int cnt = 1024;
2501 while (atomic_read(&scx_ops_breather_depth) && --cnt)
2502 cpu_relax();
2503 } while (atomic_read(&scx_ops_breather_depth) &&
2504 time_before64(ktime_get_ns(), until));
2505
2506 raw_spin_rq_lock(rq);
2507 }
2508
2509 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2510 {
2511 struct task_struct *p;
2512 retry:
2513 /*
2514 * This retry loop can repeatedly race against scx_ops_bypass()
2515 * dequeueing tasks from @dsq trying to put the system into the bypass
2516 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can
2517 * live-lock the machine into soft lockups. Give a breather.
2518 */
2519 scx_ops_breather(rq);
2520
2521 /*
2522 * The caller can't expect to successfully consume a task if the task's
2523 * addition to @dsq isn't guaranteed to be visible somehow. Test
2524 * @dsq->list without locking and skip if it seems empty.
2525 */
2526 if (list_empty(&dsq->list))
2527 return false;
2528
2529 raw_spin_lock(&dsq->lock);
2530
2531 nldsq_for_each_task(p, dsq) {
2532 struct rq *task_rq = task_rq(p);
2533
2534 if (rq == task_rq) {
2535 task_unlink_from_dsq(p, dsq);
2536 move_local_task_to_local_dsq(p, 0, dsq, rq);
2537 raw_spin_unlock(&dsq->lock);
2538 return true;
2539 }
2540
2541 if (task_can_run_on_remote_rq(p, rq, false)) {
2542 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2543 return true;
2544 goto retry;
2545 }
2546 }
2547
2548 raw_spin_unlock(&dsq->lock);
2549 return false;
2550 }
2551
2552 static bool consume_global_dsq(struct rq *rq)
2553 {
2554 int node = cpu_to_node(cpu_of(rq));
2555
2556 return consume_dispatch_q(rq, global_dsqs[node]);
2557 }
2558
2559 /**
2560 * dispatch_to_local_dsq - Dispatch a task to a local dsq
2561 * @rq: current rq which is locked
2562 * @dst_dsq: destination DSQ
2563 * @p: task to dispatch
2564 * @enq_flags: %SCX_ENQ_*
2565 *
2566 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2567 * DSQ. This function performs all the synchronization dancing needed because
2568 * local DSQs are protected with rq locks.
2569 *
2570 * The caller must have exclusive ownership of @p (e.g. through
2571 * %SCX_OPSS_DISPATCHING).
2572 */
2573 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2574 struct task_struct *p, u64 enq_flags)
2575 {
2576 struct rq *src_rq = task_rq(p);
2577 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2578
2579 /*
2580 * We're synchronized against dequeue through DISPATCHING. As @p can't
2581 * be dequeued, its task_rq and cpus_allowed are stable too.
2582 *
2583 * If dispatching to @rq that @p is already on, no lock dancing needed.
2584 */
2585 if (rq == src_rq && rq == dst_rq) {
2586 dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2587 return;
2588 }
2589
2590 #ifdef CONFIG_SMP
2591 if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2592 dispatch_enqueue(find_global_dsq(p), p,
2593 enq_flags | SCX_ENQ_CLEAR_OPSS);
2594 return;
2595 }
2596
2597 /*
2598 * @p is on a possibly remote @src_rq which we need to lock to move the
2599 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2600 * on DISPATCHING, so we can't grab @src_rq lock while holding
2601 * DISPATCHING.
2602 *
2603 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2604 * we're moving from a DSQ and use the same mechanism - mark the task
2605 * under transfer with holding_cpu, release DISPATCHING and then follow
2606 * the same protocol. See unlink_dsq_and_lock_src_rq().
2607 */
2608 p->scx.holding_cpu = raw_smp_processor_id();
2609
2610 /* store_release ensures that dequeue sees the above */
2611 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2612
2613 /* switch to @src_rq lock */
2614 if (rq != src_rq) {
2615 raw_spin_rq_unlock(rq);
2616 raw_spin_rq_lock(src_rq);
2617 }
2618
2619 /* task_rq couldn't have changed if we're still the holding cpu */
2620 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2621 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2622 /*
2623 * If @p is staying on the same rq, there's no need to go
2624 * through the full deactivate/activate cycle. Optimize by
2625 * abbreviating move_remote_task_to_local_dsq().
2626 */
2627 if (src_rq == dst_rq) {
2628 p->scx.holding_cpu = -1;
2629 dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2630 } else {
2631 move_remote_task_to_local_dsq(p, enq_flags,
2632 src_rq, dst_rq);
2633 }
2634
2635 /* if the destination CPU is idle, wake it up */
2636 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2637 resched_curr(dst_rq);
2638 }
2639
2640 /* switch back to @rq lock */
2641 if (rq != dst_rq) {
2642 raw_spin_rq_unlock(dst_rq);
2643 raw_spin_rq_lock(rq);
2644 }
2645 #else /* CONFIG_SMP */
2646 BUG(); /* control can not reach here on UP */
2647 #endif /* CONFIG_SMP */
2648 }
2649
2650 /**
2651 * finish_dispatch - Asynchronously finish dispatching a task
2652 * @rq: current rq which is locked
2653 * @p: task to finish dispatching
2654 * @qseq_at_dispatch: qseq when @p started getting dispatched
2655 * @dsq_id: destination DSQ ID
2656 * @enq_flags: %SCX_ENQ_*
2657 *
2658 * Dispatching to local DSQs may need to wait for queueing to complete or
2659 * require rq lock dancing. As we don't wanna do either while inside
2660 * ops.dispatch() to avoid locking order inversion, we split dispatching into
2661 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2662 * task and its qseq. Once ops.dispatch() returns, this function is called to
2663 * finish up.
2664 *
2665 * There is no guarantee that @p is still valid for dispatching or even that it
2666 * was valid in the first place. Make sure that the task is still owned by the
2667 * BPF scheduler and claim the ownership before dispatching.
2668 */
2669 static void finish_dispatch(struct rq *rq, struct task_struct *p,
2670 unsigned long qseq_at_dispatch,
2671 u64 dsq_id, u64 enq_flags)
2672 {
2673 struct scx_dispatch_q *dsq;
2674 unsigned long opss;
2675
2676 touch_core_sched_dispatch(rq, p);
2677 retry:
2678 /*
2679 * No need for _acquire here. @p is accessed only after a successful
2680 * try_cmpxchg to DISPATCHING.
2681 */
2682 opss = atomic_long_read(&p->scx.ops_state);
2683
2684 switch (opss & SCX_OPSS_STATE_MASK) {
2685 case SCX_OPSS_DISPATCHING:
2686 case SCX_OPSS_NONE:
2687 /* someone else already got to it */
2688 return;
2689 case SCX_OPSS_QUEUED:
2690 /*
2691 * If qseq doesn't match, @p has gone through at least one
2692 * dispatch/dequeue and re-enqueue cycle between
2693 * scx_bpf_dsq_insert() and here and we have no claim on it.
2694 */
2695 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2696 return;
2697
2698 /*
2699 * While we know @p is accessible, we don't yet have a claim on
2700 * it - the BPF scheduler is allowed to dispatch tasks
2701 * spuriously and there can be a racing dequeue attempt. Let's
2702 * claim @p by atomically transitioning it from QUEUED to
2703 * DISPATCHING.
2704 */
2705 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2706 SCX_OPSS_DISPATCHING)))
2707 break;
2708 goto retry;
2709 case SCX_OPSS_QUEUEING:
2710 /*
2711 * do_enqueue_task() is in the process of transferring the task
2712 * to the BPF scheduler while holding @p's rq lock. As we aren't
2713 * holding any kernel or BPF resource that the enqueue path may
2714 * depend upon, it's safe to wait.
2715 */
2716 wait_ops_state(p, opss);
2717 goto retry;
2718 }
2719
2720 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2721
2722 dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2723
2724 if (dsq->id == SCX_DSQ_LOCAL)
2725 dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2726 else
2727 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2728 }
2729
2730 static void flush_dispatch_buf(struct rq *rq)
2731 {
2732 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2733 u32 u;
2734
2735 for (u = 0; u < dspc->cursor; u++) {
2736 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2737
2738 finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2739 ent->enq_flags);
2740 }
2741
2742 dspc->nr_tasks += dspc->cursor;
2743 dspc->cursor = 0;
2744 }
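
/*
 * Illustrative ops.dispatch() (a sketch, assuming the kfuncs used by the
 * in-tree example schedulers; MY_DSQ is a hypothetical user DSQ):
 * scx_bpf_dsq_insert() calls made from inside ops.dispatch() only fill the
 * per-CPU dispatch buffer flushed above, while moving a queued task from a
 * shared DSQ straight into the local DSQ is done with
 * scx_bpf_dsq_move_to_local().
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(MY_DSQ);
 *	}
 */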
2745
2746 static int balance_one(struct rq *rq, struct task_struct *prev)
2747 {
2748 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2749 bool prev_on_scx = prev->sched_class == &ext_sched_class;
2750 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2751 int nr_loops = SCX_DSP_MAX_LOOPS;
2752
2753 lockdep_assert_rq_held(rq);
2754 rq->scx.flags |= SCX_RQ_IN_BALANCE;
2755 rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
2756
2757 if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2758 unlikely(rq->scx.cpu_released)) {
2759 /*
2760 * If the previous sched_class for the current CPU was not SCX,
2761 * notify the BPF scheduler that it again has control of the
2762 * core. This callback complements ->cpu_release(), which is
2763 * emitted in switch_class().
2764 */
2765 if (SCX_HAS_OP(cpu_acquire))
2766 SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
2767 rq->scx.cpu_released = false;
2768 }
2769
2770 if (prev_on_scx) {
2771 update_curr_scx(rq);
2772
2773 /*
2774 * If @prev is runnable & has slice left, it has priority and
2775 * fetching more just increases latency for the fetched tasks.
2776 * Tell pick_task_scx() to keep running @prev. If the BPF
2777 * scheduler wants to handle this explicitly, it should
2778 * implement ->cpu_release().
2779 *
2780 * See scx_ops_disable_workfn() for the explanation on the
2781 * bypassing test.
2782 */
2783 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2784 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2785 goto has_tasks;
2786 }
2787 }
2788
2789 /* if there already are tasks to run, nothing to do */
2790 if (rq->scx.local_dsq.nr)
2791 goto has_tasks;
2792
2793 if (consume_global_dsq(rq))
2794 goto has_tasks;
2795
2796 if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2797 goto no_tasks;
2798
2799 dspc->rq = rq;
2800
2801 /*
2802 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2803 * the local DSQ might still end up empty after a successful
2804 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2805 * produced some tasks, retry. The BPF scheduler may depend on this
2806 * looping behavior to simplify its implementation.
2807 */
2808 do {
2809 dspc->nr_tasks = 0;
2810
2811 SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2812 prev_on_scx ? prev : NULL);
2813
2814 flush_dispatch_buf(rq);
2815
2816 if (prev_on_rq && prev->scx.slice) {
2817 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2818 goto has_tasks;
2819 }
2820 if (rq->scx.local_dsq.nr)
2821 goto has_tasks;
2822 if (consume_global_dsq(rq))
2823 goto has_tasks;
2824
2825 /*
2826 * ops.dispatch() can trap us in this loop by repeatedly
2827 * dispatching ineligible tasks. Break out once in a while to
2828 * allow the watchdog to run. As IRQ can't be enabled in
2829 * balance(), we want to complete this scheduling cycle and then
2830 * start a new one. IOW, we want to call resched_curr() on the
2831 * next, most likely idle, task, not the current one. Use
2832 * scx_bpf_kick_cpu() for deferred kicking.
2833 */
2834 if (unlikely(!--nr_loops)) {
2835 scx_bpf_kick_cpu(cpu_of(rq), 0);
2836 break;
2837 }
2838 } while (dspc->nr_tasks);
2839
2840 no_tasks:
2841 /*
2842 * Didn't find another task to run. Keep running @prev unless
2843 * %SCX_OPS_ENQ_LAST is in effect.
2844 */
2845 if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
2846 scx_rq_bypassing(rq))) {
2847 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2848 goto has_tasks;
2849 }
2850 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2851 return false;
2852
2853 has_tasks:
2854 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2855 return true;
2856 }
2857
2858 static int balance_scx(struct rq *rq, struct task_struct *prev,
2859 struct rq_flags *rf)
2860 {
2861 int ret;
2862
2863 rq_unpin_lock(rq, rf);
2864
2865 ret = balance_one(rq, prev);
2866
2867 #ifdef CONFIG_SCHED_SMT
2868 /*
2869 * When core-sched is enabled, this ops.balance() call will be followed
2870 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2871 * siblings too.
2872 */
2873 if (sched_core_enabled(rq)) {
2874 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2875 int scpu;
2876
2877 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2878 struct rq *srq = cpu_rq(scpu);
2879 struct task_struct *sprev = srq->curr;
2880
2881 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2882 update_rq_clock(srq);
2883 balance_one(srq, sprev);
2884 }
2885 }
2886 #endif
2887 rq_repin_lock(rq, rf);
2888
2889 return ret;
2890 }
2891
2892 static void process_ddsp_deferred_locals(struct rq *rq)
2893 {
2894 struct task_struct *p;
2895
2896 lockdep_assert_rq_held(rq);
2897
2898 /*
2899 * Now that @rq can be unlocked, execute the deferred enqueueing of
2900 * tasks directly dispatched to the local DSQs of other CPUs. See
2901 * direct_dispatch(). Keep popping from the head instead of using
2902 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
2903 * temporarily.
2904 */
2905 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2906 struct task_struct, scx.dsq_list.node))) {
2907 struct scx_dispatch_q *dsq;
2908
2909 list_del_init(&p->scx.dsq_list.node);
2910
2911 dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2912 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2913 dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2914 }
2915 }
2916
2917 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2918 {
2919 if (p->scx.flags & SCX_TASK_QUEUED) {
2920 /*
2921 * Core-sched might decide to execute @p before it is
2922 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2923 */
2924 ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2925 dispatch_dequeue(rq, p);
2926 }
2927
2928 p->se.exec_start = rq_clock_task(rq);
2929
2930 /* see dequeue_task_scx() on why we skip when !QUEUED */
2931 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2932 SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2933
2934 clr_task_runnable(p, true);
2935
2936 /*
2937 * @p is getting newly scheduled or got kicked after someone updated its
2938 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2939 */
2940 if ((p->scx.slice == SCX_SLICE_INF) !=
2941 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2942 if (p->scx.slice == SCX_SLICE_INF)
2943 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2944 else
2945 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2946
2947 sched_update_tick_dependency(rq);
2948
2949 /*
2950 * For now, let's refresh the load_avgs just when transitioning
2951 * in and out of nohz. In the future, we might want to add a
2952 * mechanism which calls the following periodically on
2953 * tick-stopped CPUs.
2954 */
2955 update_other_load_avgs(rq);
2956 }
2957 }
2958
2959 static enum scx_cpu_preempt_reason
2960 preempt_reason_from_class(const struct sched_class *class)
2961 {
2962 #ifdef CONFIG_SMP
2963 if (class == &stop_sched_class)
2964 return SCX_CPU_PREEMPT_STOP;
2965 #endif
2966 if (class == &dl_sched_class)
2967 return SCX_CPU_PREEMPT_DL;
2968 if (class == &rt_sched_class)
2969 return SCX_CPU_PREEMPT_RT;
2970 return SCX_CPU_PREEMPT_UNKNOWN;
2971 }
2972
2973 static void switch_class(struct rq *rq, struct task_struct *next)
2974 {
2975 const struct sched_class *next_class = next->sched_class;
2976
2977 #ifdef CONFIG_SMP
2978 /*
2979 * Pairs with the smp_load_acquire() issued by a CPU in
2980 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
2981 * resched.
2982 */
2983 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
2984 #endif
2985 if (!static_branch_unlikely(&scx_ops_cpu_preempt))
2986 return;
2987
2988 /*
2989 * The callback is conceptually meant to convey that the CPU is no
2990 * longer under the control of SCX. Therefore, don't invoke the callback
2991 * if the next class is below SCX (in which case the BPF scheduler has
2992 * actively decided not to schedule any tasks on the CPU).
2993 */
2994 if (sched_class_above(&ext_sched_class, next_class))
2995 return;
2996
2997 /*
2998 * At this point we know that SCX was preempted by a higher priority
2999 * sched_class, so invoke the ->cpu_release() callback if we have not
3000 * done so already. We only send the callback once between SCX being
3001 * preempted, and it regaining control of the CPU.
3002 *
3003 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3004 * next time that balance_scx() is invoked.
3005 */
3006 if (!rq->scx.cpu_released) {
3007 if (SCX_HAS_OP(cpu_release)) {
3008 struct scx_cpu_release_args args = {
3009 .reason = preempt_reason_from_class(next_class),
3010 .task = next,
3011 };
3012
3013 SCX_CALL_OP(SCX_KF_CPU_RELEASE,
3014 cpu_release, cpu_of(rq), &args);
3015 }
3016 rq->scx.cpu_released = true;
3017 }
3018 }
3019
3020 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3021 struct task_struct *next)
3022 {
3023 update_curr_scx(rq);
3024
3025 /* see dequeue_task_scx() on why we skip when !QUEUED */
3026 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3027 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
3028
3029 if (p->scx.flags & SCX_TASK_QUEUED) {
3030 set_task_runnable(rq, p);
3031
3032 /*
3033 * If @p has slice left and is being put, @p is getting
3034 * preempted by a higher priority scheduler class or core-sched
3035 * forcing a different task. Leave it at the head of the local
3036 * DSQ.
3037 */
3038 if (p->scx.slice && !scx_rq_bypassing(rq)) {
3039 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3040 goto switch_class;
3041 }
3042
3043 /*
3044 * If @p is runnable but we're about to enter a lower
3045 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3046 * ops.enqueue() that @p is the only one available for this cpu,
3047 * which should trigger an explicit follow-up scheduling event.
3048 */
3049 if (sched_class_above(&ext_sched_class, next->sched_class)) {
3050 WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
3051 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3052 } else {
3053 do_enqueue_task(rq, p, 0, -1);
3054 }
3055 }
3056
3057 switch_class:
3058 if (next && next->sched_class != &ext_sched_class)
3059 switch_class(rq, next);
3060 }
3061
3062 static struct task_struct *first_local_task(struct rq *rq)
3063 {
3064 return list_first_entry_or_null(&rq->scx.local_dsq.list,
3065 struct task_struct, scx.dsq_list.node);
3066 }
3067
3068 static struct task_struct *pick_task_scx(struct rq *rq)
3069 {
3070 struct task_struct *prev = rq->curr;
3071 struct task_struct *p;
3072 bool prev_on_scx = prev->sched_class == &ext_sched_class;
3073 bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3074 bool kick_idle = false;
3075
3076 /*
3077 * WORKAROUND:
3078 *
3079	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
3080 * have gone through balance_scx(). Unfortunately, there currently is a
3081 * bug where fair could say yes on balance() but no on pick_task(),
3082 * which then ends up calling pick_task_scx() without preceding
3083 * balance_scx().
3084 *
3085 * Keep running @prev if possible and avoid stalling from entering idle
3086 * without balancing.
3087 *
3088 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3089 * if pick_task_scx() is called without preceding balance_scx().
3090 */
3091 if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3092 if (prev_on_scx) {
3093 keep_prev = true;
3094 } else {
3095 keep_prev = false;
3096 kick_idle = true;
3097 }
3098 } else if (unlikely(keep_prev && !prev_on_scx)) {
3099 /* only allowed during transitions */
3100 WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
3101 keep_prev = false;
3102 }
3103
3104 /*
3105 * If balance_scx() is telling us to keep running @prev, replenish slice
3106 * if necessary and keep running @prev. Otherwise, pop the first one
3107 * from the local DSQ.
3108 */
3109 if (keep_prev) {
3110 p = prev;
3111 if (!p->scx.slice)
3112 p->scx.slice = SCX_SLICE_DFL;
3113 } else {
3114 p = first_local_task(rq);
3115 if (!p) {
3116 if (kick_idle)
3117 scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3118 return NULL;
3119 }
3120
3121 if (unlikely(!p->scx.slice)) {
3122 if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
3123 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3124 p->comm, p->pid, __func__);
3125 scx_warned_zero_slice = true;
3126 }
3127 p->scx.slice = SCX_SLICE_DFL;
3128 }
3129 }
3130
3131 return p;
3132 }
3133
3134 #ifdef CONFIG_SCHED_CORE
3135 /**
3136 * scx_prio_less - Task ordering for core-sched
3137 * @a: task A
3138 * @b: task B
3139 *
3140 * Core-sched is implemented as an additional scheduling layer on top of the
3141 * usual sched_class'es and needs to find out the expected task ordering. For
3142 * SCX, core-sched calls this function to interrogate the task ordering.
3143 *
3144 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3145 * to implement the default task ordering. The older the timestamp, the higher
3146 * priority the task - the global FIFO ordering matching the default scheduling
3147 * behavior.
3148 *
3149 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3150 * implement FIFO ordering within each local DSQ. See pick_task_scx().
3151 */
3152 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3153 bool in_fi)
3154 {
3155 /*
3156 * The const qualifiers are dropped from task_struct pointers when
3157 * calling ops.core_sched_before(). Accesses are controlled by the
3158 * verifier.
3159 */
3160 if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3161 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3162 (struct task_struct *)a,
3163 (struct task_struct *)b);
3164 else
3165 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3166 }
3167 #endif /* CONFIG_SCHED_CORE */
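
/*
 * Illustrative ops.core_sched_before() (a sketch): returning true means @a
 * should run before @b on sibling CPUs. A vtime-based scheduler might reuse
 * its dispatch ordering key, comparing via a signed difference so that
 * wrap-around is handled:
 *
 *	bool BPF_STRUCT_OPS(example_core_sched_before, struct task_struct *a,
 *			    struct task_struct *b)
 *	{
 *		return (s64)(a->scx.dsq_vtime - b->scx.dsq_vtime) < 0;
 *	}
 */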
3168
3169 #ifdef CONFIG_SMP
3170
3171 static bool test_and_clear_cpu_idle(int cpu)
3172 {
3173 #ifdef CONFIG_SCHED_SMT
3174 /*
3175 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3176 * cluster is not wholly idle either way. This also prevents
3177 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3178 */
3179 if (sched_smt_active()) {
3180 const struct cpumask *smt = cpu_smt_mask(cpu);
3181
3182 /*
3183 * If offline, @cpu is not its own sibling and
3184 * scx_pick_idle_cpu() can get caught in an infinite loop as
3185 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3186 * is eventually cleared.
3187 */
3188 if (cpumask_intersects(smt, idle_masks.smt))
3189 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3190 else if (cpumask_test_cpu(cpu, idle_masks.smt))
3191 __cpumask_clear_cpu(cpu, idle_masks.smt);
3192 }
3193 #endif
3194 return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3195 }
3196
3197 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3198 {
3199 int cpu;
3200
3201 retry:
3202 if (sched_smt_active()) {
3203 cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3204 if (cpu < nr_cpu_ids)
3205 goto found;
3206
3207 if (flags & SCX_PICK_IDLE_CORE)
3208 return -EBUSY;
3209 }
3210
3211 cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3212 if (cpu >= nr_cpu_ids)
3213 return -EBUSY;
3214
3215 found:
3216 if (test_and_clear_cpu_idle(cpu))
3217 return cpu;
3218 else
3219 goto retry;
3220 }
3221
3222 /*
3223 * Return true if the LLC domains do not perfectly overlap with the NUMA
3224 * domains, false otherwise.
3225 */
3226 static bool llc_numa_mismatch(void)
3227 {
3228 int cpu;
3229
3230 /*
3231 * We need to scan all online CPUs to verify whether their scheduling
3232 * domains overlap.
3233 *
3234 * While it is rare to encounter architectures with asymmetric NUMA
3235 * topologies, CPU hotplugging or virtualized environments can result
3236 * in asymmetric configurations.
3237 *
3238 * For example:
3239 *
3240 * NUMA 0:
3241 * - LLC 0: cpu0..cpu7
3242 * - LLC 1: cpu8..cpu15 [offline]
3243 *
3244 * NUMA 1:
3245 * - LLC 0: cpu16..cpu23
3246 * - LLC 1: cpu24..cpu31
3247 *
3248 * In this case, if we only check the first online CPU (cpu0), we might
3249 * incorrectly assume that the LLC and NUMA domains are fully
3250 * overlapping, when in fact NUMA 1 contains two distinct LLC
3251 * domains.
3252 */
3253 for_each_online_cpu(cpu) {
3254 const struct cpumask *numa_cpus;
3255 struct sched_domain *sd;
3256
3257 sd = rcu_dereference(per_cpu(sd_llc, cpu));
3258 if (!sd)
3259 return true;
3260
3261 numa_cpus = cpumask_of_node(cpu_to_node(cpu));
3262 if (sd->span_weight != cpumask_weight(numa_cpus))
3263 return true;
3264 }
3265
3266 return false;
3267 }
3268
3269 /*
3270 * Initialize topology-aware scheduling.
3271 *
3272 * Detect if the system has multiple LLC or multiple NUMA domains and enable
3273 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
3274 * selection policy.
3275 *
3276 * Assumption: the kernel's internal topology representation assumes that each
3277 * CPU belongs to a single LLC domain, and that each LLC domain is entirely
3278 * contained within a single NUMA node.
3279 */
3280 static void update_selcpu_topology(void)
3281 {
3282 bool enable_llc = false, enable_numa = false;
3283 struct sched_domain *sd;
3284 const struct cpumask *cpus;
3285 s32 cpu = cpumask_first(cpu_online_mask);
3286
3287 /*
3288 * Enable LLC domain optimization only when there are multiple LLC
3289 * domains among the online CPUs. If all online CPUs are part of a
3290 * single LLC domain, the idle CPU selection logic can choose any
3291 * online CPU without bias.
3292 *
3293 * Note that it is sufficient to check the LLC domain of the first
3294 * online CPU to determine whether a single LLC domain includes all
3295 * CPUs.
3296 */
3297 rcu_read_lock();
3298 sd = rcu_dereference(per_cpu(sd_llc, cpu));
3299 if (sd) {
3300 if (sd->span_weight < num_online_cpus())
3301 enable_llc = true;
3302 }
3303
3304 /*
3305 * Enable NUMA optimization only when there are multiple NUMA domains
3306 * among the online CPUs and the NUMA domains don't perfectly overlap
3307 * with the LLC domains.
3308 *
3309 * If all CPUs belong to the same NUMA node and the same LLC domain,
3310 * enabling both NUMA and LLC optimizations is unnecessary, as checking
3311 * for an idle CPU in the same domain twice is redundant.
3312 */
3313 cpus = cpumask_of_node(cpu_to_node(cpu));
3314 if ((cpumask_weight(cpus) < num_online_cpus()) && llc_numa_mismatch())
3315 enable_numa = true;
3316 rcu_read_unlock();
3317
3318 pr_debug("sched_ext: LLC idle selection %s\n",
3319 enable_llc ? "enabled" : "disabled");
3320 pr_debug("sched_ext: NUMA idle selection %s\n",
3321 enable_numa ? "enabled" : "disabled");
3322
3323 if (enable_llc)
3324 static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
3325 else
3326 static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
3327 if (enable_numa)
3328 static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
3329 else
3330 static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
3331 }
3332
3333 /*
3334 * Built-in CPU idle selection policy:
3335 *
3336 * 1. Prioritize full-idle cores:
3337 * - always prioritize CPUs from fully idle cores (both logical CPUs are
3338 * idle) to avoid interference caused by SMT.
3339 *
3340 * 2. Reuse the same CPU:
3341 * - prefer the last used CPU to take advantage of cached data (L1, L2) and
3342 * branch prediction optimizations.
3343 *
3344 * 3. Pick a CPU within the same LLC (Last-Level Cache):
3345 * - if the above conditions aren't met, pick a CPU that shares the same LLC
3346 * to maintain cache locality.
3347 *
3348 * 4. Pick a CPU within the same NUMA node, if enabled:
3349 * - choose a CPU from the same NUMA node to reduce memory access latency.
3350 *
3351 * Step 3 and 4 are performed only if the system has, respectively, multiple
3352 * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
3353 * scx_selcpu_topo_numa).
3354 *
3355 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
3356 * we never call ops.select_cpu() for them, see select_task_rq().
3357 */
3358 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3359 u64 wake_flags, bool *found)
3360 {
3361 const struct cpumask *llc_cpus = NULL;
3362 const struct cpumask *numa_cpus = NULL;
3363 s32 cpu;
3364
3365 *found = false;
3366
3368 /*
3369 * This is necessary to protect llc_cpus.
3370 */
3371 rcu_read_lock();
3372
3373 /*
3374 * Determine the scheduling domain only if the task is allowed to run
3375 * on all CPUs.
3376 *
3377 * This is done primarily for efficiency, as it avoids the overhead of
3378 * updating a cpumask every time we need to select an idle CPU (which
3379 * can be costly in large SMP systems), but it also aligns logically:
3380 * if a task's scheduling domain is restricted by user-space (through
3381 * CPU affinity), the task will simply use the flat scheduling domain
3382 * defined by user-space.
3383 */
3384 if (p->nr_cpus_allowed >= num_possible_cpus()) {
3385 if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
3386 numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu));
3387
3388 if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
3389 struct sched_domain *sd;
3390
3391 sd = rcu_dereference(per_cpu(sd_llc, prev_cpu));
3392 if (sd)
3393 llc_cpus = sched_domain_span(sd);
3394 }
3395 }
3396
3397 /*
3398 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
3399 */
3400 if (wake_flags & SCX_WAKE_SYNC) {
3401 cpu = smp_processor_id();
3402
3403 /*
3404 * If the waker's CPU is cache affine and prev_cpu is idle,
3405 * then avoid a migration.
3406 */
3407 if (cpus_share_cache(cpu, prev_cpu) &&
3408 test_and_clear_cpu_idle(prev_cpu)) {
3409 cpu = prev_cpu;
3410 goto cpu_found;
3411 }
3412
3413 /*
3414 * If the waker's local DSQ is empty, and the system is under
3415 * utilized, try to wake up @p to the local DSQ of the waker.
3416 *
3417 * Checking only for an empty local DSQ is insufficient as it
3418 * could give the wakee an unfair advantage when the system is
3419 * oversaturated.
3420 *
3421 * Checking only for the presence of idle CPUs is also
3422 * insufficient as the local DSQ of the waker could have tasks
3423 * piled up on it even if there is an idle core elsewhere on
3424 * the system.
3425 */
3426 if (!cpumask_empty(idle_masks.cpu) &&
3427 !(current->flags & PF_EXITING) &&
3428 cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3429 if (cpumask_test_cpu(cpu, p->cpus_ptr))
3430 goto cpu_found;
3431 }
3432 }
3433
3434 /*
3435 * If CPU has SMT, any wholly idle CPU is likely a better pick than
3436 * partially idle @prev_cpu.
3437 */
3438 if (sched_smt_active()) {
3439 /*
3440 * Keep using @prev_cpu if it's part of a fully idle core.
3441 */
3442 if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3443 test_and_clear_cpu_idle(prev_cpu)) {
3444 cpu = prev_cpu;
3445 goto cpu_found;
3446 }
3447
3448 /*
3449 * Search for any fully idle core in the same LLC domain.
3450 */
3451 if (llc_cpus) {
3452 cpu = scx_pick_idle_cpu(llc_cpus, SCX_PICK_IDLE_CORE);
3453 if (cpu >= 0)
3454 goto cpu_found;
3455 }
3456
3457 /*
3458 * Search for any fully idle core in the same NUMA node.
3459 */
3460 if (numa_cpus) {
3461 cpu = scx_pick_idle_cpu(numa_cpus, SCX_PICK_IDLE_CORE);
3462 if (cpu >= 0)
3463 goto cpu_found;
3464 }
3465
3466 /*
3467 * Search for any fully idle core usable by the task.
3468 */
3469 cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3470 if (cpu >= 0)
3471 goto cpu_found;
3472 }
3473
3474 /*
3475 * Use @prev_cpu if it's idle.
3476 */
3477 if (test_and_clear_cpu_idle(prev_cpu)) {
3478 cpu = prev_cpu;
3479 goto cpu_found;
3480 }
3481
3482 /*
3483 * Search for any idle CPU in the same LLC domain.
3484 */
3485 if (llc_cpus) {
3486 cpu = scx_pick_idle_cpu(llc_cpus, 0);
3487 if (cpu >= 0)
3488 goto cpu_found;
3489 }
3490
3491 /*
3492 * Search for any idle CPU in the same NUMA node.
3493 */
3494 if (numa_cpus) {
3495 cpu = scx_pick_idle_cpu(numa_cpus, 0);
3496 if (cpu >= 0)
3497 goto cpu_found;
3498 }
3499
3500 /*
3501 * Search for any idle CPU usable by the task.
3502 */
3503 cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3504 if (cpu >= 0)
3505 goto cpu_found;
3506
3507 rcu_read_unlock();
3508 return prev_cpu;
3509
3510 cpu_found:
3511 rcu_read_unlock();
3512
3513 *found = true;
3514 return cpu;
3515 }
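/*
 * Illustrative sketch, not part of this file: a BPF scheduler that wants the
 * default idle-selection policy above can defer to it by calling the
 * scx_bpf_select_cpu_dfl() kfunc from its own ops.select_cpu(). When
 * *is_idle comes back true, an idle CPU has been claimed and the scheduler
 * may dispatch @p directly to that CPU's local DSQ. The BPF_STRUCT_OPS()
 * wrapper and the "example_" name are assumptions borrowed from the
 * sched_ext example tooling, not requirements of this code:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *
 *		return scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
 *					      &is_idle);
 *	}
 */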
3516
3517 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3518 {
3519 /*
3520 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3521 * can be a good migration opportunity with low cache and memory
3522 * footprint. Returning a CPU different than @prev_cpu triggers
3523 * immediate rq migration. However, for SCX, as the current rq
3524 * association doesn't dictate where the task is going to run, this
3525 * doesn't fit well. If necessary, we can later add a dedicated method
3526 * which can decide to preempt self to force it through the regular
3527 * scheduling path.
3528 */
3529 if (unlikely(wake_flags & WF_EXEC))
3530 return prev_cpu;
3531
3532 if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {
3533 s32 cpu;
3534 struct task_struct **ddsp_taskp;
3535
3536 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3537 WARN_ON_ONCE(*ddsp_taskp);
3538 *ddsp_taskp = p;
3539
3540 cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3541 select_cpu, p, prev_cpu, wake_flags);
3542 *ddsp_taskp = NULL;
3543 if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3544 return cpu;
3545 else
3546 return prev_cpu;
3547 } else {
3548 bool found;
3549 s32 cpu;
3550
3551 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3552 if (found) {
3553 p->scx.slice = SCX_SLICE_DFL;
3554 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3555 }
3556 return cpu;
3557 }
3558 }
3559
3560 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3561 {
3562 run_deferred(rq);
3563 }
3564
3565 static void set_cpus_allowed_scx(struct task_struct *p,
3566 struct affinity_context *ac)
3567 {
3568 set_cpus_allowed_common(p, ac);
3569
3570 /*
3571 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3572 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3573 * scheduler the effective one.
3574 *
3575 * Fine-grained memory write control is enforced by BPF making the const
3576 * designation pointless. Cast it away when calling the operation.
3577 */
3578 if (SCX_HAS_OP(set_cpumask))
3579 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3580 (struct cpumask *)p->cpus_ptr);
3581 }
3582
3583 static void reset_idle_masks(void)
3584 {
3585 /*
3586 * Consider all online cpus idle. Should converge to the actual state
3587 * quickly.
3588 */
3589 cpumask_copy(idle_masks.cpu, cpu_online_mask);
3590 cpumask_copy(idle_masks.smt, cpu_online_mask);
3591 }
3592
3593 static void update_builtin_idle(int cpu, bool idle)
3594 {
3595 if (idle)
3596 cpumask_set_cpu(cpu, idle_masks.cpu);
3597 else
3598 cpumask_clear_cpu(cpu, idle_masks.cpu);
3599
3600 #ifdef CONFIG_SCHED_SMT
3601 if (sched_smt_active()) {
3602 const struct cpumask *smt = cpu_smt_mask(cpu);
3603
3604 if (idle) {
3605 /*
3606 * idle_masks.smt handling is racy but that's fine as
3607 * it's only for optimization and self-correcting.
3608 */
3609 for_each_cpu(cpu, smt) {
3610 if (!cpumask_test_cpu(cpu, idle_masks.cpu))
3611 return;
3612 }
3613 cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3614 } else {
3615 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3616 }
3617 }
3618 #endif
3619 }
3620
3621 /*
3622 * Update the idle state of a CPU to @idle.
3623 *
3624 * If @do_notify is true, ops.update_idle() is invoked to notify the scx
3625 * scheduler of an actual idle state transition (idle to busy or vice
3626 * versa). If @do_notify is false, only the idle state in the idle masks is
3627 * refreshed without invoking ops.update_idle().
3628 *
3629 * This distinction is necessary, because an idle CPU can be "reserved" and
3630 * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
3631 * busy even if no tasks are dispatched. In this case, the CPU may return
3632 * to idle without a true state transition. Refreshing the idle masks
3633 * without invoking ops.update_idle() ensures accurate idle state tracking
3634 * while avoiding unnecessary updates and maintaining balanced state
3635 * transitions.
3636 */
3637 void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
3638 {
3639 int cpu = cpu_of(rq);
3640
3641 lockdep_assert_rq_held(rq);
3642
3643 /*
3644 * Trigger ops.update_idle() only when transitioning from a task to
3645 * the idle thread and vice versa.
3646 *
3647 * Idle transitions are indicated by do_notify being set to true,
3648 * managed by put_prev_task_idle()/set_next_task_idle().
3649 */
3650 if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
3651 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3652
3653 /*
3654 * Update the idle masks:
3655 * - for real idle transitions (do_notify == true)
3656 * - for idle-to-idle transitions (indicated by the previous task
3657 * being the idle thread, managed by pick_task_idle())
3658 *
3659 * Skip updating idle masks if the previous task is not the idle
3660 * thread, since set_next_task_idle() has already handled it when
3661 * transitioning from a task to the idle thread (calling this
3662 * function with do_notify == true).
3663 *
3664 * In this way we can avoid updating the idle masks twice,
3665 * unnecessarily.
3666 */
3667 if (static_branch_likely(&scx_builtin_idle_enabled))
3668 if (do_notify || is_idle_task(rq->curr))
3669 update_builtin_idle(cpu, idle);
3670 }
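/*
 * A minimal BPF-side sketch of the "reserve and wake" pattern described
 * above, assuming the scx_bpf_pick_idle_cpu() and scx_bpf_kick_cpu() kfuncs
 * are usable in the calling context. Claiming the CPU marks it busy even if
 * nothing is dispatched to it, so its return to idle is handled by the
 * do_notify == false path:
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */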
3671
3672 static void handle_hotplug(struct rq *rq, bool online)
3673 {
3674 int cpu = cpu_of(rq);
3675
3676 atomic_long_inc(&scx_hotplug_seq);
3677
3678 if (scx_enabled())
3679 update_selcpu_topology();
3680
3681 if (online && SCX_HAS_OP(cpu_online))
3682 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3683 else if (!online && SCX_HAS_OP(cpu_offline))
3684 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3685 else
3686 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3687 "cpu %d going %s, exiting scheduler", cpu,
3688 online ? "online" : "offline");
3689 }
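/*
 * Hedged illustration of the consumer side of the exit above: a user-space
 * loader can mask the action bits out of ei->exit_code and restart the
 * scheduler on hotplug events instead of treating them as failures. The
 * helper name is hypothetical; only the SCX_ECODE_* values come from this
 * file:
 *
 *	static bool example_should_restart(s64 exit_code)
 *	{
 *		return exit_code & SCX_ECODE_ACT_RESTART;
 *	}
 */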
3690
3691 void scx_rq_activate(struct rq *rq)
3692 {
3693 handle_hotplug(rq, true);
3694 }
3695
3696 void scx_rq_deactivate(struct rq *rq)
3697 {
3698 handle_hotplug(rq, false);
3699 }
3700
3701 static void rq_online_scx(struct rq *rq)
3702 {
3703 rq->scx.flags |= SCX_RQ_ONLINE;
3704 }
3705
3706 static void rq_offline_scx(struct rq *rq)
3707 {
3708 rq->scx.flags &= ~SCX_RQ_ONLINE;
3709 }
3710
3711 #else /* CONFIG_SMP */
3712
3713 static bool test_and_clear_cpu_idle(int cpu) { return false; }
3714 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3715 static void reset_idle_masks(void) {}
3716
3717 #endif /* CONFIG_SMP */
3718
3719 static bool check_rq_for_timeouts(struct rq *rq)
3720 {
3721 struct task_struct *p;
3722 struct rq_flags rf;
3723 bool timed_out = false;
3724
3725 rq_lock_irqsave(rq, &rf);
3726 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3727 unsigned long last_runnable = p->scx.runnable_at;
3728
3729 if (unlikely(time_after(jiffies,
3730 last_runnable + scx_watchdog_timeout))) {
3731 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3732
3733 scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3734 "%s[%d] failed to run for %u.%03us",
3735 p->comm, p->pid,
3736 dur_ms / 1000, dur_ms % 1000);
3737 timed_out = true;
3738 break;
3739 }
3740 }
3741 rq_unlock_irqrestore(rq, &rf);
3742
3743 return timed_out;
3744 }
3745
3746 static void scx_watchdog_workfn(struct work_struct *work)
3747 {
3748 int cpu;
3749
3750 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3751
3752 for_each_online_cpu(cpu) {
3753 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3754 break;
3755
3756 cond_resched();
3757 }
3758 queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3759 scx_watchdog_timeout / 2);
3760 }
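/*
 * The watchdog above enforces the stall limit a BPF scheduler declares via
 * sched_ext_ops.timeout_ms (capped by SCX_WATCHDOG_MAX_TIMEOUT). A minimal
 * BPF-side sketch of such a declaration; the SEC() annotation and the
 * "example" names are assumptions, not requirements of this code:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.timeout_ms	= 5000,
 *		.name		= "example",
 *	};
 */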
3761
3762 void scx_tick(struct rq *rq)
3763 {
3764 unsigned long last_check;
3765
3766 if (!scx_enabled())
3767 return;
3768
3769 last_check = READ_ONCE(scx_watchdog_timestamp);
3770 if (unlikely(time_after(jiffies,
3771 last_check + READ_ONCE(scx_watchdog_timeout)))) {
3772 u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3773
3774 scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3775 "watchdog failed to check in for %u.%03us",
3776 dur_ms / 1000, dur_ms % 1000);
3777 }
3778
3779 update_other_load_avgs(rq);
3780 }
3781
3782 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3783 {
3784 update_curr_scx(rq);
3785
3786 /*
3787 * While disabling, always resched and refresh core-sched timestamp as
3788 * we can't trust the slice management or ops.core_sched_before().
3789 */
3790 if (scx_rq_bypassing(rq)) {
3791 curr->scx.slice = 0;
3792 touch_core_sched(rq, curr);
3793 } else if (SCX_HAS_OP(tick)) {
3794 SCX_CALL_OP(SCX_KF_REST, tick, curr);
3795 }
3796
3797 if (!curr->scx.slice)
3798 resched_curr(rq);
3799 }
3800
3801 #ifdef CONFIG_EXT_GROUP_SCHED
3802 static struct cgroup *tg_cgrp(struct task_group *tg)
3803 {
3804 /*
3805 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3806 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3807 * root cgroup.
3808 */
3809 if (tg && tg->css.cgroup)
3810 return tg->css.cgroup;
3811 else
3812 return &cgrp_dfl_root.cgrp;
3813 }
3814
3815 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
3816
3817 #else /* CONFIG_EXT_GROUP_SCHED */
3818
3819 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3820
3821 #endif /* CONFIG_EXT_GROUP_SCHED */
3822
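/*
 * Per-task state is tracked in p->scx.flags. The transitions enforced by
 * scx_set_task_state() below are:
 *
 *	NONE    -> INIT		ops.init_task() succeeded
 *	INIT    -> READY	task known to the BPF scheduler, not enabled
 *	READY   -> ENABLED	ops.enable() called, task is running on SCX
 *	ENABLED -> READY	ops.disable() called, e.g. switching class
 *	INIT/READY -> NONE	ops.exit_task() called, task leaves SCX
 */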
3823 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3824 {
3825 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3826 }
3827
3828 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3829 {
3830 enum scx_task_state prev_state = scx_get_task_state(p);
3831 bool warn = false;
3832
3833 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3834
3835 switch (state) {
3836 case SCX_TASK_NONE:
3837 break;
3838 case SCX_TASK_INIT:
3839 warn = prev_state != SCX_TASK_NONE;
3840 break;
3841 case SCX_TASK_READY:
3842 warn = prev_state == SCX_TASK_NONE;
3843 break;
3844 case SCX_TASK_ENABLED:
3845 warn = prev_state != SCX_TASK_READY;
3846 break;
3847 default:
3848 warn = true;
3849 return;
3850 }
3851
3852 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3853 prev_state, state, p->comm, p->pid);
3854
3855 p->scx.flags &= ~SCX_TASK_STATE_MASK;
3856 p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3857 }
3858
3859 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3860 {
3861 int ret;
3862
3863 p->scx.disallow = false;
3864
3865 if (SCX_HAS_OP(init_task)) {
3866 struct scx_init_task_args args = {
3867 SCX_INIT_TASK_ARGS_CGROUP(tg)
3868 .fork = fork,
3869 };
3870
3871 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3872 if (unlikely(ret)) {
3873 ret = ops_sanitize_err("init_task", ret);
3874 return ret;
3875 }
3876 }
3877
3878 scx_set_task_state(p, SCX_TASK_INIT);
3879
3880 if (p->scx.disallow) {
3881 if (!fork) {
3882 struct rq *rq;
3883 struct rq_flags rf;
3884
3885 rq = task_rq_lock(p, &rf);
3886
3887 /*
3888 * We're in the load path and @p->policy will be applied
3889 * right after. Reverting @p->policy here and rejecting
3890 * %SCHED_EXT transitions from scx_check_setscheduler()
3891 * guarantees that if ops.init_task() sets @p->disallow,
3892 * @p can never be in SCX.
3893 */
3894 if (p->policy == SCHED_EXT) {
3895 p->policy = SCHED_NORMAL;
3896 atomic_long_inc(&scx_nr_rejected);
3897 }
3898
3899 task_rq_unlock(rq, p, &rf);
3900 } else if (p->policy == SCHED_EXT) {
3901 scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
3902 p->comm, p->pid);
3903 }
3904 }
3905
3906 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3907 return 0;
3908 }
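/*
 * A hedged BPF-side sketch of ops.init_task() using the disallow handling
 * above; rejecting tasks with PF_NO_SETAFFINITY is purely illustrative and
 * the "example_" name is an assumption:
 *
 *	s32 BPF_STRUCT_OPS(example_init_task, struct task_struct *p,
 *			   struct scx_init_task_args *args)
 *	{
 *		if (p->flags & PF_NO_SETAFFINITY)
 *			p->scx.disallow = true;
 *		return 0;
 *	}
 */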
3909
3910 static void scx_ops_enable_task(struct task_struct *p)
3911 {
3912 u32 weight;
3913
3914 lockdep_assert_rq_held(task_rq(p));
3915
3916 /*
3917 * Set the weight before calling ops.enable() so that the scheduler
3918 * doesn't see a stale value if they inspect the task struct.
3919 */
3920 if (task_has_idle_policy(p))
3921 weight = WEIGHT_IDLEPRIO;
3922 else
3923 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3924
3925 p->scx.weight = sched_weight_to_cgroup(weight);
3926
3927 if (SCX_HAS_OP(enable))
3928 SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
3929 scx_set_task_state(p, SCX_TASK_ENABLED);
3930
3931 if (SCX_HAS_OP(set_weight))
3932 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3933 }
3934
3935 static void scx_ops_disable_task(struct task_struct *p)
3936 {
3937 lockdep_assert_rq_held(task_rq(p));
3938 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3939
3940 if (SCX_HAS_OP(disable))
3941 SCX_CALL_OP(SCX_KF_REST, disable, p);
3942 scx_set_task_state(p, SCX_TASK_READY);
3943 }
3944
3945 static void scx_ops_exit_task(struct task_struct *p)
3946 {
3947 struct scx_exit_task_args args = {
3948 .cancelled = false,
3949 };
3950
3951 lockdep_assert_rq_held(task_rq(p));
3952
3953 switch (scx_get_task_state(p)) {
3954 case SCX_TASK_NONE:
3955 return;
3956 case SCX_TASK_INIT:
3957 args.cancelled = true;
3958 break;
3959 case SCX_TASK_READY:
3960 break;
3961 case SCX_TASK_ENABLED:
3962 scx_ops_disable_task(p);
3963 break;
3964 default:
3965 WARN_ON_ONCE(true);
3966 return;
3967 }
3968
3969 if (SCX_HAS_OP(exit_task))
3970 SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
3971 scx_set_task_state(p, SCX_TASK_NONE);
3972 }
3973
3974 void init_scx_entity(struct sched_ext_entity *scx)
3975 {
3976 memset(scx, 0, sizeof(*scx));
3977 INIT_LIST_HEAD(&scx->dsq_list.node);
3978 RB_CLEAR_NODE(&scx->dsq_priq);
3979 scx->sticky_cpu = -1;
3980 scx->holding_cpu = -1;
3981 INIT_LIST_HEAD(&scx->runnable_node);
3982 scx->runnable_at = jiffies;
3983 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3984 scx->slice = SCX_SLICE_DFL;
3985 }
3986
3987 void scx_pre_fork(struct task_struct *p)
3988 {
3989 /*
3990 * BPF scheduler enable/disable paths want to be able to iterate and
3991 * update all tasks which can become complex when racing forks. As
3992 * enable/disable are very cold paths, let's use a percpu_rwsem to
3993 * exclude forks.
3994 */
3995 percpu_down_read(&scx_fork_rwsem);
3996 }
3997
3998 int scx_fork(struct task_struct *p)
3999 {
4000 percpu_rwsem_assert_held(&scx_fork_rwsem);
4001
4002 if (scx_ops_init_task_enabled)
4003 return scx_ops_init_task(p, task_group(p), true);
4004 else
4005 return 0;
4006 }
4007
4008 void scx_post_fork(struct task_struct *p)
4009 {
4010 if (scx_ops_init_task_enabled) {
4011 scx_set_task_state(p, SCX_TASK_READY);
4012
4013 /*
4014 * Enable the task immediately if it's running on sched_ext.
4015 * Otherwise, it'll be enabled in switching_to_scx() if and
4016 * when it's ever configured to run with a SCHED_EXT policy.
4017 */
4018 if (p->sched_class == &ext_sched_class) {
4019 struct rq_flags rf;
4020 struct rq *rq;
4021
4022 rq = task_rq_lock(p, &rf);
4023 scx_ops_enable_task(p);
4024 task_rq_unlock(rq, p, &rf);
4025 }
4026 }
4027
4028 spin_lock_irq(&scx_tasks_lock);
4029 list_add_tail(&p->scx.tasks_node, &scx_tasks);
4030 spin_unlock_irq(&scx_tasks_lock);
4031
4032 percpu_up_read(&scx_fork_rwsem);
4033 }
4034
4035 void scx_cancel_fork(struct task_struct *p)
4036 {
4037 if (scx_enabled()) {
4038 struct rq *rq;
4039 struct rq_flags rf;
4040
4041 rq = task_rq_lock(p, &rf);
4042 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
4043 scx_ops_exit_task(p);
4044 task_rq_unlock(rq, p, &rf);
4045 }
4046
4047 percpu_up_read(&scx_fork_rwsem);
4048 }
4049
4050 void sched_ext_free(struct task_struct *p)
4051 {
4052 unsigned long flags;
4053
4054 spin_lock_irqsave(&scx_tasks_lock, flags);
4055 list_del_init(&p->scx.tasks_node);
4056 spin_unlock_irqrestore(&scx_tasks_lock, flags);
4057
4058 /*
4059 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
4060 * ENABLED transitions can't race us. Disable ops for @p.
4061 */
4062 if (scx_get_task_state(p) != SCX_TASK_NONE) {
4063 struct rq_flags rf;
4064 struct rq *rq;
4065
4066 rq = task_rq_lock(p, &rf);
4067 scx_ops_exit_task(p);
4068 task_rq_unlock(rq, p, &rf);
4069 }
4070 }
4071
4072 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
4073 const struct load_weight *lw)
4074 {
4075 lockdep_assert_rq_held(task_rq(p));
4076
4077 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
4078 if (SCX_HAS_OP(set_weight))
4079 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
4080 }
4081
4082 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
4083 {
4084 }
4085
4086 static void switching_to_scx(struct rq *rq, struct task_struct *p)
4087 {
4088 scx_ops_enable_task(p);
4089
4090 /*
4091 * set_cpus_allowed_scx() is not called while @p is associated with a
4092 * different scheduler class. Keep the BPF scheduler up-to-date.
4093 */
4094 if (SCX_HAS_OP(set_cpumask))
4095 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
4096 (struct cpumask *)p->cpus_ptr);
4097 }
4098
4099 static void switched_from_scx(struct rq *rq, struct task_struct *p)
4100 {
4101 scx_ops_disable_task(p);
4102 }
4103
4104 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
4105 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
4106
4107 int scx_check_setscheduler(struct task_struct *p, int policy)
4108 {
4109 lockdep_assert_rq_held(task_rq(p));
4110
4111 /* if disallow, reject transitioning into SCX */
4112 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
4113 p->policy != policy && policy == SCHED_EXT)
4114 return -EACCES;
4115
4116 return 0;
4117 }
4118
4119 #ifdef CONFIG_NO_HZ_FULL
4120 bool scx_can_stop_tick(struct rq *rq)
4121 {
4122 struct task_struct *p = rq->curr;
4123
4124 if (scx_rq_bypassing(rq))
4125 return false;
4126
4127 if (p->sched_class != &ext_sched_class)
4128 return true;
4129
4130 /*
4131 * @rq can dispatch from different DSQs, so we can't tell whether it
4132 * needs the tick or not by looking at nr_running. Allow stopping ticks
4133 * iff the BPF scheduler indicated so. See set_next_task_scx().
4134 */
4135 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4136 }
4137 #endif
4138
4139 #ifdef CONFIG_EXT_GROUP_SCHED
4140
4141 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
4142 static bool scx_cgroup_enabled;
4143 static bool cgroup_warned_missing_weight;
4144 static bool cgroup_warned_missing_idle;
4145
4146 static void scx_cgroup_warn_missing_weight(struct task_group *tg)
4147 {
4148 if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
4149 cgroup_warned_missing_weight)
4150 return;
4151
4152 if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
4153 return;
4154
4155 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
4156 scx_ops.name);
4157 cgroup_warned_missing_weight = true;
4158 }
4159
4160 static void scx_cgroup_warn_missing_idle(struct task_group *tg)
4161 {
4162 if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
4163 return;
4164
4165 if (!tg->idle)
4166 return;
4167
4168 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
4169 scx_ops.name);
4170 cgroup_warned_missing_idle = true;
4171 }
4172
4173 int scx_tg_online(struct task_group *tg)
4174 {
4175 int ret = 0;
4176
4177 WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4178
4179 percpu_down_read(&scx_cgroup_rwsem);
4180
4181 scx_cgroup_warn_missing_weight(tg);
4182
4183 if (scx_cgroup_enabled) {
4184 if (SCX_HAS_OP(cgroup_init)) {
4185 struct scx_cgroup_init_args args =
4186 { .weight = tg->scx_weight };
4187
4188 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4189 tg->css.cgroup, &args);
4190 if (ret)
4191 ret = ops_sanitize_err("cgroup_init", ret);
4192 }
4193 if (ret == 0)
4194 tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4195 } else {
4196 tg->scx_flags |= SCX_TG_ONLINE;
4197 }
4198
4199 percpu_up_read(&scx_cgroup_rwsem);
4200 return ret;
4201 }
4202
4203 void scx_tg_offline(struct task_group *tg)
4204 {
4205 WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
4206
4207 percpu_down_read(&scx_cgroup_rwsem);
4208
4209 if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
4210 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
4211 tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4212
4213 percpu_up_read(&scx_cgroup_rwsem);
4214 }
4215
4216 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4217 {
4218 struct cgroup_subsys_state *css;
4219 struct task_struct *p;
4220 int ret;
4221
4222 /* released in scx_finish/cancel_attach() */
4223 percpu_down_read(&scx_cgroup_rwsem);
4224
4225 if (!scx_cgroup_enabled)
4226 return 0;
4227
4228 cgroup_taskset_for_each(p, css, tset) {
4229 struct cgroup *from = tg_cgrp(task_group(p));
4230 struct cgroup *to = tg_cgrp(css_tg(css));
4231
4232 WARN_ON_ONCE(p->scx.cgrp_moving_from);
4233
4234 /*
4235 * sched_move_task() omits identity migrations. Let's match the
4236 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4237 * always match one-to-one.
4238 */
4239 if (from == to)
4240 continue;
4241
4242 if (SCX_HAS_OP(cgroup_prep_move)) {
4243 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
4244 p, from, css->cgroup);
4245 if (ret)
4246 goto err;
4247 }
4248
4249 p->scx.cgrp_moving_from = from;
4250 }
4251
4252 return 0;
4253
4254 err:
4255 cgroup_taskset_for_each(p, css, tset) {
4256 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4257 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4258 p->scx.cgrp_moving_from, css->cgroup);
4259 p->scx.cgrp_moving_from = NULL;
4260 }
4261
4262 percpu_up_read(&scx_cgroup_rwsem);
4263 return ops_sanitize_err("cgroup_prep_move", ret);
4264 }
4265
4266 void scx_move_task(struct task_struct *p)
4267 {
4268 if (!scx_cgroup_enabled)
4269 return;
4270
4271 /*
4272 * We're called from sched_move_task() which handles both cgroup and
4273 * autogroup moves. Ignore the latter.
4274 *
4275 * Also ignore exiting tasks, because in the exit path tasks transition
4276 * from the autogroup to the root group, so task_group_is_autogroup()
4277 * alone isn't able to catch exiting autogroup tasks. This is safe for
4278 * cgroup_move(), because cgroup migrations never happen for PF_EXITING
4279 * tasks.
4280 */
4281 if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
4282 return;
4283
4284 /*
4285 * @p must have ops.cgroup_prep_move() called on it and thus
4286 * cgrp_moving_from set.
4287 */
4288 if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4289 SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
4290 p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
4291 p->scx.cgrp_moving_from = NULL;
4292 }
4293
4294 void scx_cgroup_finish_attach(void)
4295 {
4296 percpu_up_read(&scx_cgroup_rwsem);
4297 }
4298
4299 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4300 {
4301 struct cgroup_subsys_state *css;
4302 struct task_struct *p;
4303
4304 if (!scx_cgroup_enabled)
4305 goto out_unlock;
4306
4307 cgroup_taskset_for_each(p, css, tset) {
4308 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
4309 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
4310 p->scx.cgrp_moving_from, css->cgroup);
4311 p->scx.cgrp_moving_from = NULL;
4312 }
4313 out_unlock:
4314 percpu_up_read(&scx_cgroup_rwsem);
4315 }
4316
4317 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4318 {
4319 percpu_down_read(&scx_cgroup_rwsem);
4320
4321 if (scx_cgroup_enabled && tg->scx_weight != weight) {
4322 if (SCX_HAS_OP(cgroup_set_weight))
4323 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
4324 tg_cgrp(tg), weight);
4325 tg->scx_weight = weight;
4326 }
4327
4328 percpu_up_read(&scx_cgroup_rwsem);
4329 }
4330
4331 void scx_group_set_idle(struct task_group *tg, bool idle)
4332 {
4333 percpu_down_read(&scx_cgroup_rwsem);
4334 scx_cgroup_warn_missing_idle(tg);
4335 percpu_up_read(&scx_cgroup_rwsem);
4336 }
4337
4338 static void scx_cgroup_lock(void)
4339 {
4340 percpu_down_write(&scx_cgroup_rwsem);
4341 }
4342
4343 static void scx_cgroup_unlock(void)
4344 {
4345 percpu_up_write(&scx_cgroup_rwsem);
4346 }
4347
4348 #else /* CONFIG_EXT_GROUP_SCHED */
4349
4350 static inline void scx_cgroup_lock(void) {}
4351 static inline void scx_cgroup_unlock(void) {}
4352
4353 #endif /* CONFIG_EXT_GROUP_SCHED */
4354
4355 /*
4356 * Omitted operations:
4357 *
4358 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4359 * isn't tied to the CPU at that point. Preemption is implemented by resetting
4360 * the victim task's slice to 0 and triggering reschedule on the target CPU.
4361 *
4362 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4363 *
4364 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4365 * their current sched_class. Call them directly from sched core instead.
4366 */
4367 DEFINE_SCHED_CLASS(ext) = {
4368 .enqueue_task = enqueue_task_scx,
4369 .dequeue_task = dequeue_task_scx,
4370 .yield_task = yield_task_scx,
4371 .yield_to_task = yield_to_task_scx,
4372
4373 .wakeup_preempt = wakeup_preempt_scx,
4374
4375 .balance = balance_scx,
4376 .pick_task = pick_task_scx,
4377
4378 .put_prev_task = put_prev_task_scx,
4379 .set_next_task = set_next_task_scx,
4380
4381 #ifdef CONFIG_SMP
4382 .select_task_rq = select_task_rq_scx,
4383 .task_woken = task_woken_scx,
4384 .set_cpus_allowed = set_cpus_allowed_scx,
4385
4386 .rq_online = rq_online_scx,
4387 .rq_offline = rq_offline_scx,
4388 #endif
4389
4390 .task_tick = task_tick_scx,
4391
4392 .switching_to = switching_to_scx,
4393 .switched_from = switched_from_scx,
4394 .switched_to = switched_to_scx,
4395 .reweight_task = reweight_task_scx,
4396 .prio_changed = prio_changed_scx,
4397
4398 .update_curr = update_curr_scx,
4399
4400 #ifdef CONFIG_UCLAMP_TASK
4401 .uclamp_enabled = 1,
4402 #endif
4403 };
4404
4405 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4406 {
4407 memset(dsq, 0, sizeof(*dsq));
4408
4409 raw_spin_lock_init(&dsq->lock);
4410 INIT_LIST_HEAD(&dsq->list);
4411 dsq->id = dsq_id;
4412 }
4413
4414 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
4415 {
4416 struct scx_dispatch_q *dsq;
4417 int ret;
4418
4419 if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
4420 return ERR_PTR(-EINVAL);
4421
4422 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4423 if (!dsq)
4424 return ERR_PTR(-ENOMEM);
4425
4426 init_dsq(dsq, dsq_id);
4427
4428 ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
4429 dsq_hash_params);
4430 if (ret) {
4431 kfree(dsq);
4432 return ERR_PTR(ret);
4433 }
4434 return dsq;
4435 }
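/*
 * User DSQs are normally created from the BPF side before they are used,
 * e.g. from ops.init(). A minimal sketch assuming the scx_bpf_create_dsq()
 * kfunc; the DSQ id, the "example_" name and the SLEEPABLE wrapper are
 * assumptions borrowed from the sched_ext example tooling:
 *
 *	#define EXAMPLE_SHARED_DSQ	0
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(EXAMPLE_SHARED_DSQ, -1);
 *	}
 */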
4436
4437 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4438 {
4439 struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4440 struct scx_dispatch_q *dsq, *tmp_dsq;
4441
4442 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4443 kfree_rcu(dsq, rcu);
4444 }
4445
4446 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4447
4448 static void destroy_dsq(u64 dsq_id)
4449 {
4450 struct scx_dispatch_q *dsq;
4451 unsigned long flags;
4452
4453 rcu_read_lock();
4454
4455 dsq = find_user_dsq(dsq_id);
4456 if (!dsq)
4457 goto out_unlock_rcu;
4458
4459 raw_spin_lock_irqsave(&dsq->lock, flags);
4460
4461 if (dsq->nr) {
4462 scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4463 dsq->id, dsq->nr);
4464 goto out_unlock_dsq;
4465 }
4466
4467 if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4468 goto out_unlock_dsq;
4469
4470 /*
4471 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4472 * queueing more tasks. As this function can be called from anywhere,
4473 * freeing is bounced through an irq work to avoid nesting RCU
4474 * operations inside scheduler locks.
4475 */
4476 dsq->id = SCX_DSQ_INVALID;
4477 llist_add(&dsq->free_node, &dsqs_to_free);
4478 irq_work_queue(&free_dsq_irq_work);
4479
4480 out_unlock_dsq:
4481 raw_spin_unlock_irqrestore(&dsq->lock, flags);
4482 out_unlock_rcu:
4483 rcu_read_unlock();
4484 }
4485
4486 #ifdef CONFIG_EXT_GROUP_SCHED
4487 static void scx_cgroup_exit(void)
4488 {
4489 struct cgroup_subsys_state *css;
4490
4491 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4492
4493 scx_cgroup_enabled = false;
4494
4495 /*
4496 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4497 * cgroups and exit all the inited ones, all online cgroups are exited.
4498 */
4499 rcu_read_lock();
4500 css_for_each_descendant_post(css, &root_task_group.css) {
4501 struct task_group *tg = css_tg(css);
4502
4503 if (!(tg->scx_flags & SCX_TG_INITED))
4504 continue;
4505 tg->scx_flags &= ~SCX_TG_INITED;
4506
4507 if (!scx_ops.cgroup_exit)
4508 continue;
4509
4510 if (WARN_ON_ONCE(!css_tryget(css)))
4511 continue;
4512 rcu_read_unlock();
4513
4514 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4515
4516 rcu_read_lock();
4517 css_put(css);
4518 }
4519 rcu_read_unlock();
4520 }
4521
4522 static int scx_cgroup_init(void)
4523 {
4524 struct cgroup_subsys_state *css;
4525 int ret;
4526
4527 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4528
4529 cgroup_warned_missing_weight = false;
4530 cgroup_warned_missing_idle = false;
4531
4532 /*
4533 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4534 * cgroups and init, all online cgroups are initialized.
4535 */
4536 rcu_read_lock();
4537 css_for_each_descendant_pre(css, &root_task_group.css) {
4538 struct task_group *tg = css_tg(css);
4539 struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4540
4541 scx_cgroup_warn_missing_weight(tg);
4542 scx_cgroup_warn_missing_idle(tg);
4543
4544 if ((tg->scx_flags &
4545 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4546 continue;
4547
4548 if (!scx_ops.cgroup_init) {
4549 tg->scx_flags |= SCX_TG_INITED;
4550 continue;
4551 }
4552
4553 if (WARN_ON_ONCE(!css_tryget(css)))
4554 continue;
4555 rcu_read_unlock();
4556
4557 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4558 css->cgroup, &args);
4559 if (ret) {
4560 css_put(css);
4561 scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4562 return ret;
4563 }
4564 tg->scx_flags |= SCX_TG_INITED;
4565
4566 rcu_read_lock();
4567 css_put(css);
4568 }
4569 rcu_read_unlock();
4570
4571 WARN_ON_ONCE(scx_cgroup_enabled);
4572 scx_cgroup_enabled = true;
4573
4574 return 0;
4575 }
4576
4577 #else
4578 static void scx_cgroup_exit(void) {}
4579 static int scx_cgroup_init(void) { return 0; }
4580 #endif
4581
4582
4583 /********************************************************************************
4584 * Sysfs interface and ops enable/disable.
4585 */
4586
4587 #define SCX_ATTR(_name) \
4588 static struct kobj_attribute scx_attr_##_name = { \
4589 .attr = { .name = __stringify(_name), .mode = 0444 }, \
4590 .show = scx_attr_##_name##_show, \
4591 }
4592
4593 static ssize_t scx_attr_state_show(struct kobject *kobj,
4594 struct kobj_attribute *ka, char *buf)
4595 {
4596 return sysfs_emit(buf, "%s\n",
4597 scx_ops_enable_state_str[scx_ops_enable_state()]);
4598 }
4599 SCX_ATTR(state);
4600
4601 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4602 struct kobj_attribute *ka, char *buf)
4603 {
4604 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4605 }
4606 SCX_ATTR(switch_all);
4607
4608 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4609 struct kobj_attribute *ka, char *buf)
4610 {
4611 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4612 }
4613 SCX_ATTR(nr_rejected);
4614
4615 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4616 struct kobj_attribute *ka, char *buf)
4617 {
4618 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4619 }
4620 SCX_ATTR(hotplug_seq);
4621
4622 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4623 struct kobj_attribute *ka, char *buf)
4624 {
4625 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4626 }
4627 SCX_ATTR(enable_seq);
4628
4629 static struct attribute *scx_global_attrs[] = {
4630 &scx_attr_state.attr,
4631 &scx_attr_switch_all.attr,
4632 &scx_attr_nr_rejected.attr,
4633 &scx_attr_hotplug_seq.attr,
4634 &scx_attr_enable_seq.attr,
4635 NULL,
4636 };
4637
4638 static const struct attribute_group scx_global_attr_group = {
4639 .attrs = scx_global_attrs,
4640 };
4641
4642 static void scx_kobj_release(struct kobject *kobj)
4643 {
4644 kfree(kobj);
4645 }
4646
4647 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4648 struct kobj_attribute *ka, char *buf)
4649 {
4650 return sysfs_emit(buf, "%s\n", scx_ops.name);
4651 }
4652 SCX_ATTR(ops);
4653
4654 static struct attribute *scx_sched_attrs[] = {
4655 &scx_attr_ops.attr,
4656 NULL,
4657 };
4658 ATTRIBUTE_GROUPS(scx_sched);
4659
4660 static const struct kobj_type scx_ktype = {
4661 .release = scx_kobj_release,
4662 .sysfs_ops = &kobj_sysfs_ops,
4663 .default_groups = scx_sched_groups,
4664 };
4665
4666 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4667 {
4668 return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4669 }
4670
4671 static const struct kset_uevent_ops scx_uevent_ops = {
4672 .uevent = scx_uevent,
4673 };
4674
4675 /*
4676 * Used by sched_fork() and __setscheduler_prio() to pick the matching
4677 * sched_class. dl/rt are already handled.
4678 */
4679 bool task_should_scx(int policy)
4680 {
4681 if (!scx_enabled() ||
4682 unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4683 return false;
4684 if (READ_ONCE(scx_switching_all))
4685 return true;
4686 return policy == SCHED_EXT;
4687 }
4688
4689 /**
4690 * scx_softlockup - sched_ext softlockup handler
4691 *
4692 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4693 * live-lock the system by making many CPUs target the same DSQ to the point
4694 * where soft-lockup detection triggers. This function is called from
4695 * soft-lockup watchdog when the triggering point is close and tries to unjam
4696 * the system by enabling the breather and aborting the BPF scheduler.
4697 */
4698 void scx_softlockup(u32 dur_s)
4699 {
4700 switch (scx_ops_enable_state()) {
4701 case SCX_OPS_ENABLING:
4702 case SCX_OPS_ENABLED:
4703 break;
4704 default:
4705 return;
4706 }
4707
4708 /* allow only one instance, cleared at the end of scx_ops_bypass() */
4709 if (test_and_set_bit(0, &scx_in_softlockup))
4710 return;
4711
4712 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4713 smp_processor_id(), dur_s, scx_ops.name);
4714
4715 /*
4716 * Some CPUs may be trapped in the dispatch paths. Enable breather
4717 * immediately; otherwise, we might not even be able to get to
4718 * scx_ops_bypass().
4719 */
4720 atomic_inc(&scx_ops_breather_depth);
4721
4722 scx_ops_error("soft lockup - CPU#%d stuck for %us",
4723 smp_processor_id(), dur_s);
4724 }
4725
4726 static void scx_clear_softlockup(void)
4727 {
4728 if (test_and_clear_bit(0, &scx_in_softlockup))
4729 atomic_dec(&scx_ops_breather_depth);
4730 }
4731
4732 /**
4733 * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4734 *
4735 * Bypassing guarantees that all runnable tasks make forward progress without
4736 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4737 * be held by tasks that the BPF scheduler is forgetting to run, which
4738 * unfortunately also excludes toggling the static branches.
4739 *
4740 * Let's work around by overriding a couple ops and modifying behaviors based on
4741 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4742 * to force global FIFO scheduling.
4743 *
4744 * - ops.select_cpu() is ignored and the default select_cpu() is used.
4745 *
4746 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4747 * %SCX_OPS_ENQ_LAST is also ignored.
4748 *
4749 * - ops.dispatch() is ignored.
4750 *
4751 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4752 * can't be trusted. Whenever a tick triggers, the running task is rotated to
4753 * the tail of the queue with core_sched_at touched.
4754 *
4755 * - pick_next_task() suppresses zero slice warning.
4756 *
4757 * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4758 * operations.
4759 *
4760 * - scx_prio_less() reverts to the default core_sched_at order.
4761 */
4762 static void scx_ops_bypass(bool bypass)
4763 {
4764 static DEFINE_RAW_SPINLOCK(bypass_lock);
4765 int cpu;
4766 unsigned long flags;
4767
4768 raw_spin_lock_irqsave(&bypass_lock, flags);
4769 if (bypass) {
4770 scx_ops_bypass_depth++;
4771 WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
4772 if (scx_ops_bypass_depth != 1)
4773 goto unlock;
4774 } else {
4775 scx_ops_bypass_depth--;
4776 WARN_ON_ONCE(scx_ops_bypass_depth < 0);
4777 if (scx_ops_bypass_depth != 0)
4778 goto unlock;
4779 }
4780
4781 atomic_inc(&scx_ops_breather_depth);
4782
4783 /*
4784 * No task property is changing. We just need to make sure all currently
4785 * queued tasks are re-queued according to the new scx_rq_bypassing()
4786 * state. As an optimization, walk each rq's runnable_list instead of
4787 * the scx_tasks list.
4788 *
4789 * This function can't trust the scheduler and thus can't use
4790 * cpus_read_lock(). Walk all possible CPUs instead of online.
4791 */
4792 for_each_possible_cpu(cpu) {
4793 struct rq *rq = cpu_rq(cpu);
4794 struct task_struct *p, *n;
4795
4796 raw_spin_rq_lock(rq);
4797
4798 if (bypass) {
4799 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4800 rq->scx.flags |= SCX_RQ_BYPASSING;
4801 } else {
4802 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4803 rq->scx.flags &= ~SCX_RQ_BYPASSING;
4804 }
4805
4806 /*
4807 * We need to guarantee that no tasks are on the BPF scheduler
4808 * while bypassing. Either we see enabled or the enable path
4809 * sees scx_rq_bypassing() before moving tasks to SCX.
4810 */
4811 if (!scx_enabled()) {
4812 raw_spin_rq_unlock(rq);
4813 continue;
4814 }
4815
4816 /*
4817 * The use of list_for_each_entry_safe_reverse() is required
4818 * because each task is going to be removed from and added back
4819 * to the runnable_list during iteration. Because they're added
4820 * to the tail of the list, safe reverse iteration can still
4821 * visit all nodes.
4822 */
4823 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4824 scx.runnable_node) {
4825 struct sched_enq_and_set_ctx ctx;
4826
4827 /* cycling deq/enq is enough, see the function comment */
4828 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4829 sched_enq_and_set_task(&ctx);
4830 }
4831
4832 /* resched to restore ticks and idle state */
4833 if (cpu_online(cpu) || cpu == smp_processor_id())
4834 resched_curr(rq);
4835
4836 raw_spin_rq_unlock(rq);
4837 }
4838
4839 atomic_dec(&scx_ops_breather_depth);
4840 unlock:
4841 raw_spin_unlock_irqrestore(&bypass_lock, flags);
4842 scx_clear_softlockup();
4843 }
4844
4845 static void free_exit_info(struct scx_exit_info *ei)
4846 {
4847 kfree(ei->dump);
4848 kfree(ei->msg);
4849 kfree(ei->bt);
4850 kfree(ei);
4851 }
4852
4853 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4854 {
4855 struct scx_exit_info *ei;
4856
4857 ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4858 if (!ei)
4859 return NULL;
4860
4861 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4862 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4863 ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4864
4865 if (!ei->bt || !ei->msg || !ei->dump) {
4866 free_exit_info(ei);
4867 return NULL;
4868 }
4869
4870 return ei;
4871 }
4872
4873 static const char *scx_exit_reason(enum scx_exit_kind kind)
4874 {
4875 switch (kind) {
4876 case SCX_EXIT_UNREG:
4877 return "unregistered from user space";
4878 case SCX_EXIT_UNREG_BPF:
4879 return "unregistered from BPF";
4880 case SCX_EXIT_UNREG_KERN:
4881 return "unregistered from the main kernel";
4882 case SCX_EXIT_SYSRQ:
4883 return "disabled by sysrq-S";
4884 case SCX_EXIT_ERROR:
4885 return "runtime error";
4886 case SCX_EXIT_ERROR_BPF:
4887 return "scx_bpf_error";
4888 case SCX_EXIT_ERROR_STALL:
4889 return "runnable task stall";
4890 default:
4891 return "<UNKNOWN>";
4892 }
4893 }
4894
4895 static void scx_ops_disable_workfn(struct kthread_work *work)
4896 {
4897 struct scx_exit_info *ei = scx_exit_info;
4898 struct scx_task_iter sti;
4899 struct task_struct *p;
4900 struct rhashtable_iter rht_iter;
4901 struct scx_dispatch_q *dsq;
4902 int i, kind;
4903
4904 kind = atomic_read(&scx_exit_kind);
4905 while (true) {
4906 /*
4907 * NONE indicates that a new scx_ops has been registered since
4908 * disable was scheduled - don't kill the new ops. DONE
4909 * indicates that the ops has already been disabled.
4910 */
4911 if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
4912 return;
4913 if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
4914 break;
4915 }
4916 ei->kind = kind;
4917 ei->reason = scx_exit_reason(ei->kind);
4918
4919 /* guarantee forward progress by bypassing scx_ops */
4920 scx_ops_bypass(true);
4921
4922 switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
4923 case SCX_OPS_DISABLING:
4924 WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4925 break;
4926 case SCX_OPS_DISABLED:
4927 pr_warn("sched_ext: ops error detected without ops (%s)\n",
4928 scx_exit_info->msg);
4929 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4930 SCX_OPS_DISABLING);
4931 goto done;
4932 default:
4933 break;
4934 }
4935
4936 /*
4937 * Here, every runnable task is guaranteed to make forward progress and
4938 * we can safely use blocking synchronization constructs. Actually
4939 * disable ops.
4940 */
4941 mutex_lock(&scx_ops_enable_mutex);
4942
4943 static_branch_disable(&__scx_switched_all);
4944 WRITE_ONCE(scx_switching_all, false);
4945
4946 /*
4947 * Shut down cgroup support before tasks so that the cgroup attach path
4948 * doesn't race against scx_ops_exit_task().
4949 */
4950 scx_cgroup_lock();
4951 scx_cgroup_exit();
4952 scx_cgroup_unlock();
4953
4954 /*
4955 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4956 * must be switched out and exited synchronously.
4957 */
4958 percpu_down_write(&scx_fork_rwsem);
4959
4960 scx_ops_init_task_enabled = false;
4961
4962 scx_task_iter_start(&sti);
4963 while ((p = scx_task_iter_next_locked(&sti))) {
4964 const struct sched_class *old_class = p->sched_class;
4965 const struct sched_class *new_class =
4966 __setscheduler_class(p->policy, p->prio);
4967 struct sched_enq_and_set_ctx ctx;
4968
4969 if (old_class != new_class && p->se.sched_delayed)
4970 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
4971
4972 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4973
4974 p->sched_class = new_class;
4975 check_class_changing(task_rq(p), p, old_class);
4976
4977 sched_enq_and_set_task(&ctx);
4978
4979 check_class_changed(task_rq(p), p, old_class, p->prio);
4980 scx_ops_exit_task(p);
4981 }
4982 scx_task_iter_stop(&sti);
4983 percpu_up_write(&scx_fork_rwsem);
4984
4985 /* no task is on scx, turn off all the switches and flush in-progress calls */
4986 static_branch_disable(&__scx_ops_enabled);
4987 for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
4988 static_branch_disable(&scx_has_op[i]);
4989 static_branch_disable(&scx_ops_enq_last);
4990 static_branch_disable(&scx_ops_enq_exiting);
4991 static_branch_disable(&scx_ops_cpu_preempt);
4992 static_branch_disable(&scx_builtin_idle_enabled);
4993 synchronize_rcu();
4994
4995 if (ei->kind >= SCX_EXIT_ERROR) {
4996 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4997 scx_ops.name, ei->reason);
4998
4999 if (ei->msg[0] != '\0')
5000 pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
5001 #ifdef CONFIG_STACKTRACE
5002 stack_trace_print(ei->bt, ei->bt_len, 2);
5003 #endif
5004 } else {
5005 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5006 scx_ops.name, ei->reason);
5007 }
5008
5009 if (scx_ops.exit)
5010 SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
5011
5012 cancel_delayed_work_sync(&scx_watchdog_work);
5013
5014 /*
5015 * Delete the kobject from the hierarchy eagerly in addition to just
5016 * dropping a reference. Otherwise, if the object is deleted
5017 * asynchronously, sysfs could observe an object of the same name still
5018 * in the hierarchy when another scheduler is loaded.
5019 */
5020 kobject_del(scx_root_kobj);
5021 kobject_put(scx_root_kobj);
5022 scx_root_kobj = NULL;
5023
5024 memset(&scx_ops, 0, sizeof(scx_ops));
5025
5026 rhashtable_walk_enter(&dsq_hash, &rht_iter);
5027 do {
5028 rhashtable_walk_start(&rht_iter);
5029
5030 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
5031 destroy_dsq(dsq->id);
5032
5033 rhashtable_walk_stop(&rht_iter);
5034 } while (dsq == ERR_PTR(-EAGAIN));
5035 rhashtable_walk_exit(&rht_iter);
5036
5037 free_percpu(scx_dsp_ctx);
5038 scx_dsp_ctx = NULL;
5039 scx_dsp_max_batch = 0;
5040
5041 free_exit_info(scx_exit_info);
5042 scx_exit_info = NULL;
5043
5044 mutex_unlock(&scx_ops_enable_mutex);
5045
5046 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
5047 SCX_OPS_DISABLING);
5048 done:
5049 scx_ops_bypass(false);
5050 }
5051
5052 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
5053
5054 static void schedule_scx_ops_disable_work(void)
5055 {
5056 struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
5057
5058 /*
5059 * We may be called spuriously before the first bpf_sched_ext_reg(). If
5060 * scx_ops_helper isn't set up yet, there's nothing to do.
5061 */
5062 if (helper)
5063 kthread_queue_work(helper, &scx_ops_disable_work);
5064 }
5065
5066 static void scx_ops_disable(enum scx_exit_kind kind)
5067 {
5068 int none = SCX_EXIT_NONE;
5069
5070 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5071 kind = SCX_EXIT_ERROR;
5072
5073 atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
5074
5075 schedule_scx_ops_disable_work();
5076 }
5077
5078 static void dump_newline(struct seq_buf *s)
5079 {
5080 trace_sched_ext_dump("");
5081
5082 /* @s may be zero sized and seq_buf triggers WARN if so */
5083 if (s->size)
5084 seq_buf_putc(s, '\n');
5085 }
5086
5087 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
5088 {
5089 va_list args;
5090
5091 #ifdef CONFIG_TRACEPOINTS
5092 if (trace_sched_ext_dump_enabled()) {
5093 /* protected by scx_dump_state()::dump_lock */
5094 static char line_buf[SCX_EXIT_MSG_LEN];
5095
5096 va_start(args, fmt);
5097 vscnprintf(line_buf, sizeof(line_buf), fmt, args);
5098 va_end(args);
5099
5100 trace_sched_ext_dump(line_buf);
5101 }
5102 #endif
5103 /* @s may be zero sized and seq_buf triggers WARN if so */
5104 if (s->size) {
5105 va_start(args, fmt);
5106 seq_buf_vprintf(s, fmt, args);
5107 va_end(args);
5108
5109 seq_buf_putc(s, '\n');
5110 }
5111 }
5112
5113 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
5114 const unsigned long *bt, unsigned int len)
5115 {
5116 unsigned int i;
5117
5118 for (i = 0; i < len; i++)
5119 dump_line(s, "%s%pS", prefix, (void *)bt[i]);
5120 }
5121
5122 static void ops_dump_init(struct seq_buf *s, const char *prefix)
5123 {
5124 struct scx_dump_data *dd = &scx_dump_data;
5125
5126 lockdep_assert_irqs_disabled();
5127
5128 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
5129 dd->first = true;
5130 dd->cursor = 0;
5131 dd->s = s;
5132 dd->prefix = prefix;
5133 }
5134
5135 static void ops_dump_flush(void)
5136 {
5137 struct scx_dump_data *dd = &scx_dump_data;
5138 char *line = dd->buf.line;
5139
5140 if (!dd->cursor)
5141 return;
5142
5143 /*
5144 * There's something to flush and this is the first line. Insert a blank
5145 * line to distinguish ops dump.
5146 */
5147 if (dd->first) {
5148 dump_newline(dd->s);
5149 dd->first = false;
5150 }
5151
5152 /*
5153 * There may be multiple lines in $line. Scan and emit each line
5154 * separately.
5155 */
5156 while (true) {
5157 char *end = line;
5158 char c;
5159
5160 while (*end != '\n' && *end != '\0')
5161 end++;
5162
5163 /*
5164 * If $line overflowed, it may not have a newline at the end.
5165 * Always emit with a newline.
5166 */
5167 c = *end;
5168 *end = '\0';
5169 dump_line(dd->s, "%s%s", dd->prefix, line);
5170 if (c == '\0')
5171 break;
5172
5173 /* move to the next line */
5174 end++;
5175 if (*end == '\0')
5176 break;
5177 line = end;
5178 }
5179
5180 dd->cursor = 0;
5181 }
5182
5183 static void ops_dump_exit(void)
5184 {
5185 ops_dump_flush();
5186 scx_dump_data.cpu = -1;
5187 }
5188
5189 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
5190 struct task_struct *p, char marker)
5191 {
5192 static unsigned long bt[SCX_EXIT_BT_LEN];
5193 char dsq_id_buf[19] = "(n/a)";
5194 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
5195 unsigned int bt_len = 0;
5196
5197 if (p->scx.dsq)
5198 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
5199 (unsigned long long)p->scx.dsq->id);
5200
5201 dump_newline(s);
5202 dump_line(s, " %c%c %s[%d] %+ldms",
5203 marker, task_state_to_char(p), p->comm, p->pid,
5204 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
5205 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
5206 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
5207 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
5208 ops_state >> SCX_OPSS_QSEQ_SHIFT);
5209 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
5210 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
5211 p->scx.dsq_vtime);
5212 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5213
5214 if (SCX_HAS_OP(dump_task)) {
5215 ops_dump_init(s, " ");
5216 SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
5217 ops_dump_exit();
5218 }
5219
5220 #ifdef CONFIG_STACKTRACE
5221 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5222 #endif
5223 if (bt_len) {
5224 dump_newline(s);
5225 dump_stack_trace(s, " ", bt, bt_len);
5226 }
5227 }
5228
5229 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5230 {
5231 static DEFINE_SPINLOCK(dump_lock);
5232 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5233 struct scx_dump_ctx dctx = {
5234 .kind = ei->kind,
5235 .exit_code = ei->exit_code,
5236 .reason = ei->reason,
5237 .at_ns = ktime_get_ns(),
5238 .at_jiffies = jiffies,
5239 };
5240 struct seq_buf s;
5241 unsigned long flags;
5242 char *buf;
5243 int cpu;
5244
5245 spin_lock_irqsave(&dump_lock, flags);
5246
5247 seq_buf_init(&s, ei->dump, dump_len);
5248
5249 if (ei->kind == SCX_EXIT_NONE) {
5250 dump_line(&s, "Debug dump triggered by %s", ei->reason);
5251 } else {
5252 dump_line(&s, "%s[%d] triggered exit kind %d:",
5253 current->comm, current->pid, ei->kind);
5254 dump_line(&s, " %s (%s)", ei->reason, ei->msg);
5255 dump_newline(&s);
5256 dump_line(&s, "Backtrace:");
5257 dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
5258 }
5259
5260 if (SCX_HAS_OP(dump)) {
5261 ops_dump_init(&s, "");
5262 SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
5263 ops_dump_exit();
5264 }
5265
5266 dump_newline(&s);
5267 dump_line(&s, "CPU states");
5268 dump_line(&s, "----------");
5269
5270 for_each_possible_cpu(cpu) {
5271 struct rq *rq = cpu_rq(cpu);
5272 struct rq_flags rf;
5273 struct task_struct *p;
5274 struct seq_buf ns;
5275 size_t avail, used;
5276 bool idle;
5277
5278 rq_lock(rq, &rf);
5279
5280 idle = list_empty(&rq->scx.runnable_list) &&
5281 rq->curr->sched_class == &idle_sched_class;
5282
5283 if (idle && !SCX_HAS_OP(dump_cpu))
5284 goto next;
5285
5286 /*
5287 * We don't yet know whether ops.dump_cpu() will produce output
5288 * and we may want to skip the default CPU dump if it doesn't.
5289 * Use a nested seq_buf to generate the standard dump so that we
5290 * can decide whether to commit later.
5291 */
5292 avail = seq_buf_get_buf(&s, &buf);
5293 seq_buf_init(&ns, buf, avail);
5294
5295 dump_newline(&ns);
5296 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5297 cpu, rq->scx.nr_running, rq->scx.flags,
5298 rq->scx.cpu_released, rq->scx.ops_qseq,
5299 rq->scx.pnt_seq);
5300 dump_line(&ns, " curr=%s[%d] class=%ps",
5301 rq->curr->comm, rq->curr->pid,
5302 rq->curr->sched_class);
5303 if (!cpumask_empty(rq->scx.cpus_to_kick))
5304 dump_line(&ns, " cpus_to_kick : %*pb",
5305 cpumask_pr_args(rq->scx.cpus_to_kick));
5306 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5307 dump_line(&ns, " idle_to_kick : %*pb",
5308 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5309 if (!cpumask_empty(rq->scx.cpus_to_preempt))
5310 dump_line(&ns, " cpus_to_preempt: %*pb",
5311 cpumask_pr_args(rq->scx.cpus_to_preempt));
5312 if (!cpumask_empty(rq->scx.cpus_to_wait))
5313 dump_line(&ns, " cpus_to_wait : %*pb",
5314 cpumask_pr_args(rq->scx.cpus_to_wait));
5315
5316 used = seq_buf_used(&ns);
5317 if (SCX_HAS_OP(dump_cpu)) {
5318 ops_dump_init(&ns, " ");
5319 SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
5320 ops_dump_exit();
5321 }
5322
5323 /*
5324 * If idle && nothing generated by ops.dump_cpu(), there's
5325 * nothing interesting. Skip.
5326 */
5327 if (idle && used == seq_buf_used(&ns))
5328 goto next;
5329
5330 /*
5331 * $s may already have overflowed when $ns was created. If so,
5332 * calling commit on it will trigger BUG.
5333 */
5334 if (avail) {
5335 seq_buf_commit(&s, seq_buf_used(&ns));
5336 if (seq_buf_has_overflowed(&ns))
5337 seq_buf_set_overflow(&s);
5338 }
5339
5340 if (rq->curr->sched_class == &ext_sched_class)
5341 scx_dump_task(&s, &dctx, rq->curr, '*');
5342
5343 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5344 scx_dump_task(&s, &dctx, p, ' ');
5345 next:
5346 rq_unlock(rq, &rf);
5347 }
5348
5349 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5350 memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5351 trunc_marker, sizeof(trunc_marker));
5352
5353 spin_unlock_irqrestore(&dump_lock, flags);
5354 }
5355
5356 static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
5357 {
5358 struct scx_exit_info *ei = scx_exit_info;
5359
5360 if (ei->kind >= SCX_EXIT_ERROR)
5361 scx_dump_state(ei, scx_ops.exit_dump_len);
5362
5363 schedule_scx_ops_disable_work();
5364 }
5365
5366 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
5367
5368 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
5369 s64 exit_code,
5370 const char *fmt, ...)
5371 {
5372 struct scx_exit_info *ei = scx_exit_info;
5373 int none = SCX_EXIT_NONE;
5374 va_list args;
5375
5376 if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
5377 return;
5378
5379 ei->exit_code = exit_code;
5380 #ifdef CONFIG_STACKTRACE
5381 if (kind >= SCX_EXIT_ERROR)
5382 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5383 #endif
5384 va_start(args, fmt);
5385 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5386 va_end(args);
5387
5388 /*
5389 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5390 * in scx_ops_disable_workfn().
5391 */
5392 ei->kind = kind;
5393 ei->reason = scx_exit_reason(ei->kind);
5394
5395 irq_work_queue(&scx_ops_error_irq_work);
5396 }
5397
5398 static struct kthread_worker *scx_create_rt_helper(const char *name)
5399 {
5400 struct kthread_worker *helper;
5401
5402 helper = kthread_create_worker(0, name);
5403 if (helper)
5404 sched_set_fifo(helper->task);
5405 return helper;
5406 }
5407
5408 static void check_hotplug_seq(const struct sched_ext_ops *ops)
5409 {
5410 unsigned long long global_hotplug_seq;
5411
5412 /*
5413 * If a hotplug event has occurred between when a scheduler was
5414 * initialized and when we were able to attach, exit and notify user
5415 * space about it.
5416 */
5417 if (ops->hotplug_seq) {
5418 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5419 if (ops->hotplug_seq != global_hotplug_seq) {
5420 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5421 "expected hotplug seq %llu did not match actual %llu",
5422 ops->hotplug_seq, global_hotplug_seq);
5423 }
5424 }
5425 }
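
/*
 * Illustrative flow (not part of this file): a userspace loader can guard
 * against the above race by snapshotting the global hotplug sequence number
 * before load and storing it into ops.hotplug_seq. A sketch, assuming the
 * counter is exported at /sys/kernel/sched_ext/hotplug_seq, a libbpf
 * skeleton named "example" and a hypothetical read_u64() helper:
 *
 *	skel->struct_ops.example_ops->hotplug_seq =
 *		read_u64("/sys/kernel/sched_ext/hotplug_seq");
 *
 * If a CPU is hot-plugged between the read and attach, scx_ops_exit() above
 * reports SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG and the loader can
 * simply retry.
 */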
5426
5427 static int validate_ops(const struct sched_ext_ops *ops)
5428 {
5429 /*
5430 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5431 * ops.enqueue() callback isn't implemented.
5432 */
5433 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5434 scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5435 return -EINVAL;
5436 }
5437
5438 return 0;
5439 }
5440
5441 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5442 {
5443 struct scx_task_iter sti;
5444 struct task_struct *p;
5445 unsigned long timeout;
5446 int i, cpu, node, ret;
5447
5448 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5449 cpu_possible_mask)) {
5450 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5451 return -EINVAL;
5452 }
5453
5454 mutex_lock(&scx_ops_enable_mutex);
5455
5456 if (!scx_ops_helper) {
5457 WRITE_ONCE(scx_ops_helper,
5458 scx_create_rt_helper("sched_ext_ops_helper"));
5459 if (!scx_ops_helper) {
5460 ret = -ENOMEM;
5461 goto err_unlock;
5462 }
5463 }
5464
5465 if (!global_dsqs) {
5466 struct scx_dispatch_q **dsqs;
5467
5468 dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
5469 if (!dsqs) {
5470 ret = -ENOMEM;
5471 goto err_unlock;
5472 }
5473
5474 for_each_node_state(node, N_POSSIBLE) {
5475 struct scx_dispatch_q *dsq;
5476
5477 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5478 if (!dsq) {
5479 for_each_node_state(node, N_POSSIBLE)
5480 kfree(dsqs[node]);
5481 kfree(dsqs);
5482 ret = -ENOMEM;
5483 goto err_unlock;
5484 }
5485
5486 init_dsq(dsq, SCX_DSQ_GLOBAL);
5487 dsqs[node] = dsq;
5488 }
5489
5490 global_dsqs = dsqs;
5491 }
5492
5493 if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5494 ret = -EBUSY;
5495 goto err_unlock;
5496 }
5497
5498 scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5499 if (!scx_root_kobj) {
5500 ret = -ENOMEM;
5501 goto err_unlock;
5502 }
5503
5504 scx_root_kobj->kset = scx_kset;
5505 ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5506 if (ret < 0)
5507 goto err;
5508
5509 scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5510 if (!scx_exit_info) {
5511 ret = -ENOMEM;
5512 goto err_del;
5513 }
5514
5515 /*
5516 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5517 * disable path. Failure triggers full disabling from here on.
5518 */
5519 scx_ops = *ops;
5520
5521 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5522 SCX_OPS_DISABLED);
5523
5524 atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5525 scx_warned_zero_slice = false;
5526
5527 atomic_long_set(&scx_nr_rejected, 0);
5528
5529 for_each_possible_cpu(cpu)
5530 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5531
5532 /*
5533 * Keep CPUs stable during enable so that the BPF scheduler can track
5534 * online CPUs by watching ->on/offline_cpu() after ->init().
5535 */
5536 cpus_read_lock();
5537
5538 if (scx_ops.init) {
5539 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5540 if (ret) {
5541 ret = ops_sanitize_err("init", ret);
5542 cpus_read_unlock();
5543 scx_ops_error("ops.init() failed (%d)", ret);
5544 goto err_disable;
5545 }
5546 }
5547
5548 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5549 if (((void (**)(void))ops)[i])
5550 static_branch_enable_cpuslocked(&scx_has_op[i]);
5551
5552 check_hotplug_seq(ops);
5553 #ifdef CONFIG_SMP
5554 update_selcpu_topology();
5555 #endif
5556 cpus_read_unlock();
5557
5558 ret = validate_ops(ops);
5559 if (ret)
5560 goto err_disable;
5561
5562 WARN_ON_ONCE(scx_dsp_ctx);
5563 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5564 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5565 scx_dsp_max_batch),
5566 __alignof__(struct scx_dsp_ctx));
5567 if (!scx_dsp_ctx) {
5568 ret = -ENOMEM;
5569 goto err_disable;
5570 }
5571
5572 if (ops->timeout_ms)
5573 timeout = msecs_to_jiffies(ops->timeout_ms);
5574 else
5575 timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5576
5577 WRITE_ONCE(scx_watchdog_timeout, timeout);
5578 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5579 queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5580 scx_watchdog_timeout / 2);
5581
5582 /*
5583 * Once __scx_ops_enabled is set, %current can be switched to SCX
5584 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5585 * userspace scheduling) may not function correctly before all tasks are
5586 * switched. Init in bypass mode to guarantee forward progress.
5587 */
5588 scx_ops_bypass(true);
5589
5590 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5591 if (((void (**)(void))ops)[i])
5592 static_branch_enable(&scx_has_op[i]);
5593
5594 if (ops->flags & SCX_OPS_ENQ_LAST)
5595 static_branch_enable(&scx_ops_enq_last);
5596
5597 if (ops->flags & SCX_OPS_ENQ_EXITING)
5598 static_branch_enable(&scx_ops_enq_exiting);
5599 if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5600 static_branch_enable(&scx_ops_cpu_preempt);
5601
5602 if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
5603 reset_idle_masks();
5604 static_branch_enable(&scx_builtin_idle_enabled);
5605 } else {
5606 static_branch_disable(&scx_builtin_idle_enabled);
5607 }
5608
5609 /*
5610 * Lock out forks, cgroup on/offlining and moves before opening the
5611 * floodgate so that they don't wander into the operations prematurely.
5612 */
5613 percpu_down_write(&scx_fork_rwsem);
5614
5615 WARN_ON_ONCE(scx_ops_init_task_enabled);
5616 scx_ops_init_task_enabled = true;
5617
5618 /*
5619 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5620 * preventing new tasks from being added. No need to exclude tasks
5621 * leaving as sched_ext_free() can handle both prepped and enabled
5622 * tasks. Prep all tasks first and then enable them with preemption
5623 * disabled.
5624 *
5625 * All cgroups should be initialized before scx_ops_init_task() so that
5626 * the BPF scheduler can reliably track each task's cgroup membership
5627 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5628 * migrations while tasks are being initialized so that
5629 * scx_cgroup_can_attach() never sees uninitialized tasks.
5630 */
5631 scx_cgroup_lock();
5632 ret = scx_cgroup_init();
5633 if (ret)
5634 goto err_disable_unlock_all;
5635
5636 scx_task_iter_start(&sti);
5637 while ((p = scx_task_iter_next_locked(&sti))) {
5638 /*
5639 * @p may already be dead, have lost all its usage counts and
5640 * be waiting for RCU grace period before being freed. @p can't
5641 * be initialized for SCX in such cases and should be ignored.
5642 */
5643 if (!tryget_task_struct(p))
5644 continue;
5645
5646 scx_task_iter_unlock(&sti);
5647
5648 ret = scx_ops_init_task(p, task_group(p), false);
5649 if (ret) {
5650 put_task_struct(p);
5651 scx_task_iter_relock(&sti);
5652 scx_task_iter_stop(&sti);
5653 scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5654 ret, p->comm, p->pid);
5655 goto err_disable_unlock_all;
5656 }
5657
5658 scx_set_task_state(p, SCX_TASK_READY);
5659
5660 put_task_struct(p);
5661 scx_task_iter_relock(&sti);
5662 }
5663 scx_task_iter_stop(&sti);
5664 scx_cgroup_unlock();
5665 percpu_up_write(&scx_fork_rwsem);
5666
5667 /*
5668 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5669 * all eligible tasks.
5670 */
5671 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5672 static_branch_enable(&__scx_ops_enabled);
5673
5674 /*
5675 * We're fully committed and can't fail. The task READY -> ENABLED
5676 * transitions here are synchronized against sched_ext_free() through
5677 * scx_tasks_lock.
5678 */
5679 percpu_down_write(&scx_fork_rwsem);
5680 scx_task_iter_start(&sti);
5681 while ((p = scx_task_iter_next_locked(&sti))) {
5682 const struct sched_class *old_class = p->sched_class;
5683 const struct sched_class *new_class =
5684 __setscheduler_class(p->policy, p->prio);
5685 struct sched_enq_and_set_ctx ctx;
5686
5687 if (old_class != new_class && p->se.sched_delayed)
5688 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5689
5690 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5691
5692 p->scx.slice = SCX_SLICE_DFL;
5693 p->sched_class = new_class;
5694 check_class_changing(task_rq(p), p, old_class);
5695
5696 sched_enq_and_set_task(&ctx);
5697
5698 check_class_changed(task_rq(p), p, old_class, p->prio);
5699 }
5700 scx_task_iter_stop(&sti);
5701 percpu_up_write(&scx_fork_rwsem);
5702
5703 scx_ops_bypass(false);
5704
5705 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5706 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5707 goto err_disable;
5708 }
5709
5710 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5711 static_branch_enable(&__scx_switched_all);
5712
5713 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5714 scx_ops.name, scx_switched_all() ? "" : " (partial)");
5715 kobject_uevent(scx_root_kobj, KOBJ_ADD);
5716 mutex_unlock(&scx_ops_enable_mutex);
5717
5718 atomic_long_inc(&scx_enable_seq);
5719
5720 return 0;
5721
5722 err_del:
5723 kobject_del(scx_root_kobj);
5724 err:
5725 kobject_put(scx_root_kobj);
5726 scx_root_kobj = NULL;
5727 if (scx_exit_info) {
5728 free_exit_info(scx_exit_info);
5729 scx_exit_info = NULL;
5730 }
5731 err_unlock:
5732 mutex_unlock(&scx_ops_enable_mutex);
5733 return ret;
5734
5735 err_disable_unlock_all:
5736 scx_cgroup_unlock();
5737 percpu_up_write(&scx_fork_rwsem);
5738 scx_ops_bypass(false);
5739 err_disable:
5740 mutex_unlock(&scx_ops_enable_mutex);
5741 /*
5742 * Returning an error code here would not pass all the error information
5743 * to userspace. Record errno using scx_ops_error() for cases where
5744 * scx_ops_error() wasn't already invoked, and exit indicating success so
5745 * that the error is reported through ops.exit() with all the details.
5746 *
5747 * Flush scx_ops_disable_work to ensure that error is reported before
5748 * init completion.
5749 */
5750 scx_ops_error("scx_ops_enable() failed (%d)", ret);
5751 kthread_flush_work(&scx_ops_disable_work);
5752 return 0;
5753 }
5754
5755
5756 /********************************************************************************
5757 * bpf_struct_ops plumbing.
5758 */
5759 #include <linux/bpf_verifier.h>
5760 #include <linux/bpf.h>
5761 #include <linux/btf.h>
5762
5763 static const struct btf_type *task_struct_type;
5764
5765 static bool bpf_scx_is_valid_access(int off, int size,
5766 enum bpf_access_type type,
5767 const struct bpf_prog *prog,
5768 struct bpf_insn_access_aux *info)
5769 {
5770 if (type != BPF_READ)
5771 return false;
5772 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5773 return false;
5774 if (off % size != 0)
5775 return false;
5776
5777 return btf_ctx_access(off, size, type, prog, info);
5778 }
5779
5780 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5781 const struct bpf_reg_state *reg, int off,
5782 int size)
5783 {
5784 const struct btf_type *t;
5785
5786 t = btf_type_by_id(reg->btf, reg->btf_id);
5787 if (t == task_struct_type) {
5788 if (off >= offsetof(struct task_struct, scx.slice) &&
5789 off + size <= offsetofend(struct task_struct, scx.slice))
5790 return SCALAR_VALUE;
5791 if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5792 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5793 return SCALAR_VALUE;
5794 if (off >= offsetof(struct task_struct, scx.disallow) &&
5795 off + size <= offsetofend(struct task_struct, scx.disallow))
5796 return SCALAR_VALUE;
5797 }
5798
5799 return -EACCES;
5800 }
5801
5802 static const struct bpf_func_proto *
5803 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5804 {
5805 switch (func_id) {
5806 case BPF_FUNC_task_storage_get:
5807 return &bpf_task_storage_get_proto;
5808 case BPF_FUNC_task_storage_delete:
5809 return &bpf_task_storage_delete_proto;
5810 default:
5811 return bpf_base_func_proto(func_id, prog);
5812 }
5813 }
5814
5815 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5816 .get_func_proto = bpf_scx_get_func_proto,
5817 .is_valid_access = bpf_scx_is_valid_access,
5818 .btf_struct_access = bpf_scx_btf_struct_access,
5819 };
5820
5821 static int bpf_scx_init_member(const struct btf_type *t,
5822 const struct btf_member *member,
5823 void *kdata, const void *udata)
5824 {
5825 const struct sched_ext_ops *uops = udata;
5826 struct sched_ext_ops *ops = kdata;
5827 u32 moff = __btf_member_bit_offset(t, member) / 8;
5828 int ret;
5829
5830 switch (moff) {
5831 case offsetof(struct sched_ext_ops, dispatch_max_batch):
5832 if (*(u32 *)(udata + moff) > INT_MAX)
5833 return -E2BIG;
5834 ops->dispatch_max_batch = *(u32 *)(udata + moff);
5835 return 1;
5836 case offsetof(struct sched_ext_ops, flags):
5837 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5838 return -EINVAL;
5839 ops->flags = *(u64 *)(udata + moff);
5840 return 1;
5841 case offsetof(struct sched_ext_ops, name):
5842 ret = bpf_obj_name_cpy(ops->name, uops->name,
5843 sizeof(ops->name));
5844 if (ret < 0)
5845 return ret;
5846 if (ret == 0)
5847 return -EINVAL;
5848 return 1;
5849 case offsetof(struct sched_ext_ops, timeout_ms):
5850 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5851 SCX_WATCHDOG_MAX_TIMEOUT)
5852 return -E2BIG;
5853 ops->timeout_ms = *(u32 *)(udata + moff);
5854 return 1;
5855 case offsetof(struct sched_ext_ops, exit_dump_len):
5856 ops->exit_dump_len =
5857 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5858 return 1;
5859 case offsetof(struct sched_ext_ops, hotplug_seq):
5860 ops->hotplug_seq = *(u64 *)(udata + moff);
5861 return 1;
5862 }
5863
5864 return 0;
5865 }
5866
5867 static int bpf_scx_check_member(const struct btf_type *t,
5868 const struct btf_member *member,
5869 const struct bpf_prog *prog)
5870 {
5871 u32 moff = __btf_member_bit_offset(t, member) / 8;
5872
5873 switch (moff) {
5874 case offsetof(struct sched_ext_ops, init_task):
5875 #ifdef CONFIG_EXT_GROUP_SCHED
5876 case offsetof(struct sched_ext_ops, cgroup_init):
5877 case offsetof(struct sched_ext_ops, cgroup_exit):
5878 case offsetof(struct sched_ext_ops, cgroup_prep_move):
5879 #endif
5880 case offsetof(struct sched_ext_ops, cpu_online):
5881 case offsetof(struct sched_ext_ops, cpu_offline):
5882 case offsetof(struct sched_ext_ops, init):
5883 case offsetof(struct sched_ext_ops, exit):
5884 break;
5885 default:
5886 if (prog->sleepable)
5887 return -EINVAL;
5888 }
5889
5890 return 0;
5891 }
5892
5893 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5894 {
5895 return scx_ops_enable(kdata, link);
5896 }
5897
5898 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5899 {
5900 scx_ops_disable(SCX_EXIT_UNREG);
5901 kthread_flush_work(&scx_ops_disable_work);
5902 }
5903
5904 static int bpf_scx_init(struct btf *btf)
5905 {
5906 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5907
5908 return 0;
5909 }
5910
5911 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5912 {
5913 /*
5914 * sched_ext does not support updating the actively-loaded BPF
5915 * scheduler, as registering a BPF scheduler can always fail if the
5916 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5917 * etc. Similarly, we can always race with unregistration happening
5918 * elsewhere, such as with sysrq.
5919 */
5920 return -EOPNOTSUPP;
5921 }
5922
5923 static int bpf_scx_validate(void *kdata)
5924 {
5925 return 0;
5926 }
5927
5928 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5929 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
5930 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
5931 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
5932 static void sched_ext_ops__tick(struct task_struct *p) {}
5933 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
5934 static void sched_ext_ops__running(struct task_struct *p) {}
5935 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
5936 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
5937 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
5938 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
5939 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
5940 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
5941 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
5942 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
5943 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
5944 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5945 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
5946 static void sched_ext_ops__enable(struct task_struct *p) {}
5947 static void sched_ext_ops__disable(struct task_struct *p) {}
5948 #ifdef CONFIG_EXT_GROUP_SCHED
5949 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5950 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
5951 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5952 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5953 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5954 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
5955 #endif
5956 static void sched_ext_ops__cpu_online(s32 cpu) {}
5957 static void sched_ext_ops__cpu_offline(s32 cpu) {}
5958 static s32 sched_ext_ops__init(void) { return -EINVAL; }
5959 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
5960 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
5961 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5962 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5963
5964 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5965 .select_cpu = sched_ext_ops__select_cpu,
5966 .enqueue = sched_ext_ops__enqueue,
5967 .dequeue = sched_ext_ops__dequeue,
5968 .dispatch = sched_ext_ops__dispatch,
5969 .tick = sched_ext_ops__tick,
5970 .runnable = sched_ext_ops__runnable,
5971 .running = sched_ext_ops__running,
5972 .stopping = sched_ext_ops__stopping,
5973 .quiescent = sched_ext_ops__quiescent,
5974 .yield = sched_ext_ops__yield,
5975 .core_sched_before = sched_ext_ops__core_sched_before,
5976 .set_weight = sched_ext_ops__set_weight,
5977 .set_cpumask = sched_ext_ops__set_cpumask,
5978 .update_idle = sched_ext_ops__update_idle,
5979 .cpu_acquire = sched_ext_ops__cpu_acquire,
5980 .cpu_release = sched_ext_ops__cpu_release,
5981 .init_task = sched_ext_ops__init_task,
5982 .exit_task = sched_ext_ops__exit_task,
5983 .enable = sched_ext_ops__enable,
5984 .disable = sched_ext_ops__disable,
5985 #ifdef CONFIG_EXT_GROUP_SCHED
5986 .cgroup_init = sched_ext_ops__cgroup_init,
5987 .cgroup_exit = sched_ext_ops__cgroup_exit,
5988 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move,
5989 .cgroup_move = sched_ext_ops__cgroup_move,
5990 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move,
5991 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight,
5992 #endif
5993 .cpu_online = sched_ext_ops__cpu_online,
5994 .cpu_offline = sched_ext_ops__cpu_offline,
5995 .init = sched_ext_ops__init,
5996 .exit = sched_ext_ops__exit,
5997 .dump = sched_ext_ops__dump,
5998 .dump_cpu = sched_ext_ops__dump_cpu,
5999 .dump_task = sched_ext_ops__dump_task,
6000 };
6001
6002 static struct bpf_struct_ops bpf_sched_ext_ops = {
6003 .verifier_ops = &bpf_scx_verifier_ops,
6004 .reg = bpf_scx_reg,
6005 .unreg = bpf_scx_unreg,
6006 .check_member = bpf_scx_check_member,
6007 .init_member = bpf_scx_init_member,
6008 .init = bpf_scx_init,
6009 .update = bpf_scx_update,
6010 .validate = bpf_scx_validate,
6011 .name = "sched_ext_ops",
6012 .owner = THIS_MODULE,
6013 .cfi_stubs = &__bpf_ops_sched_ext_ops
6014 };
6015
6016
6017 /********************************************************************************
6018 * System integration and init.
6019 */
6020
6021 static void sysrq_handle_sched_ext_reset(u8 key)
6022 {
6023 if (scx_ops_helper)
6024 scx_ops_disable(SCX_EXIT_SYSRQ);
6025 else
6026 pr_info("sched_ext: BPF scheduler not yet used\n");
6027 }
6028
6029 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
6030 .handler = sysrq_handle_sched_ext_reset,
6031 .help_msg = "reset-sched-ext(S)",
6032 .action_msg = "Disable sched_ext and revert all tasks to CFS",
6033 .enable_mask = SYSRQ_ENABLE_RTNICE,
6034 };
6035
6036 static void sysrq_handle_sched_ext_dump(u8 key)
6037 {
6038 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
6039
6040 if (scx_enabled())
6041 scx_dump_state(&ei, 0);
6042 }
6043
6044 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
6045 .handler = sysrq_handle_sched_ext_dump,
6046 .help_msg = "dump-sched-ext(D)",
6047 .action_msg = "Trigger sched_ext debug dump",
6048 .enable_mask = SYSRQ_ENABLE_RTNICE,
6049 };
6050
6051 static bool can_skip_idle_kick(struct rq *rq)
6052 {
6053 lockdep_assert_rq_held(rq);
6054
6055 /*
6056 * We can skip idle kicking if @rq is going to go through at least one
6057 * full SCX scheduling cycle before going idle. Just checking whether
6058 * curr is not idle is insufficient because we could be racing
6059 * balance_one() trying to pull the next task from a remote rq, which
6060 * may fail, and @rq may become idle afterwards.
6061 *
6062 * The race window is small and we don't and can't guarantee that @rq is
6063 * only kicked while idle anyway. Skip only when sure.
6064 */
6065 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
6066 }
6067
6068 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
6069 {
6070 struct rq *rq = cpu_rq(cpu);
6071 struct scx_rq *this_scx = &this_rq->scx;
6072 bool should_wait = false;
6073 unsigned long flags;
6074
6075 raw_spin_rq_lock_irqsave(rq, flags);
6076
6077 /*
6078 * During CPU hotplug, a CPU may depend on kicking itself to make
6079 * forward progress. Allow kicking self regardless of online state.
6080 */
6081 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
6082 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
6083 if (rq->curr->sched_class == &ext_sched_class)
6084 rq->curr->scx.slice = 0;
6085 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6086 }
6087
6088 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
6089 pseqs[cpu] = rq->scx.pnt_seq;
6090 should_wait = true;
6091 }
6092
6093 resched_curr(rq);
6094 } else {
6095 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6096 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6097 }
6098
6099 raw_spin_rq_unlock_irqrestore(rq, flags);
6100
6101 return should_wait;
6102 }
6103
6104 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
6105 {
6106 struct rq *rq = cpu_rq(cpu);
6107 unsigned long flags;
6108
6109 raw_spin_rq_lock_irqsave(rq, flags);
6110
6111 if (!can_skip_idle_kick(rq) &&
6112 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
6113 resched_curr(rq);
6114
6115 raw_spin_rq_unlock_irqrestore(rq, flags);
6116 }
6117
6118 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
6119 {
6120 struct rq *this_rq = this_rq();
6121 struct scx_rq *this_scx = &this_rq->scx;
6122 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
6123 bool should_wait = false;
6124 s32 cpu;
6125
6126 for_each_cpu(cpu, this_scx->cpus_to_kick) {
6127 should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
6128 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
6129 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6130 }
6131
6132 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
6133 kick_one_cpu_if_idle(cpu, this_rq);
6134 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6135 }
6136
6137 if (!should_wait)
6138 return;
6139
6140 for_each_cpu(cpu, this_scx->cpus_to_wait) {
6141 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
6142
6143 if (cpu != cpu_of(this_rq)) {
6144 /*
6145 * Pairs with smp_store_release() issued by this CPU in
6146 * switch_class() on the resched path.
6147 *
6148 * We busy-wait here to guarantee that no other task can
6149 * be scheduled on our core before the target CPU has
6150 * entered the resched path.
6151 */
6152 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
6153 cpu_relax();
6154 }
6155
6156 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6157 }
6158 }
6159
6160 /**
6161 * print_scx_info - print out sched_ext scheduler state
6162 * @log_lvl: the log level to use when printing
6163 * @p: target task
6164 *
6165 * If a sched_ext scheduler is enabled, print the name and state of the
6166 * scheduler. If @p is on sched_ext, print further information about the task.
6167 *
6168 * This function can be safely called on any task as long as the task_struct
6169 * itself is accessible. While safe, this function isn't synchronized and may
6170 * print mixed-up or garbage output of limited length.
6171 */
6172 void print_scx_info(const char *log_lvl, struct task_struct *p)
6173 {
6174 enum scx_ops_enable_state state = scx_ops_enable_state();
6175 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
6176 char runnable_at_buf[22] = "?";
6177 struct sched_class *class;
6178 unsigned long runnable_at;
6179
6180 if (state == SCX_OPS_DISABLED)
6181 return;
6182
6183 /*
6184 * Carefully check if the task was running on sched_ext, and then
6185 * carefully copy the time it's been runnable, and its state.
6186 */
6187 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6188 class != &ext_sched_class) {
6189 printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
6190 scx_ops_enable_state_str[state], all);
6191 return;
6192 }
6193
6194 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6195 sizeof(runnable_at)))
6196 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6197 jiffies_delta_msecs(runnable_at, jiffies));
6198
6199 /* print everything onto one line to conserve console space */
6200 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6201 log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
6202 runnable_at_buf);
6203 }
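
/*
 * Illustrative usage (not part of this file): crash/hang reporting paths that
 * already dump generic task state can append the sched_ext context with a
 * single call, e.g.:
 *
 *	print_scx_info(KERN_ERR, p);
 */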
6204
6205 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6206 {
6207 /*
6208 * SCX schedulers often have userspace components which are sometimes
6209 * involved in critical scheduling paths. PM operations involve freezing
6210 * userspace which can lead to scheduling misbehaviors including stalls.
6211 * Let's bypass while PM operations are in progress.
6212 */
6213 switch (event) {
6214 case PM_HIBERNATION_PREPARE:
6215 case PM_SUSPEND_PREPARE:
6216 case PM_RESTORE_PREPARE:
6217 scx_ops_bypass(true);
6218 break;
6219 case PM_POST_HIBERNATION:
6220 case PM_POST_SUSPEND:
6221 case PM_POST_RESTORE:
6222 scx_ops_bypass(false);
6223 break;
6224 }
6225
6226 return NOTIFY_OK;
6227 }
6228
6229 static struct notifier_block scx_pm_notifier = {
6230 .notifier_call = scx_pm_handler,
6231 };
6232
6233 void __init init_sched_ext_class(void)
6234 {
6235 s32 cpu, v;
6236
6237 /*
6238 * The following is to prevent the compiler from optimizing out the enum
6239 * definitions so that BPF scheduler implementations can use them
6240 * through the generated vmlinux.h.
6241 */
6242 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6243 SCX_TG_ONLINE);
6244
6245 BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
6246 #ifdef CONFIG_SMP
6247 BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
6248 BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
6249 #endif
6250 scx_kick_cpus_pnt_seqs =
6251 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6252 __alignof__(scx_kick_cpus_pnt_seqs[0]));
6253 BUG_ON(!scx_kick_cpus_pnt_seqs);
6254
6255 for_each_possible_cpu(cpu) {
6256 struct rq *rq = cpu_rq(cpu);
6257
6258 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6259 INIT_LIST_HEAD(&rq->scx.runnable_list);
6260 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6261
6262 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
6263 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
6264 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
6265 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
6266 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6267 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6268
6269 if (cpu_online(cpu))
6270 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6271 }
6272
6273 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6274 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6275 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6276 }
6277
6278
6279 /********************************************************************************
6280 * Helpers that can be called from the BPF scheduler.
6281 */
6282 #include <linux/btf_ids.h>
6283
6284 __bpf_kfunc_start_defs();
6285
6286 /**
6287 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
6288 * @p: task_struct to select a CPU for
6289 * @prev_cpu: CPU @p was on previously
6290 * @wake_flags: %SCX_WAKE_* flags
6291 * @is_idle: out parameter indicating whether the returned CPU is idle
6292 *
6293 * Can only be called from ops.select_cpu() if the built-in CPU selection is
6294 * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
6295 * @p, @prev_cpu and @wake_flags match ops.select_cpu().
6296 *
6297 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
6298 * currently idle and thus a good candidate for direct dispatching.
6299 */
6300 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
6301 u64 wake_flags, bool *is_idle)
6302 {
6303 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6304 scx_ops_error("built-in idle tracking is disabled");
6305 goto prev_cpu;
6306 }
6307
6308 if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
6309 goto prev_cpu;
6310
6311 #ifdef CONFIG_SMP
6312 return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
6313 #endif
6314
6315 prev_cpu:
6316 *is_idle = false;
6317 return prev_cpu;
6318 }
6319
6320 __bpf_kfunc_end_defs();
6321
6322 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
6323 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
6324 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
6325
6326 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
6327 .owner = THIS_MODULE,
6328 .set = &scx_kfunc_ids_select_cpu,
6329 };
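
/*
 * Illustrative usage (not part of this file): a minimal BPF scheduler's
 * ops.select_cpu() can defer to the default picker above and direct-dispatch
 * when an idle CPU was found. A sketch, assuming the BPF_STRUCT_OPS()
 * convenience macro from the scheduler-side helper headers:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */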
6330
6331 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6332 {
6333 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6334 return false;
6335
6336 lockdep_assert_irqs_disabled();
6337
6338 if (unlikely(!p)) {
6339 scx_ops_error("called with NULL task");
6340 return false;
6341 }
6342
6343 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6344 scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
6345 return false;
6346 }
6347
6348 return true;
6349 }
6350
6351 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6352 u64 enq_flags)
6353 {
6354 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6355 struct task_struct *ddsp_task;
6356
6357 ddsp_task = __this_cpu_read(direct_dispatch_task);
6358 if (ddsp_task) {
6359 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6360 return;
6361 }
6362
6363 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6364 scx_ops_error("dispatch buffer overflow");
6365 return;
6366 }
6367
6368 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6369 .task = p,
6370 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6371 .dsq_id = dsq_id,
6372 .enq_flags = enq_flags,
6373 };
6374 }
6375
6376 __bpf_kfunc_start_defs();
6377
6378 /**
6379 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6380 * @p: task_struct to insert
6381 * @dsq_id: DSQ to insert into
6382 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6383 * @enq_flags: SCX_ENQ_*
6384 *
6385 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6386 * call this function spuriously. Can be called from ops.enqueue(),
6387 * ops.select_cpu(), and ops.dispatch().
6388 *
6389 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6390 * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
6391 * used to target the local DSQ of a CPU other than the enqueueing one. Use
6392 * ops.select_cpu() to be on the target CPU in the first place.
6393 *
6394 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6395 * will be directly inserted into the corresponding dispatch queue after
6396 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6397 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6398 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6399 * task is inserted.
6400 *
6401 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6402 * and this function can be called up to ops.dispatch_max_batch times to insert
6403 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
6404 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
6405 *
6406 * This function doesn't have any locking restrictions and may be called under
6407 * BPF locks (in the future when BPF introduces more flexible locking).
6408 *
6409 * @p is allowed to run for @slice. The scheduling path is triggered on slice
6410 * exhaustion. If zero, the current residual slice is maintained. If
6411 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6412 * scx_bpf_kick_cpu() to trigger scheduling.
6413 */
6414 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6415 u64 enq_flags)
6416 {
6417 if (!scx_dsq_insert_preamble(p, enq_flags))
6418 return;
6419
6420 if (slice)
6421 p->scx.slice = slice;
6422 else
6423 p->scx.slice = p->scx.slice ?: 1;
6424
6425 scx_dsq_insert_commit(p, dsq_id, enq_flags);
6426 }
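
/*
 * Illustrative usage (not part of this file): a simple FIFO scheduler can
 * funnel every task into one shared DSQ from ops.enqueue() and service it
 * from ops.dispatch(). EXAMPLE_DSQ is a hypothetical DSQ ID created with
 * scx_bpf_create_dsq() in ops.init():
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, EXAMPLE_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 */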
6427
6428 /* for backward compatibility, will be removed in v6.15 */
6429 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6430 u64 enq_flags)
6431 {
6432 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6433 scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6434 }
6435
6436 /**
6437 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6438 * @p: task_struct to insert
6439 * @dsq_id: DSQ to insert into
6440 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6441 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6442 * @enq_flags: SCX_ENQ_*
6443 *
6444 * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6445 * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6446 * are identical to scx_bpf_dsq_insert().
6447 *
6448 * @vtime ordering is according to time_before64() which considers wrapping. A
6449 * numerically larger vtime may indicate an earlier position in the ordering and
6450 * vice-versa.
6451 *
6452 * A DSQ can only be used as a FIFO or priority queue at any given time and this
6453 * function must not be called on a DSQ which already has one or more FIFO tasks
6454 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6455 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6456 */
6457 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6458 u64 slice, u64 vtime, u64 enq_flags)
6459 {
6460 if (!scx_dsq_insert_preamble(p, enq_flags))
6461 return;
6462
6463 if (slice)
6464 p->scx.slice = slice;
6465 else
6466 p->scx.slice = p->scx.slice ?: 1;
6467
6468 p->scx.dsq_vtime = vtime;
6469
6470 scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6471 }
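
/*
 * Illustrative usage (not part of this file): a weighted vtime scheduler can
 * keep its shared DSQ ordered by each task's virtual time. A sketch, reusing
 * the hypothetical EXAMPLE_DSQ from above:
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert_vtime(p, EXAMPLE_DSQ, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 */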
6472
6473 /* for backward compatibility, will be removed in v6.15 */
6474 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6475 u64 slice, u64 vtime, u64 enq_flags)
6476 {
6477 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6478 scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6479 }
6480
6481 __bpf_kfunc_end_defs();
6482
6483 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6484 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6485 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6486 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6487 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6488 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6489
6490 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6491 .owner = THIS_MODULE,
6492 .set = &scx_kfunc_ids_enqueue_dispatch,
6493 };
6494
6495 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6496 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6497 {
6498 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6499 struct rq *this_rq, *src_rq, *locked_rq;
6500 bool dispatched = false;
6501 bool in_balance;
6502 unsigned long flags;
6503
6504 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6505 return false;
6506
6507 /*
6508 * Can be called from either ops.dispatch() locking this_rq() or any
6509 * context where no rq lock is held. If the latter, lock @p's task_rq, which
6510 * we'll likely need anyway.
6511 */
6512 src_rq = task_rq(p);
6513
6514 local_irq_save(flags);
6515 this_rq = this_rq();
6516 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6517
6518 if (in_balance) {
6519 if (this_rq != src_rq) {
6520 raw_spin_rq_unlock(this_rq);
6521 raw_spin_rq_lock(src_rq);
6522 }
6523 } else {
6524 raw_spin_rq_lock(src_rq);
6525 }
6526
6527 /*
6528 * If the BPF scheduler keeps calling this function repeatedly, it can
6529 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
6530 * breather if necessary.
6531 */
6532 scx_ops_breather(src_rq);
6533
6534 locked_rq = src_rq;
6535 raw_spin_lock(&src_dsq->lock);
6536
6537 /*
6538 * Did someone else get to it? @p could have already left $src_dsq, got
6539 * re-enqueued, or be in the process of being consumed by someone else.
6540 */
6541 if (unlikely(p->scx.dsq != src_dsq ||
6542 u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6543 p->scx.holding_cpu >= 0) ||
6544 WARN_ON_ONCE(src_rq != task_rq(p))) {
6545 raw_spin_unlock(&src_dsq->lock);
6546 goto out;
6547 }
6548
6549 /* @p is still on $src_dsq and stable, determine the destination */
6550 dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6551
6552 /*
6553 * Apply vtime and slice updates before moving so that the new time is
6554 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6555 * this is safe as we're locking it.
6556 */
6557 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6558 p->scx.dsq_vtime = kit->vtime;
6559 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6560 p->scx.slice = kit->slice;
6561
6562 /* execute move */
6563 locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
6564 dispatched = true;
6565 out:
6566 if (in_balance) {
6567 if (this_rq != locked_rq) {
6568 raw_spin_rq_unlock(locked_rq);
6569 raw_spin_rq_lock(this_rq);
6570 }
6571 } else {
6572 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6573 }
6574
6575 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6576 __SCX_DSQ_ITER_HAS_VTIME);
6577 return dispatched;
6578 }
6579
6580 __bpf_kfunc_start_defs();
6581
6582 /**
6583 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6584 *
6585 * Can only be called from ops.dispatch().
6586 */
6587 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6588 {
6589 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6590 return 0;
6591
6592 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6593 }
6594
6595 /**
6596 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6597 *
6598 * Cancel the latest dispatch. Can be called multiple times to cancel further
6599 * dispatches. Can only be called from ops.dispatch().
6600 */
6601 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6602 {
6603 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6604
6605 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6606 return;
6607
6608 if (dspc->cursor > 0)
6609 dspc->cursor--;
6610 else
6611 scx_ops_error("dispatch buffer underflow");
6612 }
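
/*
 * Illustrative usage (not part of this file): ops.dispatch() can keep
 * inserting while slots remain and back out the most recent insertion if it
 * turns out to be unsuitable. example_pick_task() and example_should_undo()
 * are hypothetical helpers:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		while (scx_bpf_dispatch_nr_slots()) {
 *			struct task_struct *p = example_pick_task();
 *
 *			if (!p)
 *				break;
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			if (example_should_undo(p))
 *				scx_bpf_dispatch_cancel();
 *		}
 *	}
 */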
6613
6614 /**
6615 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6616 * @dsq_id: DSQ to move task from
6617 *
6618 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6619 * local DSQ for execution. Can only be called from ops.dispatch().
6620 *
6621 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6622 * before trying to move from the specified DSQ. It may also grab rq locks and
6623 * thus can't be called under any BPF locks.
6624 *
6625 * Returns %true if a task has been moved, %false if there isn't any task to
6626 * move.
6627 */
6628 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6629 {
6630 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6631 struct scx_dispatch_q *dsq;
6632
6633 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6634 return false;
6635
6636 flush_dispatch_buf(dspc->rq);
6637
6638 dsq = find_user_dsq(dsq_id);
6639 if (unlikely(!dsq)) {
6640 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6641 return false;
6642 }
6643
6644 if (consume_dispatch_q(dspc->rq, dsq)) {
6645 /*
6646 * A successfully consumed task can be dequeued before it starts
6647 * running while the CPU is trying to migrate other dispatched
6648 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6649 * local DSQ.
6650 */
6651 dspc->nr_tasks++;
6652 return true;
6653 } else {
6654 return false;
6655 }
6656 }
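
/*
 * Illustrative usage (not part of this file): the common ops.dispatch()
 * pattern is to refill the local DSQ from a shared DSQ, reusing the
 * hypothetical EXAMPLE_DSQ from above:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(EXAMPLE_DSQ);
 *	}
 */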
6657
6658 /* for backward compatibility, will be removed in v6.15 */
6659 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6660 {
6661 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6662 return scx_bpf_dsq_move_to_local(dsq_id);
6663 }
6664
6665 /**
6666 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6667 * @it__iter: DSQ iterator in progress
6668 * @slice: duration the moved task can run for in nsecs
6669 *
6670 * Override the slice of the next task that will be moved from @it__iter using
6671 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6672 * slice duration is kept.
6673 */
6674 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6675 u64 slice)
6676 {
6677 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6678
6679 kit->slice = slice;
6680 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6681 }
6682
6683 /* for backward compatibility, will be removed in v6.15 */
6684 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6685 struct bpf_iter_scx_dsq *it__iter, u64 slice)
6686 {
6687 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6688 scx_bpf_dsq_move_set_slice(it__iter, slice);
6689 }
6690
6691 /**
6692 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6693 * @it__iter: DSQ iterator in progress
6694 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6695 *
6696 * Override the vtime of the next task that will be moved from @it__iter using
6697 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
6698 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6699 * override is ignored and cleared.
6700 */
6701 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6702 u64 vtime)
6703 {
6704 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6705
6706 kit->vtime = vtime;
6707 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6708 }
6709
6710 /* for backward compatibility, will be removed in v6.15 */
6711 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6712 struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6713 {
6714 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6715 scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6716 }
6717
6718 /**
6719 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6720 * @it__iter: DSQ iterator in progress
6721 * @p: task to transfer
6722 * @dsq_id: DSQ to move @p to
6723 * @enq_flags: SCX_ENQ_*
6724 *
6725 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6726 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6727 * be the destination.
6728 *
6729 * For the transfer to be successful, @p must still be on the DSQ and have been
6730 * queued before the DSQ iteration started. This function doesn't care whether
6731 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6732 * been queued before the iteration started.
6733 *
6734 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6735 *
6736 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6737 * lock (e.g. BPF timers or SYSCALL programs).
6738 *
6739 * Returns %true if @p has been consumed, %false if @p had already been consumed
6740 * or dequeued.
6741 */
6742 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6743 struct task_struct *p, u64 dsq_id,
6744 u64 enq_flags)
6745 {
6746 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6747 p, dsq_id, enq_flags);
6748 }
6749
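/*
 * Usage sketch (BPF scheduler side, not part of this file): walk a user DSQ
 * and move the first task allowed to run on @cpu to that CPU's local DSQ,
 * refreshing its slice on the way. MY_SHARED_DSQ and SLICE_NS are assumed
 * scheduler-defined values; bpf_for_each() and BPF_FOR_EACH_ITER are assumed
 * to come from the BPF/SCX helper headers.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_SHARED_DSQ, 0) {
 *		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr))
 *			continue;
 *		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SLICE_NS);
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
 *				     SCX_DSQ_LOCAL_ON | cpu, 0))
 *			break;
 *	}
 */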
6750 /* for backward compatibility, will be removed in v6.15 */
6751 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6752 struct task_struct *p, u64 dsq_id,
6753 u64 enq_flags)
6754 {
6755 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6756 return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6757 }
6758
6759 /**
6760 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6761 * @it__iter: DSQ iterator in progress
6762 * @p: task to transfer
6763 * @dsq_id: DSQ to move @p to
6764 * @enq_flags: SCX_ENQ_*
6765 *
6766 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6767 * priority queue of the DSQ specified by @dsq_id. The destination must be a
6768 * user DSQ as only user DSQs support priority queue.
6769 *
6770 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6771 * and scx_bpf_dsq_move_set_vtime() to update.
6772 *
6773 * All other aspects are identical to scx_bpf_dsq_move(). See
6774 * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6775 */
6776 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6777 struct task_struct *p, u64 dsq_id,
6778 u64 enq_flags)
6779 {
6780 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6781 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6782 }
6783
6784 /* for backward compatibility, will be removed in v6.15 */
6785 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6786 struct task_struct *p, u64 dsq_id,
6787 u64 enq_flags)
6788 {
6789 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6790 return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6791 }
6792
6793 __bpf_kfunc_end_defs();
6794
6795 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6796 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6797 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6798 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6799 BTF_ID_FLAGS(func, scx_bpf_consume)
6800 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6801 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6802 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6803 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6804 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6805 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6806 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6807 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6808 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6809
6810 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6811 .owner = THIS_MODULE,
6812 .set = &scx_kfunc_ids_dispatch,
6813 };
6814
6815 __bpf_kfunc_start_defs();
6816
6817 /**
6818 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6819 *
6820 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6821 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6822 * processed tasks. Can only be called from ops.cpu_release().
6823 */
6824 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6825 {
6826 LIST_HEAD(tasks);
6827 u32 nr_enqueued = 0;
6828 struct rq *rq;
6829 struct task_struct *p, *n;
6830
6831 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6832 return 0;
6833
6834 rq = cpu_rq(smp_processor_id());
6835 lockdep_assert_rq_held(rq);
6836
6837 /*
6838 * The BPF scheduler may choose to dispatch tasks back to
6839 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6840 * first to avoid processing the same tasks repeatedly.
6841 */
6842 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6843 scx.dsq_list.node) {
6844 /*
6845 * If @p is being migrated, @p's current CPU may not agree with
6846 * its allowed CPUs and the migration_cpu_stop is about to
6847 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6848 *
6849 * While racing sched property changes may also dequeue and
6850 * re-enqueue a migrating task while its current CPU and allowed
6851 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6852 * the current local DSQ for running tasks and thus are not
6853 * visible to the BPF scheduler.
6854 */
6855 if (p->migration_pending)
6856 continue;
6857
6858 dispatch_dequeue(rq, p);
6859 list_add_tail(&p->scx.dsq_list.node, &tasks);
6860 }
6861
6862 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6863 list_del_init(&p->scx.dsq_list.node);
6864 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6865 nr_enqueued++;
6866 }
6867
6868 return nr_enqueued;
6869 }
6870
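/*
 * Usage sketch (BPF scheduler side, not part of this file): when a CPU is
 * taken over by a higher-priority sched class, hand the tasks already sitting
 * on its local DSQ back to the BPF scheduler so they can be placed elsewhere.
 * The BPF_STRUCT_OPS() wrapper macro is assumed to come from the SCX BPF
 * helper headers.
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */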
6871 __bpf_kfunc_end_defs();
6872
6873 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6874 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6875 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6876
6877 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6878 .owner = THIS_MODULE,
6879 .set = &scx_kfunc_ids_cpu_release,
6880 };
6881
6882 __bpf_kfunc_start_defs();
6883
6884 /**
6885 * scx_bpf_create_dsq - Create a custom DSQ
6886 * @dsq_id: DSQ to create
6887 * @node: NUMA node to allocate from
6888 *
6889 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6890 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6891 */
6892 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6893 {
6894 if (unlikely(node >= (int)nr_node_ids ||
6895 (node < 0 && node != NUMA_NO_NODE)))
6896 return -EINVAL;
6897 return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
6898 }
6899
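/*
 * Usage sketch (BPF scheduler side, not part of this file): create a shared
 * DSQ from the sleepable ops.init() callback. MY_SHARED_DSQ is an assumed
 * scheduler-defined ID, -1 means any NUMA node, and BPF_STRUCT_OPS_SLEEPABLE()
 * is assumed to come from the SCX BPF helper headers.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(MY_SHARED_DSQ, -1);
 *	}
 */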
6900 __bpf_kfunc_end_defs();
6901
6902 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6903 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6904 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6905 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6906 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6907 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6908 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6909 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6910 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6911 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6912 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6913
6914 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6915 .owner = THIS_MODULE,
6916 .set = &scx_kfunc_ids_unlocked,
6917 };
6918
6919 __bpf_kfunc_start_defs();
6920
6921 /**
6922 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6923 * @cpu: cpu to kick
6924 * @flags: %SCX_KICK_* flags
6925 *
6926 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6927 * trigger rescheduling on a busy CPU. This can be called from any online
6928 * scx_ops operation and the actual kicking is performed asynchronously through
6929 * an irq work.
6930 */
6931 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6932 {
6933 struct rq *this_rq;
6934 unsigned long irq_flags;
6935
6936 if (!ops_cpu_valid(cpu, NULL))
6937 return;
6938
6939 local_irq_save(irq_flags);
6940
6941 this_rq = this_rq();
6942
6943 /*
6944 * While bypassing for PM ops, IRQ handling may not be online which can
6945 * lead to irq_work_queue() malfunction such as infinite busy wait for
6946 * IRQ status update. Suppress kicking.
6947 */
6948 if (scx_rq_bypassing(this_rq))
6949 goto out;
6950
6951 /*
6952 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6953 * rq locks. We can probably be smarter and avoid bouncing if called
6954 * from ops which don't hold a rq lock.
6955 */
6956 if (flags & SCX_KICK_IDLE) {
6957 struct rq *target_rq = cpu_rq(cpu);
6958
6959 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6960 scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6961
6962 if (raw_spin_rq_trylock(target_rq)) {
6963 if (can_skip_idle_kick(target_rq)) {
6964 raw_spin_rq_unlock(target_rq);
6965 goto out;
6966 }
6967 raw_spin_rq_unlock(target_rq);
6968 }
6969 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6970 } else {
6971 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6972
6973 if (flags & SCX_KICK_PREEMPT)
6974 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6975 if (flags & SCX_KICK_WAIT)
6976 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6977 }
6978
6979 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6980 out:
6981 local_irq_restore(irq_flags);
6982 }
6983
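/*
 * Usage sketch (BPF scheduler side, not part of this file): after queueing
 * work that an idle CPU could pick up, nudge that CPU so it enters
 * ops.dispatch() soon. @cpu is assumed to have been chosen earlier, e.g. with
 * scx_bpf_pick_idle_cpu().
 *
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 *
 * SCX_KICK_IDLE only wakes the target if it is idle; SCX_KICK_PREEMPT instead
 * forces the task currently running there to be rescheduled.
 */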
6984 /**
6985 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6986 * @dsq_id: id of the DSQ
6987 *
6988 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6989 * -%ENOENT is returned.
6990 */
6991 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6992 {
6993 struct scx_dispatch_q *dsq;
6994 s32 ret;
6995
6996 preempt_disable();
6997
6998 if (dsq_id == SCX_DSQ_LOCAL) {
6999 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
7000 goto out;
7001 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
7002 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
7003
7004 if (ops_cpu_valid(cpu, NULL)) {
7005 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
7006 goto out;
7007 }
7008 } else {
7009 dsq = find_user_dsq(dsq_id);
7010 if (dsq) {
7011 ret = READ_ONCE(dsq->nr);
7012 goto out;
7013 }
7014 }
7015 ret = -ENOENT;
7016 out:
7017 preempt_enable();
7018 return ret;
7019 }
7020
7021 /**
7022 * scx_bpf_destroy_dsq - Destroy a custom DSQ
7023 * @dsq_id: DSQ to destroy
7024 *
7025 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
7026 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
7027 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
7028 * which doesn't exist. Can be called from any online scx_ops operation.
7029 */
7030 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
7031 {
7032 destroy_dsq(dsq_id);
7033 }
7034
7035 /**
7036 * bpf_iter_scx_dsq_new - Create a DSQ iterator
7037 * @it: iterator to initialize
7038 * @dsq_id: DSQ to iterate
7039 * @flags: %SCX_DSQ_ITER_*
7040 *
7041 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
7042 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
7043 * tasks which are already queued when this function is invoked.
7044 */
7045 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
7046 u64 flags)
7047 {
7048 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7049
7050 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
7051 sizeof(struct bpf_iter_scx_dsq));
7052 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
7053 __alignof__(struct bpf_iter_scx_dsq));
7054
7055 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
7056 return -EINVAL;
7057
7058 kit->dsq = find_user_dsq(dsq_id);
7059 if (!kit->dsq)
7060 return -ENOENT;
7061
7062 INIT_LIST_HEAD(&kit->cursor.node);
7063 kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
7064 kit->cursor.priv = READ_ONCE(kit->dsq->seq);
7065
7066 return 0;
7067 }
7068
7069 /**
7070 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
7071 * @it: iterator to progress
7072 *
7073 * Return the next task. See bpf_iter_scx_dsq_new().
7074 */
7075 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
7076 {
7077 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7078 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
7079 struct task_struct *p;
7080 unsigned long flags;
7081
7082 if (!kit->dsq)
7083 return NULL;
7084
7085 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7086
7087 if (list_empty(&kit->cursor.node))
7088 p = NULL;
7089 else
7090 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
7091
7092 /*
7093 * Only tasks which were queued before the iteration started are
7094 * visible. This bounds BPF iterations and guarantees that vtime never
7095 * jumps in the other direction while iterating.
7096 */
7097 do {
7098 p = nldsq_next_task(kit->dsq, p, rev);
7099 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
7100
7101 if (p) {
7102 if (rev)
7103 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
7104 else
7105 list_move(&kit->cursor.node, &p->scx.dsq_list.node);
7106 } else {
7107 list_del_init(&kit->cursor.node);
7108 }
7109
7110 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7111
7112 return p;
7113 }
7114
7115 /**
7116 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
7117 * @it: iterator to destroy
7118 *
7119 * Undo bpf_iter_scx_dsq_new().
7120 */
7121 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
7122 {
7123 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7124
7125 if (!kit->dsq)
7126 return;
7127
7128 if (!list_empty(&kit->cursor.node)) {
7129 unsigned long flags;
7130
7131 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7132 list_del_init(&kit->cursor.node);
7133 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7134 }
7135 kit->dsq = NULL;
7136 }
7137
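/*
 * Usage sketch (BPF scheduler side, not part of this file): the iterator is
 * normally driven through bpf_for_each(scx_dsq, ...), but it can also be used
 * by hand. This counts the tasks that were queued on MY_SHARED_DSQ (an
 * assumed scheduler-defined ID) when the iteration started.
 *
 *	struct bpf_iter_scx_dsq it;
 *	struct task_struct *p;
 *	u32 nr = 0;
 *
 *	bpf_iter_scx_dsq_new(&it, MY_SHARED_DSQ, 0);
 *	while ((p = bpf_iter_scx_dsq_next(&it)))
 *		nr++;
 *	bpf_iter_scx_dsq_destroy(&it);
 */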
7138 __bpf_kfunc_end_defs();
7139
7140 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
7141 char *fmt, unsigned long long *data, u32 data__sz)
7142 {
7143 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
7144 s32 ret;
7145
7146 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
7147 (data__sz && !data)) {
7148 scx_ops_error("invalid data=%p and data__sz=%u",
7149 (void *)data, data__sz);
7150 return -EINVAL;
7151 }
7152
7153 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
7154 if (ret < 0) {
7155 scx_ops_error("failed to read data fields (%d)", ret);
7156 return ret;
7157 }
7158
7159 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
7160 &bprintf_data);
7161 if (ret < 0) {
7162 scx_ops_error("format preparation failed (%d)", ret);
7163 return ret;
7164 }
7165
7166 ret = bstr_printf(line_buf, line_size, fmt,
7167 bprintf_data.bin_args);
7168 bpf_bprintf_cleanup(&bprintf_data);
7169 if (ret < 0) {
7170 scx_ops_error("(\"%s\", %p, %u) failed to format",
7171 fmt, data, data__sz);
7172 return ret;
7173 }
7174
7175 return ret;
7176 }
7177
7178 static s32 bstr_format(struct scx_bstr_buf *buf,
7179 char *fmt, unsigned long long *data, u32 data__sz)
7180 {
7181 return __bstr_format(buf->data, buf->line, sizeof(buf->line),
7182 fmt, data, data__sz);
7183 }
7184
7185 __bpf_kfunc_start_defs();
7186
7187 /**
7188 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
7189 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
7190 * @fmt: error message format string
7191 * @data: format string parameters packaged using ___bpf_fill() macro
7192 * @data__sz: @data len, must end in '__sz' for the verifier
7193 *
7194 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
7195 * disabling.
7196 */
7197 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
7198 unsigned long long *data, u32 data__sz)
7199 {
7200 unsigned long flags;
7201
7202 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7203 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7204 scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
7205 scx_exit_bstr_buf.line);
7206 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7207 }
7208
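/*
 * Usage sketch (BPF scheduler side, not part of this file): graceful exit is
 * normally requested through the scx_bpf_exit() convenience macro, which is
 * assumed to come from the SCX BPF helper headers and packs its varargs
 * before calling scx_bpf_exit_bstr(). MY_SHARED_DSQ and MY_ECODE_NO_DSQ are
 * assumed scheduler-defined values.
 *
 *	if (scx_bpf_create_dsq(MY_SHARED_DSQ, -1))
 *		scx_bpf_exit(MY_ECODE_NO_DSQ, "failed to create DSQ 0x%llx",
 *			     MY_SHARED_DSQ);
 */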
7209 /**
7210 * scx_bpf_error_bstr - Indicate fatal error
7211 * @fmt: error message format string
7212 * @data: format string parameters packaged using ___bpf_fill() macro
7213 * @data__sz: @data len, must end in '__sz' for the verifier
7214 *
7215 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
7216 * disabling.
7217 */
7218 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
7219 u32 data__sz)
7220 {
7221 unsigned long flags;
7222
7223 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7224 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7225 scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
7226 scx_exit_bstr_buf.line);
7227 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7228 }
7229
7230 /**
7231 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7232 * @fmt: format string
7233 * @data: format string parameters packaged using ___bpf_fill() macro
7234 * @data__sz: @data len, must end in '__sz' for the verifier
7235 *
7236 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7237 * dump_task() to generate extra debug dump specific to the BPF scheduler.
7238 *
7239 * The extra dump may be multiple lines. A single line may be split over
7240 * multiple calls. The last line is automatically terminated.
7241 */
7242 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7243 u32 data__sz)
7244 {
7245 struct scx_dump_data *dd = &scx_dump_data;
7246 struct scx_bstr_buf *buf = &dd->buf;
7247 s32 ret;
7248
7249 if (raw_smp_processor_id() != dd->cpu) {
7250 scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7251 return;
7252 }
7253
7254 /* append the formatted string to the line buf */
7255 ret = __bstr_format(buf->data, buf->line + dd->cursor,
7256 sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7257 if (ret < 0) {
7258 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7259 dd->prefix, fmt, data, data__sz, ret);
7260 return;
7261 }
7262
7263 dd->cursor += ret;
7264 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7265
7266 if (!dd->cursor)
7267 return;
7268
7269 /*
7270 * If the line buf overflowed or ends in a newline, flush it into the
7271 * dump. This is to allow the caller to generate a single line over
7272 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7273 * the line buf, the only case which can lead to an unexpected
7274 * truncation is when the caller keeps generating newlines in the middle of a
7275 * line rather than at the end. Don't do that.
7276 */
7277 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7278 ops_dump_flush();
7279 }
7280
7281 /**
7282 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7283 * @cpu: CPU of interest
7284 *
7285 * Return the maximum relative capacity of @cpu in relation to the most
7286 * performant CPU in the system. The return value is in the range [1,
7287 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7288 */
7289 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7290 {
7291 if (ops_cpu_valid(cpu, NULL))
7292 return arch_scale_cpu_capacity(cpu);
7293 else
7294 return SCX_CPUPERF_ONE;
7295 }
7296
7297 /**
7298 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7299 * @cpu: CPU of interest
7300 *
7301 * Return the current relative performance of @cpu in relation to its maximum.
7302 * The return value is in the range [1, %SCX_CPUPERF_ONE].
7303 *
7304 * The current performance level of a CPU in relation to the maximum performance
7305 * available in the system can be calculated as follows:
7306 *
7307 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7308 *
7309 * The result is in the range [1, %SCX_CPUPERF_ONE].
7310 */
7311 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7312 {
7313 if (ops_cpu_valid(cpu, NULL))
7314 return arch_scale_freq_capacity(cpu);
7315 else
7316 return SCX_CPUPERF_ONE;
7317 }
7318
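/*
 * Usage sketch (BPF scheduler side, not part of this file): combine the two
 * queries above to derive @cpu's current performance relative to the fastest
 * CPU in the system (sys_rel, the formula described above), and request full
 * speed when the CPU is running below half of its own maximum.
 *
 *	u64 cap = scx_bpf_cpuperf_cap(cpu);
 *	u64 cur = scx_bpf_cpuperf_cur(cpu);
 *	u64 sys_rel = cap * cur / SCX_CPUPERF_ONE;
 *
 *	if (cur < SCX_CPUPERF_ONE / 2)
 *		scx_bpf_cpuperf_set(cpu, SCX_CPUPERF_ONE);
 */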
7319 /**
7320 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7321 * @cpu: CPU of interest
7322 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7324 *
7325 * Set the target performance level of @cpu to @perf. @perf is in linear
7326 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7327 * schedutil cpufreq governor chooses the target frequency.
7328 *
7329 * The actual performance level chosen, CPU grouping, and the overhead and
7330 * latency of the operations are dependent on the hardware and cpufreq driver in
7331 * use. Consult hardware and cpufreq documentation for more information. The
7332 * current performance level can be monitored using scx_bpf_cpuperf_cur().
7333 */
7334 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7335 {
7336 if (unlikely(perf > SCX_CPUPERF_ONE)) {
7337 scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7338 return;
7339 }
7340
7341 if (ops_cpu_valid(cpu, NULL)) {
7342 struct rq *rq = cpu_rq(cpu);
7343
7344 rq->scx.cpuperf_target = perf;
7345
7346 rcu_read_lock_sched_notrace();
7347 cpufreq_update_util(cpu_rq(cpu), 0);
7348 rcu_read_unlock_sched_notrace();
7349 }
7350 }
7351
7352 /**
7353 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7354 *
7355 * All valid CPU IDs in the system are smaller than the returned value.
7356 */
7357 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7358 {
7359 return nr_cpu_ids;
7360 }
7361
7362 /**
7363 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7364 */
7365 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7366 {
7367 return cpu_possible_mask;
7368 }
7369
7370 /**
7371 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7372 */
7373 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7374 {
7375 return cpu_online_mask;
7376 }
7377
7378 /**
7379 * scx_bpf_put_cpumask - Release a possible/online cpumask
7380 * @cpumask: cpumask to release
7381 */
7382 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7383 {
7384 /*
7385 * Empty function body because we aren't actually acquiring or releasing
7386 * a reference to a global cpumask, which is read-only in the caller and
7387 * is never released. The acquire / release semantics here are just used
7388 * to make the cpumask a trusted pointer in the caller.
7389 */
7390 }
7391
7392 /**
7393 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
7394 * per-CPU cpumask.
7395 *
7396 * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7397 */
7398 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
7399 {
7400 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7401 scx_ops_error("built-in idle tracking is disabled");
7402 return cpu_none_mask;
7403 }
7404
7405 #ifdef CONFIG_SMP
7406 return idle_masks.cpu;
7407 #else
7408 return cpu_none_mask;
7409 #endif
7410 }
7411
7412 /**
7413 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
7414 * per-physical-core cpumask. Can be used to determine if an entire physical
7415 * core is free.
7416 *
7417 * Returns an empty mask if idle tracking is not enabled, or running on a UP kernel.
7418 */
7419 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
7420 {
7421 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7422 scx_ops_error("built-in idle tracking is disabled");
7423 return cpu_none_mask;
7424 }
7425
7426 #ifdef CONFIG_SMP
7427 if (sched_smt_active())
7428 return idle_masks.smt;
7429 else
7430 return idle_masks.cpu;
7431 #else
7432 return cpu_none_mask;
7433 #endif
7434 }
7435
7436 /**
7437 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
7438 * either the percpu, or SMT idle-tracking cpumask.
7439 */
7440 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
7441 {
7442 /*
7443 * Empty function body because we aren't actually acquiring or releasing
7444 * a reference to a global idle cpumask, which is read-only in the
7445 * caller and is never released. The acquire / release semantics here
7446 * are just used to make the cpumask a trusted pointer in the caller.
7447 */
7448 }
7449
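/*
 * Usage sketch (BPF scheduler side, not part of this file): peek at the SMT
 * idle mask to find a fully idle core for the waking task @p, then claim one
 * of its CPUs. bpf_cpumask_first_and() is the generic BPF cpumask kfunc.
 *
 *	const struct cpumask *idle = scx_bpf_get_idle_smtmask();
 *	s32 cpu = bpf_cpumask_first_and(p->cpus_ptr, idle);
 *
 *	scx_bpf_put_idle_cpumask(idle);
 *	if (cpu < scx_bpf_nr_cpu_ids() &&
 *	    scx_bpf_test_and_clear_cpu_idle(cpu))
 *		return cpu;
 */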
7450 /**
7451 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
7452 * @cpu: cpu to test and clear idle for
7453 *
7454 * Returns %true if @cpu was idle and its idle state was successfully cleared.
7455 * %false otherwise.
7456 *
7457 * Unavailable if ops.update_idle() is implemented and
7458 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7459 */
7460 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
7461 {
7462 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7463 scx_ops_error("built-in idle tracking is disabled");
7464 return false;
7465 }
7466
7467 if (ops_cpu_valid(cpu, NULL))
7468 return test_and_clear_cpu_idle(cpu);
7469 else
7470 return false;
7471 }
7472
7473 /**
7474 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
7475 * @cpus_allowed: Allowed cpumask
7476 * @flags: %SCX_PICK_IDLE_CPU_* flags
7477 *
7478 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
7479 * number on success. -%EBUSY if no matching cpu was found.
7480 *
7481 * Idle CPU tracking may race against CPU scheduling state transitions. For
7482 * example, this function may return -%EBUSY as CPUs are transitioning into the
7483 * idle state. If the caller then assumes that there will be dispatch events on
7484 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
7485 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
7486 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
7487 * event in the near future.
7488 *
7489 * Unavailable if ops.update_idle() is implemented and
7490 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7491 */
7492 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
7493 u64 flags)
7494 {
7495 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7496 scx_ops_error("built-in idle tracking is disabled");
7497 return -EBUSY;
7498 }
7499
7500 return scx_pick_idle_cpu(cpus_allowed, flags);
7501 }
7502
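/*
 * Usage sketch (BPF scheduler side, not part of this file): an
 * ops.select_cpu() that tries to claim an idle CPU and, on success, directly
 * dispatches the task to the chosen CPU's local DSQ; otherwise it keeps the
 * previous CPU. The BPF_STRUCT_OPS() wrapper macro is assumed to come from
 * the SCX BPF helper headers.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *		if (cpu >= 0) {
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *		return prev_cpu;
 *	}
 */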
7503 /**
7504 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
7505 * @cpus_allowed: Allowed cpumask
7506 * @flags: %SCX_PICK_IDLE_CPU_* flags
7507 *
7508 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
7509 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
7510 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
7511 * empty.
7512 *
7513 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
7514 * set, this function can't tell which CPUs are idle and will always pick any
7515 * CPU.
7516 */
7517 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
7518 u64 flags)
7519 {
7520 s32 cpu;
7521
7522 if (static_branch_likely(&scx_builtin_idle_enabled)) {
7523 cpu = scx_pick_idle_cpu(cpus_allowed, flags);
7524 if (cpu >= 0)
7525 return cpu;
7526 }
7527
7528 cpu = cpumask_any_distribute(cpus_allowed);
7529 if (cpu < nr_cpu_ids)
7530 return cpu;
7531 else
7532 return -EBUSY;
7533 }
7534
7535 /**
7536 * scx_bpf_task_running - Is task currently running?
7537 * @p: task of interest
7538 */
7539 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7540 {
7541 return task_rq(p)->curr == p;
7542 }
7543
7544 /**
7545 * scx_bpf_task_cpu - CPU a task is currently associated with
7546 * @p: task of interest
7547 */
7548 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7549 {
7550 return task_cpu(p);
7551 }
7552
7553 /**
7554 * scx_bpf_cpu_rq - Fetch the rq of a CPU
7555 * @cpu: CPU of the rq
7556 */
7557 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7558 {
7559 if (!ops_cpu_valid(cpu, NULL))
7560 return NULL;
7561
7562 return cpu_rq(cpu);
7563 }
7564
7565 /**
7566 * scx_bpf_task_cgroup - Return the sched cgroup of a task
7567 * @p: task of interest
7568 *
7569 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7570 * from the scheduler's POV. SCX operations should use this function to
7571 * determine @p's current cgroup as, unlike following @p->cgroups,
7572 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7573 * rq-locked operations. Can be called on the parameter tasks of rq-locked
7574 * operations. The restriction guarantees that @p's rq is locked by the caller.
7575 */
7576 #ifdef CONFIG_CGROUP_SCHED
7577 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7578 {
7579 struct task_group *tg = p->sched_task_group;
7580 struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7581
7582 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7583 goto out;
7584
7585 cgrp = tg_cgrp(tg);
7586
7587 out:
7588 cgroup_get(cgrp);
7589 return cgrp;
7590 }
7591 #endif
7592
7593 __bpf_kfunc_end_defs();
7594
7595 BTF_KFUNCS_START(scx_kfunc_ids_any)
7596 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7597 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7598 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7599 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7600 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7601 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7602 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7603 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7604 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7605 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7606 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7607 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7608 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7609 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7610 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7611 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7612 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7613 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7614 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7615 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7616 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7617 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7618 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7619 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7620 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7621 #ifdef CONFIG_CGROUP_SCHED
7622 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7623 #endif
7624 BTF_KFUNCS_END(scx_kfunc_ids_any)
7625
7626 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7627 .owner = THIS_MODULE,
7628 .set = &scx_kfunc_ids_any,
7629 };
7630
7631 static int __init scx_init(void)
7632 {
7633 int ret;
7634
7635 /*
7636 * kfunc registration can't be done from init_sched_ext_class() as
7637 * register_btf_kfunc_id_set() needs most of the system to be up.
7638 *
7639 * Some kfuncs are context-sensitive and can only be called from
7640 * specific SCX ops. They are grouped into BTF sets accordingly.
7641 * Unfortunately, BPF currently doesn't have a way of enforcing such
7642 * restrictions. Eventually, the verifier should be able to enforce
7643 * them. For now, register them the same and make each kfunc explicitly
7644 * check using scx_kf_allowed().
7645 */
7646 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7647 &scx_kfunc_set_select_cpu)) ||
7648 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7649 &scx_kfunc_set_enqueue_dispatch)) ||
7650 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7651 &scx_kfunc_set_dispatch)) ||
7652 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7653 &scx_kfunc_set_cpu_release)) ||
7654 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7655 &scx_kfunc_set_unlocked)) ||
7656 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7657 &scx_kfunc_set_unlocked)) ||
7658 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7659 &scx_kfunc_set_any)) ||
7660 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7661 &scx_kfunc_set_any)) ||
7662 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7663 &scx_kfunc_set_any))) {
7664 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7665 return ret;
7666 }
7667
7668 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7669 if (ret) {
7670 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7671 return ret;
7672 }
7673
7674 ret = register_pm_notifier(&scx_pm_notifier);
7675 if (ret) {
7676 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7677 return ret;
7678 }
7679
7680 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7681 if (!scx_kset) {
7682 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7683 return -ENOMEM;
7684 }
7685
7686 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7687 if (ret < 0) {
7688 pr_err("sched_ext: Failed to add global attributes\n");
7689 return ret;
7690 }
7691
7692 return 0;
7693 }
7694 __initcall(scx_init);
7695