1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
10
11 enum scx_consts {
12 SCX_SLICE_BYPASS = SCX_SLICE_DFL / 4,
13 SCX_DSP_DFL_MAX_BATCH = 32,
14 SCX_DSP_MAX_LOOPS = 32,
15 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
16
17 SCX_EXIT_BT_LEN = 64,
18 SCX_EXIT_MSG_LEN = 1024,
19 SCX_EXIT_DUMP_DFL_LEN = 32768,
20
21 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
22 };
23
24 enum scx_exit_kind {
25 SCX_EXIT_NONE,
26 SCX_EXIT_DONE,
27
28 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
29 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
30 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
31 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
32
33 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
34 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
35 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
36 };
37
38 /*
39 * An exit code can be specified when exiting with scx_bpf_exit() or
40 * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
41 * respectively. The codes are 64bit of the format:
42 *
43 *   Bits: [63  ..  48 47   ..  32 31 .. 0]
44 *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
45 *
46 * SYS ACT: System-defined exit actions
47 * SYS RSN: System-defined exit reasons
48 * USR : User-defined exit codes and reasons
49 *
50 * Using the above, users may communicate intention and context by ORing system
51 * actions and/or system reasons with a user-defined exit code.
52 */
53 enum scx_exit_code {
54 /* Reasons */
55 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
56
57 /* Actions */
58 SCX_ECODE_ACT_RESTART = 1LLU << 48,
59 };
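/*
 * Illustrative sketch (not a definition from this file): a BPF scheduler
 * that wants to be restarted after a hotplug event could exit as below.
 * USR_HOTPLUG is a hypothetical user-defined code; the BPF-side
 * scx_bpf_exit() helper takes the exit code followed by a format string.
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG |
 *		     USR_HOTPLUG, "CPU %d went offline", cpu);
 */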
60
61 /*
62 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
63 * being disabled.
64 */
65 struct scx_exit_info {
66 /* %SCX_EXIT_* - broad category of the exit reason */
67 enum scx_exit_kind kind;
68
69 /* exit code if gracefully exiting */
70 s64 exit_code;
71
72 /* textual representation of the above */
73 const char *reason;
74
75 /* backtrace if exiting due to an error */
76 unsigned long *bt;
77 u32 bt_len;
78
79 /* informational message */
80 char *msg;
81
82 /* debug dump */
83 char *dump;
84 };
85
86 /* sched_ext_ops.flags */
87 enum scx_ops_flags {
88 /*
89 * Keep built-in idle tracking even if ops.update_idle() is implemented.
90 */
91 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
92
93 /*
94 * By default, if there are no other tasks to run on the CPU, the ext core
95 * keeps running the current task even after its slice expires. If this
96 * flag is specified, such tasks are passed to ops.enqueue() with
97 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
98 */
99 SCX_OPS_ENQ_LAST = 1LLU << 1,
100
101 /*
102 * An exiting task may schedule after PF_EXITING is set. In such cases,
103 * bpf_task_from_pid() may not be able to find the task and if the BPF
104 * scheduler depends on pid lookup for dispatching, the task will be
105 * lost leading to various issues including RCU grace period stalls.
106 *
107 * To mask this problem, by default, unhashed tasks are automatically
108 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
109 * depend on pid lookups and wants to handle these tasks directly, the
110 * following flag can be used.
111 */
112 SCX_OPS_ENQ_EXITING = 1LLU << 2,
113
114 /*
115 * If set, only tasks with policy set to SCHED_EXT are attached to
116 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
117 */
118 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
119
120 /*
121 * CPU cgroup support flags
122 */
123 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
124
125 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
126 SCX_OPS_ENQ_LAST |
127 SCX_OPS_ENQ_EXITING |
128 SCX_OPS_SWITCH_PARTIAL |
129 SCX_OPS_HAS_CGROUP_WEIGHT,
130 };
131
132 /* argument container for ops.init_task() */
133 struct scx_init_task_args {
134 /*
135 * Set if ops.init_task() is being invoked on the fork path, as opposed
136 * to the scheduler transition path.
137 */
138 bool fork;
139 #ifdef CONFIG_EXT_GROUP_SCHED
140 /* the cgroup the task is joining */
141 struct cgroup *cgroup;
142 #endif
143 };
144
145 /* argument container for ops.exit_task() */
146 struct scx_exit_task_args {
147 /* Whether the task exited before running on sched_ext. */
148 bool cancelled;
149 };
150
151 /* argument container for ops->cgroup_init() */
152 struct scx_cgroup_init_args {
153 /* the weight of the cgroup [1..10000] */
154 u32 weight;
155 };
156
157 enum scx_cpu_preempt_reason {
158 /* next task is being scheduled by &sched_class_rt */
159 SCX_CPU_PREEMPT_RT,
160 /* next task is being scheduled by &sched_class_dl */
161 SCX_CPU_PREEMPT_DL,
162 /* next task is being scheduled by &sched_class_stop */
163 SCX_CPU_PREEMPT_STOP,
164 /* unknown reason for SCX being preempted */
165 SCX_CPU_PREEMPT_UNKNOWN,
166 };
167
168 /*
169 * Argument container for ops->cpu_acquire(). Currently empty, but may be
170 * expanded in the future.
171 */
172 struct scx_cpu_acquire_args {};
173
174 /* argument container for ops->cpu_release() */
175 struct scx_cpu_release_args {
176 /* the reason the CPU was preempted */
177 enum scx_cpu_preempt_reason reason;
178
179 /* the task that's going to be scheduled on the CPU */
180 struct task_struct *task;
181 };
182
183 /*
184 * Informational context provided to dump operations.
185 */
186 struct scx_dump_ctx {
187 enum scx_exit_kind kind;
188 s64 exit_code;
189 const char *reason;
190 u64 at_ns;
191 u64 at_jiffies;
192 };
193
194 /**
195 * struct sched_ext_ops - Operation table for BPF scheduler implementation
196 *
197 * A BPF scheduler can implement an arbitrary scheduling policy by
198 * implementing and loading operations in this table.
199 */
200 struct sched_ext_ops {
201 /**
202 * select_cpu - Pick the target CPU for a task which is being woken up
203 * @p: task being woken up
204 * @prev_cpu: the cpu @p was on before sleeping
205 * @wake_flags: SCX_WAKE_*
206 *
207 * The decision made here isn't final. @p may be moved to any CPU while it
208 * is getting dispatched for execution later. However, as @p is not on
209 * the rq at this point, getting the eventual execution CPU right here
210 * saves a small bit of overhead down the line.
211 *
212 * If an idle CPU is returned, the CPU is kicked and will try to
213 * dispatch. While an explicit custom mechanism can be added,
214 * select_cpu() serves as the default way to wake up idle CPUs.
215 *
216 * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
217 * is dispatched, the ops.enqueue() callback will be skipped. Finally,
218 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
219 * local DSQ of whatever CPU is returned by this callback.
220 */
221 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
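	/*
	 * A minimal ops.select_cpu() sketch, assuming the built-in idle
	 * tracking is active and following the conventions of the in-tree
	 * BPF examples (BPF_STRUCT_OPS and the kfunc signatures come from
	 * the BPF-side headers):
	 *
	 *	s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *					     &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL,
	 *					 SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */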
222
223 /**
224 * enqueue - Enqueue a task on the BPF scheduler
225 * @p: task being enqueued
226 * @enq_flags: %SCX_ENQ_*
227 *
228 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
229 * or enqueue on the BPF scheduler. If not directly dispatched, the BPF
230 * scheduler owns @p and if it fails to dispatch @p, the task will
231 * stall.
232 *
233 * If @p was dispatched from ops.select_cpu(), this callback is
234 * skipped.
235 */
236 void (*enqueue)(struct task_struct *p, u64 enq_flags);
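	/*
	 * Sketch of a global-FIFO ops.enqueue() (a hypothetical minimal
	 * scheduler which queues every task on the shared built-in DSQ):
	 *
	 *	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p,
	 *			    u64 enq_flags)
	 *	{
	 *		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL,
	 *				 enq_flags);
	 *	}
	 */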
237
238 /**
239 * dequeue - Remove a task from the BPF scheduler
240 * @p: task being dequeued
241 * @deq_flags: %SCX_DEQ_*
242 *
243 * Remove @p from the BPF scheduler. This is usually called to isolate
244 * the task while updating its scheduling properties (e.g. priority).
245 *
246 * The ext core keeps track of whether the BPF side owns a given task or
247 * not and can gracefully ignore spurious dispatches from BPF side,
248 * which makes it safe to not implement this method. However, depending
249 * on the scheduling logic, this can lead to confusing behaviors - e.g.
250 * scheduling position not being updated across a priority change.
251 */
252 void (*dequeue)(struct task_struct *p, u64 deq_flags);
253
254 /**
255 * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs
256 * @cpu: CPU to dispatch tasks for
257 * @prev: previous task being switched out
258 *
259 * Called when a CPU's local dsq is empty. The operation should dispatch
260 * one or more tasks from the BPF scheduler into the DSQs using
261 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
262 * scx_bpf_consume().
263 *
264 * The maximum number of times scx_bpf_dispatch() can be called without
265 * an intervening scx_bpf_consume() is specified by
266 * ops.dispatch_max_batch. See the comments on top of the two functions
267 * for more details.
268 *
269 * When not %NULL, @prev is an SCX task with its slice depleted. If
270 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
271 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
272 * ops.dispatch() returns. To keep executing @prev, return without
273 * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST.
274 */
275 void (*dispatch)(s32 cpu, struct task_struct *prev);
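	/*
	 * Sketch of an ops.dispatch() for a scheduler that queues all tasks
	 * on a single custom DSQ (MY_DSQ_ID is hypothetical): refilling the
	 * local DSQ is just consuming from the custom DSQ.
	 *
	 *	void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		scx_bpf_consume(MY_DSQ_ID);
	 *	}
	 */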
276
277 /**
278 * tick - Periodic tick
279 * @p: task running currently
280 *
281 * This operation is called every 1/HZ seconds on CPUs which are
282 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
283 * immediate dispatch cycle on the CPU.
284 */
285 void (*tick)(struct task_struct *p);
286
287 /**
288 * runnable - A task is becoming runnable on its associated CPU
289 * @p: task becoming runnable
290 * @enq_flags: %SCX_ENQ_*
291 *
292 * This and the following three functions can be used to track a task's
293 * execution state transitions. A task becomes ->runnable() on a CPU,
294 * and then goes through one or more ->running() and ->stopping() pairs
295 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
296 * done running on the CPU.
297 *
298 * @p is becoming runnable on the CPU because it's
299 *
300 * - waking up (%SCX_ENQ_WAKEUP)
301 * - being moved from another CPU
302 * - being restored after being temporarily taken off the queue for an
303 * attribute change.
304 *
305 * This and ->enqueue() are related but not coupled. This operation
306 * notifies @p's state transition and may not be followed by ->enqueue()
307 * e.g. when @p is being dispatched to a remote CPU, or when @p is
308 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
309 * task may be ->enqueue()'d without being preceded by this operation
310 * e.g. after exhausting its slice.
311 */
312 void (*runnable)(struct task_struct *p, u64 enq_flags);
313
314 /**
315 * running - A task is starting to run on its associated CPU
316 * @p: task starting to run
317 *
318 * See ->runnable() for explanation on the task state notifiers.
319 */
320 void (*running)(struct task_struct *p);
321
322 /**
323 * stopping - A task is stopping execution
324 * @p: task whose execution is stopping
325 * @runnable: is task @p still runnable?
326 *
327 * See ->runnable() for explanation on the task state notifiers. If
328 * !@runnable, ->quiescent() will be invoked after this operation
329 * returns.
330 */
331 void (*stopping)(struct task_struct *p, bool runnable);
332
333 /**
334 * quiescent - A task is becoming not runnable on its associated CPU
335 * @p: task becoming not runnable
336 * @deq_flags: %SCX_DEQ_*
337 *
338 * See ->runnable() for explanation on the task state notifiers.
339 *
340 * @p is becoming quiescent on the CPU because it's
341 *
342 * - sleeping (%SCX_DEQ_SLEEP)
343 * - being moved to another CPU
344 * - being temporarily taken off the queue for an attribute change
345 * (%SCX_DEQ_SAVE)
346 *
347 * This and ->dequeue() are related but not coupled. This operation
348 * notifies @p's state transition and may not be preceded by ->dequeue()
349 * e.g. when @p is being dispatched to a remote CPU.
350 */
351 void (*quiescent)(struct task_struct *p, u64 deq_flags);
352
353 /**
354 * yield - Yield CPU
355 * @from: yielding task
356 * @to: optional yield target task
357 *
358 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
359 * The BPF scheduler should ensure that other available tasks are
360 * dispatched before the yielding task. Return value is ignored in this
361 * case.
362 *
363 * If @to is non-NULL, @from wants to yield the CPU to @to. If the BPF
364 * scheduler can implement the request, return %true; otherwise, %false.
365 */
366 bool (*yield)(struct task_struct *from, struct task_struct *to);
367
368 /**
369 * core_sched_before - Task ordering for core-sched
370 * @a: task A
371 * @b: task B
372 *
373 * Used by core-sched to determine the ordering between two tasks. See
374 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
375 * core-sched.
376 *
377 * Both @a and @b are runnable and may or may not currently be queued on
378 * the BPF scheduler. Should return %true if @a should run before @b.
379 * %false if there's no required ordering or @b should run before @a.
380 *
381 * If not specified, the default is ordering them according to when they
382 * became runnable.
383 */
384 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
385
386 /**
387 * set_weight - Set task weight
388 * @p: task to set weight for
389 * @weight: new weight [1..10000]
390 *
391 * Update @p's weight to @weight.
392 */
393 void (*set_weight)(struct task_struct *p, u32 weight);
394
395 /**
396 * set_cpumask - Set CPU affinity
397 * @p: task to set CPU affinity for
398 * @cpumask: cpumask of cpus that @p can run on
399 *
400 * Update @p's CPU affinity to @cpumask.
401 */
402 void (*set_cpumask)(struct task_struct *p,
403 const struct cpumask *cpumask);
404
405 /**
406 * update_idle - Update the idle state of a CPU
407 * @cpu: CPU to update the idle state for
408 * @idle: whether entering or exiting the idle state
409 *
410 * This operation is called when @cpu enters or leaves the idle
411 * state. By default, implementing this operation disables the built-in
412 * idle CPU tracking and the following helpers become unavailable:
413 *
414 * - scx_bpf_select_cpu_dfl()
415 * - scx_bpf_test_and_clear_cpu_idle()
416 * - scx_bpf_pick_idle_cpu()
417 *
418 * The user also must implement ops.select_cpu() as the default
419 * implementation relies on scx_bpf_select_cpu_dfl().
420 *
421 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
422 * tracking.
423 */
424 void (*update_idle)(s32 cpu, bool idle);
425
426 /**
427 * cpu_acquire - A CPU is becoming available to the BPF scheduler
428 * @cpu: The CPU being acquired by the BPF scheduler.
429 * @args: Acquire arguments, see the struct definition.
430 *
431 * A CPU that was previously released from the BPF scheduler is now once
432 * again under its control.
433 */
434 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
435
436 /**
437 * cpu_release - A CPU is taken away from the BPF scheduler
438 * @cpu: The CPU being released by the BPF scheduler.
439 * @args: Release arguments, see the struct definition.
440 *
441 * The specified CPU is no longer under the control of the BPF
442 * scheduler. This could be because it was preempted by a higher
443 * priority sched_class, though there may be other reasons as well. The
444 * caller should consult @args->reason to determine the cause.
445 */
446 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
447
448 /**
449 * init_task - Initialize a task to run in a BPF scheduler
450 * @p: task to initialize for BPF scheduling
451 * @args: init arguments, see the struct definition
452 *
453 * Either we're loading a BPF scheduler or a new task is being forked.
454 * Initialize @p for BPF scheduling. This operation may block and can
455 * be used for allocations, and is called exactly once for a task.
456 *
457 * Return 0 for success, -errno for failure. An error return while
458 * loading will abort loading of the BPF scheduler. During a fork, it
459 * will abort that specific fork.
460 */
461 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
462
463 /**
464 * exit_task - Exit a previously-running task from the system
465 * @p: task to exit
466 *
467 * @p is exiting or the BPF scheduler is being unloaded. Perform any
468 * necessary cleanup for @p.
469 */
470 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
471
472 /**
473 * enable - Enable BPF scheduling for a task
474 * @p: task to enable BPF scheduling for
475 *
476 * Enable @p for BPF scheduling. enable() is called on @p any time it
477 * enters SCX, and is always paired with a matching disable().
478 */
479 void (*enable)(struct task_struct *p);
480
481 /**
482 * disable - Disable BPF scheduling for a task
483 * @p: task to disable BPF scheduling for
484 *
485 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
486 * Disable BPF scheduling for @p. A disable() call is always matched
487 * with a prior enable() call.
488 */
489 void (*disable)(struct task_struct *p);
490
491 /**
492 * dump - Dump BPF scheduler state on error
493 * @ctx: debug dump context
494 *
495 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
496 */
497 void (*dump)(struct scx_dump_ctx *ctx);
498
499 /**
500 * dump_cpu - Dump BPF scheduler state for a CPU on error
501 * @ctx: debug dump context
502 * @cpu: CPU to generate debug dump for
503 * @idle: @cpu is currently idle without any runnable tasks
504 *
505 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
506 * @cpu. If @idle is %true and this operation doesn't produce any
507 * output, @cpu is skipped for dump.
508 */
509 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
510
511 /**
512 * dump_task - Dump BPF scheduler state for a runnable task on error
513 * @ctx: debug dump context
514 * @p: runnable task to generate debug dump for
515 *
516 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
517 * @p.
518 */
519 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
520
521 #ifdef CONFIG_EXT_GROUP_SCHED
522 /**
523 * cgroup_init - Initialize a cgroup
524 * @cgrp: cgroup being initialized
525 * @args: init arguments, see the struct definition
526 *
527 * Either the BPF scheduler is being loaded or @cgrp is being created; initialize
528 * @cgrp for sched_ext. This operation may block.
529 *
530 * Return 0 for success, -errno for failure. An error return while
531 * loading will abort loading of the BPF scheduler. During cgroup
532 * creation, it will abort the specific cgroup creation.
533 */
534 s32 (*cgroup_init)(struct cgroup *cgrp,
535 struct scx_cgroup_init_args *args);
536
537 /**
538 * cgroup_exit - Exit a cgroup
539 * @cgrp: cgroup being exited
540 *
541 * Either the BPF scheduler is being unloaded or @cgrp is being destroyed;
542 * exit @cgrp for sched_ext. This operation may block.
543 */
544 void (*cgroup_exit)(struct cgroup *cgrp);
545
546 /**
547 * cgroup_prep_move - Prepare a task to be moved to a different cgroup
548 * @p: task being moved
549 * @from: cgroup @p is being moved from
550 * @to: cgroup @p is being moved to
551 *
552 * Prepare @p for move from cgroup @from to @to. This operation may
553 * block and can be used for allocations.
554 *
555 * Return 0 for success, -errno for failure. An error return aborts the
556 * migration.
557 */
558 s32 (*cgroup_prep_move)(struct task_struct *p,
559 struct cgroup *from, struct cgroup *to);
560
561 /**
562 * cgroup_move - Commit cgroup move
563 * @p: task being moved
564 * @from: cgroup @p is being moved from
565 * @to: cgroup @p is being moved to
566 *
567 * Commit the move. @p is dequeued during this operation.
568 */
569 void (*cgroup_move)(struct task_struct *p,
570 struct cgroup *from, struct cgroup *to);
571
572 /**
573 * cgroup_cancel_move - Cancel cgroup move
574 * @p: task whose cgroup move is being canceled
575 * @from: cgroup @p was being moved from
576 * @to: cgroup @p was being moved to
577 *
578 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
579 * Undo the preparation.
580 */
581 void (*cgroup_cancel_move)(struct task_struct *p,
582 struct cgroup *from, struct cgroup *to);
583
584 /**
585 * cgroup_set_weight - A cgroup's weight is being changed
586 * @cgrp: cgroup whose weight is being updated
587 * @weight: new weight [1..10000]
588 *
589 * Update @cgrp's weight to @weight.
590 */
591 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
592 #endif /* CONFIG_EXT_GROUP_SCHED */
593
594 /*
595 * All online ops must come before ops.cpu_online().
596 */
597
598 /**
599 * cpu_online - A CPU became online
600 * @cpu: CPU which just came up
601 *
602 * @cpu just came online. @cpu will not call ops.enqueue() or
603 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
604 */
605 void (*cpu_online)(s32 cpu);
606
607 /**
608 * cpu_offline - A CPU is going offline
609 * @cpu: CPU which is going offline
610 *
611 * @cpu is going offline. @cpu will not call ops.enqueue() or
612 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
613 */
614 void (*cpu_offline)(s32 cpu);
615
616 /*
617 * All CPU hotplug ops must come before ops.init().
618 */
619
620 /**
621 * init - Initialize the BPF scheduler
622 */
623 s32 (*init)(void);
624
625 /**
626 * exit - Clean up after the BPF scheduler
627 * @info: Exit info
628 */
629 void (*exit)(struct scx_exit_info *info);
630
631 /**
632 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
633 */
634 u32 dispatch_max_batch;
635
636 /**
637 * flags - %SCX_OPS_* flags
638 */
639 u64 flags;
640
641 /**
642 * timeout_ms - The maximum amount of time, in milliseconds, that a
643 * runnable task should be able to wait before being scheduled. The
644 * maximum timeout may not exceed the default timeout of 30 seconds.
645 *
646 * Defaults to the maximum allowed timeout value of 30 seconds.
647 */
648 u32 timeout_ms;
649
650 /**
651 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
652 * value of 32768 is used.
653 */
654 u32 exit_dump_len;
655
656 /**
657 * hotplug_seq - A sequence number that may be set by the scheduler to
658 * detect when a hotplug event has occurred during the loading process.
659 * If 0, no detection occurs. Otherwise, the scheduler will fail to
660 * load if the sequence number does not match @scx_hotplug_seq on the
661 * enable path.
662 */
663 u64 hotplug_seq;
664
665 /**
666 * name - BPF scheduler's name
667 *
668 * Must be a non-zero valid BPF object name including only isalnum(),
669 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
670 * BPF scheduler is enabled.
671 */
672 char name[SCX_OPS_NAME_LEN];
673 };
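/*
 * For reference, the BPF side fills this table and registers it as a
 * struct_ops map. A minimal sketch in the style of the in-tree examples
 * (all names are hypothetical):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops sketch_ops = {
 *		.select_cpu	= (void *)sketch_select_cpu,
 *		.enqueue	= (void *)sketch_enqueue,
 *		.dispatch	= (void *)sketch_dispatch,
 *		.exit		= (void *)sketch_exit,
 *		.name		= "sketch",
 *	};
 */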
674
675 enum scx_opi {
676 SCX_OPI_BEGIN = 0,
677 SCX_OPI_NORMAL_BEGIN = 0,
678 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
679 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
680 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
681 SCX_OPI_END = SCX_OP_IDX(init),
682 };
683
684 enum scx_wake_flags {
685 /* expose select WF_* flags as enums */
686 SCX_WAKE_FORK = WF_FORK,
687 SCX_WAKE_TTWU = WF_TTWU,
688 SCX_WAKE_SYNC = WF_SYNC,
689 };
690
691 enum scx_enq_flags {
692 /* expose select ENQUEUE_* flags as enums */
693 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
694 SCX_ENQ_HEAD = ENQUEUE_HEAD,
695
696 /* high 32bits are SCX specific */
697
698 /*
699 * Set the following to trigger preemption when calling
700 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
701 * current task is cleared to zero and the CPU is kicked into the
702 * scheduling path. Implies %SCX_ENQ_HEAD.
703 */
704 SCX_ENQ_PREEMPT = 1LLU << 32,
705
706 /*
707 * The task being enqueued was previously enqueued on the current CPU's
708 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
709 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
710 * invoked in a ->cpu_release() callback, and the task is again
711 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
712 * task will not be scheduled on the CPU until at least the next invocation
713 * of the ->cpu_acquire() callback.
714 */
715 SCX_ENQ_REENQ = 1LLU << 40,
716
717 /*
718 * The task being enqueued is the only task available for the CPU. By
719 * default, ext core keeps executing such tasks but when
720 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
721 * %SCX_ENQ_LAST flag set.
722 *
723 * The BPF scheduler is responsible for triggering a follow-up
724 * scheduling event. Otherwise, execution can stall.
725 */
726 SCX_ENQ_LAST = 1LLU << 41,
727
728 /* high 8 bits are internal */
729 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
730
731 SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
732 SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
733 };
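/*
 * Example (sketch): %SCX_ENQ_PREEMPT is ORed into @enq_flags when
 * dispatching to a local DSQ, e.g. to let a latency-critical task preempt
 * whatever is running on the target CPU:
 *
 *	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL,
 *			 SCX_ENQ_PREEMPT);
 */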
734
735 enum scx_deq_flags {
736 /* expose select DEQUEUE_* flags as enums */
737 SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
738
739 /* high 32bits are SCX specific */
740
741 /*
742 * The generic core-sched layer decided to execute the task even though
743 * it hasn't been dispatched yet. Dequeue from the BPF side.
744 */
745 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
746 };
747
748 enum scx_pick_idle_cpu_flags {
749 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
750 };
751
752 enum scx_kick_flags {
753 /*
754 * Kick the target CPU if idle. Guarantees that the target CPU goes
755 * through at least one full scheduling cycle before going idle. If the
756 * target CPU is determined to be currently not idle and about to go
757 * through a scheduling cycle before going idle anyway, this is a noop.
758 */
759 SCX_KICK_IDLE = 1LLU << 0,
760
761 /*
762 * Preempt the current task and execute the dispatch path. If the
763 * current task of the target CPU is an SCX task, its ->scx.slice is
764 * cleared to zero before the scheduling path is invoked so that the
765 * task expires and the dispatch path is invoked.
766 */
767 SCX_KICK_PREEMPT = 1LLU << 1,
768
769 /*
770 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
771 * return after the target CPU finishes picking the next task.
772 */
773 SCX_KICK_WAIT = 1LLU << 2,
774 };
775
776 enum scx_tg_flags {
777 SCX_TG_ONLINE = 1U << 0,
778 SCX_TG_INITED = 1U << 1,
779 };
780
781 enum scx_ops_enable_state {
782 SCX_OPS_ENABLING,
783 SCX_OPS_ENABLED,
784 SCX_OPS_DISABLING,
785 SCX_OPS_DISABLED,
786 };
787
788 static const char *scx_ops_enable_state_str[] = {
789 [SCX_OPS_ENABLING] = "enabling",
790 [SCX_OPS_ENABLED] = "enabled",
791 [SCX_OPS_DISABLING] = "disabling",
792 [SCX_OPS_DISABLED] = "disabled",
793 };
794
795 /*
796 * sched_ext_entity->ops_state
797 *
798 * Used to track the task ownership between the SCX core and the BPF scheduler.
799 * State transitions look as follows:
800 *
801 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
802 *   ^              |                 |
803 *   |              v                 v
804 *   \-------------------------------/
805 *
806 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
807 * sites for explanations on the conditions being waited upon and why they are
808 * safe. Transitions out of them into NONE or QUEUED must store_release and the
809 * waiters should load_acquire.
810 *
811 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
812 * any given task can be dispatched by the BPF scheduler at all times and thus
813 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
814 * to try to dispatch any task anytime regardless of its state as the SCX core
815 * can safely reject invalid dispatches.
816 */
817 enum scx_ops_state {
818 SCX_OPSS_NONE, /* owned by the SCX core */
819 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
820 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
821 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
822
823 /*
824 * QSEQ brands each QUEUED instance so that, when dispatch races
825 * dequeue/requeue, the dispatcher can tell whether it still has a claim
826 * on the task being dispatched.
827 *
828 * As some 32bit archs can't do 64bit store_release/load_acquire,
829 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
830 * 32bit machines. The dispatch race window QSEQ protects is very narrow
831 * and runs with IRQ disabled. 30 bits should be sufficient.
832 */
833 SCX_OPSS_QSEQ_SHIFT = 2,
834 };
835
836 /* Use macros to ensure that the type is unsigned long for the masks */
837 #define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
838 #define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
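/*
 * For illustration, decomposing an ops_state value with the above (the low
 * SCX_OPSS_QSEQ_SHIFT bits hold the state, the remaining bits hold QSEQ):
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	enum scx_ops_state state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 */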
839
840 /*
841 * During exit, a task may schedule after losing its PIDs. When disabling the
842 * BPF scheduler, we need to be able to iterate tasks in every state to
843 * guarantee system safety. Maintain a dedicated task list which contains every
844 * task between its fork and eventual free.
845 */
846 static DEFINE_SPINLOCK(scx_tasks_lock);
847 static LIST_HEAD(scx_tasks);
848
849 /* ops enable/disable */
850 static struct kthread_worker *scx_ops_helper;
851 static DEFINE_MUTEX(scx_ops_enable_mutex);
852 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
853 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
854 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
855 static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
856 static bool scx_ops_init_task_enabled;
857 static bool scx_switching_all;
858 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
859
860 static struct sched_ext_ops scx_ops;
861 static bool scx_warned_zero_slice;
862
863 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
864 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
865 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
866 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
867
868 static struct static_key_false scx_has_op[SCX_OPI_END] =
869 { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
870
871 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
872 static struct scx_exit_info *scx_exit_info;
873
874 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
875 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
876
877 /*
878 * A monotonically increasing sequence number that is incremented every time a
879 * scheduler is enabled. This can be used to check if any custom sched_ext
880 * scheduler has ever been used in the system.
881 */
882 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
883
884 /*
885 * The maximum amount of time in jiffies that a task may be runnable without
886 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
887 * scx_ops_error().
888 */
889 static unsigned long scx_watchdog_timeout;
890
891 /*
892 * The last time the delayed work was run. This delayed work relies on
893 * ksoftirqd being able to run to service timer interrupts, so it's possible
894 * that this work itself could get wedged. To account for this, we check that
895 * it's not stalled in the timer tick, and trigger an error if it is.
896 */
897 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
898
899 static struct delayed_work scx_watchdog_work;
900
901 /* idle tracking */
902 #ifdef CONFIG_SMP
903 #ifdef CONFIG_CPUMASK_OFFSTACK
904 #define CL_ALIGNED_IF_ONSTACK
905 #else
906 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
907 #endif
908
909 static struct {
910 cpumask_var_t cpu;
911 cpumask_var_t smt;
912 } idle_masks CL_ALIGNED_IF_ONSTACK;
913
914 #endif /* CONFIG_SMP */
915
916 /* for %SCX_KICK_WAIT */
917 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
918
919 /*
920 * Direct dispatch marker.
921 *
922 * Non-NULL values are used for direct dispatch from the enqueue path. A valid
923 * pointer points to the task currently being enqueued. An ERR_PTR value is used
924 * to indicate that direct dispatch has already happened.
925 */
926 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
927
928 /*
929 * Dispatch queues.
930 *
931 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
932 * to avoid live-locking in bypass mode where all tasks are dispatched to
933 * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
934 * sufficient, it can be further split.
935 */
936 static struct scx_dispatch_q **global_dsqs;
937
938 static const struct rhashtable_params dsq_hash_params = {
939 .key_len = 8,
940 .key_offset = offsetof(struct scx_dispatch_q, id),
941 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
942 };
943
944 static struct rhashtable dsq_hash;
945 static LLIST_HEAD(dsqs_to_free);
946
947 /* dispatch buf */
948 struct scx_dsp_buf_ent {
949 struct task_struct *task;
950 unsigned long qseq;
951 u64 dsq_id;
952 u64 enq_flags;
953 };
954
955 static u32 scx_dsp_max_batch;
956
957 struct scx_dsp_ctx {
958 struct rq *rq;
959 u32 cursor;
960 u32 nr_tasks;
961 struct scx_dsp_buf_ent buf[];
962 };
963
964 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
965
966 /* string formatting from BPF */
967 struct scx_bstr_buf {
968 u64 data[MAX_BPRINTF_VARARGS];
969 char line[SCX_EXIT_MSG_LEN];
970 };
971
972 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
973 static struct scx_bstr_buf scx_exit_bstr_buf;
974
975 /* ops debug dump */
976 struct scx_dump_data {
977 s32 cpu;
978 bool first;
979 s32 cursor;
980 struct seq_buf *s;
981 const char *prefix;
982 struct scx_bstr_buf buf;
983 };
984
985 static struct scx_dump_data scx_dump_data = {
986 .cpu = -1,
987 };
988
989 /* /sys/kernel/sched_ext interface */
990 static struct kset *scx_kset;
991 static struct kobject *scx_root_kobj;
992
993 #define CREATE_TRACE_POINTS
994 #include <trace/events/sched_ext.h>
995
996 static void process_ddsp_deferred_locals(struct rq *rq);
997 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
998 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
999 s64 exit_code,
1000 const char *fmt, ...);
1001
1002 #define scx_ops_error_kind(err, fmt, args...) \
1003 scx_ops_exit_kind((err), 0, fmt, ##args)
1004
1005 #define scx_ops_exit(code, fmt, args...) \
1006 scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1007
1008 #define scx_ops_error(fmt, args...) \
1009 scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1010
1011 #define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1012
1013 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1014 {
1015 if (time_after(at, now))
1016 return jiffies_to_msecs(at - now);
1017 else
1018 return -(long)jiffies_to_msecs(now - at);
1019 }
1020
1021 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1022 static u32 higher_bits(u32 flags)
1023 {
1024 return ~((1 << fls(flags)) - 1);
1025 }
1026
1027 /* return the mask with only the highest bit set */
1028 static u32 highest_bit(u32 flags)
1029 {
1030 int bit = fls(flags);
1031 return ((u64)1 << bit) >> 1;
1032 }
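/*
 * e.g. for flags == 0x5 (bits 0 and 2 set), fls() returns 3, so
 * higher_bits(0x5) == 0xfffffff8 and highest_bit(0x5) == 0x4.
 */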
1033
1034 static bool u32_before(u32 a, u32 b)
1035 {
1036 return (s32)(a - b) < 0;
1037 }
1038
1039 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1040 {
1041 return global_dsqs[cpu_to_node(task_cpu(p))];
1042 }
1043
1044 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1045 {
1046 return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1047 }
1048
1049 /*
1050 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1051 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1052 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1053 * whether it's running from an allowed context.
1054 *
1055 * @mask is constant, always inline to cull the mask calculations.
1056 */
1057 static __always_inline void scx_kf_allow(u32 mask)
1058 {
1059 /* nesting is allowed only in increasing scx_kf_mask order */
1060 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1061 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1062 current->scx.kf_mask, mask);
1063 current->scx.kf_mask |= mask;
1064 barrier();
1065 }
1066
1067 static void scx_kf_disallow(u32 mask)
1068 {
1069 barrier();
1070 current->scx.kf_mask &= ~mask;
1071 }
1072
1073 #define SCX_CALL_OP(mask, op, args...) \
1074 do { \
1075 if (mask) { \
1076 scx_kf_allow(mask); \
1077 scx_ops.op(args); \
1078 scx_kf_disallow(mask); \
1079 } else { \
1080 scx_ops.op(args); \
1081 } \
1082 } while (0)
1083
1084 #define SCX_CALL_OP_RET(mask, op, args...) \
1085 ({ \
1086 __typeof__(scx_ops.op(args)) __ret; \
1087 if (mask) { \
1088 scx_kf_allow(mask); \
1089 __ret = scx_ops.op(args); \
1090 scx_kf_disallow(mask); \
1091 } else { \
1092 __ret = scx_ops.op(args); \
1093 } \
1094 __ret; \
1095 })
1096
1097 /*
1098 * Some kfuncs are allowed only on the tasks that are subjects of the
1099 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1100 * restrictions, the following SCX_CALL_OP_*() variants should be used when
1101 * invoking scx_ops operations that take task arguments. These can only be used
1102 * for non-nesting operations due to the way the tasks are tracked.
1103 *
1104 * kfuncs which can only operate on such tasks can in turn use
1105 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1106 * the specific task.
1107 */
1108 #define SCX_CALL_OP_TASK(mask, op, task, args...) \
1109 do { \
1110 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1111 current->scx.kf_tasks[0] = task; \
1112 SCX_CALL_OP(mask, op, task, ##args); \
1113 current->scx.kf_tasks[0] = NULL; \
1114 } while (0)
1115
1116 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...) \
1117 ({ \
1118 __typeof__(scx_ops.op(task, ##args)) __ret; \
1119 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1120 current->scx.kf_tasks[0] = task; \
1121 __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \
1122 current->scx.kf_tasks[0] = NULL; \
1123 __ret; \
1124 })
1125
1126 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) \
1127 ({ \
1128 __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \
1129 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1130 current->scx.kf_tasks[0] = task0; \
1131 current->scx.kf_tasks[1] = task1; \
1132 __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \
1133 current->scx.kf_tasks[0] = NULL; \
1134 current->scx.kf_tasks[1] = NULL; \
1135 __ret; \
1136 })
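/*
 * Illustrative call site (a sketch; %SCX_KF_REST comes from the
 * scx_kf_mask enum declared in the headers):
 *
 *	SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
 *
 * While the call is in flight, kfuncs gated by
 * scx_kf_allowed_on_arg_tasks() may only operate on @p.
 */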
1137
1138 /* @mask is constant, always inline to cull unnecessary branches */
1139 static __always_inline bool scx_kf_allowed(u32 mask)
1140 {
1141 if (unlikely(!(current->scx.kf_mask & mask))) {
1142 scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1143 mask, current->scx.kf_mask);
1144 return false;
1145 }
1146
1147 /*
1148 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1149 * DISPATCH must not be called if we're running DEQUEUE which is nested
1150 * inside ops.dispatch(). We don't need to check boundaries for any
1151 * blocking kfuncs as the verifier ensures they're only called from
1152 * sleepable progs.
1153 */
1154 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1155 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1156 scx_ops_error("cpu_release kfunc called from a nested operation");
1157 return false;
1158 }
1159
1160 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1161 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1162 scx_ops_error("dispatch kfunc called from a nested operation");
1163 return false;
1164 }
1165
1166 return true;
1167 }
1168
1169 /* see SCX_CALL_OP_TASK() */
1170 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1171 struct task_struct *p)
1172 {
1173 if (!scx_kf_allowed(mask))
1174 return false;
1175
1176 if (unlikely((p != current->scx.kf_tasks[0] &&
1177 p != current->scx.kf_tasks[1]))) {
1178 scx_ops_error("called on a task not being operated on");
1179 return false;
1180 }
1181
1182 return true;
1183 }
1184
1185 static bool scx_kf_allowed_if_unlocked(void)
1186 {
1187 return !current->scx.kf_mask;
1188 }
1189
1190 /**
1191 * nldsq_next_task - Iterate to the next task in a non-local DSQ
1192 * @dsq: user DSQ being iterated
1193 * @cur: current position, %NULL to start iteration
1194 * @rev: walk backwards
1195 *
1196 * Returns %NULL when iteration is finished.
1197 */
1198 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1199 struct task_struct *cur, bool rev)
1200 {
1201 struct list_head *list_node;
1202 struct scx_dsq_list_node *dsq_lnode;
1203
1204 lockdep_assert_held(&dsq->lock);
1205
1206 if (cur)
1207 list_node = &cur->scx.dsq_list.node;
1208 else
1209 list_node = &dsq->list;
1210
1211 /* find the next task, need to skip BPF iteration cursors */
1212 do {
1213 if (rev)
1214 list_node = list_node->prev;
1215 else
1216 list_node = list_node->next;
1217
1218 if (list_node == &dsq->list)
1219 return NULL;
1220
1221 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1222 node);
1223 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1224
1225 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1226 }
1227
1228 #define nldsq_for_each_task(p, dsq) \
1229 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1230 (p) = nldsq_next_task((dsq), (p), false))
1231
1232
1233 /*
1234 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1235 * dispatch order. BPF-visible iterator is opaque and larger to allow future
1236 * changes without breaking backward compatibility. Can be used with
1237 * bpf_for_each(). See bpf_iter_scx_dsq_*().
1238 */
1239 enum scx_dsq_iter_flags {
1240 /* iterate in the reverse dispatch order */
1241 SCX_DSQ_ITER_REV = 1U << 16,
1242
1243 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
1244 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
1245
1246 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
1247 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
1248 __SCX_DSQ_ITER_HAS_SLICE |
1249 __SCX_DSQ_ITER_HAS_VTIME,
1250 };
1251
1252 struct bpf_iter_scx_dsq_kern {
1253 struct scx_dsq_list_node cursor;
1254 struct scx_dispatch_q *dsq;
1255 u64 slice;
1256 u64 vtime;
1257 } __attribute__((aligned(8)));
1258
1259 struct bpf_iter_scx_dsq {
1260 u64 __opaque[6];
1261 } __attribute__((aligned(8)));
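/*
 * BPF-side usage sketch (MY_DSQ_ID is hypothetical): the iterator is
 * driven with bpf_for_each() as noted above.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ_ID, 0)
 *		bpf_printk("queued: %s", p->comm);
 */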
1262
1263
1264 /*
1265 * SCX task iterator.
1266 */
1267 struct scx_task_iter {
1268 struct sched_ext_entity cursor;
1269 struct task_struct *locked;
1270 struct rq *rq;
1271 struct rq_flags rf;
1272 };
1273
1274 /**
1275 * scx_task_iter_init - Initialize a task iterator
1276 * @iter: iterator to init
1277 *
1278 * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
1279 * @iter must eventually be exited with scx_task_iter_exit().
1280 *
1281 * scx_tasks_lock may be released between this and the first next() call or
1282 * between any two next() calls. If scx_tasks_lock is released between two
1283 * next() calls, the caller is responsible for ensuring that the task being
1284 * iterated remains accessible either through RCU read lock or obtaining a
1285 * reference count.
1286 *
1287 * All tasks which existed when the iteration started are guaranteed to be
1288 * visited as long as they still exist.
1289 */
1290 static void scx_task_iter_init(struct scx_task_iter *iter)
1291 {
1292 lockdep_assert_held(&scx_tasks_lock);
1293
1294 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1295 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1296
1297 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1298 list_add(&iter->cursor.tasks_node, &scx_tasks);
1299 iter->locked = NULL;
1300 }
1301
1302 /**
1303 * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
1304 * @iter: iterator to unlock rq for
1305 *
1306 * If @iter is in the middle of a locked iteration, it may be locking the rq of
1307 * the task currently being visited. Unlock the rq if so. This function can be
1308 * safely called anytime during an iteration.
1309 *
1310 * Returns %true if the rq that @iter was locking has been unlocked, %false
1311 * if @iter was not locking an rq.
1312 */
1313 static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1314 {
1315 if (iter->locked) {
1316 task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1317 iter->locked = NULL;
1318 return true;
1319 } else {
1320 return false;
1321 }
1322 }
1323
1324 /**
1325 * scx_task_iter_exit - Exit a task iterator
1326 * @iter: iterator to exit
1327 *
1328 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
1329 * If the iterator holds a task's rq lock, that rq lock is released. See
1330 * scx_task_iter_init() for details.
1331 */
1332 static void scx_task_iter_exit(struct scx_task_iter *iter)
1333 {
1334 lockdep_assert_held(&scx_tasks_lock);
1335
1336 scx_task_iter_rq_unlock(iter);
1337 list_del_init(&iter->cursor.tasks_node);
1338 }
1339
1340 /**
1341 * scx_task_iter_next - Next task
1342 * @iter: iterator to walk
1343 *
1344 * Visit the next task. See scx_task_iter_init() for details.
1345 */
1346 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1347 {
1348 struct list_head *cursor = &iter->cursor.tasks_node;
1349 struct sched_ext_entity *pos;
1350
1351 lockdep_assert_held(&scx_tasks_lock);
1352
1353 list_for_each_entry(pos, cursor, tasks_node) {
1354 if (&pos->tasks_node == &scx_tasks)
1355 return NULL;
1356 if (!(pos->flags & SCX_TASK_CURSOR)) {
1357 list_move(cursor, &pos->tasks_node);
1358 return container_of(pos, struct task_struct, scx);
1359 }
1360 }
1361
1362 /* can't happen, should always terminate at scx_tasks above */
1363 BUG();
1364 }
1365
1366 /**
1367 * scx_task_iter_next_locked - Next non-idle task with its rq locked
1368 * @iter: iterator to walk
1369 *
1370 * Visit the next non-idle task with its rq lock held. Idle tasks
1371 * (init_tasks) are skipped as they are never scheduled through SCX;
1372 * see the comment in the function body below for why. Also see
1373 * scx_task_iter_init() for iteration rules.
1374 */
1375 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1376 {
1377 struct task_struct *p;
1378
1379 scx_task_iter_rq_unlock(iter);
1380
1381 while ((p = scx_task_iter_next(iter))) {
1382 /*
1383 * scx_task_iter is used to prepare and move tasks into SCX
1384 * while loading the BPF scheduler and vice-versa while
1385 * unloading. The init_tasks ("swappers") should be excluded
1386 * from the iteration because:
1387 *
1388 * - It's unsafe to use __setscheduler_prio() on an init_task to
1389 * determine the sched_class to use as it won't preserve its
1390 * idle_sched_class.
1391 *
1392 * - ops.init/exit_task() can easily be confused if called with
1393 * init_tasks as they, e.g., share PID 0.
1394 *
1395 * As init_tasks are never scheduled through SCX, they can be
1396 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1397 * doesn't work here:
1398 *
1399 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1400 * yet been onlined.
1401 *
1402 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1403 * play_idle_precise() used by CONFIG_IDLE_INJECT.
1404 *
1405 * Test for idle_sched_class as only init_tasks are on it.
1406 */
1407 if (p->sched_class != &idle_sched_class)
1408 break;
1409 }
1410 if (!p)
1411 return NULL;
1412
1413 iter->rq = task_rq_lock(p, &iter->rf);
1414 iter->locked = p;
1415
1416 return p;
1417 }
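/*
 * Typical usage, as on the enable/disable paths that consume this
 * iterator (sketch):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	spin_lock_irq(&scx_tasks_lock);
 *	scx_task_iter_init(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti))) {
 *		... p's rq lock and scx_tasks_lock are held here ...
 *	}
 *	scx_task_iter_exit(&sti);
 *	spin_unlock_irq(&scx_tasks_lock);
 */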
1418
1419 static enum scx_ops_enable_state scx_ops_enable_state(void)
1420 {
1421 return atomic_read(&scx_ops_enable_state_var);
1422 }
1423
1424 static enum scx_ops_enable_state
1425 scx_ops_set_enable_state(enum scx_ops_enable_state to)
1426 {
1427 return atomic_xchg(&scx_ops_enable_state_var, to);
1428 }
1429
1430 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1431 enum scx_ops_enable_state from)
1432 {
1433 int from_v = from;
1434
1435 return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1436 }
1437
1438 static bool scx_rq_bypassing(struct rq *rq)
1439 {
1440 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1441 }
1442
1443 /**
1444 * wait_ops_state - Busy-wait the specified ops state to end
1445 * @p: target task
1446 * @opss: state to wait the end of
1447 *
1448 * Busy-wait for @p to transition out of @opss. This can only be used when the
1449 * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This also
1450 * has load_acquire semantics to ensure that the caller can see the updates made
1451 * in the enqueueing and dispatching paths.
1452 */
1453 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1454 {
1455 do {
1456 cpu_relax();
1457 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1458 }
1459
1460 /**
1461 * ops_cpu_valid - Verify a cpu number
1462 * @cpu: cpu number which came from a BPF ops
1463 * @where: extra information reported on error
1464 *
1465 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1466 * Verify that it is in range and one of the possible cpus. If invalid, trigger
1467 * an ops error.
1468 */
1469 static bool ops_cpu_valid(s32 cpu, const char *where)
1470 {
1471 if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1472 return true;
1473 } else {
1474 scx_ops_error("invalid CPU %d%s%s", cpu,
1475 where ? " " : "", where ?: "");
1476 return false;
1477 }
1478 }
1479
1480 /**
1481 * ops_sanitize_err - Sanitize a -errno value
1482 * @ops_name: operation to blame on failure
1483 * @err: -errno value to sanitize
1484 *
1485 * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1486 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1487 * cause misbehaviors. For example, a large negative return from
1488 * ops.init_task() triggers an oops when passed up the call chain because the
1489 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1490 * handled as a pointer.
1491 */
1492 static int ops_sanitize_err(const char *ops_name, s32 err)
1493 {
1494 if (err < 0 && err >= -MAX_ERRNO)
1495 return err;
1496
1497 scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1498 return -EPROTO;
1499 }
1500
1501 static void run_deferred(struct rq *rq)
1502 {
1503 process_ddsp_deferred_locals(rq);
1504 }
1505
1506 #ifdef CONFIG_SMP
1507 static void deferred_bal_cb_workfn(struct rq *rq)
1508 {
1509 run_deferred(rq);
1510 }
1511 #endif
1512
1513 static void deferred_irq_workfn(struct irq_work *irq_work)
1514 {
1515 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1516
1517 raw_spin_rq_lock(rq);
1518 run_deferred(rq);
1519 raw_spin_rq_unlock(rq);
1520 }
1521
1522 /**
1523 * schedule_deferred - Schedule execution of deferred actions on an rq
1524 * @rq: target rq
1525 *
1526 * Schedule execution of deferred actions on @rq. Must be called with @rq
1527 * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1528 * can unlock @rq to e.g. migrate tasks to other rqs.
1529 */
1530 static void schedule_deferred(struct rq *rq)
1531 {
1532 lockdep_assert_rq_held(rq);
1533
1534 #ifdef CONFIG_SMP
1535 /*
1536 * If in the middle of waking up a task, task_woken_scx() will be called
1537 * afterwards which will then run the deferred actions, no need to
1538 * schedule anything.
1539 */
1540 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1541 return;
1542
1543 /*
1544 * If in balance, the balance callbacks will be called before rq lock is
1545 * released. Schedule one.
1546 */
1547 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1548 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1549 deferred_bal_cb_workfn);
1550 return;
1551 }
1552 #endif
1553 /*
1554 * No scheduler hooks available. Queue an irq work. It is executed on
1555 * IRQ re-enable, which may take a bit longer than the scheduler hooks.
1556 * The above WAKEUP and BALANCE paths should cover most of the cases and
1557 * the time to IRQ re-enable shouldn't be long.
1558 */
1559 irq_work_queue(&rq->scx.deferred_irq_work);
1560 }
1561
1562 /**
1563 * touch_core_sched - Update timestamp used for core-sched task ordering
1564 * @rq: rq to read clock from, must be locked
1565 * @p: task to update the timestamp for
1566 *
1567 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1568 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1569 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1570 * exhaustion).
1571 */
1572 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1573 {
1574 lockdep_assert_rq_held(rq);
1575
1576 #ifdef CONFIG_SCHED_CORE
1577 /*
1578 * It's okay to update the timestamp spuriously. Use
1579 * sched_core_disabled() which is cheaper than enabled().
1580 *
1581 * As this is used to determine ordering between tasks of sibling CPUs,
1582 * it may be better to use per-core dispatch sequence instead.
1583 */
1584 if (!sched_core_disabled())
1585 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1586 #endif
1587 }
1588
1589 /**
1590 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1591 * @rq: rq to read clock from, must be locked
1592 * @p: task being dispatched
1593 *
1594 * If the BPF scheduler implements custom core-sched ordering via
1595 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1596 * ordering within each local DSQ. This function is called from dispatch paths
1597 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1598 */
1599 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1600 {
1601 lockdep_assert_rq_held(rq);
1602
1603 #ifdef CONFIG_SCHED_CORE
1604 if (SCX_HAS_OP(core_sched_before))
1605 touch_core_sched(rq, p);
1606 #endif
1607 }
1608
1609 static void update_curr_scx(struct rq *rq)
1610 {
1611 struct task_struct *curr = rq->curr;
1612 s64 delta_exec;
1613
1614 delta_exec = update_curr_common(rq);
1615 if (unlikely(delta_exec <= 0))
1616 return;
1617
1618 if (curr->scx.slice != SCX_SLICE_INF) {
1619 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1620 if (!curr->scx.slice)
1621 touch_core_sched(rq, curr);
1622 }
1623 }
1624
1625 static bool scx_dsq_priq_less(struct rb_node *node_a,
1626 const struct rb_node *node_b)
1627 {
1628 const struct task_struct *a =
1629 container_of(node_a, struct task_struct, scx.dsq_priq);
1630 const struct task_struct *b =
1631 container_of(node_b, struct task_struct, scx.dsq_priq);
1632
1633 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1634 }
1635
1636 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1637 {
1638 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1639 WRITE_ONCE(dsq->nr, dsq->nr + delta);
1640 }
1641
1642 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
1643 u64 enq_flags)
1644 {
1645 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1646
1647 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1648 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1649 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1650
1651 if (!is_local) {
1652 raw_spin_lock(&dsq->lock);
1653 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1654 scx_ops_error("attempting to dispatch to a destroyed dsq");
1655 /* fall back to the global dsq */
1656 raw_spin_unlock(&dsq->lock);
1657 dsq = find_global_dsq(p);
1658 raw_spin_lock(&dsq->lock);
1659 }
1660 }
1661
1662 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1663 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1664 /*
1665 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1666 * their FIFO queues. To avoid confusion and accidentally
1667 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1668 * disallow any internal DSQ from doing vtime ordering of
1669 * tasks.
1670 */
1671 scx_ops_error("cannot use vtime ordering for built-in DSQs");
1672 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1673 }
1674
1675 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1676 struct rb_node *rbp;
1677
1678 /*
1679 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1680 * linked to both the rbtree and list on PRIQs, this can only be
1681 * tested easily when adding the first task.
1682 */
1683 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1684 nldsq_next_task(dsq, NULL, false)))
1685 scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1686 dsq->id);
1687
1688 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1689 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1690
1691 /*
1692 * Find the previous task and insert after it on the list so
1693 * that @dsq->list is vtime ordered.
1694 */
1695 rbp = rb_prev(&p->scx.dsq_priq);
1696 if (rbp) {
1697 struct task_struct *prev =
1698 container_of(rbp, struct task_struct,
1699 scx.dsq_priq);
1700 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1701 } else {
1702 list_add(&p->scx.dsq_list.node, &dsq->list);
1703 }
1704 } else {
1705 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1706 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1707 scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1708 dsq->id);
1709
1710 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1711 list_add(&p->scx.dsq_list.node, &dsq->list);
1712 else
1713 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1714 }
1715
1716 /* seq records the order tasks are queued, used by BPF DSQ iterator */
1717 dsq->seq++;
1718 p->scx.dsq_seq = dsq->seq;
1719
1720 dsq_mod_nr(dsq, 1);
1721 p->scx.dsq = dsq;
1722
1723 /*
1724 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1725 * direct dispatch path, but we clear them here because the direct
1726 * dispatch verdict may be overridden on the enqueue path during e.g.
1727 * bypass.
1728 */
1729 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1730 p->scx.ddsp_enq_flags = 0;
1731
1732 /*
1733 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1734 * match waiters' load_acquire.
1735 */
1736 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1737 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1738
1739 if (is_local) {
1740 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1741 bool preempt = false;
1742
1743 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1744 rq->curr->sched_class == &ext_sched_class) {
1745 rq->curr->scx.slice = 0;
1746 preempt = true;
1747 }
1748
1749 if (preempt || sched_class_above(&ext_sched_class,
1750 rq->curr->sched_class))
1751 resched_curr(rq);
1752 } else {
1753 raw_spin_unlock(&dsq->lock);
1754 }
1755 }
1756
1757 static void task_unlink_from_dsq(struct task_struct *p,
1758 struct scx_dispatch_q *dsq)
1759 {
1760 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1761
1762 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1763 rb_erase(&p->scx.dsq_priq, &dsq->priq);
1764 RB_CLEAR_NODE(&p->scx.dsq_priq);
1765 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1766 }
1767
1768 list_del_init(&p->scx.dsq_list.node);
1769 dsq_mod_nr(dsq, -1);
1770 }
1771
1772 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1773 {
1774 struct scx_dispatch_q *dsq = p->scx.dsq;
1775 bool is_local = dsq == &rq->scx.local_dsq;
1776
1777 if (!dsq) {
1778 /*
1779 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1780 * Unlinking is all that's needed to cancel.
1781 */
1782 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1783 list_del_init(&p->scx.dsq_list.node);
1784
1785 /*
1786 * When dispatching directly from the BPF scheduler to a local
1787 * DSQ, the task isn't associated with any DSQ but
1788 * @p->scx.holding_cpu may be set under the protection of
1789 * %SCX_OPSS_DISPATCHING.
1790 */
1791 if (p->scx.holding_cpu >= 0)
1792 p->scx.holding_cpu = -1;
1793
1794 return;
1795 }
1796
1797 if (!is_local)
1798 raw_spin_lock(&dsq->lock);
1799
1800 /*
1801 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1802 * change underneath us.
1803 */
1804 if (p->scx.holding_cpu < 0) {
1805 /* @p must still be on @dsq, dequeue */
1806 task_unlink_from_dsq(p, dsq);
1807 } else {
1808 /*
1809 * We're racing against dispatch_to_local_dsq() which already
1810 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1811 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1812 * the race.
1813 */
1814 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1815 p->scx.holding_cpu = -1;
1816 }
1817 p->scx.dsq = NULL;
1818
1819 if (!is_local)
1820 raw_spin_unlock(&dsq->lock);
1821 }
1822
1823 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
1824 struct task_struct *p)
1825 {
1826 struct scx_dispatch_q *dsq;
1827
1828 if (dsq_id == SCX_DSQ_LOCAL)
1829 return &rq->scx.local_dsq;
1830
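/*
 * An SCX_DSQ_LOCAL_ON verdict encodes the target CPU in its low bits,
 * e.g. (SCX_DSQ_LOCAL_ON | 3) targets CPU 3's local DSQ.
 */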
1831 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1832 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1833
1834 if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1835 return find_global_dsq(p);
1836
1837 return &cpu_rq(cpu)->scx.local_dsq;
1838 }
1839
1840 if (dsq_id == SCX_DSQ_GLOBAL)
1841 dsq = find_global_dsq(p);
1842 else
1843 dsq = find_user_dsq(dsq_id);
1844
1845 if (unlikely(!dsq)) {
1846 scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
1847 dsq_id, p->comm, p->pid);
1848 return find_global_dsq(p);
1849 }
1850
1851 return dsq;
1852 }
1853
1854 static void mark_direct_dispatch(struct task_struct *ddsp_task,
1855 struct task_struct *p, u64 dsq_id,
1856 u64 enq_flags)
1857 {
1858 /*
1859 * Mark that dispatch already happened from ops.select_cpu() or
1860 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1861 * which can never match a valid task pointer.
1862 */
1863 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1864
1865 /* @p must match the task on the enqueue path */
1866 if (unlikely(p != ddsp_task)) {
1867 if (IS_ERR(ddsp_task))
1868 scx_ops_error("%s[%d] already direct-dispatched",
1869 p->comm, p->pid);
1870 else
1871 scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1872 ddsp_task->comm, ddsp_task->pid,
1873 p->comm, p->pid);
1874 return;
1875 }
1876
1877 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1878 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1879
1880 p->scx.ddsp_dsq_id = dsq_id;
1881 p->scx.ddsp_enq_flags = enq_flags;
1882 }
1883
1884 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
1885 {
1886 struct rq *rq = task_rq(p);
1887 struct scx_dispatch_q *dsq =
1888 find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
1889
1890 touch_core_sched_dispatch(rq, p);
1891
1892 p->scx.ddsp_enq_flags |= enq_flags;
1893
1894 /*
1895 * We are in the enqueue path with @rq locked and pinned, and thus can't
1896 * double lock a remote rq and enqueue to its local DSQ. For
1897 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1898 * the enqueue so that it's executed when @rq can be unlocked.
1899 */
1900 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1901 unsigned long opss;
1902
1903 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1904
1905 switch (opss & SCX_OPSS_STATE_MASK) {
1906 case SCX_OPSS_NONE:
1907 break;
1908 case SCX_OPSS_QUEUEING:
1909 /*
1910 * As @p was never passed to the BPF side, _release is
1911 * not strictly necessary. Still do it for consistency.
1912 */
1913 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1914 break;
1915 default:
1916 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1917 p->comm, p->pid, opss);
1918 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1919 break;
1920 }
1921
1922 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1923 list_add_tail(&p->scx.dsq_list.node,
1924 &rq->scx.ddsp_deferred_locals);
1925 schedule_deferred(rq);
1926 return;
1927 }
1928
1929 dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1930 }
1931
1932 static bool scx_rq_online(struct rq *rq)
1933 {
1934 /*
1935 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1936 * the online state as seen from the BPF scheduler. cpu_active() test
1937 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1938 * stay set until the current scheduling operation is complete even if
1939 * we aren't locking @rq.
1940 */
1941 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1942 }
1943
1944 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1945 int sticky_cpu)
1946 {
1947 bool bypassing = scx_rq_bypassing(rq);
1948 struct task_struct **ddsp_taskp;
1949 unsigned long qseq;
1950
1951 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1952
1953 /* rq migration */
1954 if (sticky_cpu == cpu_of(rq))
1955 goto local_norefill;
1956
1957 /*
1958 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1959 * is offline and are just running the hotplug path. Don't bother the
1960 * BPF scheduler.
1961 */
1962 if (!scx_rq_online(rq))
1963 goto local;
1964
1965 if (bypassing)
1966 goto global;
1967
1968 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1969 goto direct;
1970
1971 /* see %SCX_OPS_ENQ_EXITING */
1972 if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
1973 unlikely(p->flags & PF_EXITING))
1974 goto local;
1975
1976 if (!SCX_HAS_OP(enqueue))
1977 goto global;
1978
1979 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
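/*
 * The qseq tags the QUEUEING/QUEUED ops_state so that finish_dispatch() can
 * detect whether @p went through a dequeue/re-enqueue cycle after
 * scx_bpf_dispatch() and, if so, abandon the stale dispatch.
 */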
1980 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1981
1982 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1983 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1984
1985 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1986 WARN_ON_ONCE(*ddsp_taskp);
1987 *ddsp_taskp = p;
1988
1989 SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
1990
1991 *ddsp_taskp = NULL;
1992 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1993 goto direct;
1994
1995 /*
1996 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
1997 * dequeue may be waiting. The store_release matches their load_acquire.
1998 */
1999 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2000 return;
2001
2002 direct:
2003 direct_dispatch(p, enq_flags);
2004 return;
2005
2006 local:
2007 /*
2008 * For task-ordering, slice refill must be treated as implying the end
2009 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2010 * higher priority it becomes from scx_prio_less()'s POV.
2011 */
2012 touch_core_sched(rq, p);
2013 p->scx.slice = SCX_SLICE_DFL;
2014 local_norefill:
2015 dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2016 return;
2017
2018 global:
2019 touch_core_sched(rq, p); /* see the comment in local: */
2020 p->scx.slice = bypassing ? SCX_SLICE_BYPASS : SCX_SLICE_DFL;
2021 dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2022 }
2023
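/*
 * Runnable tasks are linked on @rq->scx.runnable_list so that, among other
 * things, the watchdog in check_rq_for_timeouts() can detect tasks which have
 * been runnable for too long without getting to run.
 */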
2024 static bool task_runnable(const struct task_struct *p)
2025 {
2026 return !list_empty(&p->scx.runnable_node);
2027 }
2028
2029 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2030 {
2031 lockdep_assert_rq_held(rq);
2032
2033 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2034 p->scx.runnable_at = jiffies;
2035 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2036 }
2037
2038 /*
2039 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2040 * appended to the runnable_list.
2041 */
2042 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2043 }
2044
2045 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2046 {
2047 list_del_init(&p->scx.runnable_node);
2048 if (reset_runnable_at)
2049 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2050 }
2051
2052 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2053 {
2054 int sticky_cpu = p->scx.sticky_cpu;
2055
2056 if (enq_flags & ENQUEUE_WAKEUP)
2057 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2058
2059 enq_flags |= rq->scx.extra_enq_flags;
2060
2061 if (sticky_cpu >= 0)
2062 p->scx.sticky_cpu = -1;
2063
2064 /*
2065 * Restoring a running task will be immediately followed by
2066 * set_next_task_scx() which expects the task to not be on the BPF
2067 * scheduler as tasks can only start running through local DSQs. Force
2068 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2069 */
2070 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2071 sticky_cpu = cpu_of(rq);
2072
2073 if (p->scx.flags & SCX_TASK_QUEUED) {
2074 WARN_ON_ONCE(!task_runnable(p));
2075 goto out;
2076 }
2077
2078 set_task_runnable(rq, p);
2079 p->scx.flags |= SCX_TASK_QUEUED;
2080 rq->scx.nr_running++;
2081 add_nr_running(rq, 1);
2082
2083 if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2084 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2085
2086 if (enq_flags & SCX_ENQ_WAKEUP)
2087 touch_core_sched(rq, p);
2088
2089 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2090 out:
2091 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2092 }
2093
2094 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2095 {
2096 unsigned long opss;
2097
2098 /* dequeue is always temporary, don't reset runnable_at */
2099 clr_task_runnable(p, false);
2100
2101 /* acquire ensures that we see the preceding updates on QUEUED */
2102 opss = atomic_long_read_acquire(&p->scx.ops_state);
2103
2104 switch (opss & SCX_OPSS_STATE_MASK) {
2105 case SCX_OPSS_NONE:
2106 break;
2107 case SCX_OPSS_QUEUEING:
2108 /*
2109 * QUEUEING is started and finished while holding @p's rq lock.
2110 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2111 */
2112 BUG();
2113 case SCX_OPSS_QUEUED:
2114 if (SCX_HAS_OP(dequeue))
2115 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2116
2117 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2118 SCX_OPSS_NONE))
2119 break;
2120 fallthrough;
2121 case SCX_OPSS_DISPATCHING:
2122 /*
2123 * If @p is being dispatched from the BPF scheduler to a DSQ,
2124 * wait for the transfer to complete so that @p doesn't get
2125 * added to its DSQ after dequeueing is complete.
2126 *
2127 * As we're waiting on DISPATCHING with the rq locked, the
2128 * dispatching side shouldn't try to lock the rq while
2129 * DISPATCHING is set. See dispatch_to_local_dsq().
2130 *
2131 * DISPATCHING shouldn't have qseq set and control can reach
2132 * here with NONE @opss from the above QUEUED case block.
2133 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2134 */
2135 wait_ops_state(p, SCX_OPSS_DISPATCHING);
2136 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2137 break;
2138 }
2139 }
2140
2141 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2142 {
2143 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2144 WARN_ON_ONCE(task_runnable(p));
2145 return true;
2146 }
2147
2148 ops_dequeue(p, deq_flags);
2149
2150 /*
2151 * A currently running task which is going off @rq first gets dequeued
2152 * and then stops running. As we want running <-> stopping transitions
2153 * to be contained within runnable <-> quiescent transitions, trigger
2154 * ->stopping() early here instead of in put_prev_task_scx().
2155 *
2156 * @p may go through multiple stopping <-> running transitions between
2157 * here and put_prev_task_scx() if task attribute changes occur while
2158 * balance_scx() leaves @rq unlocked. However, they don't contain any
2159 * information meaningful to the BPF scheduler and can be suppressed by
2160 * skipping the callbacks if the task is !QUEUED.
2161 */
2162 if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2163 update_curr_scx(rq);
2164 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
2165 }
2166
2167 if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2168 SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
2169
2170 if (deq_flags & SCX_DEQ_SLEEP)
2171 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2172 else
2173 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2174
2175 p->scx.flags &= ~SCX_TASK_QUEUED;
2176 rq->scx.nr_running--;
2177 sub_nr_running(rq, 1);
2178
2179 dispatch_dequeue(rq, p);
2180 return true;
2181 }
2182
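/*
 * sched_yield() handling. If ops.yield() is implemented, defer to the BPF
 * scheduler; otherwise simply expire the current slice so that another task
 * can be picked.
 */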
2183 static void yield_task_scx(struct rq *rq)
2184 {
2185 struct task_struct *p = rq->curr;
2186
2187 if (SCX_HAS_OP(yield))
2188 SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
2189 else
2190 p->scx.slice = 0;
2191 }
2192
2193 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2194 {
2195 struct task_struct *from = rq->curr;
2196
2197 if (SCX_HAS_OP(yield))
2198 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
2199 else
2200 return false;
2201 }
2202
2203 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2204 struct scx_dispatch_q *src_dsq,
2205 struct rq *dst_rq)
2206 {
2207 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2208
2209 /* @dsq is locked and @p is on @dst_rq */
2210 lockdep_assert_held(&src_dsq->lock);
2211 lockdep_assert_rq_held(dst_rq);
2212
2213 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2214
2215 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2216 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2217 else
2218 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2219
2220 dsq_mod_nr(dst_dsq, 1);
2221 p->scx.dsq = dst_dsq;
2222 }
2223
2224 #ifdef CONFIG_SMP
2225 /**
2226 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2227 * @p: task to move
2228 * @enq_flags: %SCX_ENQ_*
2229 * @src_rq: rq to move the task from, locked on entry, released on return
2230 * @dst_rq: rq to move the task into, locked on return
2231 *
2232 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2233 */
2234 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2235 struct rq *src_rq, struct rq *dst_rq)
2236 {
2237 lockdep_assert_rq_held(src_rq);
2238
2239 /* the following marks @p MIGRATING which excludes dequeue */
2240 deactivate_task(src_rq, p, 0);
2241 set_task_cpu(p, cpu_of(dst_rq));
2242 p->scx.sticky_cpu = cpu_of(dst_rq);
2243
2244 raw_spin_rq_unlock(src_rq);
2245 raw_spin_rq_lock(dst_rq);
2246
2247 /*
2248 * We want to pass scx-specific enq_flags but activate_task() will
2249 * truncate the upper 32 bits. As we own @rq, we can pass them through
2250 * @rq->scx.extra_enq_flags instead.
2251 */
2252 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2253 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2254 dst_rq->scx.extra_enq_flags = enq_flags;
2255 activate_task(dst_rq, p, 0);
2256 dst_rq->scx.extra_enq_flags = 0;
2257 }
2258
2259 /*
2260 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2261 * differences:
2262 *
2263 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2264 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2265 * this CPU?".
2266 *
2267 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2268 * must be allowed to finish on the CPU that it's currently on regardless of
2269 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2270 * BPF scheduler shouldn't attempt to migrate a task which has migration
2271 * disabled.
2272 *
2273 * - The BPF scheduler is bypassed while the rq is offline and we can always say
2274 * no to the BPF scheduler initiated migrations while offline.
2275 */
2276 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2277 bool trigger_error)
2278 {
2279 int cpu = cpu_of(rq);
2280
2281 /*
2282 * We don't require the BPF scheduler to avoid dispatching to offline
2283 * CPUs mostly for convenience but also because CPUs can go offline
2284 * between scx_bpf_dispatch() calls and here. Trigger error iff the
2285 * picked CPU is outside the allowed mask.
2286 */
2287 if (!task_allowed_on_cpu(p, cpu)) {
2288 if (trigger_error)
2289 scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
2290 cpu_of(rq), p->comm, p->pid);
2291 return false;
2292 }
2293
2294 if (unlikely(is_migration_disabled(p)))
2295 return false;
2296
2297 if (!scx_rq_online(rq))
2298 return false;
2299
2300 return true;
2301 }
2302
2303 /**
2304 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2305 * @p: target task
2306 * @dsq: locked DSQ @p is currently on
2307 * @src_rq: rq @p is currently on, stable with @dsq locked
2308 *
2309 * Called with @dsq locked but no rq's locked. We want to move @p to a different
2310 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2311 * required when transferring into a local DSQ. Even when transferring into a
2312 * non-local DSQ, it's better to use the same mechanism to protect against
2313 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2314 * @src_rq is locked, which e.g. scx_dump_task() depends on.
2315 *
2316 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2317 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2318 * this may race with dequeue, which can't drop the rq lock or fail, do a little
2319 * dancing from our side.
2320 *
2321 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2322 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2323 * would be cleared to -1. While other cpus may have updated it to different
2324 * values afterwards, as this operation can't be preempted or recurse, the
2325 * holding_cpu can never become this CPU again before we're done. Thus, we can
2326 * tell whether we lost to dequeue by testing whether the holding_cpu still
2327 * points to this CPU. See dispatch_dequeue() for the counterpart.
2328 *
2329 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2330 * still valid. %false if lost to dequeue.
2331 */
2332 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2333 struct scx_dispatch_q *dsq,
2334 struct rq *src_rq)
2335 {
2336 s32 cpu = raw_smp_processor_id();
2337
2338 lockdep_assert_held(&dsq->lock);
2339
2340 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2341 task_unlink_from_dsq(p, dsq);
2342 p->scx.holding_cpu = cpu;
2343
2344 raw_spin_unlock(&dsq->lock);
2345 raw_spin_rq_lock(src_rq);
2346
2347 /* task_rq couldn't have changed if we're still the holding cpu */
2348 return likely(p->scx.holding_cpu == cpu) &&
2349 !WARN_ON_ONCE(src_rq != task_rq(p));
2350 }
2351
2352 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2353 struct scx_dispatch_q *dsq, struct rq *src_rq)
2354 {
2355 raw_spin_rq_unlock(this_rq);
2356
2357 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2358 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2359 return true;
2360 } else {
2361 raw_spin_rq_unlock(src_rq);
2362 raw_spin_rq_lock(this_rq);
2363 return false;
2364 }
2365 }
2366 #else /* CONFIG_SMP */
2367 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2368 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
2369 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2370 #endif /* CONFIG_SMP */
2371
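/*
 * Try to move a task from @dsq to @rq's local DSQ. A task already on @rq can
 * be moved directly under @dsq->lock; a task on a remote rq goes through the
 * unlink-and-relock dance in consume_remote_task(). Returns %true if a task
 * was consumed.
 */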
2372 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
2373 {
2374 struct task_struct *p;
2375 retry:
2376 /*
2377 * The caller can't expect to successfully consume a task if the task's
2378 * addition to @dsq isn't guaranteed to be visible somehow. Test
2379 * @dsq->list without locking and skip if it seems empty.
2380 */
2381 if (list_empty(&dsq->list))
2382 return false;
2383
2384 raw_spin_lock(&dsq->lock);
2385
2386 nldsq_for_each_task(p, dsq) {
2387 struct rq *task_rq = task_rq(p);
2388
2389 if (rq == task_rq) {
2390 task_unlink_from_dsq(p, dsq);
2391 move_local_task_to_local_dsq(p, 0, dsq, rq);
2392 raw_spin_unlock(&dsq->lock);
2393 return true;
2394 }
2395
2396 if (task_can_run_on_remote_rq(p, rq, false)) {
2397 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2398 return true;
2399 goto retry;
2400 }
2401 }
2402
2403 raw_spin_unlock(&dsq->lock);
2404 return false;
2405 }
2406
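/* the global DSQ is split per NUMA node, consume from this rq's node */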
2407 static bool consume_global_dsq(struct rq *rq)
2408 {
2409 int node = cpu_to_node(cpu_of(rq));
2410
2411 return consume_dispatch_q(rq, global_dsqs[node]);
2412 }
2413
2414 /**
2415 * dispatch_to_local_dsq - Dispatch a task to a local dsq
2416 * @rq: current rq which is locked
2417 * @dst_dsq: destination DSQ
2418 * @p: task to dispatch
2419 * @enq_flags: %SCX_ENQ_*
2420 *
2421 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2422 * DSQ. This function performs all the synchronization dancing needed because
2423 * local DSQs are protected with rq locks.
2424 *
2425 * The caller must have exclusive ownership of @p (e.g. through
2426 * %SCX_OPSS_DISPATCHING).
2427 */
2428 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2429 struct task_struct *p, u64 enq_flags)
2430 {
2431 struct rq *src_rq = task_rq(p);
2432 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2433
2434 /*
2435 * We're synchronized against dequeue through DISPATCHING. As @p can't
2436 * be dequeued, its task_rq and cpus_allowed are stable too.
2437 *
2438 * If dispatching to @rq that @p is already on, no lock dancing needed.
2439 */
2440 if (rq == src_rq && rq == dst_rq) {
2441 dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2442 return;
2443 }
2444
2445 #ifdef CONFIG_SMP
2446 if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2447 dispatch_enqueue(find_global_dsq(p), p,
2448 enq_flags | SCX_ENQ_CLEAR_OPSS);
2449 return;
2450 }
2451
2452 /*
2453 * @p is on a possibly remote @src_rq which we need to lock to move the
2454 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2455 * on DISPATCHING, so we can't grab @src_rq lock while holding
2456 * DISPATCHING.
2457 *
2458 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2459 * we're moving from a DSQ and use the same mechanism - mark the task
2460 * under transfer with holding_cpu, release DISPATCHING and then follow
2461 * the same protocol. See unlink_dsq_and_lock_src_rq().
2462 */
2463 p->scx.holding_cpu = raw_smp_processor_id();
2464
2465 /* store_release ensures that dequeue sees the above */
2466 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2467
2468 /* switch to @src_rq lock */
2469 if (rq != src_rq) {
2470 raw_spin_rq_unlock(rq);
2471 raw_spin_rq_lock(src_rq);
2472 }
2473
2474 /* task_rq couldn't have changed if we're still the holding cpu */
2475 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2476 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2477 /*
2478 * If @p is staying on the same rq, there's no need to go
2479 * through the full deactivate/activate cycle. Optimize by
2480 * abbreviating move_remote_task_to_local_dsq().
2481 */
2482 if (src_rq == dst_rq) {
2483 p->scx.holding_cpu = -1;
2484 dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2485 } else {
2486 move_remote_task_to_local_dsq(p, enq_flags,
2487 src_rq, dst_rq);
2488 }
2489
2490 /* if the destination CPU is idle, wake it up */
2491 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2492 resched_curr(dst_rq);
2493 }
2494
2495 /* switch back to @rq lock */
2496 if (rq != dst_rq) {
2497 raw_spin_rq_unlock(dst_rq);
2498 raw_spin_rq_lock(rq);
2499 }
2500 #else /* CONFIG_SMP */
2501 BUG(); /* control cannot reach here on UP */
2502 #endif /* CONFIG_SMP */
2503 }
2504
2505 /**
2506 * finish_dispatch - Asynchronously finish dispatching a task
2507 * @rq: current rq which is locked
2508 * @p: task to finish dispatching
2509 * @qseq_at_dispatch: qseq when @p started getting dispatched
2510 * @dsq_id: destination DSQ ID
2511 * @enq_flags: %SCX_ENQ_*
2512 *
2513 * Dispatching to local DSQs may need to wait for queueing to complete or
2514 * require rq lock dancing. As we don't want to do either while inside
2515 * ops.dispatch() to avoid locking order inversion, we split dispatching into
2516 * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
2517 * task and its qseq. Once ops.dispatch() returns, this function is called to
2518 * finish up.
2519 *
2520 * There is no guarantee that @p is still valid for dispatching or even that it
2521 * was valid in the first place. Make sure that the task is still owned by the
2522 * BPF scheduler and claim the ownership before dispatching.
2523 */
2524 static void finish_dispatch(struct rq *rq, struct task_struct *p,
2525 unsigned long qseq_at_dispatch,
2526 u64 dsq_id, u64 enq_flags)
2527 {
2528 struct scx_dispatch_q *dsq;
2529 unsigned long opss;
2530
2531 touch_core_sched_dispatch(rq, p);
2532 retry:
2533 /*
2534 * No need for _acquire here. @p is accessed only after a successful
2535 * try_cmpxchg to DISPATCHING.
2536 */
2537 opss = atomic_long_read(&p->scx.ops_state);
2538
2539 switch (opss & SCX_OPSS_STATE_MASK) {
2540 case SCX_OPSS_DISPATCHING:
2541 case SCX_OPSS_NONE:
2542 /* someone else already got to it */
2543 return;
2544 case SCX_OPSS_QUEUED:
2545 /*
2546 * If qseq doesn't match, @p has gone through at least one
2547 * dispatch/dequeue and re-enqueue cycle between
2548 * scx_bpf_dispatch() and here and we have no claim on it.
2549 */
2550 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2551 return;
2552
2553 /*
2554 * While we know @p is accessible, we don't yet have a claim on
2555 * it - the BPF scheduler is allowed to dispatch tasks
2556 * spuriously and there can be a racing dequeue attempt. Let's
2557 * claim @p by atomically transitioning it from QUEUED to
2558 * DISPATCHING.
2559 */
2560 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2561 SCX_OPSS_DISPATCHING)))
2562 break;
2563 goto retry;
2564 case SCX_OPSS_QUEUEING:
2565 /*
2566 * do_enqueue_task() is in the process of transferring the task
2567 * to the BPF scheduler while holding @p's rq lock. As we aren't
2568 * holding any kernel or BPF resource that the enqueue path may
2569 * depend upon, it's safe to wait.
2570 */
2571 wait_ops_state(p, opss);
2572 goto retry;
2573 }
2574
2575 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2576
2577 dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
2578
2579 if (dsq->id == SCX_DSQ_LOCAL)
2580 dispatch_to_local_dsq(rq, dsq, p, enq_flags);
2581 else
2582 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2583 }
2584
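/*
 * Finish the dispatches buffered by scx_bpf_dispatch() on this CPU. Note that
 * finishing a dispatch to a local DSQ may drop and reacquire @rq's lock.
 */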
2585 static void flush_dispatch_buf(struct rq *rq)
2586 {
2587 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2588 u32 u;
2589
2590 for (u = 0; u < dspc->cursor; u++) {
2591 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2592
2593 finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
2594 ent->enq_flags);
2595 }
2596
2597 dspc->nr_tasks += dspc->cursor;
2598 dspc->cursor = 0;
2599 }
2600
2601 static int balance_one(struct rq *rq, struct task_struct *prev)
2602 {
2603 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2604 bool prev_on_scx = prev->sched_class == &ext_sched_class;
2605 int nr_loops = SCX_DSP_MAX_LOOPS;
2606
2607 lockdep_assert_rq_held(rq);
2608 rq->scx.flags |= SCX_RQ_IN_BALANCE;
2609 rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
2610
2611 if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
2612 unlikely(rq->scx.cpu_released)) {
2613 /*
2614 * If the previous sched_class for the current CPU was not SCX,
2615 * notify the BPF scheduler that it again has control of the
2616 * core. This callback complements ->cpu_release(), which is
2617 * emitted in scx_next_task_picked().
2618 */
2619 if (SCX_HAS_OP(cpu_acquire))
2620 SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
2621 rq->scx.cpu_released = false;
2622 }
2623
2624 if (prev_on_scx) {
2625 update_curr_scx(rq);
2626
2627 /*
2628 * If @prev is runnable & has slice left, it has priority and
2629 * fetching more just increases latency for the fetched tasks.
2630 * Tell pick_task_scx() to keep running @prev. If the BPF
2631 * scheduler wants to handle this explicitly, it should
2632 * implement ->cpu_release().
2633 *
2634 * See scx_ops_disable_workfn() for the explanation on the
2635 * bypassing test.
2636 */
2637 if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2638 prev->scx.slice && !scx_rq_bypassing(rq)) {
2639 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2640 goto has_tasks;
2641 }
2642 }
2643
2644 /* if there already are tasks to run, nothing to do */
2645 if (rq->scx.local_dsq.nr)
2646 goto has_tasks;
2647
2648 if (consume_global_dsq(rq))
2649 goto has_tasks;
2650
2651 if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
2652 goto no_tasks;
2653
2654 dspc->rq = rq;
2655
2656 /*
2657 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2658 * the local DSQ might still end up empty after a successful
2659 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2660 * produced some tasks, retry. The BPF scheduler may depend on this
2661 * looping behavior to simplify its implementation.
2662 */
2663 do {
2664 dspc->nr_tasks = 0;
2665
2666 SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
2667 prev_on_scx ? prev : NULL);
2668
2669 flush_dispatch_buf(rq);
2670
2671 if (rq->scx.local_dsq.nr)
2672 goto has_tasks;
2673 if (consume_global_dsq(rq))
2674 goto has_tasks;
2675
2676 /*
2677 * ops.dispatch() can trap us in this loop by repeatedly
2678 * dispatching ineligible tasks. Break out once in a while to
2679 * allow the watchdog to run. As IRQ can't be enabled in
2680 * balance(), we want to complete this scheduling cycle and then
2681 * start a new one. IOW, we want to call resched_curr() on the
2682 * next, most likely idle, task, not the current one. Use
2683 * scx_bpf_kick_cpu() for deferred kicking.
2684 */
2685 if (unlikely(!--nr_loops)) {
2686 scx_bpf_kick_cpu(cpu_of(rq), 0);
2687 break;
2688 }
2689 } while (dspc->nr_tasks);
2690
2691 no_tasks:
2692 /*
2693 * Didn't find another task to run. Keep running @prev unless
2694 * %SCX_OPS_ENQ_LAST is in effect.
2695 */
2696 if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2697 (!static_branch_unlikely(&scx_ops_enq_last) ||
2698 scx_rq_bypassing(rq))) {
2699 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2700 goto has_tasks;
2701 }
2702 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2703 return false;
2704
2705 has_tasks:
2706 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2707 return true;
2708 }
2709
2710 static int balance_scx(struct rq *rq, struct task_struct *prev,
2711 struct rq_flags *rf)
2712 {
2713 int ret;
2714
2715 rq_unpin_lock(rq, rf);
2716
2717 ret = balance_one(rq, prev);
2718
2719 #ifdef CONFIG_SCHED_SMT
2720 /*
2721 * When core-sched is enabled, this ops.balance() call will be followed
2722 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2723 * siblings too.
2724 */
2725 if (sched_core_enabled(rq)) {
2726 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2727 int scpu;
2728
2729 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2730 struct rq *srq = cpu_rq(scpu);
2731 struct task_struct *sprev = srq->curr;
2732
2733 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2734 update_rq_clock(srq);
2735 balance_one(srq, sprev);
2736 }
2737 }
2738 #endif
2739 rq_repin_lock(rq, rf);
2740
2741 return ret;
2742 }
2743
2744 static void process_ddsp_deferred_locals(struct rq *rq)
2745 {
2746 struct task_struct *p;
2747
2748 lockdep_assert_rq_held(rq);
2749
2750 /*
2751 * Now that @rq can be unlocked, execute the deferred enqueueing of
2752 * tasks directly dispatched to the local DSQs of other CPUs. See
2753 * direct_dispatch(). Keep popping from the head instead of using
2754 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
2755 * temporarily.
2756 */
2757 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2758 struct task_struct, scx.dsq_list.node))) {
2759 struct scx_dispatch_q *dsq;
2760
2761 list_del_init(&p->scx.dsq_list.node);
2762
2763 dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2764 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2765 dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2766 }
2767 }
2768
2769 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2770 {
2771 if (p->scx.flags & SCX_TASK_QUEUED) {
2772 /*
2773 * Core-sched might decide to execute @p before it is
2774 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2775 */
2776 ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2777 dispatch_dequeue(rq, p);
2778 }
2779
2780 p->se.exec_start = rq_clock_task(rq);
2781
2782 /* see dequeue_task_scx() on why we skip when !QUEUED */
2783 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2784 SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2785
2786 clr_task_runnable(p, true);
2787
2788 /*
2789 * @p is getting newly scheduled or got kicked after someone updated its
2790 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2791 */
2792 if ((p->scx.slice == SCX_SLICE_INF) !=
2793 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2794 if (p->scx.slice == SCX_SLICE_INF)
2795 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2796 else
2797 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2798
2799 sched_update_tick_dependency(rq);
2800
2801 /*
2802 * For now, let's refresh the load_avgs just when transitioning
2803 * in and out of nohz. In the future, we might want to add a
2804 * mechanism which calls the following periodically on
2805 * tick-stopped CPUs.
2806 */
2807 update_other_load_avgs(rq);
2808 }
2809 }
2810
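/* map the sched_class which preempted SCX to an ops.cpu_release() reason */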
2811 static enum scx_cpu_preempt_reason
2812 preempt_reason_from_class(const struct sched_class *class)
2813 {
2814 #ifdef CONFIG_SMP
2815 if (class == &stop_sched_class)
2816 return SCX_CPU_PREEMPT_STOP;
2817 #endif
2818 if (class == &dl_sched_class)
2819 return SCX_CPU_PREEMPT_DL;
2820 if (class == &rt_sched_class)
2821 return SCX_CPU_PREEMPT_RT;
2822 return SCX_CPU_PREEMPT_UNKNOWN;
2823 }
2824
2825 static void switch_class(struct rq *rq, struct task_struct *next)
2826 {
2827 const struct sched_class *next_class = next->sched_class;
2828
2829 #ifdef CONFIG_SMP
2830 /*
2831 * Pairs with the smp_load_acquire() issued by a CPU in
2832 * kick_cpus_irq_workfn() which is waiting for this CPU to perform a
2833 * resched.
2834 */
2835 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
2836 #endif
2837 if (!static_branch_unlikely(&scx_ops_cpu_preempt))
2838 return;
2839
2840 /*
2841 * The callback is conceptually meant to convey that the CPU is no
2842 * longer under the control of SCX. Therefore, don't invoke the callback
2843 * if the next class is below SCX (in which case the BPF scheduler has
2844 * actively decided not to schedule any tasks on the CPU).
2845 */
2846 if (sched_class_above(&ext_sched_class, next_class))
2847 return;
2848
2849 /*
2850 * At this point we know that SCX was preempted by a higher priority
2851 * sched_class, so invoke the ->cpu_release() callback if we have not
2852 * done so already. We only send the callback once between SCX being
2853 * preempted, and it regaining control of the CPU.
2854 *
2855 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2856 * next time that balance_scx() is invoked.
2857 */
2858 if (!rq->scx.cpu_released) {
2859 if (SCX_HAS_OP(cpu_release)) {
2860 struct scx_cpu_release_args args = {
2861 .reason = preempt_reason_from_class(next_class),
2862 .task = next,
2863 };
2864
2865 SCX_CALL_OP(SCX_KF_CPU_RELEASE,
2866 cpu_release, cpu_of(rq), &args);
2867 }
2868 rq->scx.cpu_released = true;
2869 }
2870 }
2871
2872 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
2873 struct task_struct *next)
2874 {
2875 update_curr_scx(rq);
2876
2877 /* see dequeue_task_scx() on why we skip when !QUEUED */
2878 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
2879 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
2880
2881 if (p->scx.flags & SCX_TASK_QUEUED) {
2882 set_task_runnable(rq, p);
2883
2884 /*
2885 * If @p has slice left and is being put, @p is getting
2886 * preempted by a higher priority scheduler class or core-sched
2887 * forcing a different task. Leave it at the head of the local
2888 * DSQ.
2889 */
2890 if (p->scx.slice && !scx_rq_bypassing(rq)) {
2891 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
2892 return;
2893 }
2894
2895 /*
2896 * If @p is runnable but we're about to enter a lower
2897 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
2898 * ops.enqueue() that @p is the only one available for this cpu,
2899 * which should trigger an explicit follow-up scheduling event.
2900 */
2901 if (sched_class_above(&ext_sched_class, next->sched_class)) {
2902 WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
2903 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
2904 } else {
2905 do_enqueue_task(rq, p, 0, -1);
2906 }
2907 }
2908
2909 if (next && next->sched_class != &ext_sched_class)
2910 switch_class(rq, next);
2911 }
2912
2913 static struct task_struct *first_local_task(struct rq *rq)
2914 {
2915 return list_first_entry_or_null(&rq->scx.local_dsq.list,
2916 struct task_struct, scx.dsq_list.node);
2917 }
2918
2919 static struct task_struct *pick_task_scx(struct rq *rq)
2920 {
2921 struct task_struct *prev = rq->curr;
2922 struct task_struct *p;
2923
2924 /*
2925 * If balance_scx() is telling us to keep running @prev, replenish slice
2926 * if necessary and keep running @prev. Otherwise, pop the first one
2927 * from the local DSQ.
2928 *
2929 * WORKAROUND:
2930 *
2931 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
2932 * have gone through balance_scx(). Unfortunately, there currently is a
2933 * bug where fair could say yes on balance() but no on pick_task(),
2934 * which then ends up calling pick_task_scx() without preceding
2935 * balance_scx().
2936 *
2937 * For now, ignore cases where @prev is not on SCX. This isn't great and
2938 * can theoretically lead to stalls. However, for switch_all cases, this
2939 * happens only while a BPF scheduler is being loaded or unloaded, and,
2940 * for partial cases, fair will likely keep triggering this CPU.
2941 *
2942 * Once fair is fixed, restore WARN_ON_ONCE().
2943 */
2944 if ((rq->scx.flags & SCX_RQ_BAL_KEEP) &&
2945 prev->sched_class == &ext_sched_class) {
2946 p = prev;
2947 if (!p->scx.slice)
2948 p->scx.slice = SCX_SLICE_DFL;
2949 } else {
2950 p = first_local_task(rq);
2951 if (!p)
2952 return NULL;
2953
2954 if (unlikely(!p->scx.slice)) {
2955 if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
2956 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_task_scx()\n",
2957 p->comm, p->pid);
2958 scx_warned_zero_slice = true;
2959 }
2960 p->scx.slice = SCX_SLICE_DFL;
2961 }
2962 }
2963
2964 return p;
2965 }
2966
2967 #ifdef CONFIG_SCHED_CORE
2968 /**
2969 * scx_prio_less - Task ordering for core-sched
2970 * @a: task A
2971 * @b: task B
2972 *
2973 * Core-sched is implemented as an additional scheduling layer on top of the
2974 * usual sched_class'es and needs to find out the expected task ordering. For
2975 * SCX, core-sched calls this function to interrogate the task ordering.
2976 *
2977 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
2978 * to implement the default task ordering. The older the timestamp, the higher
2979 * priority the task - the global FIFO ordering matching the default scheduling
2980 * behavior.
2981 *
2982 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
2983 * implement FIFO ordering within each local DSQ. See pick_task_scx().
2984 */
2985 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
2986 bool in_fi)
2987 {
2988 /*
2989 * The const qualifiers are dropped from task_struct pointers when
2990 * calling ops.core_sched_before(). Accesses are controlled by the
2991 * verifier.
2992 */
2993 if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
2994 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
2995 (struct task_struct *)a,
2996 (struct task_struct *)b);
2997 else
2998 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
2999 }
3000 #endif /* CONFIG_SCHED_CORE */
3001
3002 #ifdef CONFIG_SMP
3003
3004 static bool test_and_clear_cpu_idle(int cpu)
3005 {
3006 #ifdef CONFIG_SCHED_SMT
3007 /*
3008 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3009 * cluster is not wholly idle either way. This also prevents
3010 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3011 */
3012 if (sched_smt_active()) {
3013 const struct cpumask *smt = cpu_smt_mask(cpu);
3014
3015 /*
3016 * If offline, @cpu is not its own sibling and
3017 * scx_pick_idle_cpu() can get caught in an infinite loop as
3018 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3019 * is eventually cleared.
3020 */
3021 if (cpumask_intersects(smt, idle_masks.smt))
3022 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3023 else if (cpumask_test_cpu(cpu, idle_masks.smt))
3024 __cpumask_clear_cpu(cpu, idle_masks.smt);
3025 }
3026 #endif
3027 return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3028 }
3029
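/*
 * Pick and claim an idle CPU from @cpus_allowed, preferring wholly idle SMT
 * cores. With %SCX_PICK_IDLE_CORE, only wholly idle cores are eligible.
 * Returns the claimed CPU or -EBUSY if none is available.
 */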
3030 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3031 {
3032 int cpu;
3033
3034 retry:
3035 if (sched_smt_active()) {
3036 cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3037 if (cpu < nr_cpu_ids)
3038 goto found;
3039
3040 if (flags & SCX_PICK_IDLE_CORE)
3041 return -EBUSY;
3042 }
3043
3044 cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3045 if (cpu >= nr_cpu_ids)
3046 return -EBUSY;
3047
3048 found:
3049 if (test_and_clear_cpu_idle(cpu))
3050 return cpu;
3051 else
3052 goto retry;
3053 }
3054
3055 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3056 u64 wake_flags, bool *found)
3057 {
3058 s32 cpu;
3059
3060 *found = false;
3061
3062 if (!static_branch_likely(&scx_builtin_idle_enabled)) {
3063 scx_ops_error("built-in idle tracking is disabled");
3064 return prev_cpu;
3065 }
3066
3067 /*
3068 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
3069 * underutilized, wake up @p to the local DSQ of the waker. Checking
3070 * only for an empty local DSQ is insufficient as it could give the
3071 * wakee an unfair advantage when the system is oversaturated.
3072 * Checking only for the presence of idle CPUs is also insufficient as
3073 * the local DSQ of the waker could have tasks piled up on it even if
3074 * there is an idle core elsewhere on the system.
3075 */
3076 cpu = smp_processor_id();
3077 if ((wake_flags & SCX_WAKE_SYNC) &&
3078 !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
3079 cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3080 if (cpumask_test_cpu(cpu, p->cpus_ptr))
3081 goto cpu_found;
3082 }
3083
3084 /*
3085 * If the CPU has SMT, any wholly idle CPU is likely a better pick than
3086 * partially idle @prev_cpu.
3087 */
3088 if (sched_smt_active()) {
3089 if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3090 test_and_clear_cpu_idle(prev_cpu)) {
3091 cpu = prev_cpu;
3092 goto cpu_found;
3093 }
3094
3095 cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3096 if (cpu >= 0)
3097 goto cpu_found;
3098 }
3099
3100 if (test_and_clear_cpu_idle(prev_cpu)) {
3101 cpu = prev_cpu;
3102 goto cpu_found;
3103 }
3104
3105 cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3106 if (cpu >= 0)
3107 goto cpu_found;
3108
3109 return prev_cpu;
3110
3111 cpu_found:
3112 *found = true;
3113 return cpu;
3114 }
3115
3116 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3117 {
3118 /*
3119 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3120 * can be a good migration opportunity with low cache and memory
3121 * footprint. Returning a CPU different than @prev_cpu triggers
3122 * immediate rq migration. However, for SCX, as the current rq
3123 * association doesn't dictate where the task is going to run, this
3124 * doesn't fit well. If necessary, we can later add a dedicated method
3125 * which can decide to preempt self to force it through the regular
3126 * scheduling path.
3127 */
3128 if (unlikely(wake_flags & WF_EXEC))
3129 return prev_cpu;
3130
3131 if (SCX_HAS_OP(select_cpu)) {
3132 s32 cpu;
3133 struct task_struct **ddsp_taskp;
3134
3135 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3136 WARN_ON_ONCE(*ddsp_taskp);
3137 *ddsp_taskp = p;
3138
3139 cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3140 select_cpu, p, prev_cpu, wake_flags);
3141 *ddsp_taskp = NULL;
3142 if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3143 return cpu;
3144 else
3145 return prev_cpu;
3146 } else {
3147 bool found;
3148 s32 cpu;
3149
3150 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3151 if (found) {
3152 p->scx.slice = SCX_SLICE_DFL;
3153 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3154 }
3155 return cpu;
3156 }
3157 }
3158
3159 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3160 {
3161 run_deferred(rq);
3162 }
3163
3164 static void set_cpus_allowed_scx(struct task_struct *p,
3165 struct affinity_context *ac)
3166 {
3167 set_cpus_allowed_common(p, ac);
3168
3169 /*
3170 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3171 * differ from the configured one in @p->cpus_mask. Always tell the BPF
3172 * scheduler the effective one.
3173 *
3174 * Fine-grained memory write control is enforced by BPF making the const
3175 * designation pointless. Cast it away when calling the operation.
3176 */
3177 if (SCX_HAS_OP(set_cpumask))
3178 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3179 (struct cpumask *)p->cpus_ptr);
3180 }
3181
3182 static void reset_idle_masks(void)
3183 {
3184 /*
3185 * Consider all online cpus idle. Should converge to the actual state
3186 * quickly.
3187 */
3188 cpumask_copy(idle_masks.cpu, cpu_online_mask);
3189 cpumask_copy(idle_masks.smt, cpu_online_mask);
3190 }
3191
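/*
 * Called when @rq's CPU goes idle or busy. Forwards the transition to
 * ops.update_idle() and, unless built-in idle tracking is disabled, keeps
 * idle_masks in sync.
 */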
3192 void __scx_update_idle(struct rq *rq, bool idle)
3193 {
3194 int cpu = cpu_of(rq);
3195
3196 if (SCX_HAS_OP(update_idle)) {
3197 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3198 if (!static_branch_unlikely(&scx_builtin_idle_enabled))
3199 return;
3200 }
3201
3202 if (idle)
3203 cpumask_set_cpu(cpu, idle_masks.cpu);
3204 else
3205 cpumask_clear_cpu(cpu, idle_masks.cpu);
3206
3207 #ifdef CONFIG_SCHED_SMT
3208 if (sched_smt_active()) {
3209 const struct cpumask *smt = cpu_smt_mask(cpu);
3210
3211 if (idle) {
3212 /*
3213 * idle_masks.smt handling is racy but that's fine as
3214 * it's only for optimization and self-correcting.
3215 */
3216 for_each_cpu(cpu, smt) {
3217 if (!cpumask_test_cpu(cpu, idle_masks.cpu))
3218 return;
3219 }
3220 cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3221 } else {
3222 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3223 }
3224 }
3225 #endif
3226 }
3227
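/*
 * A CPU is entering or leaving the online state. Notify the matching
 * ops.cpu_online()/ops.cpu_offline() callback if implemented; otherwise exit
 * the scheduler with %SCX_ECODE_ACT_RESTART to suggest a reload.
 */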
3228 static void handle_hotplug(struct rq *rq, bool online)
3229 {
3230 int cpu = cpu_of(rq);
3231
3232 atomic_long_inc(&scx_hotplug_seq);
3233
3234 if (online && SCX_HAS_OP(cpu_online))
3235 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3236 else if (!online && SCX_HAS_OP(cpu_offline))
3237 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3238 else
3239 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3240 "cpu %d going %s, exiting scheduler", cpu,
3241 online ? "online" : "offline");
3242 }
3243
3244 void scx_rq_activate(struct rq *rq)
3245 {
3246 handle_hotplug(rq, true);
3247 }
3248
3249 void scx_rq_deactivate(struct rq *rq)
3250 {
3251 handle_hotplug(rq, false);
3252 }
3253
3254 static void rq_online_scx(struct rq *rq)
3255 {
3256 rq->scx.flags |= SCX_RQ_ONLINE;
3257 }
3258
3259 static void rq_offline_scx(struct rq *rq)
3260 {
3261 rq->scx.flags &= ~SCX_RQ_ONLINE;
3262 }
3263
3264 #else /* CONFIG_SMP */
3265
3266 static bool test_and_clear_cpu_idle(int cpu) { return false; }
3267 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3268 static void reset_idle_masks(void) {}
3269
3270 #endif /* CONFIG_SMP */
3271
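/*
 * Walk @rq->scx.runnable_list and abort the BPF scheduler with
 * %SCX_EXIT_ERROR_STALL if any task has been runnable longer than
 * scx_watchdog_timeout without getting to run.
 */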
3272 static bool check_rq_for_timeouts(struct rq *rq)
3273 {
3274 struct task_struct *p;
3275 struct rq_flags rf;
3276 bool timed_out = false;
3277
3278 rq_lock_irqsave(rq, &rf);
3279 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3280 unsigned long last_runnable = p->scx.runnable_at;
3281
3282 if (unlikely(time_after(jiffies,
3283 last_runnable + scx_watchdog_timeout))) {
3284 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3285
3286 scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3287 "%s[%d] failed to run for %u.%03us",
3288 p->comm, p->pid,
3289 dur_ms / 1000, dur_ms % 1000);
3290 timed_out = true;
3291 break;
3292 }
3293 }
3294 rq_unlock_irqrestore(rq, &rf);
3295
3296 return timed_out;
3297 }
3298
3299 static void scx_watchdog_workfn(struct work_struct *work)
3300 {
3301 int cpu;
3302
3303 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3304
3305 for_each_online_cpu(cpu) {
3306 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3307 break;
3308
3309 cond_resched();
3310 }
3311 queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3312 scx_watchdog_timeout / 2);
3313 }
3314
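/*
 * Per-tick hook. Verifies that the watchdog itself has checked in recently,
 * erroring out with a stall if it hasn't, and updates the other sched
 * classes' load averages via update_other_load_avgs().
 */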
void scx_tick(struct rq *rq)
{
	unsigned long last_check;

	if (!scx_enabled())
		return;

	last_check = READ_ONCE(scx_watchdog_timestamp);
	if (unlikely(time_after(jiffies,
				last_check + READ_ONCE(scx_watchdog_timeout)))) {
		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);

		scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
				   "watchdog failed to check in for %u.%03us",
				   dur_ms / 1000, dur_ms % 1000);
	}

	update_other_load_avgs(rq);
}

static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
{
	update_curr_scx(rq);

	/*
	 * While disabling, always resched and refresh core-sched timestamp as
	 * we can't trust the slice management or ops.core_sched_before().
	 */
	if (scx_rq_bypassing(rq)) {
		curr->scx.slice = 0;
		touch_core_sched(rq, curr);
	} else if (SCX_HAS_OP(tick)) {
		SCX_CALL_OP(SCX_KF_REST, tick, curr);
	}

	if (!curr->scx.slice)
		resched_curr(rq);
}

#ifdef CONFIG_EXT_GROUP_SCHED
static struct cgroup *tg_cgrp(struct task_group *tg)
{
	/*
	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
	 * root cgroup.
	 */
	if (tg && tg->css.cgroup)
		return tg->css.cgroup;
	else
		return &cgrp_dfl_root.cgrp;
}

#define SCX_INIT_TASK_ARGS_CGROUP(tg)	.cgroup = tg_cgrp(tg),

#else	/* CONFIG_EXT_GROUP_SCHED */

#define SCX_INIT_TASK_ARGS_CGROUP(tg)

#endif	/* CONFIG_EXT_GROUP_SCHED */

static enum scx_task_state scx_get_task_state(const struct task_struct *p)
{
	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
}

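/*
 * Transition @p to @state, warning on invalid transitions. The expected flow
 * is NONE -> INIT -> READY <-> ENABLED, and any state may drop back to NONE
 * when the task leaves SCX.
 */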
static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
{
	enum scx_task_state prev_state = scx_get_task_state(p);
	bool warn = false;

	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));

	switch (state) {
	case SCX_TASK_NONE:
		break;
	case SCX_TASK_INIT:
		warn = prev_state != SCX_TASK_NONE;
		break;
	case SCX_TASK_READY:
		warn = prev_state == SCX_TASK_NONE;
		break;
	case SCX_TASK_ENABLED:
		warn = prev_state != SCX_TASK_READY;
		break;
	default:
		warn = true;
		return;
	}

	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
		  prev_state, state, p->comm, p->pid);

	p->scx.flags &= ~SCX_TASK_STATE_MASK;
	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
}

static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
{
	int ret;

	p->scx.disallow = false;

	if (SCX_HAS_OP(init_task)) {
		struct scx_init_task_args args = {
			SCX_INIT_TASK_ARGS_CGROUP(tg)
			.fork = fork,
		};

		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
		if (unlikely(ret)) {
			ret = ops_sanitize_err("init_task", ret);
			return ret;
		}
	}

	scx_set_task_state(p, SCX_TASK_INIT);

	if (p->scx.disallow) {
		if (!fork) {
			struct rq *rq;
			struct rq_flags rf;

			rq = task_rq_lock(p, &rf);

			/*
			 * We're in the load path and @p->policy will be applied
			 * right after. Reverting @p->policy here and rejecting
			 * %SCHED_EXT transitions from scx_check_setscheduler()
			 * guarantees that if ops.init_task() sets @p->disallow,
			 * @p can never be in SCX.
			 */
			if (p->policy == SCHED_EXT) {
				p->policy = SCHED_NORMAL;
				atomic_long_inc(&scx_nr_rejected);
			}

			task_rq_unlock(rq, p, &rf);
		} else if (p->policy == SCHED_EXT) {
			scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
				      p->comm, p->pid);
		}
	}

	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
	return 0;
}

static void scx_ops_enable_task(struct task_struct *p)
{
	u32 weight;

	lockdep_assert_rq_held(task_rq(p));

	/*
	 * Set the weight before calling ops.enable() so that the scheduler
	 * doesn't see a stale value if they inspect the task struct.
	 */
	if (task_has_idle_policy(p))
		weight = WEIGHT_IDLEPRIO;
	else
		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];

	p->scx.weight = sched_weight_to_cgroup(weight);

	if (SCX_HAS_OP(enable))
		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
	scx_set_task_state(p, SCX_TASK_ENABLED);

	if (SCX_HAS_OP(set_weight))
		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
}

static void scx_ops_disable_task(struct task_struct *p)
{
	lockdep_assert_rq_held(task_rq(p));
	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);

	if (SCX_HAS_OP(disable))
		SCX_CALL_OP(SCX_KF_REST, disable, p);
	scx_set_task_state(p, SCX_TASK_READY);
}

static void scx_ops_exit_task(struct task_struct *p)
{
	struct scx_exit_task_args args = {
		.cancelled = false,
	};

	lockdep_assert_rq_held(task_rq(p));

	switch (scx_get_task_state(p)) {
	case SCX_TASK_NONE:
		return;
	case SCX_TASK_INIT:
		args.cancelled = true;
		break;
	case SCX_TASK_READY:
		break;
	case SCX_TASK_ENABLED:
		scx_ops_disable_task(p);
		break;
	default:
		WARN_ON_ONCE(true);
		return;
	}

	if (SCX_HAS_OP(exit_task))
		SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
	scx_set_task_state(p, SCX_TASK_NONE);
}

void init_scx_entity(struct sched_ext_entity *scx)
{
	/*
	 * init_idle() calls this function again after fork sequence is
	 * complete. Don't touch ->tasks_node as it's already linked.
	 */
	memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));

	INIT_LIST_HEAD(&scx->dsq_list.node);
	RB_CLEAR_NODE(&scx->dsq_priq);
	scx->sticky_cpu = -1;
	scx->holding_cpu = -1;
	INIT_LIST_HEAD(&scx->runnable_node);
	scx->runnable_at = jiffies;
	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
	scx->slice = SCX_SLICE_DFL;
}

void scx_pre_fork(struct task_struct *p)
{
	/*
	 * BPF scheduler enable/disable paths want to be able to iterate and
	 * update all tasks which can become complex when racing forks. As
	 * enable/disable are very cold paths, let's use a percpu_rwsem to
	 * exclude forks.
	 */
	percpu_down_read(&scx_fork_rwsem);
}

int scx_fork(struct task_struct *p)
{
	percpu_rwsem_assert_held(&scx_fork_rwsem);

	if (scx_ops_init_task_enabled)
		return scx_ops_init_task(p, task_group(p), true);
	else
		return 0;
}

void scx_post_fork(struct task_struct *p)
{
	if (scx_ops_init_task_enabled) {
		scx_set_task_state(p, SCX_TASK_READY);

		/*
		 * Enable the task immediately if it's running on sched_ext.
		 * Otherwise, it'll be enabled in switching_to_scx() if and
		 * when it's ever configured to run with a SCHED_EXT policy.
		 */
		if (p->sched_class == &ext_sched_class) {
			struct rq_flags rf;
			struct rq *rq;

			rq = task_rq_lock(p, &rf);
			scx_ops_enable_task(p);
			task_rq_unlock(rq, p, &rf);
		}
	}

	spin_lock_irq(&scx_tasks_lock);
	list_add_tail(&p->scx.tasks_node, &scx_tasks);
	spin_unlock_irq(&scx_tasks_lock);

	percpu_up_read(&scx_fork_rwsem);
}

void scx_cancel_fork(struct task_struct *p)
{
	if (scx_enabled()) {
		struct rq *rq;
		struct rq_flags rf;

		rq = task_rq_lock(p, &rf);
		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
		scx_ops_exit_task(p);
		task_rq_unlock(rq, p, &rf);
	}

	percpu_up_read(&scx_fork_rwsem);
}

void sched_ext_free(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&scx_tasks_lock, flags);
	list_del_init(&p->scx.tasks_node);
	spin_unlock_irqrestore(&scx_tasks_lock, flags);

	/*
	 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
	 * ENABLED transitions can't race us. Disable ops for @p.
	 */
	if (scx_get_task_state(p) != SCX_TASK_NONE) {
		struct rq_flags rf;
		struct rq *rq;

		rq = task_rq_lock(p, &rf);
		scx_ops_exit_task(p);
		task_rq_unlock(rq, p, &rf);
	}
}

static void reweight_task_scx(struct rq *rq, struct task_struct *p,
			      const struct load_weight *lw)
{
	lockdep_assert_rq_held(task_rq(p));

	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
	if (SCX_HAS_OP(set_weight))
		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
}

static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
{
}

static void switching_to_scx(struct rq *rq, struct task_struct *p)
{
	scx_ops_enable_task(p);

	/*
	 * set_cpus_allowed_scx() is not called while @p is associated with a
	 * different scheduler class. Keep the BPF scheduler up-to-date.
	 */
	if (SCX_HAS_OP(set_cpumask))
		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
				 (struct cpumask *)p->cpus_ptr);
}

static void switched_from_scx(struct rq *rq, struct task_struct *p)
{
	scx_ops_disable_task(p);
}

static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
static void switched_to_scx(struct rq *rq, struct task_struct *p) {}

int scx_check_setscheduler(struct task_struct *p, int policy)
{
	lockdep_assert_rq_held(task_rq(p));

	/* if disallow, reject transitioning into SCX */
	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
	    p->policy != policy && policy == SCHED_EXT)
		return -EACCES;

	return 0;
}

#ifdef CONFIG_NO_HZ_FULL
bool scx_can_stop_tick(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	if (scx_rq_bypassing(rq))
		return false;

	if (p->sched_class != &ext_sched_class)
		return true;

	/*
	 * @rq can dispatch from different DSQs, so we can't tell whether it
	 * needs the tick or not by looking at nr_running. Allow stopping ticks
	 * iff the BPF scheduler indicated so. See set_next_task_scx().
	 */
	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
}
#endif

#ifdef CONFIG_EXT_GROUP_SCHED

DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
static bool scx_cgroup_enabled;
static bool cgroup_warned_missing_weight;
static bool cgroup_warned_missing_idle;

static void scx_cgroup_warn_missing_weight(struct task_group *tg)
{
	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
	    cgroup_warned_missing_weight)
		return;

	if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
		return;

	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
		scx_ops.name);
	cgroup_warned_missing_weight = true;
}

static void scx_cgroup_warn_missing_idle(struct task_group *tg)
{
	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
		return;

	if (!tg->idle)
		return;

	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
		scx_ops.name);
	cgroup_warned_missing_idle = true;
}

int scx_tg_online(struct task_group *tg)
{
	int ret = 0;

	WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));

	percpu_down_read(&scx_cgroup_rwsem);

	scx_cgroup_warn_missing_weight(tg);

	if (scx_cgroup_enabled) {
		if (SCX_HAS_OP(cgroup_init)) {
			struct scx_cgroup_init_args args =
				{ .weight = tg->scx_weight };

			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
					      tg->css.cgroup, &args);
			if (ret)
				ret = ops_sanitize_err("cgroup_init", ret);
		}
		if (ret == 0)
			tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
	} else {
		tg->scx_flags |= SCX_TG_ONLINE;
	}

	percpu_up_read(&scx_cgroup_rwsem);
	return ret;
}

void scx_tg_offline(struct task_group *tg)
{
	WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));

	percpu_down_read(&scx_cgroup_rwsem);

	if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
	tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);

	percpu_up_read(&scx_cgroup_rwsem);
}

int scx_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct task_struct *p;
	int ret;

	/* released in scx_finish/cancel_attach() */
	percpu_down_read(&scx_cgroup_rwsem);

	if (!scx_cgroup_enabled)
		return 0;

	cgroup_taskset_for_each(p, css, tset) {
		struct cgroup *from = tg_cgrp(task_group(p));
		struct cgroup *to = tg_cgrp(css_tg(css));

		WARN_ON_ONCE(p->scx.cgrp_moving_from);

		/*
		 * sched_move_task() omits identity migrations. Let's match the
		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
		 * always match one-to-one.
		 */
		if (from == to)
			continue;

		if (SCX_HAS_OP(cgroup_prep_move)) {
			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
					      p, from, css->cgroup);
			if (ret)
				goto err;
		}

		p->scx.cgrp_moving_from = from;
	}

	return 0;

err:
	cgroup_taskset_for_each(p, css, tset) {
		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
				    p->scx.cgrp_moving_from, css->cgroup);
		p->scx.cgrp_moving_from = NULL;
	}

	percpu_up_read(&scx_cgroup_rwsem);
	return ops_sanitize_err("cgroup_prep_move", ret);
}

void scx_move_task(struct task_struct *p)
{
	if (!scx_cgroup_enabled)
		return;

	/*
	 * We're called from sched_move_task() which handles both cgroup and
	 * autogroup moves. Ignore the latter.
	 *
	 * Also ignore exiting tasks, because in the exit path tasks transition
	 * from the autogroup to the root group, so task_group_is_autogroup()
	 * alone isn't able to catch exiting autogroup tasks. This is safe for
	 * cgroup_move(), because cgroup migrations never happen for PF_EXITING
	 * tasks.
	 */
	if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
		return;

	/*
	 * @p must have ops.cgroup_prep_move() called on it and thus
	 * cgrp_moving_from set.
	 */
	if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
		SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
				 p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
	p->scx.cgrp_moving_from = NULL;
}

void scx_cgroup_finish_attach(void)
{
	percpu_up_read(&scx_cgroup_rwsem);
}

void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct task_struct *p;

	if (!scx_cgroup_enabled)
		goto out_unlock;

	cgroup_taskset_for_each(p, css, tset) {
		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
				    p->scx.cgrp_moving_from, css->cgroup);
		p->scx.cgrp_moving_from = NULL;
	}
out_unlock:
	percpu_up_read(&scx_cgroup_rwsem);
}

void scx_group_set_weight(struct task_group *tg, unsigned long weight)
{
	percpu_down_read(&scx_cgroup_rwsem);

	if (scx_cgroup_enabled && tg->scx_weight != weight) {
		if (SCX_HAS_OP(cgroup_set_weight))
			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
				    tg_cgrp(tg), weight);
		tg->scx_weight = weight;
	}

	percpu_up_read(&scx_cgroup_rwsem);
}

void scx_group_set_idle(struct task_group *tg, bool idle)
{
	percpu_down_read(&scx_cgroup_rwsem);
	scx_cgroup_warn_missing_idle(tg);
	percpu_up_read(&scx_cgroup_rwsem);
}

static void scx_cgroup_lock(void)
{
	percpu_down_write(&scx_cgroup_rwsem);
}

static void scx_cgroup_unlock(void)
{
	percpu_up_write(&scx_cgroup_rwsem);
}

#else	/* CONFIG_EXT_GROUP_SCHED */

static inline void scx_cgroup_lock(void) {}
static inline void scx_cgroup_unlock(void) {}

#endif	/* CONFIG_EXT_GROUP_SCHED */

/*
 * Omitted operations:
 *
 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
 *   isn't tied to the CPU at that point. Preemption is implemented by resetting
 *   the victim task's slice to 0 and triggering reschedule on the target CPU.
 *
 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
 *
 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
 *   their current sched_class. Call them directly from sched core instead.
 */
DEFINE_SCHED_CLASS(ext) = {
	.enqueue_task		= enqueue_task_scx,
	.dequeue_task		= dequeue_task_scx,
	.yield_task		= yield_task_scx,
	.yield_to_task		= yield_to_task_scx,

	.wakeup_preempt		= wakeup_preempt_scx,

	.balance		= balance_scx,
	.pick_task		= pick_task_scx,

	.put_prev_task		= put_prev_task_scx,
	.set_next_task		= set_next_task_scx,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_scx,
	.task_woken		= task_woken_scx,
	.set_cpus_allowed	= set_cpus_allowed_scx,

	.rq_online		= rq_online_scx,
	.rq_offline		= rq_offline_scx,
#endif

	.task_tick		= task_tick_scx,

	.switching_to		= switching_to_scx,
	.switched_from		= switched_from_scx,
	.switched_to		= switched_to_scx,
	.reweight_task		= reweight_task_scx,
	.prio_changed		= prio_changed_scx,

	.update_curr		= update_curr_scx,

#ifdef CONFIG_UCLAMP_TASK
	.uclamp_enabled		= 1,
#endif
};

static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
{
	memset(dsq, 0, sizeof(*dsq));

	raw_spin_lock_init(&dsq->lock);
	INIT_LIST_HEAD(&dsq->list);
	dsq->id = dsq_id;
}

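/*
 * Allocate a user DSQ on @node and add it to dsq_hash. IDs with
 * %SCX_DSQ_FLAG_BUILTIN set are rejected as the builtin DSQs are owned by
 * the core.
 */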
static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
{
	struct scx_dispatch_q *dsq;
	int ret;

	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
		return ERR_PTR(-EINVAL);

	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
	if (!dsq)
		return ERR_PTR(-ENOMEM);

	init_dsq(dsq, dsq_id);

	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
				     dsq_hash_params);
	if (ret) {
		kfree(dsq);
		return ERR_PTR(ret);
	}
	return dsq;
}

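/*
 * DSQ freeing is bounced through an irq_work so that destroy_dsq() can be
 * called from any context. See the comment in destroy_dsq().
 */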
static void free_dsq_irq_workfn(struct irq_work *irq_work)
{
	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
	struct scx_dispatch_q *dsq, *tmp_dsq;

	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
		kfree_rcu(dsq, rcu);
}

static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);

static void destroy_dsq(u64 dsq_id)
{
	struct scx_dispatch_q *dsq;
	unsigned long flags;

	rcu_read_lock();

	dsq = find_user_dsq(dsq_id);
	if (!dsq)
		goto out_unlock_rcu;

	raw_spin_lock_irqsave(&dsq->lock, flags);

	if (dsq->nr) {
		scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
			      dsq->id, dsq->nr);
		goto out_unlock_dsq;
	}

	if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
		goto out_unlock_dsq;

	/*
	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
	 * queueing more tasks. As this function can be called from anywhere,
	 * freeing is bounced through an irq work to avoid nesting RCU
	 * operations inside scheduler locks.
	 */
	dsq->id = SCX_DSQ_INVALID;
	llist_add(&dsq->free_node, &dsqs_to_free);
	irq_work_queue(&free_dsq_irq_work);

out_unlock_dsq:
	raw_spin_unlock_irqrestore(&dsq->lock, flags);
out_unlock_rcu:
	rcu_read_unlock();
}

#ifdef CONFIG_EXT_GROUP_SCHED
static void scx_cgroup_exit(void)
{
	struct cgroup_subsys_state *css;

	percpu_rwsem_assert_held(&scx_cgroup_rwsem);

	WARN_ON_ONCE(!scx_cgroup_enabled);
	scx_cgroup_enabled = false;

	/*
	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
	 * cgroups and exit all the inited ones, all online cgroups are exited.
	 */
	rcu_read_lock();
	css_for_each_descendant_post(css, &root_task_group.css) {
		struct task_group *tg = css_tg(css);

		if (!(tg->scx_flags & SCX_TG_INITED))
			continue;
		tg->scx_flags &= ~SCX_TG_INITED;

		if (!scx_ops.cgroup_exit)
			continue;

		if (WARN_ON_ONCE(!css_tryget(css)))
			continue;
		rcu_read_unlock();

		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);

		rcu_read_lock();
		css_put(css);
	}
	rcu_read_unlock();
}

static int scx_cgroup_init(void)
{
	struct cgroup_subsys_state *css;
	int ret;

	percpu_rwsem_assert_held(&scx_cgroup_rwsem);

	cgroup_warned_missing_weight = false;
	cgroup_warned_missing_idle = false;

	/*
	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
	 * cgroups and init, all online cgroups are initialized.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &root_task_group.css) {
		struct task_group *tg = css_tg(css);
		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };

		scx_cgroup_warn_missing_weight(tg);
		scx_cgroup_warn_missing_idle(tg);

		if ((tg->scx_flags &
		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
			continue;

		if (!scx_ops.cgroup_init) {
			tg->scx_flags |= SCX_TG_INITED;
			continue;
		}

		if (WARN_ON_ONCE(!css_tryget(css)))
			continue;
		rcu_read_unlock();

		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
				      css->cgroup, &args);
		if (ret) {
			css_put(css);
			return ret;
		}
		tg->scx_flags |= SCX_TG_INITED;

		rcu_read_lock();
		css_put(css);
	}
	rcu_read_unlock();

	WARN_ON_ONCE(scx_cgroup_enabled);
	scx_cgroup_enabled = true;

	return 0;
}

#else
static void scx_cgroup_exit(void) {}
static int scx_cgroup_init(void) { return 0; }
#endif

/********************************************************************************
 * Sysfs interface and ops enable/disable.
 */

#define SCX_ATTR(_name)								\
	static struct kobj_attribute scx_attr_##_name = {			\
		.attr = { .name = __stringify(_name), .mode = 0444 },		\
		.show = scx_attr_##_name##_show,				\
	}

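/*
 * For example, SCX_ATTR(state) defines a read-only (0444) attribute named
 * "state" whose ->show() is the scx_attr_state_show() below.
 */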
static ssize_t scx_attr_state_show(struct kobject *kobj,
				   struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  scx_ops_enable_state_str[scx_ops_enable_state()]);
}
SCX_ATTR(state);

static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
					struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
}
SCX_ATTR(switch_all);

static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
					 struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
}
SCX_ATTR(nr_rejected);

static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
					 struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
}
SCX_ATTR(hotplug_seq);

static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
					struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
}
SCX_ATTR(enable_seq);

static struct attribute *scx_global_attrs[] = {
	&scx_attr_state.attr,
	&scx_attr_switch_all.attr,
	&scx_attr_nr_rejected.attr,
	&scx_attr_hotplug_seq.attr,
	&scx_attr_enable_seq.attr,
	NULL,
};

static const struct attribute_group scx_global_attr_group = {
	.attrs = scx_global_attrs,
};

static void scx_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static ssize_t scx_attr_ops_show(struct kobject *kobj,
				 struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "%s\n", scx_ops.name);
}
SCX_ATTR(ops);

static struct attribute *scx_sched_attrs[] = {
	&scx_attr_ops.attr,
	NULL,
};
ATTRIBUTE_GROUPS(scx_sched);

static const struct kobj_type scx_ktype = {
	.release	= scx_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= scx_sched_groups,
};

static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
}

static const struct kset_uevent_ops scx_uevent_ops = {
	.uevent = scx_uevent,
};

/*
 * Used by sched_fork() and __setscheduler_prio() to pick the matching
 * sched_class. dl/rt are already handled.
 */
bool task_should_scx(struct task_struct *p)
{
	if (!scx_enabled() ||
	    unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
		return false;
	if (READ_ONCE(scx_switching_all))
		return true;
	return p->policy == SCHED_EXT;
}

/**
 * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
 *
 * Bypassing guarantees that all runnable tasks make forward progress without
 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
 * be held by tasks that the BPF scheduler is forgetting to run, which
 * unfortunately also excludes toggling the static branches.
 *
 * Let's work around by overriding a couple ops and modifying behaviors based on
 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
 * to force global FIFO scheduling.
 *
 * a. ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
 *    %SCX_OPS_ENQ_LAST is also ignored.
 *
 * b. ops.dispatch() is ignored.
 *
 * c. balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
 *    can't be trusted. Whenever a tick triggers, the running task is rotated to
 *    the tail of the queue with core_sched_at touched.
 *
 * d. pick_next_task() suppresses zero slice warning.
 *
 * e. scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
 *    operations.
 *
 * f. scx_prio_less() reverts to the default core_sched_at order.
 */
static void scx_ops_bypass(bool bypass)
{
	int depth, cpu;

	if (bypass) {
		depth = atomic_inc_return(&scx_ops_bypass_depth);
		WARN_ON_ONCE(depth <= 0);
		if (depth != 1)
			return;
	} else {
		depth = atomic_dec_return(&scx_ops_bypass_depth);
		WARN_ON_ONCE(depth < 0);
		if (depth != 0)
			return;
	}

	/*
	 * No task property is changing. We just need to make sure all currently
	 * queued tasks are re-queued according to the new scx_rq_bypassing()
	 * state. As an optimization, walk each rq's runnable_list instead of
	 * the scx_tasks list.
	 *
	 * This function can't trust the scheduler and thus can't use
	 * cpus_read_lock(). Walk all possible CPUs instead of online.
	 */
	for_each_possible_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct rq_flags rf;
		struct task_struct *p, *n;

		rq_lock_irqsave(rq, &rf);

		if (bypass) {
			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
			rq->scx.flags |= SCX_RQ_BYPASSING;
		} else {
			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
			rq->scx.flags &= ~SCX_RQ_BYPASSING;
		}

		/*
		 * We need to guarantee that no tasks are on the BPF scheduler
		 * while bypassing. Either we see enabled or the enable path
		 * sees scx_rq_bypassing() before moving tasks to SCX.
		 */
		if (!scx_enabled()) {
			rq_unlock_irqrestore(rq, &rf);
			continue;
		}

		/*
		 * The use of list_for_each_entry_safe_reverse() is required
		 * because each task is going to be removed from and added back
		 * to the runnable_list during iteration. Because they're added
		 * to the tail of the list, safe reverse iteration can still
		 * visit all nodes.
		 */
		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
						 scx.runnable_node) {
			struct sched_enq_and_set_ctx ctx;

			/* cycling deq/enq is enough, see the function comment */
			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
			sched_enq_and_set_task(&ctx);
		}

		rq_unlock_irqrestore(rq, &rf);

		/* kick to restore ticks */
		resched_cpu(cpu);
	}
}

static void free_exit_info(struct scx_exit_info *ei)
{
	kfree(ei->dump);
	kfree(ei->msg);
	kfree(ei->bt);
	kfree(ei);
}

static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
{
	struct scx_exit_info *ei;

	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);

	if (!ei->bt || !ei->msg || !ei->dump) {
		free_exit_info(ei);
		return NULL;
	}

	return ei;
}

static const char *scx_exit_reason(enum scx_exit_kind kind)
{
	switch (kind) {
	case SCX_EXIT_UNREG:
		return "unregistered from user space";
	case SCX_EXIT_UNREG_BPF:
		return "unregistered from BPF";
	case SCX_EXIT_UNREG_KERN:
		return "unregistered from the main kernel";
	case SCX_EXIT_SYSRQ:
		return "disabled by sysrq-S";
	case SCX_EXIT_ERROR:
		return "runtime error";
	case SCX_EXIT_ERROR_BPF:
		return "scx_bpf_error";
	case SCX_EXIT_ERROR_STALL:
		return "runnable task stall";
	default:
		return "<UNKNOWN>";
	}
}

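/*
 * The main disable path, run from a kthread worker. With the system put in
 * bypass mode to guarantee forward progress, every task is switched back to
 * its regular sched_class, cgroup support is shut down, all the static
 * branches are turned off and the resources acquired on enable are freed.
 */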
static void scx_ops_disable_workfn(struct kthread_work *work)
{
	struct scx_exit_info *ei = scx_exit_info;
	struct scx_task_iter sti;
	struct task_struct *p;
	struct rhashtable_iter rht_iter;
	struct scx_dispatch_q *dsq;
	int i, kind;

	kind = atomic_read(&scx_exit_kind);
	while (true) {
		/*
		 * NONE indicates that a new scx_ops has been registered since
		 * disable was scheduled - don't kill the new ops. DONE
		 * indicates that the ops has already been disabled.
		 */
		if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
			return;
		if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
			break;
	}
	ei->kind = kind;
	ei->reason = scx_exit_reason(ei->kind);

	/* guarantee forward progress by bypassing scx_ops */
	scx_ops_bypass(true);

	switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
	case SCX_OPS_DISABLING:
		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
		break;
	case SCX_OPS_DISABLED:
		pr_warn("sched_ext: ops error detected without ops (%s)\n",
			scx_exit_info->msg);
		WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
			     SCX_OPS_DISABLING);
		goto done;
	default:
		break;
	}

	/*
	 * Here, every runnable task is guaranteed to make forward progress and
	 * we can safely use blocking synchronization constructs. Actually
	 * disable ops.
	 */
	mutex_lock(&scx_ops_enable_mutex);

	static_branch_disable(&__scx_switched_all);
	WRITE_ONCE(scx_switching_all, false);

	/*
	 * Shut down cgroup support before tasks so that the cgroup attach path
	 * doesn't race against scx_ops_exit_task().
	 */
	scx_cgroup_lock();
	scx_cgroup_exit();
	scx_cgroup_unlock();

	/*
	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
	 * must be switched out and exited synchronously.
	 */
	percpu_down_write(&scx_fork_rwsem);

	scx_ops_init_task_enabled = false;

	spin_lock_irq(&scx_tasks_lock);
	scx_task_iter_init(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		const struct sched_class *old_class = p->sched_class;
		struct sched_enq_and_set_ctx ctx;

		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);

		p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL);
		__setscheduler_prio(p, p->prio);
		check_class_changing(task_rq(p), p, old_class);

		sched_enq_and_set_task(&ctx);

		check_class_changed(task_rq(p), p, old_class, p->prio);
		scx_ops_exit_task(p);
	}
	scx_task_iter_exit(&sti);
	spin_unlock_irq(&scx_tasks_lock);
	percpu_up_write(&scx_fork_rwsem);

	/* no task is on scx, turn off all the switches and flush in-progress calls */
	static_branch_disable(&__scx_ops_enabled);
	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
		static_branch_disable(&scx_has_op[i]);
	static_branch_disable(&scx_ops_enq_last);
	static_branch_disable(&scx_ops_enq_exiting);
	static_branch_disable(&scx_ops_cpu_preempt);
	static_branch_disable(&scx_builtin_idle_enabled);
	synchronize_rcu();

	if (ei->kind >= SCX_EXIT_ERROR) {
		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
		       scx_ops.name, ei->reason);

		if (ei->msg[0] != '\0')
			pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
#ifdef CONFIG_STACKTRACE
		stack_trace_print(ei->bt, ei->bt_len, 2);
#endif
	} else {
		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
			scx_ops.name, ei->reason);
	}

	if (scx_ops.exit)
		SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);

	cancel_delayed_work_sync(&scx_watchdog_work);

	/*
	 * Delete the kobject from the hierarchy eagerly in addition to just
	 * dropping a reference. Otherwise, if the object is deleted
	 * asynchronously, sysfs could observe an object of the same name still
	 * in the hierarchy when another scheduler is loaded.
	 */
	kobject_del(scx_root_kobj);
	kobject_put(scx_root_kobj);
	scx_root_kobj = NULL;

	memset(&scx_ops, 0, sizeof(scx_ops));

	rhashtable_walk_enter(&dsq_hash, &rht_iter);
	do {
		rhashtable_walk_start(&rht_iter);

		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
			destroy_dsq(dsq->id);

		rhashtable_walk_stop(&rht_iter);
	} while (dsq == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&rht_iter);

	free_percpu(scx_dsp_ctx);
	scx_dsp_ctx = NULL;
	scx_dsp_max_batch = 0;

	free_exit_info(scx_exit_info);
	scx_exit_info = NULL;

	mutex_unlock(&scx_ops_enable_mutex);

	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
		     SCX_OPS_DISABLING);
done:
	scx_ops_bypass(false);
}

static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);

static void schedule_scx_ops_disable_work(void)
{
	struct kthread_worker *helper = READ_ONCE(scx_ops_helper);

	/*
	 * We may be called spuriously before the first bpf_sched_ext_reg(). If
	 * scx_ops_helper isn't set up yet, there's nothing to do.
	 */
	if (helper)
		kthread_queue_work(helper, &scx_ops_disable_work);
}

static void scx_ops_disable(enum scx_exit_kind kind)
{
	int none = SCX_EXIT_NONE;

	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
		kind = SCX_EXIT_ERROR;

	atomic_try_cmpxchg(&scx_exit_kind, &none, kind);

	schedule_scx_ops_disable_work();
}

static void dump_newline(struct seq_buf *s)
{
	trace_sched_ext_dump("");

	/* @s may be zero sized and seq_buf triggers WARN if so */
	if (s->size)
		seq_buf_putc(s, '\n');
}

static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
{
	va_list args;

#ifdef CONFIG_TRACEPOINTS
	if (trace_sched_ext_dump_enabled()) {
		/* protected by scx_dump_state()::dump_lock */
		static char line_buf[SCX_EXIT_MSG_LEN];

		va_start(args, fmt);
		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
		va_end(args);

		trace_sched_ext_dump(line_buf);
	}
#endif
	/* @s may be zero sized and seq_buf triggers WARN if so */
	if (s->size) {
		va_start(args, fmt);
		seq_buf_vprintf(s, fmt, args);
		va_end(args);

		seq_buf_putc(s, '\n');
	}
}

static void dump_stack_trace(struct seq_buf *s, const char *prefix,
			     const unsigned long *bt, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
}

static void ops_dump_init(struct seq_buf *s, const char *prefix)
{
	struct scx_dump_data *dd = &scx_dump_data;

	lockdep_assert_irqs_disabled();

	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
	dd->first = true;
	dd->cursor = 0;
	dd->s = s;
	dd->prefix = prefix;
}

static void ops_dump_flush(void)
{
	struct scx_dump_data *dd = &scx_dump_data;
	char *line = dd->buf.line;

	if (!dd->cursor)
		return;

	/*
	 * There's something to flush and this is the first line. Insert a blank
	 * line to distinguish ops dump.
	 */
	if (dd->first) {
		dump_newline(dd->s);
		dd->first = false;
	}

	/*
	 * There may be multiple lines in $line. Scan and emit each line
	 * separately.
	 */
	while (true) {
		char *end = line;
		char c;

		while (*end != '\n' && *end != '\0')
			end++;

		/*
		 * If $line overflowed, it may not have newline at the end.
		 * Always emit with a newline.
		 */
		c = *end;
		*end = '\0';
		dump_line(dd->s, "%s%s", dd->prefix, line);
		if (c == '\0')
			break;

		/* move to the next line */
		end++;
		if (*end == '\0')
			break;
		line = end;
	}

	dd->cursor = 0;
}

static void ops_dump_exit(void)
{
	ops_dump_flush();
	scx_dump_data.cpu = -1;
}

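/*
 * Dump one task's SCX state into @s. @marker distinguishes the CPU's current
 * task ('*') from the other runnable tasks (' '), see scx_dump_state().
 */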
static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
			  struct task_struct *p, char marker)
{
	static unsigned long bt[SCX_EXIT_BT_LEN];
	char dsq_id_buf[19] = "(n/a)";
	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
	unsigned int bt_len = 0;

	if (p->scx.dsq)
		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
			  (unsigned long long)p->scx.dsq->id);

	dump_newline(s);
	dump_line(s, " %c%c %s[%d] %+ldms",
		  marker, task_state_to_char(p), p->comm, p->pid,
		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
		  p->scx.dsq_vtime);
	dump_line(s, "      cpus=%*pb", cpumask_pr_args(p->cpus_ptr));

	if (SCX_HAS_OP(dump_task)) {
		ops_dump_init(s, "    ");
		SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
		ops_dump_exit();
	}

#ifdef CONFIG_STACKTRACE
	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
#endif
	if (bt_len) {
		dump_newline(s);
		dump_stack_trace(s, "    ", bt, bt_len);
	}
}

static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
{
	static DEFINE_SPINLOCK(dump_lock);
	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
	struct scx_dump_ctx dctx = {
		.kind = ei->kind,
		.exit_code = ei->exit_code,
		.reason = ei->reason,
		.at_ns = ktime_get_ns(),
		.at_jiffies = jiffies,
	};
	struct seq_buf s;
	unsigned long flags;
	char *buf;
	int cpu;

	spin_lock_irqsave(&dump_lock, flags);

	seq_buf_init(&s, ei->dump, dump_len);

	if (ei->kind == SCX_EXIT_NONE) {
		dump_line(&s, "Debug dump triggered by %s", ei->reason);
	} else {
		dump_line(&s, "%s[%d] triggered exit kind %d:",
			  current->comm, current->pid, ei->kind);
		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
		dump_newline(&s);
		dump_line(&s, "Backtrace:");
		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
	}

	if (SCX_HAS_OP(dump)) {
		ops_dump_init(&s, "");
		SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
		ops_dump_exit();
	}

	dump_newline(&s);
	dump_line(&s, "CPU states");
	dump_line(&s, "----------");

	for_each_possible_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct rq_flags rf;
		struct task_struct *p;
		struct seq_buf ns;
		size_t avail, used;
		bool idle;

		rq_lock(rq, &rf);

		idle = list_empty(&rq->scx.runnable_list) &&
			rq->curr->sched_class == &idle_sched_class;

		if (idle && !SCX_HAS_OP(dump_cpu))
			goto next;

		/*
		 * We don't yet know whether ops.dump_cpu() will produce output
		 * and we may want to skip the default CPU dump if it doesn't.
		 * Use a nested seq_buf to generate the standard dump so that we
		 * can decide whether to commit later.
		 */
		avail = seq_buf_get_buf(&s, &buf);
		seq_buf_init(&ns, buf, avail);

		dump_newline(&ns);
		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
			  cpu, rq->scx.nr_running, rq->scx.flags,
			  rq->scx.cpu_released, rq->scx.ops_qseq,
			  rq->scx.pnt_seq);
		dump_line(&ns, "          curr=%s[%d] class=%ps",
			  rq->curr->comm, rq->curr->pid,
			  rq->curr->sched_class);
		if (!cpumask_empty(rq->scx.cpus_to_kick))
			dump_line(&ns, "  cpus_to_kick   : %*pb",
				  cpumask_pr_args(rq->scx.cpus_to_kick));
		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
			dump_line(&ns, "  idle_to_kick   : %*pb",
				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
		if (!cpumask_empty(rq->scx.cpus_to_preempt))
			dump_line(&ns, "  cpus_to_preempt: %*pb",
				  cpumask_pr_args(rq->scx.cpus_to_preempt));
		if (!cpumask_empty(rq->scx.cpus_to_wait))
			dump_line(&ns, "  cpus_to_wait   : %*pb",
				  cpumask_pr_args(rq->scx.cpus_to_wait));

		used = seq_buf_used(&ns);
		if (SCX_HAS_OP(dump_cpu)) {
			ops_dump_init(&ns, "  ");
			SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
			ops_dump_exit();
		}

		/*
		 * If idle && nothing generated by ops.dump_cpu(), there's
		 * nothing interesting. Skip.
		 */
		if (idle && used == seq_buf_used(&ns))
			goto next;

		/*
		 * $s may already have overflowed when $ns was created. If so,
		 * calling commit on it will trigger BUG.
		 */
		if (avail) {
			seq_buf_commit(&s, seq_buf_used(&ns));
			if (seq_buf_has_overflowed(&ns))
				seq_buf_set_overflow(&s);
		}

		if (rq->curr->sched_class == &ext_sched_class)
			scx_dump_task(&s, &dctx, rq->curr, '*');

		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
			scx_dump_task(&s, &dctx, p, ' ');
next:
		rq_unlock(rq, &rf);
	}

	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
		       trunc_marker, sizeof(trunc_marker));

	spin_unlock_irqrestore(&dump_lock, flags);
}

static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
{
	struct scx_exit_info *ei = scx_exit_info;

	if (ei->kind >= SCX_EXIT_ERROR)
		scx_dump_state(ei, scx_ops.exit_dump_len);

	schedule_scx_ops_disable_work();
}

static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);

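/*
 * Record the exit kind, code and message and kick off disabling through an
 * irq_work so that this can be called from any context. Only the first
 * caller wins the cmpxchg on scx_exit_kind; later calls are no-ops.
 */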
static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
					     s64 exit_code,
					     const char *fmt, ...)
{
	struct scx_exit_info *ei = scx_exit_info;
	int none = SCX_EXIT_NONE;
	va_list args;

	if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
		return;

	ei->exit_code = exit_code;
#ifdef CONFIG_STACKTRACE
	if (kind >= SCX_EXIT_ERROR)
		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
#endif
	va_start(args, fmt);
	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
	va_end(args);

	/*
	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
	 * in scx_ops_disable_workfn().
	 */
	ei->kind = kind;
	ei->reason = scx_exit_reason(ei->kind);

	irq_work_queue(&scx_ops_error_irq_work);
}

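/*
 * Create a kthread worker running at SCHED_FIFO. Used for
 * scx_ops_disable_work so that, presumably, disabling can make progress
 * independently of the BPF scheduler being disabled.
 */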
static struct kthread_worker *scx_create_rt_helper(const char *name)
{
	struct kthread_worker *helper;

	helper = kthread_create_worker(0, name);
	if (helper)
		sched_set_fifo(helper->task);
	return helper;
}

static void check_hotplug_seq(const struct sched_ext_ops *ops)
{
	unsigned long long global_hotplug_seq;

	/*
	 * If a hotplug event has occurred between when a scheduler was
	 * initialized and when we were able to attach, exit and notify user
	 * space about it.
	 */
	if (ops->hotplug_seq) {
		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
		if (ops->hotplug_seq != global_hotplug_seq) {
			scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
				     "expected hotplug seq %llu did not match actual %llu",
				     ops->hotplug_seq, global_hotplug_seq);
		}
	}
}

static int validate_ops(const struct sched_ext_ops *ops)
{
	/*
	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
	 * ops.enqueue() callback isn't implemented.
	 */
	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
		scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
		return -EINVAL;
	}

	return 0;
}

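/*
 * The enable path invoked from bpf_struct_ops registration. Sets up the
 * global state, calls ops.init() and then, in bypass mode, initializes every
 * existing task before switching all eligible tasks over to SCX.
 */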
static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
{
	struct scx_task_iter sti;
	struct task_struct *p;
	unsigned long timeout;
	int i, cpu, node, ret;

	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
			   cpu_possible_mask)) {
		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation");
		return -EINVAL;
	}

	mutex_lock(&scx_ops_enable_mutex);

	if (!scx_ops_helper) {
		WRITE_ONCE(scx_ops_helper,
			   scx_create_rt_helper("sched_ext_ops_helper"));
		if (!scx_ops_helper) {
			ret = -ENOMEM;
			goto err_unlock;
		}
	}

	if (!global_dsqs) {
		struct scx_dispatch_q **dsqs;

		dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
		if (!dsqs) {
			ret = -ENOMEM;
			goto err_unlock;
		}

		for_each_node_state(node, N_POSSIBLE) {
			struct scx_dispatch_q *dsq;

			dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
			if (!dsq) {
				for_each_node_state(node, N_POSSIBLE)
					kfree(dsqs[node]);
				kfree(dsqs);
				ret = -ENOMEM;
				goto err_unlock;
			}

			init_dsq(dsq, SCX_DSQ_GLOBAL);
			dsqs[node] = dsq;
		}

		global_dsqs = dsqs;
	}

	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
		ret = -EBUSY;
		goto err_unlock;
	}

	scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
	if (!scx_root_kobj) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	scx_root_kobj->kset = scx_kset;
	ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
	if (ret < 0)
		goto err;

	scx_exit_info = alloc_exit_info(ops->exit_dump_len);
	if (!scx_exit_info) {
		ret = -ENOMEM;
		goto err_del;
	}

	/*
	 * Set scx_ops, transition to ENABLING and clear exit info to arm the
	 * disable path. Failure triggers full disabling from here on.
	 */
	scx_ops = *ops;

	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
		     SCX_OPS_DISABLED);

	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
	scx_warned_zero_slice = false;

	atomic_long_set(&scx_nr_rejected, 0);

	for_each_possible_cpu(cpu)
		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;

	/*
	 * Keep CPUs stable during enable so that the BPF scheduler can track
	 * online CPUs by watching ->on/offline_cpu() after ->init().
	 */
	cpus_read_lock();

	if (scx_ops.init) {
		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
		if (ret) {
			ret = ops_sanitize_err("init", ret);
			cpus_read_unlock();
			goto err_disable;
		}
	}

	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
		if (((void (**)(void))ops)[i])
			static_branch_enable_cpuslocked(&scx_has_op[i]);

	check_hotplug_seq(ops);
	cpus_read_unlock();

	ret = validate_ops(ops);
	if (ret)
		goto err_disable;

	WARN_ON_ONCE(scx_dsp_ctx);
	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
						   scx_dsp_max_batch),
				     __alignof__(struct scx_dsp_ctx));
	if (!scx_dsp_ctx) {
		ret = -ENOMEM;
		goto err_disable;
	}

	if (ops->timeout_ms)
		timeout = msecs_to_jiffies(ops->timeout_ms);
	else
		timeout = SCX_WATCHDOG_MAX_TIMEOUT;

	WRITE_ONCE(scx_watchdog_timeout, timeout);
	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
			   scx_watchdog_timeout / 2);

	/*
	 * Once __scx_ops_enabled is set, %current can be switched to SCX
	 * anytime. This can lead to stalls as some BPF schedulers (e.g.
	 * userspace scheduling) may not function correctly before all tasks are
	 * switched. Init in bypass mode to guarantee forward progress.
	 */
	scx_ops_bypass(true);

	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
		if (((void (**)(void))ops)[i])
			static_branch_enable(&scx_has_op[i]);

	if (ops->flags & SCX_OPS_ENQ_LAST)
		static_branch_enable(&scx_ops_enq_last);

	if (ops->flags & SCX_OPS_ENQ_EXITING)
		static_branch_enable(&scx_ops_enq_exiting);
	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
		static_branch_enable(&scx_ops_cpu_preempt);

	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
		reset_idle_masks();
		static_branch_enable(&scx_builtin_idle_enabled);
	} else {
		static_branch_disable(&scx_builtin_idle_enabled);
	}

	/*
	 * Lock out forks, cgroup on/offlining and moves before opening the
	 * floodgate so that they don't wander into the operations prematurely.
	 */
	percpu_down_write(&scx_fork_rwsem);

	WARN_ON_ONCE(scx_ops_init_task_enabled);
	scx_ops_init_task_enabled = true;

	/*
	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
	 * preventing new tasks from being added. No need to exclude tasks
	 * leaving as sched_ext_free() can handle both prepped and enabled
	 * tasks. Prep all tasks first and then enable them with preemption
	 * disabled.
	 *
	 * All cgroups should be initialized before scx_ops_init_task() so that
	 * the BPF scheduler can reliably track each task's cgroup membership
	 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
	 * migrations while tasks are being initialized so that
	 * scx_cgroup_can_attach() never sees uninitialized tasks.
	 */
	scx_cgroup_lock();
	ret = scx_cgroup_init();
	if (ret)
		goto err_disable_unlock_all;

	spin_lock_irq(&scx_tasks_lock);
	scx_task_iter_init(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		/*
		 * @p may already be dead, have lost all its usage counts and
		 * be waiting for an RCU grace period before being freed. @p
		 * can't be initialized for SCX in such cases and should be
		 * ignored.
		 */
		if (!tryget_task_struct(p))
			continue;

		scx_task_iter_rq_unlock(&sti);
		spin_unlock_irq(&scx_tasks_lock);

		ret = scx_ops_init_task(p, task_group(p), false);
		if (ret) {
			put_task_struct(p);
			spin_lock_irq(&scx_tasks_lock);
			scx_task_iter_exit(&sti);
			spin_unlock_irq(&scx_tasks_lock);
			pr_err("sched_ext: ops.init_task() failed (%d) for %s[%d] while loading\n",
			       ret, p->comm, p->pid);
			goto err_disable_unlock_all;
		}

		scx_set_task_state(p, SCX_TASK_READY);

		put_task_struct(p);
		spin_lock_irq(&scx_tasks_lock);
	}
	scx_task_iter_exit(&sti);
	spin_unlock_irq(&scx_tasks_lock);
	scx_cgroup_unlock();
	percpu_up_write(&scx_fork_rwsem);
5167
5168 /*
5169 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5170 * all eligible tasks.
5171 */
5172 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5173 static_branch_enable(&__scx_ops_enabled);
5174
5175 /*
5176 * We're fully committed and can't fail. The task READY -> ENABLED
5177 * transitions here are synchronized against sched_ext_free() through
5178 * scx_tasks_lock.
5179 */
5180 percpu_down_write(&scx_fork_rwsem);
5181 spin_lock_irq(&scx_tasks_lock);
5182 scx_task_iter_init(&sti);
5183 while ((p = scx_task_iter_next_locked(&sti))) {
5184 const struct sched_class *old_class = p->sched_class;
5185 struct sched_enq_and_set_ctx ctx;
5186
5187 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5188
5189 __setscheduler_prio(p, p->prio);
5190 check_class_changing(task_rq(p), p, old_class);
5191
5192 sched_enq_and_set_task(&ctx);
5193
5194 check_class_changed(task_rq(p), p, old_class, p->prio);
5195 }
5196 scx_task_iter_exit(&sti);
5197 spin_unlock_irq(&scx_tasks_lock);
5198 percpu_up_write(&scx_fork_rwsem);
5199
5200 scx_ops_bypass(false);
5201
5202 /*
5203 * Returning an error code here would lose the recorded error
5204	 * information. Exit indicating success so that the error is reported
5205	 * through ops.exit() with all the details.
5206 */
5207 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5208 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5209 ret = 0;
5210 goto err_disable;
5211 }
5212
5213 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5214 static_branch_enable(&__scx_switched_all);
5215
5216 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5217 scx_ops.name, scx_switched_all() ? "" : " (partial)");
5218 kobject_uevent(scx_root_kobj, KOBJ_ADD);
5219 mutex_unlock(&scx_ops_enable_mutex);
5220
5221 atomic_long_inc(&scx_enable_seq);
5222
5223 return 0;
5224
5225 err_del:
5226 kobject_del(scx_root_kobj);
5227 err:
5228 kobject_put(scx_root_kobj);
5229 scx_root_kobj = NULL;
5230 if (scx_exit_info) {
5231 free_exit_info(scx_exit_info);
5232 scx_exit_info = NULL;
5233 }
5234 err_unlock:
5235 mutex_unlock(&scx_ops_enable_mutex);
5236 return ret;
5237
5238 err_disable_unlock_all:
5239 scx_cgroup_unlock();
5240 percpu_up_write(&scx_fork_rwsem);
5241 scx_ops_bypass(false);
5242 err_disable:
5243 mutex_unlock(&scx_ops_enable_mutex);
5244 /* must be fully disabled before returning */
5245 scx_ops_disable(SCX_EXIT_ERROR);
5246 kthread_flush_work(&scx_ops_disable_work);
5247 return ret;
5248 }
5249
5250
5251 /********************************************************************************
5252 * bpf_struct_ops plumbing.
5253 */
5254 #include <linux/bpf_verifier.h>
5255 #include <linux/bpf.h>
5256 #include <linux/btf.h>
5257
5258 extern struct btf *btf_vmlinux;
5259 static const struct btf_type *task_struct_type;
5260 static u32 task_struct_type_id;
5261
5262 static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size,
5263 enum bpf_access_type type,
5264 const struct bpf_prog *prog,
5265 struct bpf_insn_access_aux *info)
5266 {
5267 struct btf *btf = bpf_get_btf_vmlinux();
5268 const struct bpf_struct_ops_desc *st_ops_desc;
5269 const struct btf_member *member;
5270 const struct btf_type *t;
5271 u32 btf_id, member_idx;
5272 const char *mname;
5273
5274 /* struct_ops op args are all sequential, 64-bit numbers */
5275 if (off != arg_n * sizeof(__u64))
5276 return false;
5277
5278 /* btf_id should be the type id of struct sched_ext_ops */
5279 btf_id = prog->aux->attach_btf_id;
5280 st_ops_desc = bpf_struct_ops_find(btf, btf_id);
5281 if (!st_ops_desc)
5282 return false;
5283
5284 /* BTF type of struct sched_ext_ops */
5285 t = st_ops_desc->type;
5286
5287 member_idx = prog->expected_attach_type;
5288 if (member_idx >= btf_type_vlen(t))
5289 return false;
5290
5291 /*
5292 * Get the member name of this struct_ops program, which corresponds to
5293 * a field in struct sched_ext_ops. For example, the member name of the
5294 * dispatch struct_ops program (callback) is "dispatch".
5295 */
5296 member = &btf_type_member(t)[member_idx];
5297 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
5298
5299 if (!strcmp(mname, op)) {
5300 /*
5301 * The value is a pointer to a type (struct task_struct) given
5302 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED),
5303 * however, can be a NULL (PTR_MAYBE_NULL). The BPF program
5304 * should check the pointer to make sure it is not NULL before
5305 * using it, or the verifier will reject the program.
5306 *
5307 * Longer term, this is something that should be addressed by
5308 * BTF, and be fully contained within the verifier.
5309 */
5310 info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED;
5311 info->btf = btf_vmlinux;
5312 info->btf_id = task_struct_type_id;
5313
5314 return true;
5315 }
5316
5317 return false;
5318 }
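/*
 * Illustrative sketch (not part of this file): because the task argument of
 * ops.dispatch() and ops.yield() is PTR_MAYBE_NULL, a BPF scheduler must
 * NULL-check it before dereferencing or the verifier rejects the program.
 * BPF_STRUCT_OPS is assumed to come from the sched_ext BPF headers:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		if (prev && !prev->scx.slice)
 *			prev->scx.slice = SCX_SLICE_DFL;
 *	}
 */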
5319
5320 static bool bpf_scx_is_valid_access(int off, int size,
5321 enum bpf_access_type type,
5322 const struct bpf_prog *prog,
5323 struct bpf_insn_access_aux *info)
5324 {
5325 if (type != BPF_READ)
5326 return false;
5327 if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) ||
5328 set_arg_maybe_null("yield", 1, off, size, type, prog, info))
5329 return true;
5330 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5331 return false;
5332 if (off % size != 0)
5333 return false;
5334
5335 return btf_ctx_access(off, size, type, prog, info);
5336 }
5337
5338 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5339 const struct bpf_reg_state *reg, int off,
5340 int size)
5341 {
5342 const struct btf_type *t;
5343
5344 t = btf_type_by_id(reg->btf, reg->btf_id);
5345 if (t == task_struct_type) {
5346 if (off >= offsetof(struct task_struct, scx.slice) &&
5347 off + size <= offsetofend(struct task_struct, scx.slice))
5348 return SCALAR_VALUE;
5349 if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5350 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5351 return SCALAR_VALUE;
5352 if (off >= offsetof(struct task_struct, scx.disallow) &&
5353 off + size <= offsetofend(struct task_struct, scx.disallow))
5354 return SCALAR_VALUE;
5355 }
5356
5357 return -EACCES;
5358 }
5359
5360 static const struct bpf_func_proto *
5361 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5362 {
5363 switch (func_id) {
5364 case BPF_FUNC_task_storage_get:
5365 return &bpf_task_storage_get_proto;
5366 case BPF_FUNC_task_storage_delete:
5367 return &bpf_task_storage_delete_proto;
5368 default:
5369 return bpf_base_func_proto(func_id, prog);
5370 }
5371 }
5372
5373 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5374 .get_func_proto = bpf_scx_get_func_proto,
5375 .is_valid_access = bpf_scx_is_valid_access,
5376 .btf_struct_access = bpf_scx_btf_struct_access,
5377 };
5378
5379 static int bpf_scx_init_member(const struct btf_type *t,
5380 const struct btf_member *member,
5381 void *kdata, const void *udata)
5382 {
5383 const struct sched_ext_ops *uops = udata;
5384 struct sched_ext_ops *ops = kdata;
5385 u32 moff = __btf_member_bit_offset(t, member) / 8;
5386 int ret;
5387
5388 switch (moff) {
5389 case offsetof(struct sched_ext_ops, dispatch_max_batch):
5390 if (*(u32 *)(udata + moff) > INT_MAX)
5391 return -E2BIG;
5392 ops->dispatch_max_batch = *(u32 *)(udata + moff);
5393 return 1;
5394 case offsetof(struct sched_ext_ops, flags):
5395 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5396 return -EINVAL;
5397 ops->flags = *(u64 *)(udata + moff);
5398 return 1;
5399 case offsetof(struct sched_ext_ops, name):
5400 ret = bpf_obj_name_cpy(ops->name, uops->name,
5401 sizeof(ops->name));
5402 if (ret < 0)
5403 return ret;
5404 if (ret == 0)
5405 return -EINVAL;
5406 return 1;
5407 case offsetof(struct sched_ext_ops, timeout_ms):
5408 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5409 SCX_WATCHDOG_MAX_TIMEOUT)
5410 return -E2BIG;
5411 ops->timeout_ms = *(u32 *)(udata + moff);
5412 return 1;
5413 case offsetof(struct sched_ext_ops, exit_dump_len):
5414 ops->exit_dump_len =
5415 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5416 return 1;
5417 case offsetof(struct sched_ext_ops, hotplug_seq):
5418 ops->hotplug_seq = *(u64 *)(udata + moff);
5419 return 1;
5420 }
5421
5422 return 0;
5423 }
5424
5425 static int bpf_scx_check_member(const struct btf_type *t,
5426 const struct btf_member *member,
5427 const struct bpf_prog *prog)
5428 {
5429 u32 moff = __btf_member_bit_offset(t, member) / 8;
5430
5431 switch (moff) {
5432 case offsetof(struct sched_ext_ops, init_task):
5433 #ifdef CONFIG_EXT_GROUP_SCHED
5434 case offsetof(struct sched_ext_ops, cgroup_init):
5435 case offsetof(struct sched_ext_ops, cgroup_exit):
5436 case offsetof(struct sched_ext_ops, cgroup_prep_move):
5437 #endif
5438 case offsetof(struct sched_ext_ops, cpu_online):
5439 case offsetof(struct sched_ext_ops, cpu_offline):
5440 case offsetof(struct sched_ext_ops, init):
5441 case offsetof(struct sched_ext_ops, exit):
5442 break;
5443 default:
5444 if (prog->sleepable)
5445 return -EINVAL;
5446 }
5447
5448 return 0;
5449 }
5450
5451 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5452 {
5453 return scx_ops_enable(kdata, link);
5454 }
5455
5456 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5457 {
5458 scx_ops_disable(SCX_EXIT_UNREG);
5459 kthread_flush_work(&scx_ops_disable_work);
5460 }
5461
5462 static int bpf_scx_init(struct btf *btf)
5463 {
5464 s32 type_id;
5465
5466 type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
5467 if (type_id < 0)
5468 return -EINVAL;
5469 task_struct_type = btf_type_by_id(btf, type_id);
5470 task_struct_type_id = type_id;
5471
5472 return 0;
5473 }
5474
5475 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5476 {
5477 /*
5478 * sched_ext does not support updating the actively-loaded BPF
5479 * scheduler, as registering a BPF scheduler can always fail if the
5480 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5481 * etc. Similarly, we can always race with unregistration happening
5482 * elsewhere, such as with sysrq.
5483 */
5484 return -EOPNOTSUPP;
5485 }
5486
5487 static int bpf_scx_validate(void *kdata)
5488 {
5489 return 0;
5490 }
5491
5492 static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5493 static void enqueue_stub(struct task_struct *p, u64 enq_flags) {}
5494 static void dequeue_stub(struct task_struct *p, u64 enq_flags) {}
5495 static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {}
5496 static void tick_stub(struct task_struct *p) {}
5497 static void runnable_stub(struct task_struct *p, u64 enq_flags) {}
5498 static void running_stub(struct task_struct *p) {}
5499 static void stopping_stub(struct task_struct *p, bool runnable) {}
5500 static void quiescent_stub(struct task_struct *p, u64 deq_flags) {}
5501 static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; }
5502 static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; }
5503 static void set_weight_stub(struct task_struct *p, u32 weight) {}
5504 static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {}
5505 static void update_idle_stub(s32 cpu, bool idle) {}
5506 static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {}
5507 static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {}
5508 static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5509 static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
5510 static void enable_stub(struct task_struct *p) {}
5511 static void disable_stub(struct task_struct *p) {}
5512 #ifdef CONFIG_EXT_GROUP_SCHED
5513 static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5514 static void cgroup_exit_stub(struct cgroup *cgrp) {}
5515 static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5516 static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5517 static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5518 static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {}
5519 #endif
5520 static void cpu_online_stub(s32 cpu) {}
5521 static void cpu_offline_stub(s32 cpu) {}
5522 static s32 init_stub(void) { return -EINVAL; }
5523 static void exit_stub(struct scx_exit_info *info) {}
5524 static void dump_stub(struct scx_dump_ctx *ctx) {}
5525 static void dump_cpu_stub(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5526 static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5527
5528 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5529 .select_cpu = select_cpu_stub,
5530 .enqueue = enqueue_stub,
5531 .dequeue = dequeue_stub,
5532 .dispatch = dispatch_stub,
5533 .tick = tick_stub,
5534 .runnable = runnable_stub,
5535 .running = running_stub,
5536 .stopping = stopping_stub,
5537 .quiescent = quiescent_stub,
5538 .yield = yield_stub,
5539 .core_sched_before = core_sched_before_stub,
5540 .set_weight = set_weight_stub,
5541 .set_cpumask = set_cpumask_stub,
5542 .update_idle = update_idle_stub,
5543 .cpu_acquire = cpu_acquire_stub,
5544 .cpu_release = cpu_release_stub,
5545 .init_task = init_task_stub,
5546 .exit_task = exit_task_stub,
5547 .enable = enable_stub,
5548 .disable = disable_stub,
5549 #ifdef CONFIG_EXT_GROUP_SCHED
5550 .cgroup_init = cgroup_init_stub,
5551 .cgroup_exit = cgroup_exit_stub,
5552 .cgroup_prep_move = cgroup_prep_move_stub,
5553 .cgroup_move = cgroup_move_stub,
5554 .cgroup_cancel_move = cgroup_cancel_move_stub,
5555 .cgroup_set_weight = cgroup_set_weight_stub,
5556 #endif
5557 .cpu_online = cpu_online_stub,
5558 .cpu_offline = cpu_offline_stub,
5559 .init = init_stub,
5560 .exit = exit_stub,
5561 .dump = dump_stub,
5562 .dump_cpu = dump_cpu_stub,
5563 .dump_task = dump_task_stub,
5564 };
5565
5566 static struct bpf_struct_ops bpf_sched_ext_ops = {
5567 .verifier_ops = &bpf_scx_verifier_ops,
5568 .reg = bpf_scx_reg,
5569 .unreg = bpf_scx_unreg,
5570 .check_member = bpf_scx_check_member,
5571 .init_member = bpf_scx_init_member,
5572 .init = bpf_scx_init,
5573 .update = bpf_scx_update,
5574 .validate = bpf_scx_validate,
5575 .name = "sched_ext_ops",
5576 .owner = THIS_MODULE,
5577 .cfi_stubs = &__bpf_ops_sched_ext_ops
5578 };
5579
5580
5581 /********************************************************************************
5582 * System integration and init.
5583 */
5584
5585 static void sysrq_handle_sched_ext_reset(u8 key)
5586 {
5587 if (scx_ops_helper)
5588 scx_ops_disable(SCX_EXIT_SYSRQ);
5589 else
5590 pr_info("sched_ext: BPF scheduler not yet used\n");
5591 }
5592
5593 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
5594 .handler = sysrq_handle_sched_ext_reset,
5595 .help_msg = "reset-sched-ext(S)",
5596 .action_msg = "Disable sched_ext and revert all tasks to CFS",
5597 .enable_mask = SYSRQ_ENABLE_RTNICE,
5598 };
5599
5600 static void sysrq_handle_sched_ext_dump(u8 key)
5601 {
5602 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
5603
5604 if (scx_enabled())
5605 scx_dump_state(&ei, 0);
5606 }
5607
5608 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
5609 .handler = sysrq_handle_sched_ext_dump,
5610 .help_msg = "dump-sched-ext(D)",
5611 .action_msg = "Trigger sched_ext debug dump",
5612 .enable_mask = SYSRQ_ENABLE_RTNICE,
5613 };
5614
5615 static bool can_skip_idle_kick(struct rq *rq)
5616 {
5617 lockdep_assert_rq_held(rq);
5618
5619 /*
5620 * We can skip idle kicking if @rq is going to go through at least one
5621 * full SCX scheduling cycle before going idle. Just checking whether
5622 * curr is not idle is insufficient because we could be racing
5623 * balance_one() trying to pull the next task from a remote rq, which
5624 * may fail, and @rq may become idle afterwards.
5625 *
5626 * The race window is small and we don't and can't guarantee that @rq is
5627 * only kicked while idle anyway. Skip only when sure.
5628 */
5629 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
5630 }
5631
5632 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
5633 {
5634 struct rq *rq = cpu_rq(cpu);
5635 struct scx_rq *this_scx = &this_rq->scx;
5636 bool should_wait = false;
5637 unsigned long flags;
5638
5639 raw_spin_rq_lock_irqsave(rq, flags);
5640
5641 /*
5642 * During CPU hotplug, a CPU may depend on kicking itself to make
5643 * forward progress. Allow kicking self regardless of online state.
5644 */
5645 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
5646 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5647 if (rq->curr->sched_class == &ext_sched_class)
5648 rq->curr->scx.slice = 0;
5649 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5650 }
5651
5652 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5653 pseqs[cpu] = rq->scx.pnt_seq;
5654 should_wait = true;
5655 }
5656
5657 resched_curr(rq);
5658 } else {
5659 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5660 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5661 }
5662
5663 raw_spin_rq_unlock_irqrestore(rq, flags);
5664
5665 return should_wait;
5666 }
5667
5668 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5669 {
5670 struct rq *rq = cpu_rq(cpu);
5671 unsigned long flags;
5672
5673 raw_spin_rq_lock_irqsave(rq, flags);
5674
5675 if (!can_skip_idle_kick(rq) &&
5676 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5677 resched_curr(rq);
5678
5679 raw_spin_rq_unlock_irqrestore(rq, flags);
5680 }
5681
5682 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5683 {
5684 struct rq *this_rq = this_rq();
5685 struct scx_rq *this_scx = &this_rq->scx;
5686 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
5687 bool should_wait = false;
5688 s32 cpu;
5689
5690 for_each_cpu(cpu, this_scx->cpus_to_kick) {
5691 should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
5692 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5693 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5694 }
5695
5696 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5697 kick_one_cpu_if_idle(cpu, this_rq);
5698 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5699 }
5700
5701 if (!should_wait)
5702 return;
5703
5704 for_each_cpu(cpu, this_scx->cpus_to_wait) {
5705 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
5706
5707 if (cpu != cpu_of(this_rq)) {
5708 /*
5709 * Pairs with smp_store_release() issued by this CPU in
5710 * scx_next_task_picked() on the resched path.
5711 *
5712 * We busy-wait here to guarantee that no other task can
5713 * be scheduled on our core before the target CPU has
5714 * entered the resched path.
5715 */
5716 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
5717 cpu_relax();
5718 }
5719
5720 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5721 }
5722 }
5723
5724 /**
5725 * print_scx_info - print out sched_ext scheduler state
5726 * @log_lvl: the log level to use when printing
5727 * @p: target task
5728 *
5729 * If a sched_ext scheduler is enabled, print the name and state of the
5730 * scheduler. If @p is on sched_ext, print further information about the task.
5731 *
5732 * This function can be safely called on any task as long as the task_struct
5733 * itself is accessible. While safe, this function isn't synchronized and may
5734	 * print out mixed-up or garbled information of limited length.
5735 */
5736 void print_scx_info(const char *log_lvl, struct task_struct *p)
5737 {
5738 enum scx_ops_enable_state state = scx_ops_enable_state();
5739 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5740 char runnable_at_buf[22] = "?";
5741 struct sched_class *class;
5742 unsigned long runnable_at;
5743
5744 if (state == SCX_OPS_DISABLED)
5745 return;
5746
5747 /*
5748 * Carefully check if the task was running on sched_ext, and then
5749 * carefully copy the time it's been runnable, and its state.
5750 */
5751 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5752 class != &ext_sched_class) {
5753 printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
5754 scx_ops_enable_state_str[state], all);
5755 return;
5756 }
5757
5758 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5759 sizeof(runnable_at)))
5760 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5761 jiffies_delta_msecs(runnable_at, jiffies));
5762
5763 /* print everything onto one line to conserve console space */
5764 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5765 log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
5766 runnable_at_buf);
5767 }
5768
5769 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5770 {
5771 /*
5772 * SCX schedulers often have userspace components which are sometimes
5773	 * involved in critical scheduling paths. PM operations involve freezing
5774 * userspace which can lead to scheduling misbehaviors including stalls.
5775 * Let's bypass while PM operations are in progress.
5776 */
5777 switch (event) {
5778 case PM_HIBERNATION_PREPARE:
5779 case PM_SUSPEND_PREPARE:
5780 case PM_RESTORE_PREPARE:
5781 scx_ops_bypass(true);
5782 break;
5783 case PM_POST_HIBERNATION:
5784 case PM_POST_SUSPEND:
5785 case PM_POST_RESTORE:
5786 scx_ops_bypass(false);
5787 break;
5788 }
5789
5790 return NOTIFY_OK;
5791 }
5792
5793 static struct notifier_block scx_pm_notifier = {
5794 .notifier_call = scx_pm_handler,
5795 };
5796
5797 void __init init_sched_ext_class(void)
5798 {
5799 s32 cpu, v;
5800
5801 /*
5802 * The following is to prevent the compiler from optimizing out the enum
5803 * definitions so that BPF scheduler implementations can use them
5804 * through the generated vmlinux.h.
5805 */
5806 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
5807 SCX_TG_ONLINE);
5808
5809 BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
5810 #ifdef CONFIG_SMP
5811 BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
5812 BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
5813 #endif
5814 scx_kick_cpus_pnt_seqs =
5815 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
5816 __alignof__(scx_kick_cpus_pnt_seqs[0]));
5817 BUG_ON(!scx_kick_cpus_pnt_seqs);
5818
5819 for_each_possible_cpu(cpu) {
5820 struct rq *rq = cpu_rq(cpu);
5821
5822 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5823 INIT_LIST_HEAD(&rq->scx.runnable_list);
5824 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
5825
5826 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
5827 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
5828 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
5829 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
5830 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
5831 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
5832
5833 if (cpu_online(cpu))
5834 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5835 }
5836
5837 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5838 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5839 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5840 }
5841
5842
5843 /********************************************************************************
5844 * Helpers that can be called from the BPF scheduler.
5845 */
5846 #include <linux/btf_ids.h>
5847
5848 __bpf_kfunc_start_defs();
5849
5850 /**
5851 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
5852 * @p: task_struct to select a CPU for
5853 * @prev_cpu: CPU @p was on previously
5854 * @wake_flags: %SCX_WAKE_* flags
5855 * @is_idle: out parameter indicating whether the returned CPU is idle
5856 *
5857 * Can only be called from ops.select_cpu() if the built-in CPU selection is
5858 * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
5859 * @p, @prev_cpu and @wake_flags match ops.select_cpu().
5860 *
5861 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
5862 * currently idle and thus a good candidate for direct dispatching.
5863 */
5864 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
5865 u64 wake_flags, bool *is_idle)
5866 {
5867 if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) {
5868 *is_idle = false;
5869 return prev_cpu;
5870 }
5871 #ifdef CONFIG_SMP
5872 return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
5873 #else
5874 *is_idle = false;
5875 return prev_cpu;
5876 #endif
5877 }
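/*
 * Illustrative sketch, in the spirit of the scx_simple example scheduler: an
 * ops.select_cpu() implementation which uses the default selection above and
 * directly dispatches when an idle CPU was found. BPF_STRUCT_OPS and
 * SCX_SLICE_DFL are assumed to come from the sched_ext BPF headers:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *
 *		if (is_idle)
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */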
5878
5879 __bpf_kfunc_end_defs();
5880
5881 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
5882 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
5883 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
5884
5885 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
5886 .owner = THIS_MODULE,
5887 .set = &scx_kfunc_ids_select_cpu,
5888 };
5889
5890 static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
5891 {
5892 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
5893 return false;
5894
5895 lockdep_assert_irqs_disabled();
5896
5897 if (unlikely(!p)) {
5898 scx_ops_error("called with NULL task");
5899 return false;
5900 }
5901
5902 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
5903 scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
5904 return false;
5905 }
5906
5907 return true;
5908 }
5909
5910 static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
5911 {
5912 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5913 struct task_struct *ddsp_task;
5914
5915 ddsp_task = __this_cpu_read(direct_dispatch_task);
5916 if (ddsp_task) {
5917 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
5918 return;
5919 }
5920
5921 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
5922 scx_ops_error("dispatch buffer overflow");
5923 return;
5924 }
5925
5926 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
5927 .task = p,
5928 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
5929 .dsq_id = dsq_id,
5930 .enq_flags = enq_flags,
5931 };
5932 }
5933
5934 __bpf_kfunc_start_defs();
5935
5936 /**
5937 * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
5938 * @p: task_struct to dispatch
5939 * @dsq_id: DSQ to dispatch to
5940 * @slice: duration @p can run for in nsecs, 0 to keep the current value
5941 * @enq_flags: SCX_ENQ_*
5942 *
5943 * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
5944 * to call this function spuriously. Can be called from ops.enqueue(),
5945 * ops.select_cpu(), and ops.dispatch().
5946 *
5947 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
5948 * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
5949 * used to target the local DSQ of a CPU other than the enqueueing one. Use
5950 * ops.select_cpu() to be on the target CPU in the first place.
5951 *
5952	 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
5953 * will be directly dispatched to the corresponding dispatch queue after
5954 * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
5955 * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
5956 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
5957 * task is dispatched.
5958 *
5959 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
5960	 * and this function can be called up to ops.dispatch_max_batch times to dispatch
5961 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
5962 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
5963 *
5964 * This function doesn't have any locking restrictions and may be called under
5965 * BPF locks (in the future when BPF introduces more flexible locking).
5966 *
5967 * @p is allowed to run for @slice. The scheduling path is triggered on slice
5968 * exhaustion. If zero, the current residual slice is maintained. If
5969 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
5970 * scx_bpf_kick_cpu() to trigger scheduling.
5971 */
5972 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
5973 u64 enq_flags)
5974 {
5975 if (!scx_dispatch_preamble(p, enq_flags))
5976 return;
5977
5978 if (slice)
5979 p->scx.slice = slice;
5980 else
5981 p->scx.slice = p->scx.slice ?: 1;
5982
5983 scx_dispatch_commit(p, dsq_id, enq_flags);
5984 }
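/*
 * Illustrative sketch: a minimal FIFO ops.enqueue() which dispatches every
 * task to a scheduler-defined shared DSQ with the default slice. SHARED_DSQ
 * is a hypothetical DSQ ID created by the scheduler in ops.init():
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 */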
5985
5986 /**
5987 * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
5988 * @p: task_struct to dispatch
5989 * @dsq_id: DSQ to dispatch to
5990 * @slice: duration @p can run for in nsecs, 0 to keep the current value
5991 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
5992 * @enq_flags: SCX_ENQ_*
5993 *
5994 * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
5995 * Tasks queued into the priority queue are ordered by @vtime and always
5996 * consumed after the tasks in the FIFO queue. All other aspects are identical
5997 * to scx_bpf_dispatch().
5998 *
5999 * @vtime ordering is according to time_before64() which considers wrapping. A
6000 * numerically larger vtime may indicate an earlier position in the ordering and
6001 * vice-versa.
6002 */
6003 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6004 u64 slice, u64 vtime, u64 enq_flags)
6005 {
6006 if (!scx_dispatch_preamble(p, enq_flags))
6007 return;
6008
6009 if (slice)
6010 p->scx.slice = slice;
6011 else
6012 p->scx.slice = p->scx.slice ?: 1;
6013
6014 p->scx.dsq_vtime = vtime;
6015
6016 scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6017 }
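/*
 * Illustrative sketch of weighted vtime scheduling, in the spirit of
 * scx_simple. vtime_now, vtime_before() and SHARED_DSQ are assumed to be
 * defined by the BPF scheduler; the comparison caps the vtime budget an
 * idling task can accumulate to one slice:
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		u64 vtime = p->scx.dsq_vtime;
 *
 *		if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
 *			vtime = vtime_now - SCX_SLICE_DFL;
 *
 *		scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
 *				       enq_flags);
 *	}
 */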
6018
6019 __bpf_kfunc_end_defs();
6020
6021 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6022 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6023 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6024 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6025
6026 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6027 .owner = THIS_MODULE,
6028 .set = &scx_kfunc_ids_enqueue_dispatch,
6029 };
6030
6031 static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
6032 struct task_struct *p, u64 dsq_id,
6033 u64 enq_flags)
6034 {
6035 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6036 struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
6037 bool dispatched = false;
6038 bool in_balance;
6039 unsigned long flags;
6040
6041 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6042 return false;
6043
6044 /*
6045 * Can be called from either ops.dispatch() locking this_rq() or any
6046	 * context where no rq lock is held. In the latter case, lock @p's task_rq which
6047 * we'll likely need anyway.
6048 */
6049 src_rq = task_rq(p);
6050
6051 local_irq_save(flags);
6052 this_rq = this_rq();
6053 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6054
6055 if (in_balance) {
6056 if (this_rq != src_rq) {
6057 raw_spin_rq_unlock(this_rq);
6058 raw_spin_rq_lock(src_rq);
6059 }
6060 } else {
6061 raw_spin_rq_lock(src_rq);
6062 }
6063
6064 locked_rq = src_rq;
6065 raw_spin_lock(&src_dsq->lock);
6066
6067 /*
6068	 * Did someone else get to it? @p could have already left $src_dsq, been
6069	 * re-enqueued, or be in the process of being consumed by someone else.
6070 */
6071 if (unlikely(p->scx.dsq != src_dsq ||
6072 u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6073 p->scx.holding_cpu >= 0) ||
6074 WARN_ON_ONCE(src_rq != task_rq(p))) {
6075 raw_spin_unlock(&src_dsq->lock);
6076 goto out;
6077 }
6078
6079 /* @p is still on $src_dsq and stable, determine the destination */
6080 dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6081
6082 if (dst_dsq->id == SCX_DSQ_LOCAL) {
6083 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
6084 if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
6085 dst_dsq = find_global_dsq(p);
6086 dst_rq = src_rq;
6087 }
6088 } else {
6089 /* no need to migrate if destination is a non-local DSQ */
6090 dst_rq = src_rq;
6091 }
6092
6093 /*
6094 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
6095 * CPU, @p will be migrated.
6096 */
6097 if (dst_dsq->id == SCX_DSQ_LOCAL) {
6098 /* @p is going from a non-local DSQ to a local DSQ */
6099 if (src_rq == dst_rq) {
6100 task_unlink_from_dsq(p, src_dsq);
6101 move_local_task_to_local_dsq(p, enq_flags,
6102 src_dsq, dst_rq);
6103 raw_spin_unlock(&src_dsq->lock);
6104 } else {
6105 raw_spin_unlock(&src_dsq->lock);
6106 move_remote_task_to_local_dsq(p, enq_flags,
6107 src_rq, dst_rq);
6108 locked_rq = dst_rq;
6109 }
6110 } else {
6111 /*
6112 * @p is going from a non-local DSQ to a non-local DSQ. As
6113 * $src_dsq is already locked, do an abbreviated dequeue.
6114 */
6115 task_unlink_from_dsq(p, src_dsq);
6116 p->scx.dsq = NULL;
6117 raw_spin_unlock(&src_dsq->lock);
6118
6119 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6120 p->scx.dsq_vtime = kit->vtime;
6121 dispatch_enqueue(dst_dsq, p, enq_flags);
6122 }
6123
6124 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6125 p->scx.slice = kit->slice;
6126
6127 dispatched = true;
6128 out:
6129 if (in_balance) {
6130 if (this_rq != locked_rq) {
6131 raw_spin_rq_unlock(locked_rq);
6132 raw_spin_rq_lock(this_rq);
6133 }
6134 } else {
6135 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6136 }
6137
6138 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6139 __SCX_DSQ_ITER_HAS_VTIME);
6140 return dispatched;
6141 }
6142
6143 __bpf_kfunc_start_defs();
6144
6145 /**
6146 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6147 *
6148 * Can only be called from ops.dispatch().
6149 */
6150 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6151 {
6152 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6153 return 0;
6154
6155 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6156 }
6157
6158 /**
6159 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6160 *
6161 * Cancel the latest dispatch. Can be called multiple times to cancel further
6162 * dispatches. Can only be called from ops.dispatch().
6163 */
6164 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6165 {
6166 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6167
6168 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6169 return;
6170
6171 if (dspc->cursor > 0)
6172 dspc->cursor--;
6173 else
6174 scx_ops_error("dispatch buffer underflow");
6175 }
6176
6177 /**
6178 * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ
6179 * @dsq_id: DSQ to consume
6180 *
6181 * Consume a task from the non-local DSQ identified by @dsq_id and transfer it
6182 * to the current CPU's local DSQ for execution. Can only be called from
6183 * ops.dispatch().
6184 *
6185 * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
6186 * trying to consume the specified DSQ. It may also grab rq locks and thus can't
6187 * be called under any BPF locks.
6188 *
6189 * Returns %true if a task has been consumed, %false if there isn't any task to
6190 * consume.
6191 */
6192 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6193 {
6194 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6195 struct scx_dispatch_q *dsq;
6196
6197 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6198 return false;
6199
6200 flush_dispatch_buf(dspc->rq);
6201
6202 dsq = find_user_dsq(dsq_id);
6203 if (unlikely(!dsq)) {
6204 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6205 return false;
6206 }
6207
6208 if (consume_dispatch_q(dspc->rq, dsq)) {
6209 /*
6210 * A successfully consumed task can be dequeued before it starts
6211 * running while the CPU is trying to migrate other dispatched
6212 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6213 * local DSQ.
6214 */
6215 dspc->nr_tasks++;
6216 return true;
6217 } else {
6218 return false;
6219 }
6220 }
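/*
 * Illustrative sketch: an ops.dispatch() which simply consumes from a
 * scheduler-defined shared DSQ (hypothetical SHARED_DSQ), refilling the local
 * DSQ of the CPU being dispatched for:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_consume(SHARED_DSQ);
 *	}
 */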
6221
6222 /**
6223 * scx_bpf_dispatch_from_dsq_set_slice - Override slice when dispatching from DSQ
6224 * @it__iter: DSQ iterator in progress
6225 * @slice: duration the dispatched task can run for in nsecs
6226 *
6227 * Override the slice of the next task that will be dispatched from @it__iter
6228 * using scx_bpf_dispatch_from_dsq[_vtime](). If this function is not called,
6229 * the previous slice duration is kept.
6230 */
6231 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6232 struct bpf_iter_scx_dsq *it__iter, u64 slice)
6233 {
6234 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6235
6236 kit->slice = slice;
6237 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6238 }
6239
6240 /**
6241 * scx_bpf_dispatch_from_dsq_set_vtime - Override vtime when dispatching from DSQ
6242 * @it__iter: DSQ iterator in progress
6243 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6244 *
6245 * Override the vtime of the next task that will be dispatched from @it__iter
6246 * using scx_bpf_dispatch_from_dsq_vtime(). If this function is not called, the
6247 * previous slice vtime is kept. If scx_bpf_dispatch_from_dsq() is used to
6248 * dispatch the next task, the override is ignored and cleared.
6249 */
6250 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6251 struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6252 {
6253 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6254
6255 kit->vtime = vtime;
6256 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6257 }
6258
6259 /**
6260 * scx_bpf_dispatch_from_dsq - Move a task from DSQ iteration to a DSQ
6261 * @it__iter: DSQ iterator in progress
6262 * @p: task to transfer
6263 * @dsq_id: DSQ to move @p to
6264 * @enq_flags: SCX_ENQ_*
6265 *
6266 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6267 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6268 * be the destination.
6269 *
6270 * For the transfer to be successful, @p must still be on the DSQ and have been
6271 * queued before the DSQ iteration started. This function doesn't care whether
6272	 * @p was obtained from the DSQ iteration; it just has to be on the DSQ and
6273	 * have been queued before the iteration started.
6274 *
6275 * @p's slice is kept by default. Use scx_bpf_dispatch_from_dsq_set_slice() to
6276 * update.
6277 *
6278 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6279 * lock (e.g. BPF timers or SYSCALL programs).
6280 *
6281 * Returns %true if @p has been consumed, %false if @p had already been consumed
6282 * or dequeued.
6283 */
6284 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6285 struct task_struct *p, u64 dsq_id,
6286 u64 enq_flags)
6287 {
6288 return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6289 p, dsq_id, enq_flags);
6290 }
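/*
 * Illustrative sketch: scan a user DSQ and move the first task runnable on
 * @cpu into that CPU's local DSQ. bpf_for_each() and BPF_FOR_EACH_ITER are
 * assumed to come from the sched_ext BPF headers; SHARED_DSQ is hypothetical:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
 *		if (bpf_cpumask_test_cpu(cpu, p->cpus_ptr) &&
 *		    scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
 *					      SCX_DSQ_LOCAL_ON | cpu, 0))
 *			break;
 *	}
 */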
6291
6292 /**
6293 * scx_bpf_dispatch_vtime_from_dsq - Move a task from DSQ iteration to a PRIQ DSQ
6294 * @it__iter: DSQ iterator in progress
6295 * @p: task to transfer
6296 * @dsq_id: DSQ to move @p to
6297 * @enq_flags: SCX_ENQ_*
6298 *
6299 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6300 * priority queue of the DSQ specified by @dsq_id. The destination must be a
6301	 * user DSQ as only user DSQs support priority queues.
6302 *
6303 * @p's slice and vtime are kept by default. Use
6304 * scx_bpf_dispatch_from_dsq_set_slice() and
6305 * scx_bpf_dispatch_from_dsq_set_vtime() to update.
6306 *
6307 * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
6308 * scx_bpf_dispatch_vtime() for more information on @vtime.
6309 */
6310 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6311 struct task_struct *p, u64 dsq_id,
6312 u64 enq_flags)
6313 {
6314 return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6315 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6316 }
6317
6318 __bpf_kfunc_end_defs();
6319
6320 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6321 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6322 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6323 BTF_ID_FLAGS(func, scx_bpf_consume)
6324 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6325 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6326 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6327 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6328 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6329
6330 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6331 .owner = THIS_MODULE,
6332 .set = &scx_kfunc_ids_dispatch,
6333 };
6334
6335 __bpf_kfunc_start_defs();
6336
6337 /**
6338 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6339 *
6340 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6341 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6342 * processed tasks. Can only be called from ops.cpu_release().
6343 */
6344 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6345 {
6346 LIST_HEAD(tasks);
6347 u32 nr_enqueued = 0;
6348 struct rq *rq;
6349 struct task_struct *p, *n;
6350
6351 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6352 return 0;
6353
6354 rq = cpu_rq(smp_processor_id());
6355 lockdep_assert_rq_held(rq);
6356
6357 /*
6358 * The BPF scheduler may choose to dispatch tasks back to
6359 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6360 * first to avoid processing the same tasks repeatedly.
6361 */
6362 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6363 scx.dsq_list.node) {
6364 /*
6365 * If @p is being migrated, @p's current CPU may not agree with
6366 * its allowed CPUs and the migration_cpu_stop is about to
6367 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6368 *
6369 * While racing sched property changes may also dequeue and
6370 * re-enqueue a migrating task while its current CPU and allowed
6371 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6372 * the current local DSQ for running tasks and thus are not
6373 * visible to the BPF scheduler.
6374 */
6375 if (p->migration_pending)
6376 continue;
6377
6378 dispatch_dequeue(rq, p);
6379 list_add_tail(&p->scx.dsq_list.node, &tasks);
6380 }
6381
6382 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6383 list_del_init(&p->scx.dsq_list.node);
6384 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6385 nr_enqueued++;
6386 }
6387
6388 return nr_enqueued;
6389 }
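/*
 * Illustrative sketch: a typical ops.cpu_release() hands the tasks still
 * queued on the preempted CPU's local DSQ back to the BPF scheduler so that
 * they can be placed elsewhere:
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */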
6390
6391 __bpf_kfunc_end_defs();
6392
6393 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6394 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6395 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6396
6397 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6398 .owner = THIS_MODULE,
6399 .set = &scx_kfunc_ids_cpu_release,
6400 };
6401
6402 __bpf_kfunc_start_defs();
6403
6404 /**
6405 * scx_bpf_create_dsq - Create a custom DSQ
6406 * @dsq_id: DSQ to create
6407 * @node: NUMA node to allocate from
6408 *
6409 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6410 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6411 */
6412 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6413 {
6414 if (unlikely(node >= (int)nr_node_ids ||
6415 (node < 0 && node != NUMA_NO_NODE)))
6416 return -EINVAL;
6417 return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
6418 }
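/*
 * Illustrative sketch: creating a shared DSQ from the sleepable ops.init()
 * callback. BPF_STRUCT_OPS_SLEEPABLE is assumed to come from the sched_ext
 * BPF headers, SHARED_DSQ is a hypothetical scheduler-defined ID and -1 is
 * NUMA_NO_NODE:
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	}
 */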
6419
6420 __bpf_kfunc_end_defs();
6421
6422 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6423 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6424 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6425 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6426 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6427
6428 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6429 .owner = THIS_MODULE,
6430 .set = &scx_kfunc_ids_unlocked,
6431 };
6432
6433 __bpf_kfunc_start_defs();
6434
6435 /**
6436 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6437 * @cpu: cpu to kick
6438 * @flags: %SCX_KICK_* flags
6439 *
6440 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6441 * trigger rescheduling on a busy CPU. This can be called from any online
6442 * scx_ops operation and the actual kicking is performed asynchronously through
6443 * an irq work.
6444 */
6445 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6446 {
6447 struct rq *this_rq;
6448 unsigned long irq_flags;
6449
6450 if (!ops_cpu_valid(cpu, NULL))
6451 return;
6452
6453 local_irq_save(irq_flags);
6454
6455 this_rq = this_rq();
6456
6457 /*
6458 * While bypassing for PM ops, IRQ handling may not be online which can
6459 * lead to irq_work_queue() malfunction such as infinite busy wait for
6460 * IRQ status update. Suppress kicking.
6461 */
6462 if (scx_rq_bypassing(this_rq))
6463 goto out;
6464
6465 /*
6466 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6467 * rq locks. We can probably be smarter and avoid bouncing if called
6468 * from ops which don't hold a rq lock.
6469 */
6470 if (flags & SCX_KICK_IDLE) {
6471 struct rq *target_rq = cpu_rq(cpu);
6472
6473 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6474 scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6475
6476 if (raw_spin_rq_trylock(target_rq)) {
6477 if (can_skip_idle_kick(target_rq)) {
6478 raw_spin_rq_unlock(target_rq);
6479 goto out;
6480 }
6481 raw_spin_rq_unlock(target_rq);
6482 }
6483 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6484 } else {
6485 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6486
6487 if (flags & SCX_KICK_PREEMPT)
6488 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6489 if (flags & SCX_KICK_WAIT)
6490 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6491 }
6492
6493 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6494 out:
6495 local_irq_restore(irq_flags);
6496 }
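/*
 * Illustrative sketch: after queueing work on a shared DSQ, an idle CPU may
 * need a kick to notice it. scx_bpf_pick_idle_cpu() is one of the built-in
 * idle tracking kfuncs and SHARED_DSQ is hypothetical:
 *
 *	scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */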
6497
6498 /**
6499 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6500 * @dsq_id: id of the DSQ
6501 *
6502 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6503 * -%ENOENT is returned.
6504 */
6505 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6506 {
6507 struct scx_dispatch_q *dsq;
6508 s32 ret;
6509
6510 preempt_disable();
6511
6512 if (dsq_id == SCX_DSQ_LOCAL) {
6513 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6514 goto out;
6515 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6516 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6517
6518 if (ops_cpu_valid(cpu, NULL)) {
6519 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6520 goto out;
6521 }
6522 } else {
6523 dsq = find_user_dsq(dsq_id);
6524 if (dsq) {
6525 ret = READ_ONCE(dsq->nr);
6526 goto out;
6527 }
6528 }
6529 ret = -ENOENT;
6530 out:
6531 preempt_enable();
6532 return ret;
6533 }
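/*
 * Illustrative sketch: e.g. from ops.dispatch(), only bother consuming when
 * the shared DSQ (hypothetical SHARED_DSQ) actually has tasks queued:
 *
 *	if (scx_bpf_dsq_nr_queued(SHARED_DSQ) > 0)
 *		scx_bpf_consume(SHARED_DSQ);
 */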
6534
6535 /**
6536 * scx_bpf_destroy_dsq - Destroy a custom DSQ
6537 * @dsq_id: DSQ to destroy
6538 *
6539 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
6540 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
6541 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
6542	 * which doesn't exist. Can be called from any online scx_ops operation.
6543 */
6544 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
6545 {
6546 destroy_dsq(dsq_id);
6547 }
6548
6549 /**
6550 * bpf_iter_scx_dsq_new - Create a DSQ iterator
6551 * @it: iterator to initialize
6552 * @dsq_id: DSQ to iterate
6553 * @flags: %SCX_DSQ_ITER_*
6554 *
6555 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
6556 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
6557 * tasks which are already queued when this function is invoked.
6558 */
6559 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
6560 u64 flags)
6561 {
6562 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6563
6564 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
6565 sizeof(struct bpf_iter_scx_dsq));
6566 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
6567 __alignof__(struct bpf_iter_scx_dsq));
6568
6569 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
6570 return -EINVAL;
6571
6572 kit->dsq = find_user_dsq(dsq_id);
6573 if (!kit->dsq)
6574 return -ENOENT;
6575
6576 INIT_LIST_HEAD(&kit->cursor.node);
6577 kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
6578 kit->cursor.priv = READ_ONCE(kit->dsq->seq);
6579
6580 return 0;
6581 }
6582
6583 /**
6584 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
6585 * @it: iterator to progress
6586 *
6587 * Return the next task. See bpf_iter_scx_dsq_new().
6588 */
6589 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6590 {
6591 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6592 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6593 struct task_struct *p;
6594 unsigned long flags;
6595
6596 if (!kit->dsq)
6597 return NULL;
6598
6599 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6600
6601 if (list_empty(&kit->cursor.node))
6602 p = NULL;
6603 else
6604 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6605
6606 /*
6607 * Only tasks which were queued before the iteration started are
6608 * visible. This bounds BPF iterations and guarantees that vtime never
6609 * jumps in the other direction while iterating.
6610 */
6611 do {
6612 p = nldsq_next_task(kit->dsq, p, rev);
6613 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6614
6615 if (p) {
6616 if (rev)
6617 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6618 else
6619 list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6620 } else {
6621 list_del_init(&kit->cursor.node);
6622 }
6623
6624 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6625
6626 return p;
6627 }
6628
6629 /**
6630 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6631 * @it: iterator to destroy
6632 *
6633	 * Undo bpf_iter_scx_dsq_new().
6634 */
6635 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6636 {
6637 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6638
6639 if (!kit->dsq)
6640 return;
6641
6642 if (!list_empty(&kit->cursor.node)) {
6643 unsigned long flags;
6644
6645 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6646 list_del_init(&kit->cursor.node);
6647 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6648 }
6649 kit->dsq = NULL;
6650 }
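/*
 * Illustrative sketch: in BPF programs, the iterator triplet above is usually
 * driven through the bpf_for_each() macro from the sched_ext BPF headers,
 * which expands to the new/next/destroy calls. SHARED_DSQ is hypothetical:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0)
 *		bpf_printk("queued: %s[%d]", p->comm, p->pid);
 *
 * Pass SCX_DSQ_ITER_REV as the last argument to walk in reverse order.
 */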
6651
6652 __bpf_kfunc_end_defs();
6653
6654 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
6655 char *fmt, unsigned long long *data, u32 data__sz)
6656 {
6657 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
6658 s32 ret;
6659
6660 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
6661 (data__sz && !data)) {
6662 scx_ops_error("invalid data=%p and data__sz=%u",
6663 (void *)data, data__sz);
6664 return -EINVAL;
6665 }
6666
6667 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
6668 if (ret < 0) {
6669 scx_ops_error("failed to read data fields (%d)", ret);
6670 return ret;
6671 }
6672
6673 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
6674 &bprintf_data);
6675 if (ret < 0) {
6676 scx_ops_error("format preparation failed (%d)", ret);
6677 return ret;
6678 }
6679
6680 ret = bstr_printf(line_buf, line_size, fmt,
6681 bprintf_data.bin_args);
6682 bpf_bprintf_cleanup(&bprintf_data);
6683 if (ret < 0) {
6684 scx_ops_error("(\"%s\", %p, %u) failed to format",
6685 fmt, data, data__sz);
6686 return ret;
6687 }
6688
6689 return ret;
6690 }
6691
6692 static s32 bstr_format(struct scx_bstr_buf *buf,
6693 char *fmt, unsigned long long *data, u32 data__sz)
6694 {
6695 return __bstr_format(buf->data, buf->line, sizeof(buf->line),
6696 fmt, data, data__sz);
6697 }
6698
6699 __bpf_kfunc_start_defs();
6700
6701 /**
6702 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
6703 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
6704 * @fmt: error message format string
6705 * @data: format string parameters packaged using ___bpf_fill() macro
6706 * @data__sz: @data len, must end in '__sz' for the verifier
6707 *
6708 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
6709 * disabling.
6710 */
scx_bpf_exit_bstr(s64 exit_code,char * fmt,unsigned long long * data,u32 data__sz)6711 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
6712 unsigned long long *data, u32 data__sz)
6713 {
6714 unsigned long flags;
6715
6716 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6717 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6718 scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
6719 scx_exit_bstr_buf.line);
6720 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6721 }
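
/*
 * Example: reaching this kfunc from a BPF scheduler. The u64 varargs are
 * packaged into an on-stack array on the BPF side (the scx tooling wraps
 * this pattern in a scx_bpf_exit() convenience macro built on
 * ___bpf_fill()). A hedged sketch; nr_enqueued is a hypothetical counter
 * maintained by the scheduler:
 *
 *	unsigned long long ecode_data[1] = { nr_enqueued };
 *
 *	scx_bpf_exit_bstr(SCX_ECODE_ACT_RESTART,
 *			  "restarting, nr_enqueued=%llu",
 *			  ecode_data, sizeof(ecode_data));
 */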

/**
 * scx_bpf_error_bstr - Indicate fatal error
 * @fmt: error message format string
 * @data: format string parameters packaged using ___bpf_fill() macro
 * @data__sz: @data len, must end in '__sz' for the verifier
 *
 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
 * disabling.
 */
__bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
				    u32 data__sz)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
		scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
				  scx_exit_bstr_buf.line);
	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
}
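
/*
 * Example: reporting a fatal inconsistency from the BPF side. A sketch (the
 * scx tooling typically wraps this as scx_bpf_error()); dsq_id names a
 * hypothetical value being validated:
 *
 *	unsigned long long err_data[1] = { dsq_id };
 *
 *	scx_bpf_error_bstr("unexpected DSQ %llu", err_data, sizeof(err_data));
 */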

/**
 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
 * @fmt: format string
 * @data: format string parameters packaged using ___bpf_fill() macro
 * @data__sz: @data len, must end in '__sz' for the verifier
 *
 * To be called through the scx_bpf_dump() helper from ops.dump(), dump_cpu()
 * and dump_task() to generate extra debug dump specific to the BPF scheduler.
 *
 * The extra dump may be multiple lines. A single line may be split over
 * multiple calls. The last line is automatically terminated.
 */
__bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
				   u32 data__sz)
{
	struct scx_dump_data *dd = &scx_dump_data;
	struct scx_bstr_buf *buf = &dd->buf;
	s32 ret;

	if (raw_smp_processor_id() != dd->cpu) {
		scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
		return;
	}

	/* append the formatted string to the line buf */
	ret = __bstr_format(buf->data, buf->line + dd->cursor,
			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
	if (ret < 0) {
		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
			  dd->prefix, fmt, data, data__sz, ret);
		return;
	}

	dd->cursor += ret;
	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));

	if (!dd->cursor)
		return;

	/*
	 * If the line buf overflowed or ends in a newline, flush it into the
	 * dump. This is to allow the caller to generate a single line over
	 * multiple calls. As ops_dump_flush() can also handle multiple lines
	 * in the line buf, the only case which can lead to an unexpected
	 * truncation is when the caller keeps generating newlines in the
	 * middle of a line over consecutive calls instead of at the end.
	 * Don't do that.
	 */
	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
		ops_dump_flush();
}
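
/*
 * Example: an ops.dump() implementation building one line over multiple
 * calls. A sketch of BPF-side code using the scx_bpf_dump() wrapper from the
 * scx headers; myscheduler_dump, nr_queued and nr_lost are hypothetical:
 *
 *	void BPF_STRUCT_OPS(myscheduler_dump, struct scx_dump_ctx *dctx)
 *	{
 *		scx_bpf_dump("stats:");
 *		scx_bpf_dump(" queued=%llu", nr_queued);
 *		scx_bpf_dump(" lost=%llu\n", nr_lost);	// '\n' flushes the line
 *	}
 */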

/**
 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
 * @cpu: CPU of interest
 *
 * Return the maximum relative capacity of @cpu in relation to the most
 * performant CPU in the system. The return value is in the range [1,
 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
 */
__bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
{
	if (ops_cpu_valid(cpu, NULL))
		return arch_scale_cpu_capacity(cpu);
	else
		return SCX_CPUPERF_ONE;
}

/**
 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
 * @cpu: CPU of interest
 *
 * Return the current relative performance of @cpu in relation to its maximum.
 * The return value is in the range [1, %SCX_CPUPERF_ONE].
 *
 * The current performance level of a CPU in relation to the maximum
 * performance available in the system can be calculated as follows:
 *
 *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
 *
 * The result is in the range [1, %SCX_CPUPERF_ONE].
 */
__bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
{
	if (ops_cpu_valid(cpu, NULL))
		return arch_scale_freq_capacity(cpu);
	else
		return SCX_CPUPERF_ONE;
}
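
/*
 * Worked example of the formula above, with hypothetical numbers: on an
 * asymmetric system where a little CPU has cap = 512 (half of
 * %SCX_CPUPERF_ONE = 1024) and is currently running at cur = 768 (3/4 of its
 * own maximum), its system-wide performance level is
 * 512 * 768 / 1024 = 384, i.e. 384/1024 = 37.5% of the fastest CPU:
 *
 *	u32 cap = scx_bpf_cpuperf_cap(cpu);
 *	u32 cur = scx_bpf_cpuperf_cur(cpu);
 *	u32 sys_level = cap * cur / SCX_CPUPERF_ONE;
 */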

/**
 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
 * @cpu: CPU of interest
 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
 *
 * Set the target performance level of @cpu to @perf. @perf is in linear
 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
 * schedutil cpufreq governor chooses the target frequency.
 *
 * The actual performance level chosen, CPU grouping, and the overhead and
 * latency of the operations are dependent on the hardware and cpufreq driver
 * in use. Consult hardware and cpufreq documentation for more information.
 * The current performance level can be monitored using scx_bpf_cpuperf_cur().
 */
__bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
{
	if (unlikely(perf > SCX_CPUPERF_ONE)) {
		scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
		return;
	}

	if (ops_cpu_valid(cpu, NULL)) {
		struct rq *rq = cpu_rq(cpu);

		rq->scx.cpuperf_target = perf;

		rcu_read_lock_sched_notrace();
		cpufreq_update_util(cpu_rq(cpu), 0);
		rcu_read_unlock_sched_notrace();
	}
}
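
/*
 * Example: driving the frequency target from ops.running(). A sketch of
 * BPF-side code; scaling the target by task weight is a hypothetical policy,
 * not a recommendation:
 *
 *	void BPF_STRUCT_OPS(myscheduler_running, struct task_struct *p)
 *	{
 *		s32 cpu = scx_bpf_task_cpu(p);
 *		u32 perf = min(p->scx.weight * SCX_CPUPERF_ONE / 100,
 *			       SCX_CPUPERF_ONE);
 *
 *		scx_bpf_cpuperf_set(cpu, perf);
 *	}
 */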

/**
 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
 *
 * All valid CPU IDs in the system are smaller than the returned value.
 */
__bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
{
	return nr_cpu_ids;
}

/**
 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
{
	return cpu_possible_mask;
}

/**
 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
{
	return cpu_online_mask;
}

/**
 * scx_bpf_put_cpumask - Release a possible/online cpumask
 * @cpumask: cpumask to release
 */
__bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
{
	/*
	 * Empty function body because we aren't actually acquiring or
	 * releasing a reference to a global cpumask, which is read-only in
	 * the caller and is never released. The acquire / release semantics
	 * here are just used to make the cpumask a trusted pointer in the
	 * caller.
	 */
}
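
/*
 * Example: the acquire/release pairing the verifier expects for the kfuncs
 * above. A sketch of BPF-side code counting online CPUs:
 *
 *	const struct cpumask *online = scx_bpf_get_online_cpumask();
 *	u32 nr_online = bpf_cpumask_weight(online);
 *
 *	scx_bpf_put_cpumask(online);
 */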

/**
 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
 * per-CPU cpumask.
 *
 * Returns an empty mask if idle tracking is not enabled, or running on a UP
 * kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
		scx_ops_error("built-in idle tracking is disabled");
		return cpu_none_mask;
	}

#ifdef CONFIG_SMP
	return idle_masks.cpu;
#else
	return cpu_none_mask;
#endif
}

/**
 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
 * per-physical-core cpumask. Can be used to determine if an entire physical
 * core is free.
 *
 * Returns an empty mask if idle tracking is not enabled, or running on a UP
 * kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
		scx_ops_error("built-in idle tracking is disabled");
		return cpu_none_mask;
	}

#ifdef CONFIG_SMP
	if (sched_smt_active())
		return idle_masks.smt;
	else
		return idle_masks.cpu;
#else
	return cpu_none_mask;
#endif
}

/**
 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
 * either the percpu, or SMT idle-tracking cpumask.
 */
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
	/*
	 * Empty function body because we aren't actually acquiring or
	 * releasing a reference to a global idle cpumask, which is read-only
	 * in the caller and is never released. The acquire / release
	 * semantics here are just used to make the cpumask a trusted pointer
	 * in the caller.
	 */
}
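
/*
 * Example: checking whether @cpu's whole physical core is idle via the SMT
 * mask. A sketch of BPF-side code:
 *
 *	const struct cpumask *smt = scx_bpf_get_idle_smtmask();
 *	bool core_idle = bpf_cpumask_test_cpu(cpu, smt);
 *
 *	scx_bpf_put_idle_cpumask(smt);
 */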

/**
 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
 * @cpu: cpu to test and clear idle for
 *
 * Returns %true if @cpu was idle and its idle state was successfully cleared.
 * %false otherwise.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 */
__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
{
	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
		scx_ops_error("built-in idle tracking is disabled");
		return false;
	}

	if (ops_cpu_valid(cpu, NULL))
		return test_and_clear_cpu_idle(cpu);
	else
		return false;
}

/**
 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
 * number on success. -%EBUSY if no matching cpu was found.
 *
 * Idle CPU tracking may race against CPU scheduling state transitions. For
 * example, this function may return -%EBUSY as CPUs are transitioning into the
 * idle state. If the caller then assumes that there will be dispatch events on
 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
 * event in the near future.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 */
__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
				      u64 flags)
{
	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
		scx_ops_error("built-in idle tracking is disabled");
		return -EBUSY;
	}

	return scx_pick_idle_cpu(cpus_allowed, flags);
}

/**
 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed
 * is empty.
 *
 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
 * set, this function can't tell which CPUs are idle and will always pick any
 * CPU.
 */
__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
				     u64 flags)
{
	s32 cpu;

	if (static_branch_likely(&scx_builtin_idle_enabled)) {
		cpu = scx_pick_idle_cpu(cpus_allowed, flags);
		if (cpu >= 0)
			return cpu;
	}

	cpu = cpumask_any_distribute(cpus_allowed);
	if (cpu < nr_cpu_ids)
		return cpu;
	else
		return -EBUSY;
}
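
/*
 * Example: the pattern recommended in the scx_bpf_pick_idle_cpu() comment
 * above, falling back to scx_bpf_pick_any_cpu() plus a kick so at least one
 * dispatch event is guaranteed. A sketch of a hypothetical BPF-side
 * ops.select_cpu() implementation:
 *
 *	s32 BPF_STRUCT_OPS(myscheduler_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *		if (cpu >= 0)
 *			return cpu;
 *
 *		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 *		if (cpu >= 0) {
 *			scx_bpf_kick_cpu(cpu, 0);
 *			return cpu;
 *		}
 *		return prev_cpu;
 *	}
 */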

/**
 * scx_bpf_task_running - Is task currently running?
 * @p: task of interest
 */
__bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
{
	return task_rq(p)->curr == p;
}

/**
 * scx_bpf_task_cpu - CPU a task is currently associated with
 * @p: task of interest
 */
__bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
{
	return task_cpu(p);
}

/**
 * scx_bpf_cpu_rq - Fetch the rq of a CPU
 * @cpu: CPU of the rq
 */
__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
{
	if (!ops_cpu_valid(cpu, NULL))
		return NULL;

	return cpu_rq(cpu);
}

/**
 * scx_bpf_task_cgroup - Return the sched cgroup of a task
 * @p: task of interest
 *
 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
 * from the scheduler's POV. SCX operations should use this function to
 * determine @p's current cgroup as, unlike following @p->cgroups,
 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
 * rq-locked operations. Can be called on the parameter tasks of rq-locked
 * operations. The restriction guarantees that @p's rq is locked by the caller.
 */
#ifdef CONFIG_CGROUP_SCHED
__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
{
	struct task_group *tg = p->sched_task_group;
	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;

	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
		goto out;

	/*
	 * A task_group may either be a cgroup or an autogroup. In the latter
	 * case, @tg->css.cgroup is %NULL. A task_group can't become the other
	 * kind once created.
	 */
	if (tg && tg->css.cgroup)
		cgrp = tg->css.cgroup;
	else
		cgrp = &cgrp_dfl_root.cgrp;
out:
	cgroup_get(cgrp);
	return cgrp;
}
#endif
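
/*
 * Example: using the returned cgroup from an rq-locked operation such as
 * ops.enqueue(). The kfunc is KF_ACQUIRE, so the cgroup must be released
 * with bpf_cgroup_release(). A sketch of hypothetical BPF-side code keying
 * per-cgroup DSQs by cgroup ID:
 *
 *	void BPF_STRUCT_OPS(myscheduler_enqueue, struct task_struct *p,
 *			    u64 enq_flags)
 *	{
 *		struct cgroup *cgrp = scx_bpf_task_cgroup(p);
 *		u64 cgid = cgrp->kn->id;
 *
 *		bpf_cgroup_release(cgrp);
 *		scx_bpf_dispatch(p, cgid, SCX_SLICE_DFL, enq_flags);
 *	}
 */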

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_any)
BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
#ifdef CONFIG_CGROUP_SCHED
BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
#endif
BTF_KFUNCS_END(scx_kfunc_ids_any)

static const struct btf_kfunc_id_set scx_kfunc_set_any = {
	.owner	= THIS_MODULE,
	.set	= &scx_kfunc_ids_any,
};

static int __init scx_init(void)
{
	int ret;

	/*
	 * kfunc registration can't be done from init_sched_ext_class() as
	 * register_btf_kfunc_id_set() needs most of the system to be up.
	 *
	 * Some kfuncs are context-sensitive and can only be called from
	 * specific SCX ops. They are grouped into BTF sets accordingly.
	 * Unfortunately, BPF currently doesn't have a way of enforcing such
	 * restrictions. Eventually, the verifier should be able to enforce
	 * them. For now, register them the same and make each kfunc explicitly
	 * check using scx_kf_allowed().
	 */
	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_select_cpu)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_enqueue_dispatch)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_dispatch)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_cpu_release)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_unlocked)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					     &scx_kfunc_set_unlocked)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_any)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					     &scx_kfunc_set_any)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					     &scx_kfunc_set_any))) {
		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
		return ret;
	}

	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
	if (ret) {
		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
		return ret;
	}

	ret = register_pm_notifier(&scx_pm_notifier);
	if (ret) {
		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
		return ret;
	}

	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
	if (!scx_kset) {
		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
		return -ENOMEM;
	}

	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
	if (ret < 0) {
		pr_err("sched_ext: Failed to add global attributes\n");
		return ret;
	}

	return 0;
}
__initcall(scx_init);