1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11
12 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
13
14 enum scx_consts {
15 SCX_DSP_DFL_MAX_BATCH = 32,
16 SCX_DSP_MAX_LOOPS = 32,
17 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
18
19 SCX_EXIT_BT_LEN = 64,
20 SCX_EXIT_MSG_LEN = 1024,
21 SCX_EXIT_DUMP_DFL_LEN = 32768,
22
23 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
24
25 /*
26 * Iterating all tasks may take a while. Periodically drop
27 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
28 */
29 SCX_TASK_ITER_BATCH = 32,
30 };
31
32 enum scx_exit_kind {
33 SCX_EXIT_NONE,
34 SCX_EXIT_DONE,
35
36 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
37 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
38 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
39 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
40
41 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
42 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
43 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
44 };
45
46 /*
47 * An exit code can be specified when exiting with scx_bpf_exit() or scx_exit(),
48 * corresponding to exit_kind UNREG_BPF and UNREG_KERN respectively. The codes
49 * are 64-bit values with the following format:
50 *
51 * Bits: [63 .. 48 47 .. 32 31 .. 0]
52 * [ SYS ACT ] [ SYS RSN ] [ USR ]
53 *
54 * SYS ACT: System-defined exit actions
55 * SYS RSN: System-defined exit reasons
56 * USR : User-defined exit codes and reasons
57 *
58 * Using the above, users may communicate intention and context by ORing system
59 * actions and/or system reasons with a user-defined exit code.
60 */
61 enum scx_exit_code {
62 /* Reasons */
63 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
64
65 /* Actions */
66 SCX_ECODE_ACT_RESTART = 1LLU << 48,
67 };
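/*
 * For illustration only: a BPF scheduler that wants to be restarted after a
 * CPU hotplug event might compose its exit code roughly as below, ORing a
 * system action and reason with its own (here zero) user code. scx_bpf_exit()
 * is the BPF-side exit kfunc mentioned above.
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
 *		     "CPU %d changed hotplug state, restarting", cpu);
 */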
68
69 /*
70 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
71 * being disabled.
72 */
73 struct scx_exit_info {
74 /* %SCX_EXIT_* - broad category of the exit reason */
75 enum scx_exit_kind kind;
76
77 /* exit code if gracefully exiting */
78 s64 exit_code;
79
80 /* textual representation of the above */
81 const char *reason;
82
83 /* backtrace if exiting due to an error */
84 unsigned long *bt;
85 u32 bt_len;
86
87 /* informational message */
88 char *msg;
89
90 /* debug dump */
91 char *dump;
92 };
93
94 /* sched_ext_ops.flags */
95 enum scx_ops_flags {
96 /*
97 * Keep built-in idle tracking even if ops.update_idle() is implemented.
98 */
99 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
100
101 /*
102 * By default, if there are no other tasks to run on the CPU, the ext core
103 * keeps running the current task even after its slice expires. If this
104 * flag is specified, such tasks are passed to ops.enqueue() with
105 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
106 */
107 SCX_OPS_ENQ_LAST = 1LLU << 1,
108
109 /*
110 * An exiting task may schedule after PF_EXITING is set. In such cases,
111 * bpf_task_from_pid() may not be able to find the task and if the BPF
112 * scheduler depends on pid lookup for dispatching, the task will be
113 * lost leading to various issues including RCU grace period stalls.
114 *
115 * To mask this problem, by default, unhashed tasks are automatically
116 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
117 * depend on pid lookups and wants to handle these tasks directly, the
118 * following flag can be used.
119 */
120 SCX_OPS_ENQ_EXITING = 1LLU << 2,
121
122 /*
123 * If set, only tasks with policy set to SCHED_EXT are attached to
124 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
125 */
126 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
127
128 /*
129 * A migration disabled task can only execute on its current CPU. By
130 * default, such tasks are automatically put on the CPU's local DSQ with
131 * the default slice on enqueue. If this ops flag is set, they also go
132 * through ops.enqueue().
133 *
134 * A migration disabled task never invokes ops.select_cpu() as it can
135 * only select the current CPU. Also, p->cpus_ptr will only contain its
136 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
137 * and thus may disagree with cpumask_weight(p->cpus_ptr).
138 */
139 SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,
140
141 /*
142 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
143 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
144 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
145 * transfers. When this optimization is enabled, ops.select_cpu() is
146 * skipped in some cases (when racing against the wakee switching out).
147 * As the BPF scheduler may depend on ops.select_cpu() being invoked
148 * during wakeups, queued wakeup is disabled by default.
149 *
150 * If this ops flag is set, queued wakeup optimization is enabled and
151 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
152 * wakee's CPU without preceding ops.select_cpu() even for tasks which
153 * may be executed on multiple CPUs.
154 */
155 SCX_OPS_ALLOW_QUEUED_WAKEUP = 1LLU << 5,
156
157 /*
158 * If set, enable per-node idle cpumasks. If clear, use a single global
159 * flat idle cpumask.
160 */
161 SCX_OPS_BUILTIN_IDLE_PER_NODE = 1LLU << 6,
162
163 /*
164 * CPU cgroup support flags
165 */
166 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* DEPRECATED, will be removed on 6.18 */
167
168 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
169 SCX_OPS_ENQ_LAST |
170 SCX_OPS_ENQ_EXITING |
171 SCX_OPS_ENQ_MIGRATION_DISABLED |
172 SCX_OPS_ALLOW_QUEUED_WAKEUP |
173 SCX_OPS_SWITCH_PARTIAL |
174 SCX_OPS_BUILTIN_IDLE_PER_NODE |
175 SCX_OPS_HAS_CGROUP_WEIGHT,
176
177 /* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
178 __SCX_OPS_INTERNAL_MASK = 0xffLLU << 56,
179
180 SCX_OPS_HAS_CPU_PREEMPT = 1LLU << 56,
181 };
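/*
 * Illustrative sketch of how a BPF scheduler selects the flags above. The
 * struct_ops map below lives in the scheduler's BPF object, not in this
 * file, and "simple_ops" is a made-up name:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops simple_ops = {
 *		.flags	= SCX_OPS_ENQ_LAST | SCX_OPS_KEEP_BUILTIN_IDLE,
 *		.name	= "simple",
 *	};
 */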
182
183 /* argument container for ops.init_task() */
184 struct scx_init_task_args {
185 /*
186 * Set if ops.init_task() is being invoked on the fork path, as opposed
187 * to the scheduler transition path.
188 */
189 bool fork;
190 #ifdef CONFIG_EXT_GROUP_SCHED
191 /* the cgroup the task is joining */
192 struct cgroup *cgroup;
193 #endif
194 };
195
196 /* argument container for ops.exit_task() */
197 struct scx_exit_task_args {
198 /* Whether the task exited before running on sched_ext. */
199 bool cancelled;
200 };
201
202 /* argument container for ops->cgroup_init() */
203 struct scx_cgroup_init_args {
204 /* the weight of the cgroup [1..10000] */
205 u32 weight;
206 };
207
208 enum scx_cpu_preempt_reason {
209 /* next task is being scheduled by &sched_class_rt */
210 SCX_CPU_PREEMPT_RT,
211 /* next task is being scheduled by &sched_class_dl */
212 SCX_CPU_PREEMPT_DL,
213 /* next task is being scheduled by &sched_class_stop */
214 SCX_CPU_PREEMPT_STOP,
215 /* unknown reason for SCX being preempted */
216 SCX_CPU_PREEMPT_UNKNOWN,
217 };
218
219 /*
220 * Argument container for ops->cpu_acquire(). Currently empty, but may be
221 * expanded in the future.
222 */
223 struct scx_cpu_acquire_args {};
224
225 /* argument container for ops->cpu_release() */
226 struct scx_cpu_release_args {
227 /* the reason the CPU was preempted */
228 enum scx_cpu_preempt_reason reason;
229
230 /* the task that's going to be scheduled on the CPU */
231 struct task_struct *task;
232 };
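/*
 * A sketch of BPF-side usage, not kernel code: an ops.cpu_release()
 * implementation commonly re-enqueues whatever was sitting on the preempted
 * CPU's local DSQ so those tasks can be placed elsewhere. BPF_STRUCT_OPS is
 * the helper macro from the userspace scx tooling and "myops_" is an
 * illustrative prefix.
 *
 *	void BPF_STRUCT_OPS(myops_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */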
233
234 /*
235 * Informational context provided to dump operations.
236 */
237 struct scx_dump_ctx {
238 enum scx_exit_kind kind;
239 s64 exit_code;
240 const char *reason;
241 u64 at_ns;
242 u64 at_jiffies;
243 };
244
245 /**
246 * struct sched_ext_ops - Operation table for BPF scheduler implementation
247 *
248 * A BPF scheduler can implement an arbitrary scheduling policy by
249 * implementing and loading operations in this table. Note that a userland
250 * scheduling policy can also be implemented using the BPF scheduler
251 * as a shim layer.
252 */
253 struct sched_ext_ops {
254 /**
255 * @select_cpu: Pick the target CPU for a task which is being woken up
256 * @p: task being woken up
257 * @prev_cpu: the cpu @p was on before sleeping
258 * @wake_flags: SCX_WAKE_*
259 *
260 * Decision made here isn't final. @p may be moved to any CPU while it
261 * is getting dispatched for execution later. However, as @p is not on
262 * the rq at this point, getting the eventual execution CPU right here
263 * saves a small bit of overhead down the line.
264 *
265 * If an idle CPU is returned, the CPU is kicked and will try to
266 * dispatch. While an explicit custom mechanism can be added,
267 * select_cpu() serves as the default way to wake up idle CPUs.
268 *
269 * @p may be inserted into a DSQ directly by calling
270 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
271 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
272 * of the CPU returned by this operation.
273 *
274 * Note that select_cpu() is never called for tasks that can only run
275 * on a single CPU or tasks with migration disabled, as they don't have
276 * the option to select a different CPU. See select_task_rq() for
277 * details.
278 */
279 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
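	/*
	 * A minimal sketch, assuming built-in idle tracking is active: pick a
	 * CPU with scx_bpf_select_cpu_dfl() and, if it is idle, insert @p
	 * directly so ops.enqueue() is skipped ("myops_" names are
	 * illustrative).
	 *
	 *	s32 BPF_STRUCT_OPS(myops_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu,
	 *						 wake_flags, &is_idle);
	 *
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL,
	 *					   SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */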
280
281 /**
282 * @enqueue: Enqueue a task on the BPF scheduler
283 * @p: task being enqueued
284 * @enq_flags: %SCX_ENQ_*
285 *
286 * @p is ready to run. Insert directly into a DSQ by calling
287 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
288 * inserted, the BPF scheduler owns @p and if it fails to dispatch @p,
289 * the task will stall.
290 *
291 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
292 * skipped.
293 */
294 void (*enqueue)(struct task_struct *p, u64 enq_flags);
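	/*
	 * A minimal sketch: hand @p to a shared DSQ with the default slice
	 * and let ops.dispatch() move it to a CPU later. SHARED_DSQ is a
	 * hypothetical DSQ id created with scx_bpf_create_dsq() in
	 * ops.init().
	 *
	 *	void BPF_STRUCT_OPS(myops_enqueue, struct task_struct *p,
	 *			    u64 enq_flags)
	 *	{
	 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL,
	 *				   enq_flags);
	 *	}
	 */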
295
296 /**
297 * @dequeue: Remove a task from the BPF scheduler
298 * @p: task being dequeued
299 * @deq_flags: %SCX_DEQ_*
300 *
301 * Remove @p from the BPF scheduler. This is usually called to isolate
302 * the task while updating its scheduling properties (e.g. priority).
303 *
304 * The ext core keeps track of whether the BPF side owns a given task or
305 * not and can gracefully ignore spurious dispatches from BPF side,
306 * which makes it safe to not implement this method. However, depending
307 * on the scheduling logic, this can lead to confusing behaviors - e.g.
308 * scheduling position not being updated across a priority change.
309 */
310 void (*dequeue)(struct task_struct *p, u64 deq_flags);
311
312 /**
313 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
314 * @cpu: CPU to dispatch tasks for
315 * @prev: previous task being switched out
316 *
317 * Called when a CPU's local dsq is empty. The operation should dispatch
318 * one or more tasks from the BPF scheduler into the DSQs using
319 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
320 * using scx_bpf_dsq_move_to_local().
321 *
322 * The maximum number of times scx_bpf_dsq_insert() can be called
323 * without an intervening scx_bpf_dsq_move_to_local() is specified by
324 * ops.dispatch_max_batch. See the comments on top of the two functions
325 * for more details.
326 *
327 * When not %NULL, @prev is an SCX task with its slice depleted. If
328 * @prev is still runnable, as indicated by %SCX_TASK_QUEUED being set in
329 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
330 * ops.dispatch() returns. To keep executing @prev, return without
331 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
332 */
333 void (*dispatch)(s32 cpu, struct task_struct *prev);
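	/*
	 * A minimal sketch matching the enqueue example above: refill the
	 * local DSQ from the hypothetical SHARED_DSQ when the CPU runs dry.
	 *
	 *	void BPF_STRUCT_OPS(myops_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
	 *	}
	 */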
334
335 /**
336 * @tick: Periodic tick
337 * @p: task running currently
338 *
339 * This operation is called every 1/HZ seconds on CPUs which are
340 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
341 * immediate dispatch cycle on the CPU.
342 */
343 void (*tick)(struct task_struct *p);
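	/*
	 * For example, a scheduler could cut the current slice short from the
	 * tick to force a dispatch cycle (the policy check is hypothetical):
	 *
	 *	void BPF_STRUCT_OPS(myops_tick, struct task_struct *p)
	 *	{
	 *		if (should_preempt(p))
	 *			p->scx.slice = 0;
	 *	}
	 */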
344
345 /**
346 * @runnable: A task is becoming runnable on its associated CPU
347 * @p: task becoming runnable
348 * @enq_flags: %SCX_ENQ_*
349 *
350 * This and the following three functions can be used to track a task's
351 * execution state transitions. A task becomes ->runnable() on a CPU,
352 * and then goes through one or more ->running() and ->stopping() pairs
353 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
354 * done running on the CPU.
355 *
356 * @p is becoming runnable on the CPU because it's
357 *
358 * - waking up (%SCX_ENQ_WAKEUP)
359 * - being moved from another CPU
360 * - being restored after temporarily taken off the queue for an
361 * attribute change.
362 *
363 * This and ->enqueue() are related but not coupled. This operation
364 * notifies @p's state transition and may not be followed by ->enqueue()
365 * e.g. when @p is being dispatched to a remote CPU, or when @p is
366 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
367 * task may be ->enqueue()'d without being preceded by this operation
368 * e.g. after exhausting its slice.
369 */
370 void (*runnable)(struct task_struct *p, u64 enq_flags);
371
372 /**
373 * @running: A task is starting to run on its associated CPU
374 * @p: task starting to run
375 *
376 * Note that this callback may be called from a CPU other than the
377 * one the task is going to run on. This can happen when a task
378 * property is changed (e.g., affinity), since scx_next_task_scx(),
379 * which triggers this callback, may run on a CPU different from
380 * the task's assigned CPU.
381 *
382 * Therefore, always use scx_bpf_task_cpu(@p) to determine the
383 * target CPU the task is going to use.
384 *
385 * See ->runnable() for explanation on the task state notifiers.
386 */
387 void (*running)(struct task_struct *p);
388
389 /**
390 * @stopping: A task is stopping execution
391 * @p: task stopping to run
392 * @runnable: is task @p still runnable?
393 *
394 * Note that this callback may be called from a CPU other than the
395 * one the task was running on. This can happen when a task
396 * property is changed (e.g., affinity), since dequeue_task_scx(),
397 * which triggers this callback, may run on a CPU different from
398 * the task's assigned CPU.
399 *
400 * Therefore, always use scx_bpf_task_cpu(@p) to retrieve the CPU
401 * the task was running on.
402 *
403 * See ->runnable() for explanation on the task state notifiers. If
404 * !@runnable, ->quiescent() will be invoked after this operation
405 * returns.
406 */
407 void (*stopping)(struct task_struct *p, bool runnable);
408
409 /**
410 * @quiescent: A task is becoming not runnable on its associated CPU
411 * @p: task becoming not runnable
412 * @deq_flags: %SCX_DEQ_*
413 *
414 * See ->runnable() for explanation on the task state notifiers.
415 *
416 * @p is becoming quiescent on the CPU because it's
417 *
418 * - sleeping (%SCX_DEQ_SLEEP)
419 * - being moved to another CPU
420 * - being temporarily taken off the queue for an attribute change
421 * (%SCX_DEQ_SAVE)
422 *
423 * This and ->dequeue() are related but not coupled. This operation
424 * notifies @p's state transition and may not be preceded by ->dequeue()
425 * e.g. when @p is being dispatched to a remote CPU.
426 */
427 void (*quiescent)(struct task_struct *p, u64 deq_flags);
428
429 /**
430 * @yield: Yield CPU
431 * @from: yielding task
432 * @to: optional yield target task
433 *
434 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
435 * The BPF scheduler should ensure that other available tasks are
436 * dispatched before the yielding task. Return value is ignored in this
437 * case.
438 *
439 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
440 * scheduler can implement the request, return %true; otherwise, %false.
441 */
442 bool (*yield)(struct task_struct *from, struct task_struct *to);
443
444 /**
445 * @core_sched_before: Task ordering for core-sched
446 * @a: task A
447 * @b: task B
448 *
449 * Used by core-sched to determine the ordering between two tasks. See
450 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
451 * core-sched.
452 *
453 * Both @a and @b are runnable and may or may not currently be queued on
454 * the BPF scheduler. Should return %true if @a should run before @b.
455 * %false if there's no required ordering or @b should run before @a.
456 *
457 * If not specified, the default is ordering them according to when they
458 * became runnable.
459 */
460 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
461
462 /**
463 * @set_weight: Set task weight
464 * @p: task to set weight for
465 * @weight: new weight [1..10000]
466 *
467 * Update @p's weight to @weight.
468 */
469 void (*set_weight)(struct task_struct *p, u32 weight);
470
471 /**
472 * @set_cpumask: Set CPU affinity
473 * @p: task to set CPU affinity for
474 * @cpumask: cpumask of cpus that @p can run on
475 *
476 * Update @p's CPU affinity to @cpumask.
477 */
478 void (*set_cpumask)(struct task_struct *p,
479 const struct cpumask *cpumask);
480
481 /**
482 * @update_idle: Update the idle state of a CPU
483 * @cpu: CPU to update the idle state for
484 * @idle: whether entering or exiting the idle state
485 *
486 * This operation is called when @cpu enters or leaves the idle state.
487 * By default, implementing this operation disables the built-in
488 * idle CPU tracking and the following helpers become unavailable:
489 *
490 * - scx_bpf_select_cpu_dfl()
491 * - scx_bpf_select_cpu_and()
492 * - scx_bpf_test_and_clear_cpu_idle()
493 * - scx_bpf_pick_idle_cpu()
494 *
495 * The user also must implement ops.select_cpu() as the default
496 * implementation relies on scx_bpf_select_cpu_dfl().
497 *
498 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
499 * tracking.
500 */
501 void (*update_idle)(s32 cpu, bool idle);
502
503 /**
504 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
505 * @cpu: The CPU being acquired by the BPF scheduler.
506 * @args: Acquire arguments, see the struct definition.
507 *
508 * A CPU that was previously released from the BPF scheduler is now once
509 * again under its control.
510 */
511 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
512
513 /**
514 * @cpu_release: A CPU is taken away from the BPF scheduler
515 * @cpu: The CPU being released by the BPF scheduler.
516 * @args: Release arguments, see the struct definition.
517 *
518 * The specified CPU is no longer under the control of the BPF
519 * scheduler. This could be because it was preempted by a higher
520 * priority sched_class, though there may be other reasons as well. The
521 * caller should consult @args->reason to determine the cause.
522 */
523 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
524
525 /**
526 * @init_task: Initialize a task to run in a BPF scheduler
527 * @p: task to initialize for BPF scheduling
528 * @args: init arguments, see the struct definition
529 *
530 * Either we're loading a BPF scheduler or a new task is being forked.
531 * Initialize @p for BPF scheduling. This operation may block and can
532 * be used for allocations, and is called exactly once for a task.
533 *
534 * Return 0 for success, -errno for failure. An error return while
535 * loading will abort loading of the BPF scheduler. During a fork, it
536 * will abort that specific fork.
537 */
538 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
539
540 /**
541 * @exit_task: Exit a previously-running task from the system
542 * @p: task to exit
543 * @args: exit arguments, see the struct definition
544 *
545 * @p is exiting or the BPF scheduler is being unloaded. Perform any
546 * necessary cleanup for @p.
547 */
548 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
549
550 /**
551 * @enable: Enable BPF scheduling for a task
552 * @p: task to enable BPF scheduling for
553 *
554 * Enable @p for BPF scheduling. enable() is called on @p any time it
555 * enters SCX, and is always paired with a matching disable().
556 */
557 void (*enable)(struct task_struct *p);
558
559 /**
560 * @disable: Disable BPF scheduling for a task
561 * @p: task to disable BPF scheduling for
562 *
563 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
564 * Disable BPF scheduling for @p. A disable() call is always matched
565 * with a prior enable() call.
566 */
567 void (*disable)(struct task_struct *p);
568
569 /**
570 * @dump: Dump BPF scheduler state on error
571 * @ctx: debug dump context
572 *
573 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
574 */
575 void (*dump)(struct scx_dump_ctx *ctx);
576
577 /**
578 * @dump_cpu: Dump BPF scheduler state for a CPU on error
579 * @ctx: debug dump context
580 * @cpu: CPU to generate debug dump for
581 * @idle: @cpu is currently idle without any runnable tasks
582 *
583 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
584 * @cpu. If @idle is %true and this operation doesn't produce any
585 * output, @cpu is skipped for dump.
586 */
587 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
588
589 /**
590 * @dump_task: Dump BPF scheduler state for a runnable task on error
591 * @ctx: debug dump context
592 * @p: runnable task to generate debug dump for
593 *
594 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
595 * @p.
596 */
597 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
598
599 #ifdef CONFIG_EXT_GROUP_SCHED
600 /**
601 * @cgroup_init: Initialize a cgroup
602 * @cgrp: cgroup being initialized
603 * @args: init arguments, see the struct definition
604 *
605 * Either the BPF scheduler is being loaded or @cgrp created, initialize
606 * @cgrp for sched_ext. This operation may block.
607 *
608 * Return 0 for success, -errno for failure. An error return while
609 * loading will abort loading of the BPF scheduler. During cgroup
610 * creation, it will abort the specific cgroup creation.
611 */
612 s32 (*cgroup_init)(struct cgroup *cgrp,
613 struct scx_cgroup_init_args *args);
614
615 /**
616 * @cgroup_exit: Exit a cgroup
617 * @cgrp: cgroup being exited
618 *
619 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
620 * @cgrp for sched_ext. This operation may block.
621 */
622 void (*cgroup_exit)(struct cgroup *cgrp);
623
624 /**
625 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
626 * @p: task being moved
627 * @from: cgroup @p is being moved from
628 * @to: cgroup @p is being moved to
629 *
630 * Prepare @p for move from cgroup @from to @to. This operation may
631 * block and can be used for allocations.
632 *
633 * Return 0 for success, -errno for failure. An error return aborts the
634 * migration.
635 */
636 s32 (*cgroup_prep_move)(struct task_struct *p,
637 struct cgroup *from, struct cgroup *to);
638
639 /**
640 * @cgroup_move: Commit cgroup move
641 * @p: task being moved
642 * @from: cgroup @p is being moved from
643 * @to: cgroup @p is being moved to
644 *
645 * Commit the move. @p is dequeued during this operation.
646 */
647 void (*cgroup_move)(struct task_struct *p,
648 struct cgroup *from, struct cgroup *to);
649
650 /**
651 * @cgroup_cancel_move: Cancel cgroup move
652 * @p: task whose cgroup move is being canceled
653 * @from: cgroup @p was being moved from
654 * @to: cgroup @p was being moved to
655 *
656 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
657 * Undo the preparation.
658 */
659 void (*cgroup_cancel_move)(struct task_struct *p,
660 struct cgroup *from, struct cgroup *to);
661
662 /**
663 * @cgroup_set_weight: A cgroup's weight is being changed
664 * @cgrp: cgroup whose weight is being updated
665 * @weight: new weight [1..10000]
666 *
667 * Update @cgrp's weight to @weight.
668 */
669 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
670 #endif /* CONFIG_EXT_GROUP_SCHED */
671
672 /*
673 * All online ops must come before ops.cpu_online().
674 */
675
676 /**
677 * @cpu_online: A CPU became online
678 * @cpu: CPU which just came up
679 *
680 * @cpu just came online. @cpu will not call ops.enqueue() or
681 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
682 */
683 void (*cpu_online)(s32 cpu);
684
685 /**
686 * @cpu_offline: A CPU is going offline
687 * @cpu: CPU which is going offline
688 *
689 * @cpu is going offline. @cpu will not call ops.enqueue() or
690 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
691 */
692 void (*cpu_offline)(s32 cpu);
693
694 /*
695 * All CPU hotplug ops must come before ops.init().
696 */
697
698 /**
699 * @init: Initialize the BPF scheduler
700 */
701 s32 (*init)(void);
702
703 /**
704 * @exit: Clean up after the BPF scheduler
705 * @info: Exit info
706 *
707 * ops.exit() is also called on ops.init() failure, which is a bit
708 * unusual. This is to allow rich reporting through @info on how
709 * ops.init() failed.
710 */
711 void (*exit)(struct scx_exit_info *info);
712
713 /**
714 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
715 */
716 u32 dispatch_max_batch;
717
718 /**
719 * @flags: %SCX_OPS_* flags
720 */
721 u64 flags;
722
723 /**
724 * @timeout_ms: The maximum amount of time, in milliseconds, that a
725 * runnable task should be able to wait before being scheduled. The
726 * maximum timeout may not exceed the default timeout of 30 seconds.
727 *
728 * Defaults to the maximum allowed timeout value of 30 seconds.
729 */
730 u32 timeout_ms;
731
732 /**
733 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
734 * value of 32768 is used.
735 */
736 u32 exit_dump_len;
737
738 /**
739 * @hotplug_seq: A sequence number that may be set by the scheduler to
740 * detect when a hotplug event has occurred during the loading process.
741 * If 0, no detection occurs. Otherwise, the scheduler will fail to
742 * load if the sequence number does not match @scx_hotplug_seq on the
743 * enable path.
744 */
745 u64 hotplug_seq;
746
747 /**
748 * @name: BPF scheduler's name
749 *
750 * Must be a non-zero valid BPF object name including only isalnum(),
751 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
752 * BPF scheduler is enabled.
753 */
754 char name[SCX_OPS_NAME_LEN];
755
756 /* internal use only, must be NULL */
757 void *priv;
758 };
759
760 enum scx_opi {
761 SCX_OPI_BEGIN = 0,
762 SCX_OPI_NORMAL_BEGIN = 0,
763 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
764 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
765 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
766 SCX_OPI_END = SCX_OP_IDX(init),
767 };
768
769 /*
770 * Collection of event counters. Event types are placed in descending order.
771 */
772 struct scx_event_stats {
773 /*
774 * If ops.select_cpu() returns a CPU which can't be used by the task,
775 * the core scheduler code silently picks a fallback CPU.
776 */
777 s64 SCX_EV_SELECT_CPU_FALLBACK;
778
779 /*
780 * When dispatching to a local DSQ, the CPU may have gone offline in
781 * the meantime. In this case, the task is bounced to the global DSQ.
782 */
783 s64 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
784
785 /*
786 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
787 * continued to run because there were no other tasks on the CPU.
788 */
789 s64 SCX_EV_DISPATCH_KEEP_LAST;
790
791 /*
792 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
793 * is dispatched to a local DSQ when exiting.
794 */
795 s64 SCX_EV_ENQ_SKIP_EXITING;
796
797 /*
798 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
799 * migration disabled task skips ops.enqueue() and is dispatched to its
800 * local DSQ.
801 */
802 s64 SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;
803
804 /*
805 * Total number of times a task's time slice was refilled with the
806 * default value (SCX_SLICE_DFL).
807 */
808 s64 SCX_EV_REFILL_SLICE_DFL;
809
810 /*
811 * The total duration of bypass modes in nanoseconds.
812 */
813 s64 SCX_EV_BYPASS_DURATION;
814
815 /*
816 * The number of tasks dispatched in the bypassing mode.
817 */
818 s64 SCX_EV_BYPASS_DISPATCH;
819
820 /*
821 * The number of times the bypassing mode has been activated.
822 */
823 s64 SCX_EV_BYPASS_ACTIVATE;
824 };
825
826 struct scx_sched {
827 struct sched_ext_ops ops;
828 DECLARE_BITMAP(has_op, SCX_OPI_END);
829
830 /*
831 * Dispatch queues.
832 *
833 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability.
834 * This is to avoid live-locking in bypass mode where all tasks are
835 * dispatched to %SCX_DSQ_GLOBAL and all CPUs consume from it. If
836 * per-node split isn't sufficient, it can be further split.
837 */
838 struct rhashtable dsq_hash;
839 struct scx_dispatch_q **global_dsqs;
840
841 /*
842 * The event counters are in a per-CPU variable to minimize the
843 * accounting overhead. A system-wide view on the event counter is
844 * constructed when requested by scx_bpf_events().
845 */
846 struct scx_event_stats __percpu *event_stats_cpu;
847
848 bool warned_zero_slice;
849
850 atomic_t exit_kind;
851 struct scx_exit_info *exit_info;
852
853 struct kobject kobj;
854
855 struct kthread_worker *helper;
856 struct irq_work error_irq_work;
857 struct kthread_work disable_work;
858 struct rcu_work rcu_work;
859 };
860
861 enum scx_wake_flags {
862 /* expose select WF_* flags as enums */
863 SCX_WAKE_FORK = WF_FORK,
864 SCX_WAKE_TTWU = WF_TTWU,
865 SCX_WAKE_SYNC = WF_SYNC,
866 };
867
868 enum scx_enq_flags {
869 /* expose select ENQUEUE_* flags as enums */
870 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
871 SCX_ENQ_HEAD = ENQUEUE_HEAD,
872 SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED,
873
874 /* high 32bits are SCX specific */
875
876 /*
877 * Set the following to trigger preemption when calling
878 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
879 * current task is cleared to zero and the CPU is kicked into the
880 * scheduling path. Implies %SCX_ENQ_HEAD.
881 */
882 SCX_ENQ_PREEMPT = 1LLU << 32,
883
884 /*
885 * The task being enqueued was previously enqueued on the current CPU's
886 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
887 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
888 * invoked in a ->cpu_release() callback, and the task is again
889 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
890 * task will not be scheduled on the CPU until at least the next invocation
891 * of the ->cpu_acquire() callback.
892 */
893 SCX_ENQ_REENQ = 1LLU << 40,
894
895 /*
896 * The task being enqueued is the only task available for the cpu. By
897 * default, ext core keeps executing such tasks but when
898 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
899 * %SCX_ENQ_LAST flag set.
900 *
901 * The BPF scheduler is responsible for triggering a follow-up
902 * scheduling event. Otherwise, execution may stall.
903 */
904 SCX_ENQ_LAST = 1LLU << 41,
905
906 /* high 8 bits are internal */
907 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
908
909 SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
910 SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
911 };
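/*
 * For example, a BPF scheduler can combine %SCX_ENQ_PREEMPT with a direct
 * insertion into a specific CPU's local DSQ to preempt whatever that CPU is
 * currently running (illustrative only):
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target_cpu, SCX_SLICE_DFL,
 *			   SCX_ENQ_PREEMPT);
 */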
912
913 enum scx_deq_flags {
914 /* expose select DEQUEUE_* flags as enums */
915 SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
916
917 /* high 32bits are SCX specific */
918
919 /*
920 * The generic core-sched layer decided to execute the task even though
921 * it hasn't been dispatched yet. Dequeue from the BPF side.
922 */
923 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
924 };
925
926 enum scx_pick_idle_cpu_flags {
927 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
928 SCX_PICK_IDLE_IN_NODE = 1LLU << 1, /* pick a CPU in the same target NUMA node */
929 };
930
931 enum scx_kick_flags {
932 /*
933 * Kick the target CPU if idle. Guarantees that the target CPU goes
934 * through at least one full scheduling cycle before going idle. If the
935 * target CPU can be determined to be currently not idle and going to go
936 * through a scheduling cycle before going idle, noop.
937 */
938 SCX_KICK_IDLE = 1LLU << 0,
939
940 /*
941 * Preempt the current task and execute the dispatch path. If the
942 * current task of the target CPU is an SCX task, its ->scx.slice is
943 * cleared to zero before the scheduling path is invoked so that the
944 * task expires and the dispatch path is invoked.
945 */
946 SCX_KICK_PREEMPT = 1LLU << 1,
947
948 /*
949 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
950 * return after the target CPU finishes picking the next task.
951 */
952 SCX_KICK_WAIT = 1LLU << 2,
953 };
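/*
 * Typical BPF-side usage (illustrative): wake an idle CPU so it runs the
 * dispatch path, or force a busy CPU to re-enter the scheduling path:
 *
 *	scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 *	scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
 */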
954
955 enum scx_tg_flags {
956 SCX_TG_ONLINE = 1U << 0,
957 SCX_TG_INITED = 1U << 1,
958 };
959
960 enum scx_enable_state {
961 SCX_ENABLING,
962 SCX_ENABLED,
963 SCX_DISABLING,
964 SCX_DISABLED,
965 };
966
967 static const char *scx_enable_state_str[] = {
968 [SCX_ENABLING] = "enabling",
969 [SCX_ENABLED] = "enabled",
970 [SCX_DISABLING] = "disabling",
971 [SCX_DISABLED] = "disabled",
972 };
973
974 /*
975 * sched_ext_entity->ops_state
976 *
977 * Used to track the task ownership between the SCX core and the BPF scheduler.
978 * State transitions look as follows:
979 *
980 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
981 * ^ | |
982 * | v v
983 * \-------------------------------/
984 *
985 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
986 * sites for explanations on the conditions being waited upon and why they are
987 * safe. Transitions out of them into NONE or QUEUED must store_release and the
988 * waiters should load_acquire.
989 *
990 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
991 * any given task can be dispatched by the BPF scheduler at all times and thus
992 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
993 * to try to dispatch any task anytime regardless of its state as the SCX core
994 * can safely reject invalid dispatches.
995 */
996 enum scx_ops_state {
997 SCX_OPSS_NONE, /* owned by the SCX core */
998 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
999 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
1000 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
1001
1002 /*
1003 * QSEQ brands each QUEUED instance so that, when dispatch races
1004 * dequeue/requeue, the dispatcher can tell whether it still has a claim
1005 * on the task being dispatched.
1006 *
1007 * As some 32bit archs can't do 64bit store_release/load_acquire,
1008 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
1009 * 32bit machines. The dispatch race window QSEQ protects is very narrow
1010 * and runs with IRQ disabled. 30 bits should be sufficient.
1011 */
1012 SCX_OPSS_QSEQ_SHIFT = 2,
1013 };
1014
1015 /* Use macros to ensure that the type is unsigned long for the masks */
1016 #define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
1017 #define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
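/*
 * A sketch of how an ops_state word decomposes using the masks above; the
 * enqueue and dispatch paths later in this file split the word the same way:
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	unsigned long state = opss & SCX_OPSS_STATE_MASK;   (one of SCX_OPSS_*)
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;      (QUEUED branding)
 */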
1018
1019 /*
1020 * NOTE: sched_ext is in the process of growing multiple scheduler support and
1021 * scx_root usage is in a transitional state. Naked dereferences are safe if the
1022 * caller is one of the tasks attached to SCX and explicit RCU dereference is
1023 * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
1024 * are used as temporary markers to indicate that the dereferences need to be
1025 * updated to point to the associated scheduler instances rather than scx_root.
1026 */
1027 static struct scx_sched __rcu *scx_root;
1028
1029 /*
1030 * During exit, a task may schedule after losing its PIDs. When disabling the
1031 * BPF scheduler, we need to be able to iterate tasks in every state to
1032 * guarantee system safety. Maintain a dedicated task list which contains every
1033 * task between its fork and eventual free.
1034 */
1035 static DEFINE_SPINLOCK(scx_tasks_lock);
1036 static LIST_HEAD(scx_tasks);
1037
1038 /* ops enable/disable */
1039 static DEFINE_MUTEX(scx_enable_mutex);
1040 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
1041 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
1042 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
1043 static unsigned long scx_in_softlockup;
1044 static atomic_t scx_breather_depth = ATOMIC_INIT(0);
1045 static int scx_bypass_depth;
1046 static bool scx_init_task_enabled;
1047 static bool scx_switching_all;
1048 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
1049
1050 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
1051 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
1052
1053 /*
1054 * A monotonically increasing sequence number that is incremented every time a
1055 * scheduler is enabled. This can be used to check if any custom sched_ext
1056 * scheduler has ever been used in the system.
1057 */
1058 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
1059
1060 /*
1061 * The maximum amount of time in jiffies that a task may be runnable without
1062 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
1063 * scx_error().
1064 */
1065 static unsigned long scx_watchdog_timeout;
1066
1067 /*
1068 * The last time the delayed work was run. This delayed work relies on
1069 * ksoftirqd being able to run to service timer interrupts, so it's possible
1070 * that this work itself could get wedged. To account for this, we check that
1071 * it's not stalled in the timer tick, and trigger an error if it is.
1072 */
1073 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
1074
1075 static struct delayed_work scx_watchdog_work;
1076
1077 /* for %SCX_KICK_WAIT */
1078 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
1079
1080 /*
1081 * Direct dispatch marker.
1082 *
1083 * Non-NULL values are used for direct dispatch from enqueue path. A valid
1084 * pointer points to the task currently being enqueued. An ERR_PTR value is used
1085 * to indicate that direct dispatch has already happened.
1086 */
1087 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
1088
1089 static const struct rhashtable_params dsq_hash_params = {
1090 .key_len = sizeof_field(struct scx_dispatch_q, id),
1091 .key_offset = offsetof(struct scx_dispatch_q, id),
1092 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
1093 };
1094
1095 static LLIST_HEAD(dsqs_to_free);
1096
1097 /* dispatch buf */
1098 struct scx_dsp_buf_ent {
1099 struct task_struct *task;
1100 unsigned long qseq;
1101 u64 dsq_id;
1102 u64 enq_flags;
1103 };
1104
1105 static u32 scx_dsp_max_batch;
1106
1107 struct scx_dsp_ctx {
1108 struct rq *rq;
1109 u32 cursor;
1110 u32 nr_tasks;
1111 struct scx_dsp_buf_ent buf[];
1112 };
1113
1114 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
1115
1116 /* string formatting from BPF */
1117 struct scx_bstr_buf {
1118 u64 data[MAX_BPRINTF_VARARGS];
1119 char line[SCX_EXIT_MSG_LEN];
1120 };
1121
1122 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
1123 static struct scx_bstr_buf scx_exit_bstr_buf;
1124
1125 /* ops debug dump */
1126 struct scx_dump_data {
1127 s32 cpu;
1128 bool first;
1129 s32 cursor;
1130 struct seq_buf *s;
1131 const char *prefix;
1132 struct scx_bstr_buf buf;
1133 };
1134
1135 static struct scx_dump_data scx_dump_data = {
1136 .cpu = -1,
1137 };
1138
1139 /* /sys/kernel/sched_ext interface */
1140 static struct kset *scx_kset;
1141
1142 #define CREATE_TRACE_POINTS
1143 #include <trace/events/sched_ext.h>
1144
1145 static void process_ddsp_deferred_locals(struct rq *rq);
1146 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1147 static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
1148 s64 exit_code, const char *fmt, va_list args);
1149
1150 static __printf(4, 5) void scx_exit(struct scx_sched *sch,
1151 enum scx_exit_kind kind, s64 exit_code,
1152 const char *fmt, ...)
1153 {
1154 va_list args;
1155
1156 va_start(args, fmt);
1157 scx_vexit(sch, kind, exit_code, fmt, args);
1158 va_end(args);
1159 }
1160
1161 static __printf(3, 4) void scx_kf_exit(enum scx_exit_kind kind, s64 exit_code,
1162 const char *fmt, ...)
1163 {
1164 struct scx_sched *sch;
1165 va_list args;
1166
1167 rcu_read_lock();
1168 sch = rcu_dereference(scx_root);
1169 if (sch) {
1170 va_start(args, fmt);
1171 scx_vexit(sch, kind, exit_code, fmt, args);
1172 va_end(args);
1173 }
1174 rcu_read_unlock();
1175 }
1176
1177 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
1178 #define scx_kf_error(fmt, args...) scx_kf_exit(SCX_EXIT_ERROR, 0, fmt, ##args)
1179
1180 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op)
1181
1182 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1183 {
1184 if (time_after(at, now))
1185 return jiffies_to_msecs(at - now);
1186 else
1187 return -(long)jiffies_to_msecs(now - at);
1188 }
1189
1190 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1191 static u32 higher_bits(u32 flags)
1192 {
1193 return ~((1 << fls(flags)) - 1);
1194 }
1195
1196 /* return the mask with only the highest bit set */
1197 static u32 highest_bit(u32 flags)
1198 {
1199 int bit = fls(flags);
1200 return ((u64)1 << bit) >> 1;
1201 }
1202
1203 static bool u32_before(u32 a, u32 b)
1204 {
1205 return (s32)(a - b) < 0;
1206 }
1207
1208 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1209 {
1210 struct scx_sched *sch = scx_root;
1211
1212 return sch->global_dsqs[cpu_to_node(task_cpu(p))];
1213 }
1214
1215 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
1216 {
1217 return rhashtable_lookup_fast(&sch->dsq_hash, &dsq_id, dsq_hash_params);
1218 }
1219
1220 /*
1221 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1222 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1223 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1224 * whether it's running from an allowed context.
1225 *
1226 * @mask is constant, always inline to cull the mask calculations.
1227 */
1228 static __always_inline void scx_kf_allow(u32 mask)
1229 {
1230 /* nesting is allowed only in increasing scx_kf_mask order */
1231 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1232 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1233 current->scx.kf_mask, mask);
1234 current->scx.kf_mask |= mask;
1235 barrier();
1236 }
1237
1238 static void scx_kf_disallow(u32 mask)
1239 {
1240 barrier();
1241 current->scx.kf_mask &= ~mask;
1242 }
1243
1244 /*
1245 * Track the rq currently locked.
1246 *
1247 * This allows kfuncs to safely operate on rq from any scx ops callback,
1248 * knowing which rq is already locked.
1249 */
1250 static DEFINE_PER_CPU(struct rq *, locked_rq);
1251
1252 static inline void update_locked_rq(struct rq *rq)
1253 {
1254 /*
1255 * Check whether @rq is actually locked. This can help expose bugs
1256 * or incorrect assumptions about the context in which a kfunc or
1257 * callback is executed.
1258 */
1259 if (rq)
1260 lockdep_assert_rq_held(rq);
1261 __this_cpu_write(locked_rq, rq);
1262 }
1263
1264 /*
1265 * Return the rq currently locked from an scx callback, or NULL if no rq is
1266 * locked.
1267 */
1268 static inline struct rq *scx_locked_rq(void)
1269 {
1270 return __this_cpu_read(locked_rq);
1271 }
1272
1273 #define SCX_CALL_OP(sch, mask, op, rq, args...) \
1274 do { \
1275 if (rq) \
1276 update_locked_rq(rq); \
1277 if (mask) { \
1278 scx_kf_allow(mask); \
1279 (sch)->ops.op(args); \
1280 scx_kf_disallow(mask); \
1281 } else { \
1282 (sch)->ops.op(args); \
1283 } \
1284 if (rq) \
1285 update_locked_rq(NULL); \
1286 } while (0)
1287
1288 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \
1289 ({ \
1290 __typeof__((sch)->ops.op(args)) __ret; \
1291 \
1292 if (rq) \
1293 update_locked_rq(rq); \
1294 if (mask) { \
1295 scx_kf_allow(mask); \
1296 __ret = (sch)->ops.op(args); \
1297 scx_kf_disallow(mask); \
1298 } else { \
1299 __ret = (sch)->ops.op(args); \
1300 } \
1301 if (rq) \
1302 update_locked_rq(NULL); \
1303 __ret; \
1304 })
1305
1306 /*
1307 * Some kfuncs are allowed only on the tasks that are subjects of the
1308 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1309 * restrictions, the following SCX_CALL_OP_*() variants should be used when
1310 * invoking scx_ops operations that take task arguments. These can only be used
1311 * for non-nesting operations due to the way the tasks are tracked.
1312 *
1313 * kfuncs which can only operate on such tasks can in turn use
1314 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1315 * the specific task.
1316 */
1317 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \
1318 do { \
1319 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1320 current->scx.kf_tasks[0] = task; \
1321 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
1322 current->scx.kf_tasks[0] = NULL; \
1323 } while (0)
1324
1325 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \
1326 ({ \
1327 __typeof__((sch)->ops.op(task, ##args)) __ret; \
1328 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1329 current->scx.kf_tasks[0] = task; \
1330 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
1331 current->scx.kf_tasks[0] = NULL; \
1332 __ret; \
1333 })
1334
1335 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \
1336 ({ \
1337 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
1338 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1339 current->scx.kf_tasks[0] = task0; \
1340 current->scx.kf_tasks[1] = task1; \
1341 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
1342 current->scx.kf_tasks[0] = NULL; \
1343 current->scx.kf_tasks[1] = NULL; \
1344 __ret; \
1345 })
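/*
 * For example, the enqueue path later in this file invokes the BPF scheduler
 * roughly as follows (illustrative; see the actual call sites for the exact
 * arguments):
 *
 *	SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
 */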
1346
1347 /* @mask is constant, always inline to cull unnecessary branches */
1348 static __always_inline bool scx_kf_allowed(u32 mask)
1349 {
1350 if (unlikely(!(current->scx.kf_mask & mask))) {
1351 scx_kf_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1352 mask, current->scx.kf_mask);
1353 return false;
1354 }
1355
1356 /*
1357 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1358 * DISPATCH must not be called if we're running DEQUEUE which is nested
1359 * inside ops.dispatch(). We don't need to check boundaries for any
1360 * blocking kfuncs as the verifier ensures they're only called from
1361 * sleepable progs.
1362 */
1363 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1364 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1365 scx_kf_error("cpu_release kfunc called from a nested operation");
1366 return false;
1367 }
1368
1369 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1370 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1371 scx_kf_error("dispatch kfunc called from a nested operation");
1372 return false;
1373 }
1374
1375 return true;
1376 }
1377
1378 /* see SCX_CALL_OP_TASK() */
1379 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1380 struct task_struct *p)
1381 {
1382 if (!scx_kf_allowed(mask))
1383 return false;
1384
1385 if (unlikely((p != current->scx.kf_tasks[0] &&
1386 p != current->scx.kf_tasks[1]))) {
1387 scx_kf_error("called on a task not being operated on");
1388 return false;
1389 }
1390
1391 return true;
1392 }
1393
1394 /**
1395 * nldsq_next_task - Iterate to the next task in a non-local DSQ
1396 * @dsq: user dsq being iterated
1397 * @cur: current position, %NULL to start iteration
1398 * @rev: walk backwards
1399 *
1400 * Returns %NULL when iteration is finished.
1401 */
1402 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1403 struct task_struct *cur, bool rev)
1404 {
1405 struct list_head *list_node;
1406 struct scx_dsq_list_node *dsq_lnode;
1407
1408 lockdep_assert_held(&dsq->lock);
1409
1410 if (cur)
1411 list_node = &cur->scx.dsq_list.node;
1412 else
1413 list_node = &dsq->list;
1414
1415 /* find the next task, need to skip BPF iteration cursors */
1416 do {
1417 if (rev)
1418 list_node = list_node->prev;
1419 else
1420 list_node = list_node->next;
1421
1422 if (list_node == &dsq->list)
1423 return NULL;
1424
1425 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1426 node);
1427 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1428
1429 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1430 }
1431
1432 #define nldsq_for_each_task(p, dsq) \
1433 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1434 (p) = nldsq_next_task((dsq), (p), false))
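/*
 * For example, with @dsq->lock held, a debug or accounting path could walk
 * the queue as (illustrative):
 *
 *	nldsq_for_each_task(p, dsq)
 *		nr_queued++;
 */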
1435
1436
1437 /*
1438 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1439 * dispatch order. BPF-visible iterator is opaque and larger to allow future
1440 * changes without breaking backward compatibility. Can be used with
1441 * bpf_for_each(). See bpf_iter_scx_dsq_*().
1442 */
1443 enum scx_dsq_iter_flags {
1444 /* iterate in the reverse dispatch order */
1445 SCX_DSQ_ITER_REV = 1U << 16,
1446
1447 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
1448 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
1449
1450 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
1451 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
1452 __SCX_DSQ_ITER_HAS_SLICE |
1453 __SCX_DSQ_ITER_HAS_VTIME,
1454 };
1455
1456 struct bpf_iter_scx_dsq_kern {
1457 struct scx_dsq_list_node cursor;
1458 struct scx_dispatch_q *dsq;
1459 u64 slice;
1460 u64 vtime;
1461 } __attribute__((aligned(8)));
1462
1463 struct bpf_iter_scx_dsq {
1464 u64 __opaque[6];
1465 } __attribute__((aligned(8)));
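/*
 * BPF-side usage sketch (names belong to the BPF scheduler, not this file;
 * SHARED_DSQ is a hypothetical DSQ id):
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
 *		if (p->scx.dsq_vtime < vtime_cutoff)
 *			break;
 *	}
 */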
1466
1467
1468 /*
1469 * SCX task iterator.
1470 */
1471 struct scx_task_iter {
1472 struct sched_ext_entity cursor;
1473 struct task_struct *locked;
1474 struct rq *rq;
1475 struct rq_flags rf;
1476 u32 cnt;
1477 };
1478
1479 /**
1480 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1481 * @iter: iterator to init
1482 *
1483 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1484 * must eventually be stopped with scx_task_iter_stop().
1485 *
1486 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1487 * between this and the first next() call or between any two next() calls. If
1488 * the locks are released between two next() calls, the caller is responsible
1489 * for ensuring that the task being iterated remains accessible either through
1490 * RCU read lock or obtaining a reference count.
1491 *
1492 * All tasks which existed when the iteration started are guaranteed to be
1493 * visited as long as they still exist.
1494 */
1495 static void scx_task_iter_start(struct scx_task_iter *iter)
1496 {
1497 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1498 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1499
1500 spin_lock_irq(&scx_tasks_lock);
1501
1502 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1503 list_add(&iter->cursor.tasks_node, &scx_tasks);
1504 iter->locked = NULL;
1505 iter->cnt = 0;
1506 }
1507
1508 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1509 {
1510 if (iter->locked) {
1511 task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1512 iter->locked = NULL;
1513 }
1514 }
1515
1516 /**
1517 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1518 * @iter: iterator to unlock
1519 *
1520 * If @iter is in the middle of a locked iteration, it may be locking the rq of
1521 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1522 * This function can be safely called anytime during an iteration.
1523 */
1524 static void scx_task_iter_unlock(struct scx_task_iter *iter)
1525 {
1526 __scx_task_iter_rq_unlock(iter);
1527 spin_unlock_irq(&scx_tasks_lock);
1528 }
1529
1530 /**
1531 * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1532 * @iter: iterator to re-lock
1533 *
1534 * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1535 * doesn't re-lock the rq lock. Must be called before other iterator operations.
1536 */
1537 static void scx_task_iter_relock(struct scx_task_iter *iter)
1538 {
1539 spin_lock_irq(&scx_tasks_lock);
1540 }
1541
1542 /**
1543 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1544 * @iter: iterator to exit
1545 *
1546 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1547 * which is released on return. If the iterator holds a task's rq lock, that rq
1548 * lock is also released. See scx_task_iter_start() for details.
1549 */
1550 static void scx_task_iter_stop(struct scx_task_iter *iter)
1551 {
1552 list_del_init(&iter->cursor.tasks_node);
1553 scx_task_iter_unlock(iter);
1554 }
1555
1556 /**
1557 * scx_task_iter_next - Next task
1558 * @iter: iterator to walk
1559 *
1560 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1561 * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
1562 * by holding scx_tasks_lock for too long.
1563 */
1564 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1565 {
1566 struct list_head *cursor = &iter->cursor.tasks_node;
1567 struct sched_ext_entity *pos;
1568
1569 if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
1570 scx_task_iter_unlock(iter);
1571 cond_resched();
1572 scx_task_iter_relock(iter);
1573 }
1574
1575 list_for_each_entry(pos, cursor, tasks_node) {
1576 if (&pos->tasks_node == &scx_tasks)
1577 return NULL;
1578 if (!(pos->flags & SCX_TASK_CURSOR)) {
1579 list_move(cursor, &pos->tasks_node);
1580 return container_of(pos, struct task_struct, scx);
1581 }
1582 }
1583
1584 /* can't happen, should always terminate at scx_tasks above */
1585 BUG();
1586 }
1587
1588 /**
1589 * scx_task_iter_next_locked - Next non-idle task with its rq locked
1590 * @iter: iterator to walk
1591 *
1592 * Visit the next non-idle task with its rq lock held. Idle (init) tasks are
1593 * skipped; see the comment in the function body. See scx_task_iter_start()
1594 * for details.
1595 */
1596 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1597 {
1598 struct task_struct *p;
1599
1600 __scx_task_iter_rq_unlock(iter);
1601
1602 while ((p = scx_task_iter_next(iter))) {
1603 /*
1604 * scx_task_iter is used to prepare and move tasks into SCX
1605 * while loading the BPF scheduler and vice-versa while
1606 * unloading. The init_tasks ("swappers") should be excluded
1607 * from the iteration because:
1608 *
1609 * - It's unsafe to use __setscheduler_prio() on an init_task to
1610 * determine the sched_class to use as it won't preserve its
1611 * idle_sched_class.
1612 *
1613 * - ops.init/exit_task() can easily be confused if called with
1614 * init_tasks as they, e.g., share PID 0.
1615 *
1616 * As init_tasks are never scheduled through SCX, they can be
1617 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1618 * doesn't work here:
1619 *
1620 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1621 * yet been onlined.
1622 *
1623 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1624 * play_idle_precise() used by CONFIG_IDLE_INJECT.
1625 *
1626 * Test for idle_sched_class as only init_tasks are on it.
1627 */
1628 if (p->sched_class != &idle_sched_class)
1629 break;
1630 }
1631 if (!p)
1632 return NULL;
1633
1634 iter->rq = task_rq_lock(p, &iter->rf);
1635 iter->locked = p;
1636
1637 return p;
1638 }
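/*
 * Typical usage pattern for the iterator above, as used by the enable and
 * disable paths later in this file:
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti))) {
 *		... operate on @p with its rq lock held ...
 *	}
 *	scx_task_iter_stop(&sti);
 */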
1639
1640 /**
1641 * scx_add_event - Increase an event counter for 'name' by 'cnt'
1642 * @sch: scx_sched to account events for
1643 * @name: an event name defined in struct scx_event_stats
1644 * @cnt: the number of times the event occurred
1645 *
1646 * This can be used when preemption is not disabled.
1647 */
1648 #define scx_add_event(sch, name, cnt) do { \
1649 this_cpu_add((sch)->event_stats_cpu->name, (cnt)); \
1650 trace_sched_ext_event(#name, (cnt)); \
1651 } while(0)
1652
1653 /**
1654 * __scx_add_event - Increase an event counter for 'name' by 'cnt'
1655 * @sch: scx_sched to account events for
1656 * @name: an event name defined in struct scx_event_stats
1657 * @cnt: the number of times the event occurred
1658 *
1659 * This should be used only when preemption is disabled.
1660 */
1661 #define __scx_add_event(sch, name, cnt) do { \
1662 __this_cpu_add((sch)->event_stats_cpu->name, (cnt)); \
1663 trace_sched_ext_event(#name, (cnt)); \
1664 } while(0)
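
/*
 * For example, the enqueue path below runs with the rq locked and thus
 * preemption disabled, so it bumps its counters with the double-underscore
 * variant:
 *
 *	__scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
 *
 * while a preemptible caller would use scx_add_event() for the same effect.
 * Both variants also emit the sched_ext_event tracepoint with the event
 * name and count.
 */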
1665
1666 /**
1667 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
1668 * @dst_e: destination event stats
1669 * @src_e: source event stats
1670 * @kind: a kind of event to be aggregated
1671 */
1672 #define scx_agg_event(dst_e, src_e, kind) do { \
1673 (dst_e)->kind += READ_ONCE((src_e)->kind); \
1674 } while(0)
1675
1676 /**
1677 * scx_dump_event - Dump an event 'kind' in 'events' to 's'
1678 * @s: output seq_buf
1679 * @events: event stats
1680 * @kind: a kind of event to dump
1681 */
1682 #define scx_dump_event(s, events, kind) do { \
1683 dump_line(&(s), "%40s: %16lld", #kind, (events)->kind); \
1684 } while (0)
1685
1686
1687 static void scx_read_events(struct scx_sched *sch,
1688 struct scx_event_stats *events);
1689
1690 static enum scx_enable_state scx_enable_state(void)
1691 {
1692 return atomic_read(&scx_enable_state_var);
1693 }
1694
1695 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
1696 {
1697 return atomic_xchg(&scx_enable_state_var, to);
1698 }
1699
1700 static bool scx_tryset_enable_state(enum scx_enable_state to,
1701 enum scx_enable_state from)
1702 {
1703 int from_v = from;
1704
1705 return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
1706 }
1707
1708 static bool scx_rq_bypassing(struct rq *rq)
1709 {
1710 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1711 }
1712
1713 /**
1714 * wait_ops_state - Busy-wait the specified ops state to end
1715 * @p: target task
1716 * @opss: state to wait the end of
1717 *
1718 * Busy-wait for @p to transition out of @opss. This can only be used when the
1719 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
1720 * has load_acquire semantics to ensure that the caller can see the updates made
1721 * in the enqueueing and dispatching paths.
1722 */
1723 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1724 {
1725 do {
1726 cpu_relax();
1727 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1728 }
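
/*
 * The waiter pairs with the store_release transitions performed by the
 * enqueueing and dispatching paths. A rough sketch of one such pairing:
 *
 *	// dispatch side, dispatch_enqueue() with SCX_ENQ_CLEAR_OPSS
 *	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
 *
 *	// dequeue side, ops_dequeue()
 *	wait_ops_state(p, SCX_OPSS_DISPATCHING);
 *
 * The acquire in atomic_long_read_acquire() guarantees that once the waiter
 * observes the transition, it also sees every update made before the
 * releasing store.
 */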
1729
1730 static inline bool __cpu_valid(s32 cpu)
1731 {
1732 return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
1733 }
1734
1735 /**
1736 * ops_cpu_valid - Verify a cpu number, to be used on ops input args
1737 * @sch: scx_sched to abort on error
1738 * @cpu: cpu number which came from a BPF ops
1739 * @where: extra information reported on error
1740 *
1741 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1742 * Verify that it is in range and one of the possible cpus. If invalid, trigger
1743 * an ops error.
1744 */
1745 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
1746 {
1747 if (__cpu_valid(cpu)) {
1748 return true;
1749 } else {
1750 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
1751 return false;
1752 }
1753 }
1754
1755 /**
1756 * kf_cpu_valid - Verify a CPU number, to be used on kfunc input args
1757 * @cpu: cpu number which came from a BPF ops
1758 * @where: extra information reported on error
1759 *
1760 * The same as ops_cpu_valid() but @sch is implicit.
1761 */
1762 static bool kf_cpu_valid(u32 cpu, const char *where)
1763 {
1764 if (__cpu_valid(cpu)) {
1765 return true;
1766 } else {
1767 scx_kf_error("invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
1768 return false;
1769 }
1770 }
1771
1772 /**
1773 * ops_sanitize_err - Sanitize a -errno value
1774 * @sch: scx_sched to error out on error
1775 * @ops_name: operation to blame on failure
1776 * @err: -errno value to sanitize
1777 *
1778 * Verify @err is a valid -errno. If not, trigger scx_error() and return
1779 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1780 * cause misbehaviors. For example, a large negative return from
1781 * ops.init_task() triggers an oops when passed up the call chain because the
1782 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1783 * handled as a pointer.
1784 */
1785 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
1786 {
1787 if (err < 0 && err >= -MAX_ERRNO)
1788 return err;
1789
1790 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
1791 return -EPROTO;
1792 }
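
/*
 * For instance, if ops.init_task() returned -100000, ERR_PTR(-100000) would
 * not be recognized by IS_ERR(), which only covers -1..-MAX_ERRNO (-4095),
 * and the bogus value would later be dereferenced as a pointer. Callers
 * therefore funnel ops return values through this helper before propagating
 * them, e.g.:
 *
 *	ret = ops_sanitize_err(sch, "init_task", ret);
 */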
1793
1794 static void run_deferred(struct rq *rq)
1795 {
1796 process_ddsp_deferred_locals(rq);
1797 }
1798
1799 #ifdef CONFIG_SMP
1800 static void deferred_bal_cb_workfn(struct rq *rq)
1801 {
1802 run_deferred(rq);
1803 }
1804 #endif
1805
1806 static void deferred_irq_workfn(struct irq_work *irq_work)
1807 {
1808 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1809
1810 raw_spin_rq_lock(rq);
1811 run_deferred(rq);
1812 raw_spin_rq_unlock(rq);
1813 }
1814
1815 /**
1816 * schedule_deferred - Schedule execution of deferred actions on an rq
1817 * @rq: target rq
1818 *
1819 * Schedule execution of deferred actions on @rq. Must be called with @rq
1820 * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1821 * can unlock @rq to e.g. migrate tasks to other rqs.
1822 */
1823 static void schedule_deferred(struct rq *rq)
1824 {
1825 lockdep_assert_rq_held(rq);
1826
1827 #ifdef CONFIG_SMP
1828 /*
1829 * If in the middle of waking up a task, task_woken_scx() will be called
1830 * afterwards which will then run the deferred actions, no need to
1831 * schedule anything.
1832 */
1833 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1834 return;
1835
1836 /*
1837 * If in balance, the balance callbacks will be called before rq lock is
1838 * released. Schedule one.
1839 */
1840 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1841 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1842 deferred_bal_cb_workfn);
1843 return;
1844 }
1845 #endif
1846 /*
1847 * No scheduler hooks available. Queue an irq work instead; it runs on IRQ
1848 * re-enable, which may take a bit longer than the scheduler hooks.
1849 * The above WAKEUP and BALANCE paths should cover most of the cases and
1850 * the time to IRQ re-enable shouldn't be long.
1851 */
1852 irq_work_queue(&rq->scx.deferred_irq_work);
1853 }
1854
1855 /**
1856 * touch_core_sched - Update timestamp used for core-sched task ordering
1857 * @rq: rq to read clock from, must be locked
1858 * @p: task to update the timestamp for
1859 *
1860 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1861 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1862 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1863 * exhaustion).
1864 */
1865 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1866 {
1867 lockdep_assert_rq_held(rq);
1868
1869 #ifdef CONFIG_SCHED_CORE
1870 /*
1871 * It's okay to update the timestamp spuriously. Use
1872 * sched_core_disabled() which is cheaper than enabled().
1873 *
1874 * As this is used to determine ordering between tasks of sibling CPUs,
1875 * it may be better to use per-core dispatch sequence instead.
1876 */
1877 if (!sched_core_disabled())
1878 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1879 #endif
1880 }
1881
1882 /**
1883 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1884 * @rq: rq to read clock from, must be locked
1885 * @p: task being dispatched
1886 *
1887 * If the BPF scheduler implements custom core-sched ordering via
1888 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1889 * ordering within each local DSQ. This function is called from dispatch paths
1890 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1891 */
1892 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1893 {
1894 lockdep_assert_rq_held(rq);
1895
1896 #ifdef CONFIG_SCHED_CORE
1897 if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
1898 touch_core_sched(rq, p);
1899 #endif
1900 }
1901
1902 static void update_curr_scx(struct rq *rq)
1903 {
1904 struct task_struct *curr = rq->curr;
1905 s64 delta_exec;
1906
1907 delta_exec = update_curr_common(rq);
1908 if (unlikely(delta_exec <= 0))
1909 return;
1910
1911 if (curr->scx.slice != SCX_SLICE_INF) {
1912 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1913 if (!curr->scx.slice)
1914 touch_core_sched(rq, curr);
1915 }
1916 }
1917
1918 static bool scx_dsq_priq_less(struct rb_node *node_a,
1919 const struct rb_node *node_b)
1920 {
1921 const struct task_struct *a =
1922 container_of(node_a, struct task_struct, scx.dsq_priq);
1923 const struct task_struct *b =
1924 container_of(node_b, struct task_struct, scx.dsq_priq);
1925
1926 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1927 }
1928
1929 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1930 {
1931 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1932 WRITE_ONCE(dsq->nr, dsq->nr + delta);
1933 }
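
/*
 * The lockless reader mentioned above is the BPF-visible queue depth
 * helper, which schedulers commonly use for simple backlog checks, e.g.:
 *
 *	if (scx_bpf_dsq_nr_queued(MY_DSQ_ID) < MAX_BACKLOG)
 *		scx_bpf_dsq_insert(p, MY_DSQ_ID, SCX_SLICE_DFL, 0);
 *
 * where MY_DSQ_ID and MAX_BACKLOG are scheduler-defined. The WRITE_ONCE()
 * here pairs with that read being done without holding @dsq->lock.
 */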
1934
1935 static void refill_task_slice_dfl(struct task_struct *p)
1936 {
1937 p->scx.slice = SCX_SLICE_DFL;
1938 __scx_add_event(scx_root, SCX_EV_REFILL_SLICE_DFL, 1);
1939 }
1940
1941 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1942 struct task_struct *p, u64 enq_flags)
1943 {
1944 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1945
1946 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1947 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1948 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1949
1950 if (!is_local) {
1951 raw_spin_lock(&dsq->lock);
1952 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1953 scx_error(sch, "attempting to dispatch to a destroyed dsq");
1954 /* fall back to the global dsq */
1955 raw_spin_unlock(&dsq->lock);
1956 dsq = find_global_dsq(p);
1957 raw_spin_lock(&dsq->lock);
1958 }
1959 }
1960
1961 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1962 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1963 /*
1964 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1965 * their FIFO queues. To avoid confusion and accidentally
1966 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1967 * disallow any internal DSQ from doing vtime ordering of
1968 * tasks.
1969 */
1970 scx_error(sch, "cannot use vtime ordering for built-in DSQs");
1971 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1972 }
1973
1974 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1975 struct rb_node *rbp;
1976
1977 /*
1978 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1979 * linked to both the rbtree and list on PRIQs, this can only be
1980 * tested easily when adding the first task.
1981 */
1982 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1983 nldsq_next_task(dsq, NULL, false)))
1984 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1985 dsq->id);
1986
1987 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1988 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1989
1990 /*
1991 * Find the previous task and insert after it on the list so
1992 * that @dsq->list is vtime ordered.
1993 */
1994 rbp = rb_prev(&p->scx.dsq_priq);
1995 if (rbp) {
1996 struct task_struct *prev =
1997 container_of(rbp, struct task_struct,
1998 scx.dsq_priq);
1999 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
2000 } else {
2001 list_add(&p->scx.dsq_list.node, &dsq->list);
2002 }
2003 } else {
2004 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
2005 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
2006 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
2007 dsq->id);
2008
2009 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2010 list_add(&p->scx.dsq_list.node, &dsq->list);
2011 else
2012 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
2013 }
2014
2015 /* seq records the order tasks are queued, used by BPF DSQ iterator */
2016 dsq->seq++;
2017 p->scx.dsq_seq = dsq->seq;
2018
2019 dsq_mod_nr(dsq, 1);
2020 p->scx.dsq = dsq;
2021
2022 /*
2023 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
2024 * direct dispatch path, but we clear them here because the direct
2025 * dispatch verdict may be overridden on the enqueue path during e.g.
2026 * bypass.
2027 */
2028 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
2029 p->scx.ddsp_enq_flags = 0;
2030
2031 /*
2032 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
2033 * match waiters' load_acquire.
2034 */
2035 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
2036 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2037
2038 if (is_local) {
2039 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
2040 bool preempt = false;
2041
2042 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
2043 rq->curr->sched_class == &ext_sched_class) {
2044 rq->curr->scx.slice = 0;
2045 preempt = true;
2046 }
2047
2048 if (preempt || sched_class_above(&ext_sched_class,
2049 rq->curr->sched_class))
2050 resched_curr(rq);
2051 } else {
2052 raw_spin_unlock(&dsq->lock);
2053 }
2054 }
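
/*
 * From the BPF side, whether a task lands on a user DSQ's FIFO list or its
 * vtime rbtree is decided by the insertion kfunc used. A minimal sketch,
 * assuming the vtime variant is named scx_bpf_dsq_insert_vtime() as in
 * recent kernels and MY_DSQ_ID is a scheduler-created DSQ:
 *
 *	// FIFO: appended to dsq->list, SCX_ENQ_DSQ_PRIQ not set
 *	scx_bpf_dsq_insert(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags);
 *
 *	// vtime: inserted into dsq->priq with SCX_ENQ_DSQ_PRIQ set
 *	scx_bpf_dsq_insert_vtime(p, MY_DSQ_ID, SCX_SLICE_DFL, vtime, enq_flags);
 *
 * Mixing the two on the same user DSQ trips the scx_error()s above, and
 * vtime insertion into the built-in local/global DSQs is rejected outright.
 */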
2055
2056 static void task_unlink_from_dsq(struct task_struct *p,
2057 struct scx_dispatch_q *dsq)
2058 {
2059 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
2060
2061 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
2062 rb_erase(&p->scx.dsq_priq, &dsq->priq);
2063 RB_CLEAR_NODE(&p->scx.dsq_priq);
2064 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
2065 }
2066
2067 list_del_init(&p->scx.dsq_list.node);
2068 dsq_mod_nr(dsq, -1);
2069 }
2070
2071 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
2072 {
2073 struct scx_dispatch_q *dsq = p->scx.dsq;
2074 bool is_local = dsq == &rq->scx.local_dsq;
2075
2076 if (!dsq) {
2077 /*
2078 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
2079 * Unlinking is all that's needed to cancel.
2080 */
2081 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
2082 list_del_init(&p->scx.dsq_list.node);
2083
2084 /*
2085 * When dispatching directly from the BPF scheduler to a local
2086 * DSQ, the task isn't associated with any DSQ but
2087 * @p->scx.holding_cpu may be set under the protection of
2088 * %SCX_OPSS_DISPATCHING.
2089 */
2090 if (p->scx.holding_cpu >= 0)
2091 p->scx.holding_cpu = -1;
2092
2093 return;
2094 }
2095
2096 if (!is_local)
2097 raw_spin_lock(&dsq->lock);
2098
2099 /*
2100 * Now that we hold @dsq->lock, @p->scx.holding_cpu and @p->scx.dsq_* can't
2101 * change underneath us.
2102 */
2103 if (p->scx.holding_cpu < 0) {
2104 /* @p must still be on @dsq, dequeue */
2105 task_unlink_from_dsq(p, dsq);
2106 } else {
2107 /*
2108 * We're racing against dispatch_to_local_dsq() which already
2109 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
2110 * holding_cpu which tells dispatch_to_local_dsq() that it lost
2111 * the race.
2112 */
2113 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
2114 p->scx.holding_cpu = -1;
2115 }
2116 p->scx.dsq = NULL;
2117
2118 if (!is_local)
2119 raw_spin_unlock(&dsq->lock);
2120 }
2121
2122 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
2123 struct rq *rq, u64 dsq_id,
2124 struct task_struct *p)
2125 {
2126 struct scx_dispatch_q *dsq;
2127
2128 if (dsq_id == SCX_DSQ_LOCAL)
2129 return &rq->scx.local_dsq;
2130
2131 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
2132 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
2133
2134 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
2135 return find_global_dsq(p);
2136
2137 return &cpu_rq(cpu)->scx.local_dsq;
2138 }
2139
2140 if (dsq_id == SCX_DSQ_GLOBAL)
2141 dsq = find_global_dsq(p);
2142 else
2143 dsq = find_user_dsq(sch, dsq_id);
2144
2145 if (unlikely(!dsq)) {
2146 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]",
2147 dsq_id, p->comm, p->pid);
2148 return find_global_dsq(p);
2149 }
2150
2151 return dsq;
2152 }
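
/*
 * The dsq_id values accepted above mirror what the BPF scheduler can pass
 * to the insertion kfuncs, e.g.:
 *
 *	SCX_DSQ_LOCAL		- local DSQ of the task's current rq
 *	SCX_DSQ_LOCAL_ON | 3	- local DSQ of CPU 3
 *	SCX_DSQ_GLOBAL		- the global fallback DSQ (per NUMA node)
 *	MY_DSQ_ID		- a DSQ the scheduler created itself
 *
 * with MY_DSQ_ID standing in for an id previously registered through
 * scx_bpf_create_dsq(). Invalid CPU numbers and non-existent DSQ ids fall
 * back to the global DSQ after raising an ops error.
 */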
2153
2154 static void mark_direct_dispatch(struct task_struct *ddsp_task,
2155 struct task_struct *p, u64 dsq_id,
2156 u64 enq_flags)
2157 {
2158 /*
2159 * Mark that dispatch already happened from ops.select_cpu() or
2160 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
2161 * which can never match a valid task pointer.
2162 */
2163 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
2164
2165 /* @p must match the task on the enqueue path */
2166 if (unlikely(p != ddsp_task)) {
2167 if (IS_ERR(ddsp_task))
2168 scx_kf_error("%s[%d] already direct-dispatched",
2169 p->comm, p->pid);
2170 else
2171 scx_kf_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
2172 ddsp_task->comm, ddsp_task->pid,
2173 p->comm, p->pid);
2174 return;
2175 }
2176
2177 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
2178 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
2179
2180 p->scx.ddsp_dsq_id = dsq_id;
2181 p->scx.ddsp_enq_flags = enq_flags;
2182 }
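
/*
 * Direct dispatch is what happens when the BPF scheduler calls an insertion
 * kfunc from within ops.select_cpu() or ops.enqueue() itself. A minimal
 * sketch of an enqueue-time direct dispatch, using the BPF_STRUCT_OPS
 * helper macro from the example schedulers under tools/sched_ext:
 *
 *	void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p,
 *			    u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 * The kfunc only records the verdict via mark_direct_dispatch(); the actual
 * insertion is performed by direct_dispatch() once the operation returns.
 */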
2183
2184 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
2185 u64 enq_flags)
2186 {
2187 struct rq *rq = task_rq(p);
2188 struct scx_dispatch_q *dsq =
2189 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
2190
2191 touch_core_sched_dispatch(rq, p);
2192
2193 p->scx.ddsp_enq_flags |= enq_flags;
2194
2195 /*
2196 * We are in the enqueue path with @rq locked and pinned, and thus can't
2197 * double lock a remote rq and enqueue to its local DSQ. For
2198 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
2199 * the enqueue so that it's executed when @rq can be unlocked.
2200 */
2201 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
2202 unsigned long opss;
2203
2204 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
2205
2206 switch (opss & SCX_OPSS_STATE_MASK) {
2207 case SCX_OPSS_NONE:
2208 break;
2209 case SCX_OPSS_QUEUEING:
2210 /*
2211 * As @p was never passed to the BPF side, _release is
2212 * not strictly necessary. Still do it for consistency.
2213 */
2214 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2215 break;
2216 default:
2217 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
2218 p->comm, p->pid, opss);
2219 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2220 break;
2221 }
2222
2223 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
2224 list_add_tail(&p->scx.dsq_list.node,
2225 &rq->scx.ddsp_deferred_locals);
2226 schedule_deferred(rq);
2227 return;
2228 }
2229
2230 dispatch_enqueue(sch, dsq, p,
2231 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
2232 }
2233
2234 static bool scx_rq_online(struct rq *rq)
2235 {
2236 /*
2237 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
2238 * the online state as seen from the BPF scheduler. cpu_active() test
2239 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
2240 * stay set until the current scheduling operation is complete even if
2241 * we aren't locking @rq.
2242 */
2243 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
2244 }
2245
2246 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
2247 int sticky_cpu)
2248 {
2249 struct scx_sched *sch = scx_root;
2250 struct task_struct **ddsp_taskp;
2251 unsigned long qseq;
2252
2253 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
2254
2255 /* rq migration */
2256 if (sticky_cpu == cpu_of(rq))
2257 goto local_norefill;
2258
2259 /*
2260 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2261 * is offline and are just running the hotplug path. Don't bother the
2262 * BPF scheduler.
2263 */
2264 if (!scx_rq_online(rq))
2265 goto local;
2266
2267 if (scx_rq_bypassing(rq)) {
2268 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
2269 goto global;
2270 }
2271
2272 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2273 goto direct;
2274
2275 /* see %SCX_OPS_ENQ_EXITING */
2276 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
2277 unlikely(p->flags & PF_EXITING)) {
2278 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
2279 goto local;
2280 }
2281
2282 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
2283 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
2284 is_migration_disabled(p)) {
2285 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
2286 goto local;
2287 }
2288
2289 if (unlikely(!SCX_HAS_OP(sch, enqueue)))
2290 goto global;
2291
2292 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2293 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2294
2295 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2296 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2297
2298 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2299 WARN_ON_ONCE(*ddsp_taskp);
2300 *ddsp_taskp = p;
2301
2302 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
2303
2304 *ddsp_taskp = NULL;
2305 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2306 goto direct;
2307
2308 /*
2309 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2310 * dequeue may be waiting. The store_release matches their load_acquire.
2311 */
2312 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2313 return;
2314
2315 direct:
2316 direct_dispatch(sch, p, enq_flags);
2317 return;
2318
2319 local:
2320 /*
2321 * For task-ordering, slice refill must be treated as implying the end
2322 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2323 * higher priority it becomes from scx_prio_less()'s POV.
2324 */
2325 touch_core_sched(rq, p);
2326 refill_task_slice_dfl(p);
2327 local_norefill:
2328 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
2329 return;
2330
2331 global:
2332 touch_core_sched(rq, p); /* see the comment in local: */
2333 refill_task_slice_dfl(p);
2334 dispatch_enqueue(sch, find_global_dsq(p), p, enq_flags);
2335 }
2336
2337 static bool task_runnable(const struct task_struct *p)
2338 {
2339 return !list_empty(&p->scx.runnable_node);
2340 }
2341
2342 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2343 {
2344 lockdep_assert_rq_held(rq);
2345
2346 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2347 p->scx.runnable_at = jiffies;
2348 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2349 }
2350
2351 /*
2352 * list_add_tail() must be used. scx_bypass() depends on tasks being
2353 * appended to the runnable_list.
2354 */
2355 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2356 }
2357
2358 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2359 {
2360 list_del_init(&p->scx.runnable_node);
2361 if (reset_runnable_at)
2362 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2363 }
2364
2365 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2366 {
2367 struct scx_sched *sch = scx_root;
2368 int sticky_cpu = p->scx.sticky_cpu;
2369
2370 if (enq_flags & ENQUEUE_WAKEUP)
2371 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2372
2373 enq_flags |= rq->scx.extra_enq_flags;
2374
2375 if (sticky_cpu >= 0)
2376 p->scx.sticky_cpu = -1;
2377
2378 /*
2379 * Restoring a running task will be immediately followed by
2380 * set_next_task_scx() which expects the task to not be on the BPF
2381 * scheduler as tasks can only start running through local DSQs. Force
2382 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2383 */
2384 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2385 sticky_cpu = cpu_of(rq);
2386
2387 if (p->scx.flags & SCX_TASK_QUEUED) {
2388 WARN_ON_ONCE(!task_runnable(p));
2389 goto out;
2390 }
2391
2392 set_task_runnable(rq, p);
2393 p->scx.flags |= SCX_TASK_QUEUED;
2394 rq->scx.nr_running++;
2395 add_nr_running(rq, 1);
2396
2397 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
2398 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
2399
2400 if (enq_flags & SCX_ENQ_WAKEUP)
2401 touch_core_sched(rq, p);
2402
2403 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2404 out:
2405 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2406
2407 if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
2408 unlikely(cpu_of(rq) != p->scx.selected_cpu))
2409 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
2410 }
2411
2412 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
2413 {
2414 struct scx_sched *sch = scx_root;
2415 unsigned long opss;
2416
2417 /* dequeue is always temporary, don't reset runnable_at */
2418 clr_task_runnable(p, false);
2419
2420 /* acquire ensures that we see the preceding updates on QUEUED */
2421 opss = atomic_long_read_acquire(&p->scx.ops_state);
2422
2423 switch (opss & SCX_OPSS_STATE_MASK) {
2424 case SCX_OPSS_NONE:
2425 break;
2426 case SCX_OPSS_QUEUEING:
2427 /*
2428 * QUEUEING is started and finished while holding @p's rq lock.
2429 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2430 */
2431 BUG();
2432 case SCX_OPSS_QUEUED:
2433 if (SCX_HAS_OP(sch, dequeue))
2434 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
2435 p, deq_flags);
2436
2437 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2438 SCX_OPSS_NONE))
2439 break;
2440 fallthrough;
2441 case SCX_OPSS_DISPATCHING:
2442 /*
2443 * If @p is being dispatched from the BPF scheduler to a DSQ,
2444 * wait for the transfer to complete so that @p doesn't get
2445 * added to its DSQ after dequeueing is complete.
2446 *
2447 * As we're waiting on DISPATCHING with the rq locked, the
2448 * dispatching side shouldn't try to lock the rq while
2449 * DISPATCHING is set. See dispatch_to_local_dsq().
2450 *
2451 * DISPATCHING shouldn't have qseq set and control can reach
2452 * here with NONE @opss from the above QUEUED case block.
2453 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2454 */
2455 wait_ops_state(p, SCX_OPSS_DISPATCHING);
2456 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2457 break;
2458 }
2459 }
2460
2461 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2462 {
2463 struct scx_sched *sch = scx_root;
2464
2465 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2466 WARN_ON_ONCE(task_runnable(p));
2467 return true;
2468 }
2469
2470 ops_dequeue(rq, p, deq_flags);
2471
2472 /*
2473 * A currently running task which is going off @rq first gets dequeued
2474 * and then stops running. As we want running <-> stopping transitions
2475 * to be contained within runnable <-> quiescent transitions, trigger
2476 * ->stopping() early here instead of in put_prev_task_scx().
2477 *
2478 * @p may go through multiple stopping <-> running transitions between
2479 * here and put_prev_task_scx() if task attribute changes occur while
2480 * balance_scx() leaves @rq unlocked. However, they don't contain any
2481 * information meaningful to the BPF scheduler and can be suppressed by
2482 * skipping the callbacks if the task is !QUEUED.
2483 */
2484 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
2485 update_curr_scx(rq);
2486 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
2487 }
2488
2489 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
2490 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);
2491
2492 if (deq_flags & SCX_DEQ_SLEEP)
2493 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2494 else
2495 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2496
2497 p->scx.flags &= ~SCX_TASK_QUEUED;
2498 rq->scx.nr_running--;
2499 sub_nr_running(rq, 1);
2500
2501 dispatch_dequeue(rq, p);
2502 return true;
2503 }
2504
2505 static void yield_task_scx(struct rq *rq)
2506 {
2507 struct scx_sched *sch = scx_root;
2508 struct task_struct *p = rq->curr;
2509
2510 if (SCX_HAS_OP(sch, yield))
2511 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
2512 else
2513 p->scx.slice = 0;
2514 }
2515
2516 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2517 {
2518 struct scx_sched *sch = scx_root;
2519 struct task_struct *from = rq->curr;
2520
2521 if (SCX_HAS_OP(sch, yield))
2522 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
2523 from, to);
2524 else
2525 return false;
2526 }
2527
2528 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2529 struct scx_dispatch_q *src_dsq,
2530 struct rq *dst_rq)
2531 {
2532 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2533
2534 /* @dsq is locked and @p is on @dst_rq */
2535 lockdep_assert_held(&src_dsq->lock);
2536 lockdep_assert_rq_held(dst_rq);
2537
2538 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2539
2540 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2541 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2542 else
2543 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2544
2545 dsq_mod_nr(dst_dsq, 1);
2546 p->scx.dsq = dst_dsq;
2547 }
2548
2549 #ifdef CONFIG_SMP
2550 /**
2551 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2552 * @p: task to move
2553 * @enq_flags: %SCX_ENQ_*
2554 * @src_rq: rq to move the task from, locked on entry, released on return
2555 * @dst_rq: rq to move the task into, locked on return
2556 *
2557 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2558 */
2559 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2560 struct rq *src_rq, struct rq *dst_rq)
2561 {
2562 lockdep_assert_rq_held(src_rq);
2563
2564 /* the following marks @p MIGRATING which excludes dequeue */
2565 deactivate_task(src_rq, p, 0);
2566 set_task_cpu(p, cpu_of(dst_rq));
2567 p->scx.sticky_cpu = cpu_of(dst_rq);
2568
2569 raw_spin_rq_unlock(src_rq);
2570 raw_spin_rq_lock(dst_rq);
2571
2572 /*
2573 * We want to pass scx-specific enq_flags but activate_task() will
2574 * truncate the upper 32 bits. As we own @rq, we can pass them through
2575 * @rq->scx.extra_enq_flags instead.
2576 */
2577 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2578 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2579 dst_rq->scx.extra_enq_flags = enq_flags;
2580 activate_task(dst_rq, p, 0);
2581 dst_rq->scx.extra_enq_flags = 0;
2582 }
2583
2584 /*
2585 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2586 * differences:
2587 *
2588 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2589 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2590 * this CPU?".
2591 *
2592 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2593 * must be allowed to finish on the CPU that it's currently on regardless of
2594 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2595 * BPF scheduler shouldn't attempt to migrate a task which has migration
2596 * disabled.
2597 *
2598 * - The BPF scheduler is bypassed while the rq is offline and we can always say
2599 * no to the BPF scheduler initiated migrations while offline.
2600 *
2601 * The caller must ensure that @p and @rq are on different CPUs.
2602 */
2603 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
2604 struct task_struct *p, struct rq *rq,
2605 bool enforce)
2606 {
2607 int cpu = cpu_of(rq);
2608
2609 WARN_ON_ONCE(task_cpu(p) == cpu);
2610
2611 /*
2612 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2613 * the pinned CPU in migrate_disable_switch() while @p is being switched
2614 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2615 * updated and thus another CPU may see @p on a DSQ in between, leading to
2616 * @p passing the below task_allowed_on_cpu() check while migration is
2617 * disabled.
2618 *
2619 * Test the migration disabled state first as the race window is narrow
2620 * and the BPF scheduler failing to check migration disabled state can
2621 * easily be masked if task_allowed_on_cpu() is done first.
2622 */
2623 if (unlikely(is_migration_disabled(p))) {
2624 if (enforce)
2625 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2626 p->comm, p->pid, task_cpu(p), cpu);
2627 return false;
2628 }
2629
2630 /*
2631 * We don't require the BPF scheduler to avoid dispatching to offline
2632 * CPUs mostly for convenience but also because CPUs can go offline
2633 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2634 * picked CPU is outside the allowed mask.
2635 */
2636 if (!task_allowed_on_cpu(p, cpu)) {
2637 if (enforce)
2638 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2639 cpu, p->comm, p->pid);
2640 return false;
2641 }
2642
2643 if (!scx_rq_online(rq)) {
2644 if (enforce)
2645 __scx_add_event(scx_root,
2646 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
2647 return false;
2648 }
2649
2650 return true;
2651 }
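
/*
 * A well-behaved BPF scheduler avoids tripping the errors above by checking
 * the task's cpumask before picking a remote CPU's local DSQ, roughly:
 *
 *	if (bpf_cpumask_test_cpu(cpu, p->cpus_ptr))
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu,
 *				   SCX_SLICE_DFL, 0);
 *	else
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
 *
 * Migration-disabled tasks still need to be special-cased as described
 * above because their cpus_ptr may not yet have been narrowed to the pinned
 * CPU when another CPU looks at them.
 */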
2652
2653 /**
2654 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2655 * @p: target task
2656 * @dsq: locked DSQ @p is currently on
2657 * @src_rq: rq @p is currently on, stable with @dsq locked
2658 *
2659 * Called with @dsq locked but no rq's locked. We want to move @p to a different
2660 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2661 * required when transferring into a local DSQ. Even when transferring into a
2662 * non-local DSQ, it's better to use the same mechanism to protect against
2663 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2664 * @src_rq is locked, which e.g. scx_dump_task() depends on.
2665 *
2666 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2667 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2668 * this may race with dequeue, which can't drop the rq lock or fail, do a little
2669 * dancing from our side.
2670 *
2671 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2672 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2673 * would be cleared to -1. While other cpus may have updated it to different
2674 * values afterwards, as this operation can't be preempted or recurse, the
2675 * holding_cpu can never become this CPU again before we're done. Thus, we can
2676 * tell whether we lost to dequeue by testing whether the holding_cpu still
2677 * points to this CPU. See dispatch_dequeue() for the counterpart.
2678 *
2679 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2680 * still valid. %false if lost to dequeue.
2681 */
2682 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2683 struct scx_dispatch_q *dsq,
2684 struct rq *src_rq)
2685 {
2686 s32 cpu = raw_smp_processor_id();
2687
2688 lockdep_assert_held(&dsq->lock);
2689
2690 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2691 task_unlink_from_dsq(p, dsq);
2692 p->scx.holding_cpu = cpu;
2693
2694 raw_spin_unlock(&dsq->lock);
2695 raw_spin_rq_lock(src_rq);
2696
2697 /* task_rq couldn't have changed if we're still the holding cpu */
2698 return likely(p->scx.holding_cpu == cpu) &&
2699 !WARN_ON_ONCE(src_rq != task_rq(p));
2700 }
2701
2702 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2703 struct scx_dispatch_q *dsq, struct rq *src_rq)
2704 {
2705 raw_spin_rq_unlock(this_rq);
2706
2707 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2708 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2709 return true;
2710 } else {
2711 raw_spin_rq_unlock(src_rq);
2712 raw_spin_rq_lock(this_rq);
2713 return false;
2714 }
2715 }
2716 #else /* CONFIG_SMP */
2717 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2718 static inline bool task_can_run_on_remote_rq(struct scx_sched *sch, struct task_struct *p, struct rq *rq, bool enforce) { return false; }
2719 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2720 #endif /* CONFIG_SMP */
2721
2722 /**
2723 * move_task_between_dsqs() - Move a task from one DSQ to another
2724 * @sch: scx_sched being operated on
2725 * @p: target task
2726 * @enq_flags: %SCX_ENQ_*
2727 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2728 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2729 *
2730 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2731 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2732 * will change. As @p's task_rq is locked, this function doesn't need to use the
2733 * holding_cpu mechanism.
2734 *
2735 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2736 * return value, is locked.
2737 */
2738 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
2739 struct task_struct *p, u64 enq_flags,
2740 struct scx_dispatch_q *src_dsq,
2741 struct scx_dispatch_q *dst_dsq)
2742 {
2743 struct rq *src_rq = task_rq(p), *dst_rq;
2744
2745 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2746 lockdep_assert_held(&src_dsq->lock);
2747 lockdep_assert_rq_held(src_rq);
2748
2749 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2750 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2751 if (src_rq != dst_rq &&
2752 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2753 dst_dsq = find_global_dsq(p);
2754 dst_rq = src_rq;
2755 }
2756 } else {
2757 /* no need to migrate if destination is a non-local DSQ */
2758 dst_rq = src_rq;
2759 }
2760
2761 /*
2762 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
2763 * CPU, @p will be migrated.
2764 */
2765 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2766 /* @p is going from a non-local DSQ to a local DSQ */
2767 if (src_rq == dst_rq) {
2768 task_unlink_from_dsq(p, src_dsq);
2769 move_local_task_to_local_dsq(p, enq_flags,
2770 src_dsq, dst_rq);
2771 raw_spin_unlock(&src_dsq->lock);
2772 } else {
2773 raw_spin_unlock(&src_dsq->lock);
2774 move_remote_task_to_local_dsq(p, enq_flags,
2775 src_rq, dst_rq);
2776 }
2777 } else {
2778 /*
2779 * @p is going from a non-local DSQ to a non-local DSQ. As
2780 * $src_dsq is already locked, do an abbreviated dequeue.
2781 */
2782 task_unlink_from_dsq(p, src_dsq);
2783 p->scx.dsq = NULL;
2784 raw_spin_unlock(&src_dsq->lock);
2785
2786 dispatch_enqueue(sch, dst_dsq, p, enq_flags);
2787 }
2788
2789 return dst_rq;
2790 }
2791
2792 /*
2793 * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2794 * banging on the same DSQ on a large NUMA system to the point where switching
2795 * to the bypass mode can take a long time. Inject artificial delays while the
2796 * bypass mode is switching to guarantee timely completion.
2797 */
2798 static void scx_breather(struct rq *rq)
2799 {
2800 u64 until;
2801
2802 lockdep_assert_rq_held(rq);
2803
2804 if (likely(!atomic_read(&scx_breather_depth)))
2805 return;
2806
2807 raw_spin_rq_unlock(rq);
2808
2809 until = ktime_get_ns() + NSEC_PER_MSEC;
2810
2811 do {
2812 int cnt = 1024;
2813 while (atomic_read(&scx_breather_depth) && --cnt)
2814 cpu_relax();
2815 } while (atomic_read(&scx_breather_depth) &&
2816 time_before64(ktime_get_ns(), until));
2817
2818 raw_spin_rq_lock(rq);
2819 }
2820
2821 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
2822 struct scx_dispatch_q *dsq)
2823 {
2824 struct task_struct *p;
2825 retry:
2826 /*
2827 * This retry loop can repeatedly race against scx_bypass() dequeueing
2828 * tasks from @dsq trying to put the system into the bypass mode. On
2829 * some multi-socket machines (e.g. 2x Intel 8480c), this can live-lock
2830 * the machine into soft lockups. Give a breather.
2831 */
2832 scx_breather(rq);
2833
2834 /*
2835 * The caller can't expect to successfully consume a task if the task's
2836 * addition to @dsq isn't guaranteed to be visible somehow. Test
2837 * @dsq->list without locking and skip if it seems empty.
2838 */
2839 if (list_empty(&dsq->list))
2840 return false;
2841
2842 raw_spin_lock(&dsq->lock);
2843
2844 nldsq_for_each_task(p, dsq) {
2845 struct rq *task_rq = task_rq(p);
2846
2847 if (rq == task_rq) {
2848 task_unlink_from_dsq(p, dsq);
2849 move_local_task_to_local_dsq(p, 0, dsq, rq);
2850 raw_spin_unlock(&dsq->lock);
2851 return true;
2852 }
2853
2854 if (task_can_run_on_remote_rq(sch, p, rq, false)) {
2855 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2856 return true;
2857 goto retry;
2858 }
2859 }
2860
2861 raw_spin_unlock(&dsq->lock);
2862 return false;
2863 }
2864
2865 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
2866 {
2867 int node = cpu_to_node(cpu_of(rq));
2868
2869 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
2870 }
2871
2872 /**
2873 * dispatch_to_local_dsq - Dispatch a task to a local dsq
2874 * @sch: scx_sched being operated on
2875 * @rq: current rq which is locked
2876 * @dst_dsq: destination DSQ
2877 * @p: task to dispatch
2878 * @enq_flags: %SCX_ENQ_*
2879 *
2880 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2881 * DSQ. This function performs all the synchronization dancing needed because
2882 * local DSQs are protected with rq locks.
2883 *
2884 * The caller must have exclusive ownership of @p (e.g. through
2885 * %SCX_OPSS_DISPATCHING).
2886 */
2887 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
2888 struct scx_dispatch_q *dst_dsq,
2889 struct task_struct *p, u64 enq_flags)
2890 {
2891 struct rq *src_rq = task_rq(p);
2892 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2893 #ifdef CONFIG_SMP
2894 struct rq *locked_rq = rq;
2895 #endif
2896
2897 /*
2898 * We're synchronized against dequeue through DISPATCHING. As @p can't
2899 * be dequeued, its task_rq and cpus_allowed are stable too.
2900 *
2901 * If dispatching to @rq that @p is already on, no lock dancing needed.
2902 */
2903 if (rq == src_rq && rq == dst_rq) {
2904 dispatch_enqueue(sch, dst_dsq, p,
2905 enq_flags | SCX_ENQ_CLEAR_OPSS);
2906 return;
2907 }
2908
2909 #ifdef CONFIG_SMP
2910 if (src_rq != dst_rq &&
2911 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2912 dispatch_enqueue(sch, find_global_dsq(p), p,
2913 enq_flags | SCX_ENQ_CLEAR_OPSS);
2914 return;
2915 }
2916
2917 /*
2918 * @p is on a possibly remote @src_rq which we need to lock to move the
2919 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2920 * on DISPATCHING, so we can't grab @src_rq lock while holding
2921 * DISPATCHING.
2922 *
2923 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2924 * we're moving from a DSQ and use the same mechanism - mark the task
2925 * under transfer with holding_cpu, release DISPATCHING and then follow
2926 * the same protocol. See unlink_dsq_and_lock_src_rq().
2927 */
2928 p->scx.holding_cpu = raw_smp_processor_id();
2929
2930 /* store_release ensures that dequeue sees the above */
2931 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2932
2933 /* switch to @src_rq lock */
2934 if (locked_rq != src_rq) {
2935 raw_spin_rq_unlock(locked_rq);
2936 locked_rq = src_rq;
2937 raw_spin_rq_lock(src_rq);
2938 }
2939
2940 /* task_rq couldn't have changed if we're still the holding cpu */
2941 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2942 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2943 /*
2944 * If @p is staying on the same rq, there's no need to go
2945 * through the full deactivate/activate cycle. Optimize by
2946 * abbreviating move_remote_task_to_local_dsq().
2947 */
2948 if (src_rq == dst_rq) {
2949 p->scx.holding_cpu = -1;
2950 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
2951 enq_flags);
2952 } else {
2953 move_remote_task_to_local_dsq(p, enq_flags,
2954 src_rq, dst_rq);
2955 /* task has been moved to dst_rq, which is now locked */
2956 locked_rq = dst_rq;
2957 }
2958
2959 /* if the destination CPU is idle, wake it up */
2960 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2961 resched_curr(dst_rq);
2962 }
2963
2964 /* switch back to @rq lock */
2965 if (locked_rq != rq) {
2966 raw_spin_rq_unlock(locked_rq);
2967 raw_spin_rq_lock(rq);
2968 }
2969 #else /* CONFIG_SMP */
2970 BUG(); /* control can not reach here on UP */
2971 #endif /* CONFIG_SMP */
2972 }
2973
2974 /**
2975 * finish_dispatch - Asynchronously finish dispatching a task
2976 * @rq: current rq which is locked
2977 * @p: task to finish dispatching
2978 * @qseq_at_dispatch: qseq when @p started getting dispatched
2979 * @dsq_id: destination DSQ ID
2980 * @enq_flags: %SCX_ENQ_*
2981 *
2982 * Dispatching to local DSQs may need to wait for queueing to complete or
2983 * require rq lock dancing. As we don't want to do either while inside
2984 * ops.dispatch() to avoid locking order inversion, we split dispatching into
2985 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2986 * task and its qseq. Once ops.dispatch() returns, this function is called to
2987 * finish up.
2988 *
2989 * There is no guarantee that @p is still valid for dispatching or even that it
2990 * was valid in the first place. Make sure that the task is still owned by the
2991 * BPF scheduler and claim the ownership before dispatching.
2992 */
2993 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
2994 struct task_struct *p,
2995 unsigned long qseq_at_dispatch,
2996 u64 dsq_id, u64 enq_flags)
2997 {
2998 struct scx_dispatch_q *dsq;
2999 unsigned long opss;
3000
3001 touch_core_sched_dispatch(rq, p);
3002 retry:
3003 /*
3004 * No need for _acquire here. @p is accessed only after a successful
3005 * try_cmpxchg to DISPATCHING.
3006 */
3007 opss = atomic_long_read(&p->scx.ops_state);
3008
3009 switch (opss & SCX_OPSS_STATE_MASK) {
3010 case SCX_OPSS_DISPATCHING:
3011 case SCX_OPSS_NONE:
3012 /* someone else already got to it */
3013 return;
3014 case SCX_OPSS_QUEUED:
3015 /*
3016 * If qseq doesn't match, @p has gone through at least one
3017 * dispatch/dequeue and re-enqueue cycle between
3018 * scx_bpf_dsq_insert() and here and we have no claim on it.
3019 */
3020 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
3021 return;
3022
3023 /*
3024 * While we know @p is accessible, we don't yet have a claim on
3025 * it - the BPF scheduler is allowed to dispatch tasks
3026 * spuriously and there can be a racing dequeue attempt. Let's
3027 * claim @p by atomically transitioning it from QUEUED to
3028 * DISPATCHING.
3029 */
3030 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
3031 SCX_OPSS_DISPATCHING)))
3032 break;
3033 goto retry;
3034 case SCX_OPSS_QUEUEING:
3035 /*
3036 * do_enqueue_task() is in the process of transferring the task
3037 * to the BPF scheduler while holding @p's rq lock. As we aren't
3038 * holding any kernel or BPF resource that the enqueue path may
3039 * depend upon, it's safe to wait.
3040 */
3041 wait_ops_state(p, opss);
3042 goto retry;
3043 }
3044
3045 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
3046
3047 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p);
3048
3049 if (dsq->id == SCX_DSQ_LOCAL)
3050 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
3051 else
3052 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
3053 }
3054
3055 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
3056 {
3057 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
3058 u32 u;
3059
3060 for (u = 0; u < dspc->cursor; u++) {
3061 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
3062
3063 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
3064 ent->enq_flags);
3065 }
3066
3067 dspc->nr_tasks += dspc->cursor;
3068 dspc->cursor = 0;
3069 }
3070
3071 static int balance_one(struct rq *rq, struct task_struct *prev)
3072 {
3073 struct scx_sched *sch = scx_root;
3074 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
3075 bool prev_on_scx = prev->sched_class == &ext_sched_class;
3076 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
3077 int nr_loops = SCX_DSP_MAX_LOOPS;
3078
3079 lockdep_assert_rq_held(rq);
3080 rq->scx.flags |= SCX_RQ_IN_BALANCE;
3081 rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
3082
3083 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
3084 unlikely(rq->scx.cpu_released)) {
3085 /*
3086 * If the previous sched_class for the current CPU was not SCX,
3087 * notify the BPF scheduler that it again has control of the
3088 * core. This callback complements ->cpu_release(), which is
3089 * emitted in switch_class().
3090 */
3091 if (SCX_HAS_OP(sch, cpu_acquire))
3092 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq,
3093 cpu_of(rq), NULL);
3094 rq->scx.cpu_released = false;
3095 }
3096
3097 if (prev_on_scx) {
3098 update_curr_scx(rq);
3099
3100 /*
3101 * If @prev is runnable & has slice left, it has priority and
3102 * fetching more just increases latency for the fetched tasks.
3103 * Tell pick_task_scx() to keep running @prev. If the BPF
3104 * scheduler wants to handle this explicitly, it should
3105 * implement ->cpu_release().
3106 *
3107 * See scx_disable_workfn() for the explanation on the bypassing
3108 * test.
3109 */
3110 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
3111 rq->scx.flags |= SCX_RQ_BAL_KEEP;
3112 goto has_tasks;
3113 }
3114 }
3115
3116 /* if there already are tasks to run, nothing to do */
3117 if (rq->scx.local_dsq.nr)
3118 goto has_tasks;
3119
3120 if (consume_global_dsq(sch, rq))
3121 goto has_tasks;
3122
3123 if (unlikely(!SCX_HAS_OP(sch, dispatch)) ||
3124 scx_rq_bypassing(rq) || !scx_rq_online(rq))
3125 goto no_tasks;
3126
3127 dspc->rq = rq;
3128
3129 /*
3130 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
3131 * the local DSQ might still end up empty after a successful
3132 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
3133 * produced some tasks, retry. The BPF scheduler may depend on this
3134 * looping behavior to simplify its implementation.
3135 */
3136 do {
3137 dspc->nr_tasks = 0;
3138
3139 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq,
3140 cpu_of(rq), prev_on_scx ? prev : NULL);
3141
3142 flush_dispatch_buf(sch, rq);
3143
3144 if (prev_on_rq && prev->scx.slice) {
3145 rq->scx.flags |= SCX_RQ_BAL_KEEP;
3146 goto has_tasks;
3147 }
3148 if (rq->scx.local_dsq.nr)
3149 goto has_tasks;
3150 if (consume_global_dsq(sch, rq))
3151 goto has_tasks;
3152
3153 /*
3154 * ops.dispatch() can trap us in this loop by repeatedly
3155 * dispatching ineligible tasks. Break out once in a while to
3156 * allow the watchdog to run. As IRQ can't be enabled in
3157 * balance(), we want to complete this scheduling cycle and then
3158 * start a new one. IOW, we want to call resched_curr() on the
3159 * next, most likely idle, task, not the current one. Use
3160 * scx_bpf_kick_cpu() for deferred kicking.
3161 */
3162 if (unlikely(!--nr_loops)) {
3163 scx_bpf_kick_cpu(cpu_of(rq), 0);
3164 break;
3165 }
3166 } while (dspc->nr_tasks);
3167
3168 no_tasks:
3169 /*
3170 * Didn't find another task to run. Keep running @prev unless
3171 * %SCX_OPS_ENQ_LAST is in effect.
3172 */
3173 if (prev_on_rq &&
3174 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) {
3175 rq->scx.flags |= SCX_RQ_BAL_KEEP;
3176 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
3177 goto has_tasks;
3178 }
3179 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
3180 return false;
3181
3182 has_tasks:
3183 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
3184 return true;
3185 }
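
/*
 * The loop above gives ops.dispatch() repeated chances to refill the local
 * DSQ. A minimal BPF-side counterpart simply shifts work from a shared DSQ;
 * a sketch, assuming the move kfunc is named scx_bpf_dsq_move_to_local() as
 * in recent kernels:
 *
 *	void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(MY_DSQ_ID);
 *	}
 *
 * If nothing is moved and the buffered dispatches also come up empty,
 * balance_one() eventually falls through to the no_tasks path above.
 */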
3186
3187 static int balance_scx(struct rq *rq, struct task_struct *prev,
3188 struct rq_flags *rf)
3189 {
3190 int ret;
3191
3192 rq_unpin_lock(rq, rf);
3193
3194 ret = balance_one(rq, prev);
3195
3196 #ifdef CONFIG_SCHED_SMT
3197 /*
3198 * When core-sched is enabled, this ops.balance() call will be followed
3199 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
3200 * siblings too.
3201 */
3202 if (sched_core_enabled(rq)) {
3203 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
3204 int scpu;
3205
3206 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
3207 struct rq *srq = cpu_rq(scpu);
3208 struct task_struct *sprev = srq->curr;
3209
3210 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
3211 update_rq_clock(srq);
3212 balance_one(srq, sprev);
3213 }
3214 }
3215 #endif
3216 rq_repin_lock(rq, rf);
3217
3218 return ret;
3219 }
3220
3221 static void process_ddsp_deferred_locals(struct rq *rq)
3222 {
3223 struct task_struct *p;
3224
3225 lockdep_assert_rq_held(rq);
3226
3227 /*
3228 * Now that @rq can be unlocked, execute the deferred enqueueing of
3229 * tasks directly dispatched to the local DSQs of other CPUs. See
3230 * direct_dispatch(). Keep popping from the head instead of using
3231 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
3232 * temporarily.
3233 */
3234 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
3235 struct task_struct, scx.dsq_list.node))) {
3236 struct scx_sched *sch = scx_root;
3237 struct scx_dispatch_q *dsq;
3238
3239 list_del_init(&p->scx.dsq_list.node);
3240
3241 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
3242 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
3243 dispatch_to_local_dsq(sch, rq, dsq, p,
3244 p->scx.ddsp_enq_flags);
3245 }
3246 }
3247
3248 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
3249 {
3250 struct scx_sched *sch = scx_root;
3251
3252 if (p->scx.flags & SCX_TASK_QUEUED) {
3253 /*
3254 * Core-sched might decide to execute @p before it is
3255 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
3256 */
3257 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
3258 dispatch_dequeue(rq, p);
3259 }
3260
3261 p->se.exec_start = rq_clock_task(rq);
3262
3263 /* see dequeue_task_scx() on why we skip when !QUEUED */
3264 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
3265 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p);
3266
3267 clr_task_runnable(p, true);
3268
3269 /*
3270 * @p is getting newly scheduled or got kicked after someone updated its
3271 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
3272 */
3273 if ((p->scx.slice == SCX_SLICE_INF) !=
3274 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
3275 if (p->scx.slice == SCX_SLICE_INF)
3276 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
3277 else
3278 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
3279
3280 sched_update_tick_dependency(rq);
3281
3282 /*
3283 * For now, let's refresh the load_avgs just when transitioning
3284 * in and out of nohz. In the future, we might want to add a
3285 * mechanism which calls the following periodically on
3286 * tick-stopped CPUs.
3287 */
3288 update_other_load_avgs(rq);
3289 }
3290 }
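/*
 * Illustrative sketch, not part of this file: a BPF scheduler can opt a CPU
 * into tickless execution by dispatching with an infinite slice, which is
 * what flips %SCX_RQ_CAN_STOP_TICK above. The kfunc name follows the current
 * scx BPF API and is an assumption here.
 *
 *	// Run @p until it yields or is preempted; the tick may be stopped.
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF, 0);
 */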
3291
3292 static enum scx_cpu_preempt_reason
3293 preempt_reason_from_class(const struct sched_class *class)
3294 {
3295 #ifdef CONFIG_SMP
3296 if (class == &stop_sched_class)
3297 return SCX_CPU_PREEMPT_STOP;
3298 #endif
3299 if (class == &dl_sched_class)
3300 return SCX_CPU_PREEMPT_DL;
3301 if (class == &rt_sched_class)
3302 return SCX_CPU_PREEMPT_RT;
3303 return SCX_CPU_PREEMPT_UNKNOWN;
3304 }
3305
3306 static void switch_class(struct rq *rq, struct task_struct *next)
3307 {
3308 struct scx_sched *sch = scx_root;
3309 const struct sched_class *next_class = next->sched_class;
3310
3311 #ifdef CONFIG_SMP
3312 /*
3313 * Pairs with the smp_load_acquire() issued by a CPU in
3314 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
3315 * resched.
3316 */
3317 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
3318 #endif
3319 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
3320 return;
3321
3322 /*
3323 * The callback is conceptually meant to convey that the CPU is no
3324 * longer under the control of SCX. Therefore, don't invoke the callback
3325 * if the next class is below SCX (in which case the BPF scheduler has
3326 * actively decided not to schedule any tasks on the CPU).
3327 */
3328 if (sched_class_above(&ext_sched_class, next_class))
3329 return;
3330
3331 /*
3332 * At this point we know that SCX was preempted by a higher priority
3333 * sched_class, so invoke the ->cpu_release() callback if we have not
3334 * done so already. We only send the callback once between SCX being
3335 * preempted, and it regaining control of the CPU.
3336 *
3337 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3338 * next time that balance_scx() is invoked.
3339 */
3340 if (!rq->scx.cpu_released) {
3341 if (SCX_HAS_OP(sch, cpu_release)) {
3342 struct scx_cpu_release_args args = {
3343 .reason = preempt_reason_from_class(next_class),
3344 .task = next,
3345 };
3346
3347 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq,
3348 cpu_of(rq), &args);
3349 }
3350 rq->scx.cpu_released = true;
3351 }
3352 }
3353
3354 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3355 struct task_struct *next)
3356 {
3357 struct scx_sched *sch = scx_root;
3358 update_curr_scx(rq);
3359
3360 /* see dequeue_task_scx() on why we skip when !QUEUED */
3361 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3362 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true);
3363
3364 if (p->scx.flags & SCX_TASK_QUEUED) {
3365 set_task_runnable(rq, p);
3366
3367 /*
3368 * If @p has slice left and is being put, @p is getting
3369 * preempted by a higher priority scheduler class or core-sched
3370 * forcing a different task. Leave it at the head of the local
3371 * DSQ.
3372 */
3373 if (p->scx.slice && !scx_rq_bypassing(rq)) {
3374 dispatch_enqueue(sch, &rq->scx.local_dsq, p,
3375 SCX_ENQ_HEAD);
3376 goto switch_class;
3377 }
3378
3379 /*
3380 * If @p is runnable but we're about to enter a lower
3381 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3382 * ops.enqueue() that @p is the only one available for this cpu,
3383 * which should trigger an explicit follow-up scheduling event.
3384 */
3385 if (sched_class_above(&ext_sched_class, next->sched_class)) {
3386 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
3387 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3388 } else {
3389 do_enqueue_task(rq, p, 0, -1);
3390 }
3391 }
3392
3393 switch_class:
3394 if (next && next->sched_class != &ext_sched_class)
3395 switch_class(rq, next);
3396 }
3397
3398 static struct task_struct *first_local_task(struct rq *rq)
3399 {
3400 return list_first_entry_or_null(&rq->scx.local_dsq.list,
3401 struct task_struct, scx.dsq_list.node);
3402 }
3403
3404 static struct task_struct *pick_task_scx(struct rq *rq)
3405 {
3406 struct task_struct *prev = rq->curr;
3407 struct task_struct *p;
3408 bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3409 bool kick_idle = false;
3410
3411 /*
3412 * WORKAROUND:
3413 *
3414 	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
3415 * have gone through balance_scx(). Unfortunately, there currently is a
3416 * bug where fair could say yes on balance() but no on pick_task(),
3417 * which then ends up calling pick_task_scx() without preceding
3418 * balance_scx().
3419 *
3420 * Keep running @prev if possible and avoid stalling from entering idle
3421 * without balancing.
3422 *
3423 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3424 * if pick_task_scx() is called without preceding balance_scx().
3425 */
3426 if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3427 if (prev->scx.flags & SCX_TASK_QUEUED) {
3428 keep_prev = true;
3429 } else {
3430 keep_prev = false;
3431 kick_idle = true;
3432 }
3433 } else if (unlikely(keep_prev &&
3434 prev->sched_class != &ext_sched_class)) {
3435 /*
3436 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
3437 * conditional on scx_enabled() and may have been skipped.
3438 */
3439 WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
3440 keep_prev = false;
3441 }
3442
3443 /*
3444 * If balance_scx() is telling us to keep running @prev, replenish slice
3445 * if necessary and keep running @prev. Otherwise, pop the first one
3446 * from the local DSQ.
3447 */
3448 if (keep_prev) {
3449 p = prev;
3450 if (!p->scx.slice)
3451 refill_task_slice_dfl(p);
3452 } else {
3453 p = first_local_task(rq);
3454 if (!p) {
3455 if (kick_idle)
3456 scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3457 return NULL;
3458 }
3459
3460 if (unlikely(!p->scx.slice)) {
3461 struct scx_sched *sch = scx_root;
3462
3463 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) {
3464 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3465 p->comm, p->pid, __func__);
3466 sch->warned_zero_slice = true;
3467 }
3468 refill_task_slice_dfl(p);
3469 }
3470 }
3471
3472 return p;
3473 }
3474
3475 #ifdef CONFIG_SCHED_CORE
3476 /**
3477 * scx_prio_less - Task ordering for core-sched
3478 * @a: task A
3479 * @b: task B
3480 * @in_fi: in forced idle state
3481 *
3482 * Core-sched is implemented as an additional scheduling layer on top of the
3483 * usual sched_class'es and needs to find out the expected task ordering. For
3484 * SCX, core-sched calls this function to interrogate the task ordering.
3485 *
3486 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3487 * to implement the default task ordering. The older the timestamp, the higher
3488 * priority the task - the global FIFO ordering matching the default scheduling
3489 * behavior.
3490 *
3491 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3492 * implement FIFO ordering within each local DSQ. See pick_task_scx().
3493 */
3494 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3495 bool in_fi)
3496 {
3497 struct scx_sched *sch = scx_root;
3498
3499 /*
3500 * The const qualifiers are dropped from task_struct pointers when
3501 * calling ops.core_sched_before(). Accesses are controlled by the
3502 * verifier.
3503 */
3504 if (SCX_HAS_OP(sch, core_sched_before) &&
3505 !scx_rq_bypassing(task_rq(a)))
3506 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before,
3507 NULL,
3508 (struct task_struct *)a,
3509 (struct task_struct *)b);
3510 else
3511 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3512 }
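/*
 * Worked example of the default ordering: if @a became runnable at
 * core_sched_at == 100 and @b at 200, time_after64(100, 200) is false, so @a
 * is not "less" than @b and wins the core-sched pick - older timestamp,
 * higher priority, i.e. global FIFO.
 */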
3513 #endif /* CONFIG_SCHED_CORE */
3514
3515 #ifdef CONFIG_SMP
3516
3517 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3518 {
3519 struct scx_sched *sch = scx_root;
3520 bool rq_bypass;
3521
3522 /*
3523 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3524 * can be a good migration opportunity with low cache and memory
3525 * footprint. Returning a CPU different than @prev_cpu triggers
3526 * immediate rq migration. However, for SCX, as the current rq
3527 * association doesn't dictate where the task is going to run, this
3528 * doesn't fit well. If necessary, we can later add a dedicated method
3529 * which can decide to preempt self to force it through the regular
3530 * scheduling path.
3531 */
3532 if (unlikely(wake_flags & WF_EXEC))
3533 return prev_cpu;
3534
3535 rq_bypass = scx_rq_bypassing(task_rq(p));
3536 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) {
3537 s32 cpu;
3538 struct task_struct **ddsp_taskp;
3539
3540 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3541 WARN_ON_ONCE(*ddsp_taskp);
3542 *ddsp_taskp = p;
3543
3544 cpu = SCX_CALL_OP_TASK_RET(sch,
3545 SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3546 select_cpu, NULL, p, prev_cpu,
3547 wake_flags);
3548 p->scx.selected_cpu = cpu;
3549 *ddsp_taskp = NULL;
3550 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
3551 return cpu;
3552 else
3553 return prev_cpu;
3554 } else {
3555 s32 cpu;
3556
3557 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
3558 if (cpu >= 0) {
3559 refill_task_slice_dfl(p);
3560 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3561 } else {
3562 cpu = prev_cpu;
3563 }
3564 p->scx.selected_cpu = cpu;
3565
3566 if (rq_bypass)
3567 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
3568 return cpu;
3569 }
3570 }
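/*
 * Illustrative sketch, not part of this file: because @direct_dispatch_task
 * is set around the ops.select_cpu() invocation above, a BPF scheduler may
 * direct-dispatch the waking task from ops.select_cpu(). Kfunc names follow
 * the current scx BPF API and are assumptions here.
 *
 *	s32 BPF_STRUCT_OPS(minimal_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
 *						 &is_idle);
 *
 *		// Queue directly on the chosen CPU's local DSQ if it's idle.
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */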
3571
3572 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3573 {
3574 run_deferred(rq);
3575 }
3576
3577 static void set_cpus_allowed_scx(struct task_struct *p,
3578 struct affinity_context *ac)
3579 {
3580 struct scx_sched *sch = scx_root;
3581
3582 set_cpus_allowed_common(p, ac);
3583
3584 /*
3585 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3586 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3587 * scheduler the effective one.
3588 *
3589 * Fine-grained memory write control is enforced by BPF making the const
3590 * designation pointless. Cast it away when calling the operation.
3591 */
3592 if (SCX_HAS_OP(sch, set_cpumask))
3593 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL,
3594 p, (struct cpumask *)p->cpus_ptr);
3595 }
3596
3597 static void handle_hotplug(struct rq *rq, bool online)
3598 {
3599 struct scx_sched *sch = scx_root;
3600 int cpu = cpu_of(rq);
3601
3602 atomic_long_inc(&scx_hotplug_seq);
3603
3604 /*
3605 * scx_root updates are protected by cpus_read_lock() and will stay
3606 * stable here. Note that we can't depend on scx_enabled() test as the
3607 * hotplug ops need to be enabled before __scx_enabled is set.
3608 */
3609 if (unlikely(!sch))
3610 return;
3611
3612 if (scx_enabled())
3613 scx_idle_update_selcpu_topology(&sch->ops);
3614
3615 if (online && SCX_HAS_OP(sch, cpu_online))
3616 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);
3617 else if (!online && SCX_HAS_OP(sch, cpu_offline))
3618 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu);
3619 else
3620 scx_exit(sch, SCX_EXIT_UNREG_KERN,
3621 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3622 "cpu %d going %s, exiting scheduler", cpu,
3623 online ? "online" : "offline");
3624 }
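/*
 * Hedged sketch, not part of this file: a scheduler implementation that
 * receives the exit info above through ops.exit() can key off the
 * system-defined action bits to decide whether to reload itself.
 * restart_scheduler() is a hypothetical helper.
 *
 *	if (ei->exit_code & SCX_ECODE_ACT_RESTART)
 *		restart_scheduler();
 */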
3625
3626 void scx_rq_activate(struct rq *rq)
3627 {
3628 handle_hotplug(rq, true);
3629 }
3630
3631 void scx_rq_deactivate(struct rq *rq)
3632 {
3633 handle_hotplug(rq, false);
3634 }
3635
3636 static void rq_online_scx(struct rq *rq)
3637 {
3638 rq->scx.flags |= SCX_RQ_ONLINE;
3639 }
3640
3641 static void rq_offline_scx(struct rq *rq)
3642 {
3643 rq->scx.flags &= ~SCX_RQ_ONLINE;
3644 }
3645
3646 #endif /* CONFIG_SMP */
3647
3648 static bool check_rq_for_timeouts(struct rq *rq)
3649 {
3650 struct scx_sched *sch;
3651 struct task_struct *p;
3652 struct rq_flags rf;
3653 bool timed_out = false;
3654
3655 rq_lock_irqsave(rq, &rf);
3656 sch = rcu_dereference_bh(scx_root);
3657 if (unlikely(!sch))
3658 goto out_unlock;
3659
3660 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3661 unsigned long last_runnable = p->scx.runnable_at;
3662
3663 if (unlikely(time_after(jiffies,
3664 last_runnable + scx_watchdog_timeout))) {
3665 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3666
3667 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3668 "%s[%d] failed to run for %u.%03us",
3669 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000);
3670 timed_out = true;
3671 break;
3672 }
3673 }
3674 out_unlock:
3675 rq_unlock_irqrestore(rq, &rf);
3676 return timed_out;
3677 }
3678
3679 static void scx_watchdog_workfn(struct work_struct *work)
3680 {
3681 int cpu;
3682
3683 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3684
3685 for_each_online_cpu(cpu) {
3686 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3687 break;
3688
3689 cond_resched();
3690 }
3691 queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3692 scx_watchdog_timeout / 2);
3693 }
3694
3695 void scx_tick(struct rq *rq)
3696 {
3697 struct scx_sched *sch;
3698 unsigned long last_check;
3699
3700 if (!scx_enabled())
3701 return;
3702
3703 sch = rcu_dereference_bh(scx_root);
3704 if (unlikely(!sch))
3705 return;
3706
3707 last_check = READ_ONCE(scx_watchdog_timestamp);
3708 if (unlikely(time_after(jiffies,
3709 last_check + READ_ONCE(scx_watchdog_timeout)))) {
3710 u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3711
3712 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3713 "watchdog failed to check in for %u.%03us",
3714 dur_ms / 1000, dur_ms % 1000);
3715 }
3716
3717 update_other_load_avgs(rq);
3718 }
3719
3720 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3721 {
3722 struct scx_sched *sch = scx_root;
3723
3724 update_curr_scx(rq);
3725
3726 /*
3727 * While disabling, always resched and refresh core-sched timestamp as
3728 * we can't trust the slice management or ops.core_sched_before().
3729 */
3730 if (scx_rq_bypassing(rq)) {
3731 curr->scx.slice = 0;
3732 touch_core_sched(rq, curr);
3733 } else if (SCX_HAS_OP(sch, tick)) {
3734 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr);
3735 }
3736
3737 if (!curr->scx.slice)
3738 resched_curr(rq);
3739 }
3740
3741 #ifdef CONFIG_EXT_GROUP_SCHED
3742 static struct cgroup *tg_cgrp(struct task_group *tg)
3743 {
3744 /*
3745 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3746 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3747 * root cgroup.
3748 */
3749 if (tg && tg->css.cgroup)
3750 return tg->css.cgroup;
3751 else
3752 return &cgrp_dfl_root.cgrp;
3753 }
3754
3755 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
3756
3757 #else /* CONFIG_EXT_GROUP_SCHED */
3758
3759 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3760
3761 #endif /* CONFIG_EXT_GROUP_SCHED */
3762
3763 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3764 {
3765 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3766 }
3767
3768 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3769 {
3770 enum scx_task_state prev_state = scx_get_task_state(p);
3771 bool warn = false;
3772
3773 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3774
3775 switch (state) {
3776 case SCX_TASK_NONE:
3777 break;
3778 case SCX_TASK_INIT:
3779 warn = prev_state != SCX_TASK_NONE;
3780 break;
3781 case SCX_TASK_READY:
3782 warn = prev_state == SCX_TASK_NONE;
3783 break;
3784 case SCX_TASK_ENABLED:
3785 warn = prev_state != SCX_TASK_READY;
3786 break;
3787 default:
3788 warn = true;
3789 return;
3790 }
3791
3792 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3793 prev_state, state, p->comm, p->pid);
3794
3795 p->scx.flags &= ~SCX_TASK_STATE_MASK;
3796 p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3797 }
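/*
 * The checks above accept the following task state transitions, derived
 * directly from the switch statement:
 *
 *	NONE -> INIT -> READY <-> ENABLED
 *
 * INIT, READY and ENABLED all drop back to NONE through scx_exit_task(),
 * with ENABLED first stepping down to READY via scx_disable_task().
 */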
3798
3799 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3800 {
3801 struct scx_sched *sch = scx_root;
3802 int ret;
3803
3804 p->scx.disallow = false;
3805
3806 if (SCX_HAS_OP(sch, init_task)) {
3807 struct scx_init_task_args args = {
3808 SCX_INIT_TASK_ARGS_CGROUP(tg)
3809 .fork = fork,
3810 };
3811
3812 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL,
3813 p, &args);
3814 if (unlikely(ret)) {
3815 ret = ops_sanitize_err(sch, "init_task", ret);
3816 return ret;
3817 }
3818 }
3819
3820 scx_set_task_state(p, SCX_TASK_INIT);
3821
3822 if (p->scx.disallow) {
3823 if (!fork) {
3824 struct rq *rq;
3825 struct rq_flags rf;
3826
3827 rq = task_rq_lock(p, &rf);
3828
3829 /*
3830 * We're in the load path and @p->policy will be applied
3831 * right after. Reverting @p->policy here and rejecting
3832 * %SCHED_EXT transitions from scx_check_setscheduler()
3833 * guarantees that if ops.init_task() sets @p->disallow,
3834 * @p can never be in SCX.
3835 */
3836 if (p->policy == SCHED_EXT) {
3837 p->policy = SCHED_NORMAL;
3838 atomic_long_inc(&scx_nr_rejected);
3839 }
3840
3841 task_rq_unlock(rq, p, &rf);
3842 } else if (p->policy == SCHED_EXT) {
3843 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
3844 p->comm, p->pid);
3845 }
3846 }
3847
3848 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3849 return 0;
3850 }
3851
3852 static void scx_enable_task(struct task_struct *p)
3853 {
3854 struct scx_sched *sch = scx_root;
3855 struct rq *rq = task_rq(p);
3856 u32 weight;
3857
3858 lockdep_assert_rq_held(rq);
3859
3860 /*
3861 * Set the weight before calling ops.enable() so that the scheduler
3862 * doesn't see a stale value if they inspect the task struct.
3863 */
3864 if (task_has_idle_policy(p))
3865 weight = WEIGHT_IDLEPRIO;
3866 else
3867 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3868
3869 p->scx.weight = sched_weight_to_cgroup(weight);
3870
3871 if (SCX_HAS_OP(sch, enable))
3872 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p);
3873 scx_set_task_state(p, SCX_TASK_ENABLED);
3874
3875 if (SCX_HAS_OP(sch, set_weight))
3876 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
3877 p, p->scx.weight);
3878 }
3879
3880 static void scx_disable_task(struct task_struct *p)
3881 {
3882 struct scx_sched *sch = scx_root;
3883 struct rq *rq = task_rq(p);
3884
3885 lockdep_assert_rq_held(rq);
3886 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3887
3888 if (SCX_HAS_OP(sch, disable))
3889 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
3890 scx_set_task_state(p, SCX_TASK_READY);
3891 }
3892
3893 static void scx_exit_task(struct task_struct *p)
3894 {
3895 struct scx_sched *sch = scx_root;
3896 struct scx_exit_task_args args = {
3897 .cancelled = false,
3898 };
3899
3900 lockdep_assert_rq_held(task_rq(p));
3901
3902 switch (scx_get_task_state(p)) {
3903 case SCX_TASK_NONE:
3904 return;
3905 case SCX_TASK_INIT:
3906 args.cancelled = true;
3907 break;
3908 case SCX_TASK_READY:
3909 break;
3910 case SCX_TASK_ENABLED:
3911 scx_disable_task(p);
3912 break;
3913 default:
3914 WARN_ON_ONCE(true);
3915 return;
3916 }
3917
3918 if (SCX_HAS_OP(sch, exit_task))
3919 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
3920 p, &args);
3921 scx_set_task_state(p, SCX_TASK_NONE);
3922 }
3923
3924 void init_scx_entity(struct sched_ext_entity *scx)
3925 {
3926 memset(scx, 0, sizeof(*scx));
3927 INIT_LIST_HEAD(&scx->dsq_list.node);
3928 RB_CLEAR_NODE(&scx->dsq_priq);
3929 scx->sticky_cpu = -1;
3930 scx->holding_cpu = -1;
3931 INIT_LIST_HEAD(&scx->runnable_node);
3932 scx->runnable_at = jiffies;
3933 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3934 scx->slice = SCX_SLICE_DFL;
3935 }
3936
3937 void scx_pre_fork(struct task_struct *p)
3938 {
3939 /*
3940 * BPF scheduler enable/disable paths want to be able to iterate and
3941 * update all tasks which can become complex when racing forks. As
3942 * enable/disable are very cold paths, let's use a percpu_rwsem to
3943 * exclude forks.
3944 */
3945 percpu_down_read(&scx_fork_rwsem);
3946 }
3947
3948 int scx_fork(struct task_struct *p)
3949 {
3950 percpu_rwsem_assert_held(&scx_fork_rwsem);
3951
3952 if (scx_init_task_enabled)
3953 return scx_init_task(p, task_group(p), true);
3954 else
3955 return 0;
3956 }
3957
3958 void scx_post_fork(struct task_struct *p)
3959 {
3960 if (scx_init_task_enabled) {
3961 scx_set_task_state(p, SCX_TASK_READY);
3962
3963 /*
3964 * Enable the task immediately if it's running on sched_ext.
3965 * Otherwise, it'll be enabled in switching_to_scx() if and
3966 * when it's ever configured to run with a SCHED_EXT policy.
3967 */
3968 if (p->sched_class == &ext_sched_class) {
3969 struct rq_flags rf;
3970 struct rq *rq;
3971
3972 rq = task_rq_lock(p, &rf);
3973 scx_enable_task(p);
3974 task_rq_unlock(rq, p, &rf);
3975 }
3976 }
3977
3978 spin_lock_irq(&scx_tasks_lock);
3979 list_add_tail(&p->scx.tasks_node, &scx_tasks);
3980 spin_unlock_irq(&scx_tasks_lock);
3981
3982 percpu_up_read(&scx_fork_rwsem);
3983 }
3984
3985 void scx_cancel_fork(struct task_struct *p)
3986 {
3987 if (scx_enabled()) {
3988 struct rq *rq;
3989 struct rq_flags rf;
3990
3991 rq = task_rq_lock(p, &rf);
3992 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3993 scx_exit_task(p);
3994 task_rq_unlock(rq, p, &rf);
3995 }
3996
3997 percpu_up_read(&scx_fork_rwsem);
3998 }
3999
4000 void sched_ext_free(struct task_struct *p)
4001 {
4002 unsigned long flags;
4003
4004 spin_lock_irqsave(&scx_tasks_lock, flags);
4005 list_del_init(&p->scx.tasks_node);
4006 spin_unlock_irqrestore(&scx_tasks_lock, flags);
4007
4008 /*
4009 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
4010 * transitions can't race us. Disable ops for @p.
4011 */
4012 if (scx_get_task_state(p) != SCX_TASK_NONE) {
4013 struct rq_flags rf;
4014 struct rq *rq;
4015
4016 rq = task_rq_lock(p, &rf);
4017 scx_exit_task(p);
4018 task_rq_unlock(rq, p, &rf);
4019 }
4020 }
4021
4022 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
4023 const struct load_weight *lw)
4024 {
4025 struct scx_sched *sch = scx_root;
4026
4027 lockdep_assert_rq_held(task_rq(p));
4028
4029 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
4030 if (SCX_HAS_OP(sch, set_weight))
4031 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
4032 p, p->scx.weight);
4033 }
4034
4035 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
4036 {
4037 }
4038
4039 static void switching_to_scx(struct rq *rq, struct task_struct *p)
4040 {
4041 struct scx_sched *sch = scx_root;
4042
4043 scx_enable_task(p);
4044
4045 /*
4046 * set_cpus_allowed_scx() is not called while @p is associated with a
4047 * different scheduler class. Keep the BPF scheduler up-to-date.
4048 */
4049 if (SCX_HAS_OP(sch, set_cpumask))
4050 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq,
4051 p, (struct cpumask *)p->cpus_ptr);
4052 }
4053
4054 static void switched_from_scx(struct rq *rq, struct task_struct *p)
4055 {
4056 scx_disable_task(p);
4057 }
4058
4059 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
4060 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
4061
4062 int scx_check_setscheduler(struct task_struct *p, int policy)
4063 {
4064 lockdep_assert_rq_held(task_rq(p));
4065
4066 /* if disallow, reject transitioning into SCX */
4067 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
4068 p->policy != policy && policy == SCHED_EXT)
4069 return -EACCES;
4070
4071 return 0;
4072 }
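/*
 * Illustrative sketch, not part of this file: if ops.init_task() set
 * p->scx.disallow, a later attempt to switch the task to SCHED_EXT is
 * rejected with the -EACCES above. Hypothetical user-space snippet:
 *
 *	struct sched_param sp = { .sched_priority = 0 };
 *
 *	if (sched_setscheduler(pid, SCHED_EXT, &sp) < 0 && errno == EACCES)
 *		fprintf(stderr, "task disallowed by the BPF scheduler\n");
 */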
4073
4074 #ifdef CONFIG_NO_HZ_FULL
4075 bool scx_can_stop_tick(struct rq *rq)
4076 {
4077 struct task_struct *p = rq->curr;
4078
4079 if (scx_rq_bypassing(rq))
4080 return false;
4081
4082 if (p->sched_class != &ext_sched_class)
4083 return true;
4084
4085 /*
4086 * @rq can dispatch from different DSQs, so we can't tell whether it
4087 * needs the tick or not by looking at nr_running. Allow stopping ticks
4088 * iff the BPF scheduler indicated so. See set_next_task_scx().
4089 */
4090 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4091 }
4092 #endif
4093
4094 #ifdef CONFIG_EXT_GROUP_SCHED
4095
4096 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
4097 static bool scx_cgroup_enabled;
4098
4099 void scx_tg_init(struct task_group *tg)
4100 {
4101 tg->scx_weight = CGROUP_WEIGHT_DFL;
4102 }
4103
4104 int scx_tg_online(struct task_group *tg)
4105 {
4106 struct scx_sched *sch = scx_root;
4107 int ret = 0;
4108
4109 WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4110
4111 percpu_down_read(&scx_cgroup_rwsem);
4112
4113 if (scx_cgroup_enabled) {
4114 if (SCX_HAS_OP(sch, cgroup_init)) {
4115 struct scx_cgroup_init_args args =
4116 { .weight = tg->scx_weight };
4117
4118 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init,
4119 NULL, tg->css.cgroup, &args);
4120 if (ret)
4121 ret = ops_sanitize_err(sch, "cgroup_init", ret);
4122 }
4123 if (ret == 0)
4124 tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4125 } else {
4126 tg->scx_flags |= SCX_TG_ONLINE;
4127 }
4128
4129 percpu_up_read(&scx_cgroup_rwsem);
4130 return ret;
4131 }
4132
4133 void scx_tg_offline(struct task_group *tg)
4134 {
4135 struct scx_sched *sch = scx_root;
4136
4137 WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
4138
4139 percpu_down_read(&scx_cgroup_rwsem);
4140
4141 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
4142 (tg->scx_flags & SCX_TG_INITED))
4143 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
4144 tg->css.cgroup);
4145 tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4146
4147 percpu_up_read(&scx_cgroup_rwsem);
4148 }
4149
4150 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4151 {
4152 struct scx_sched *sch = scx_root;
4153 struct cgroup_subsys_state *css;
4154 struct task_struct *p;
4155 int ret;
4156
4157 /* released in scx_finish/cancel_attach() */
4158 percpu_down_read(&scx_cgroup_rwsem);
4159
4160 if (!scx_cgroup_enabled)
4161 return 0;
4162
4163 cgroup_taskset_for_each(p, css, tset) {
4164 struct cgroup *from = tg_cgrp(task_group(p));
4165 struct cgroup *to = tg_cgrp(css_tg(css));
4166
4167 WARN_ON_ONCE(p->scx.cgrp_moving_from);
4168
4169 /*
4170 * sched_move_task() omits identity migrations. Let's match the
4171 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4172 * always match one-to-one.
4173 */
4174 if (from == to)
4175 continue;
4176
4177 if (SCX_HAS_OP(sch, cgroup_prep_move)) {
4178 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED,
4179 cgroup_prep_move, NULL,
4180 p, from, css->cgroup);
4181 if (ret)
4182 goto err;
4183 }
4184
4185 p->scx.cgrp_moving_from = from;
4186 }
4187
4188 return 0;
4189
4190 err:
4191 cgroup_taskset_for_each(p, css, tset) {
4192 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4193 p->scx.cgrp_moving_from)
4194 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
4195 p, p->scx.cgrp_moving_from, css->cgroup);
4196 p->scx.cgrp_moving_from = NULL;
4197 }
4198
4199 percpu_up_read(&scx_cgroup_rwsem);
4200 return ops_sanitize_err(sch, "cgroup_prep_move", ret);
4201 }
4202
4203 void scx_cgroup_move_task(struct task_struct *p)
4204 {
4205 struct scx_sched *sch = scx_root;
4206
4207 if (!scx_cgroup_enabled)
4208 return;
4209
4210 /*
4211 * @p must have ops.cgroup_prep_move() called on it and thus
4212 * cgrp_moving_from set.
4213 */
4214 if (SCX_HAS_OP(sch, cgroup_move) &&
4215 !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4216 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL,
4217 p, p->scx.cgrp_moving_from,
4218 tg_cgrp(task_group(p)));
4219 p->scx.cgrp_moving_from = NULL;
4220 }
4221
4222 void scx_cgroup_finish_attach(void)
4223 {
4224 percpu_up_read(&scx_cgroup_rwsem);
4225 }
4226
4227 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4228 {
4229 struct scx_sched *sch = scx_root;
4230 struct cgroup_subsys_state *css;
4231 struct task_struct *p;
4232
4233 if (!scx_cgroup_enabled)
4234 goto out_unlock;
4235
4236 cgroup_taskset_for_each(p, css, tset) {
4237 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4238 p->scx.cgrp_moving_from)
4239 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
4240 p, p->scx.cgrp_moving_from, css->cgroup);
4241 p->scx.cgrp_moving_from = NULL;
4242 }
4243 out_unlock:
4244 percpu_up_read(&scx_cgroup_rwsem);
4245 }
4246
4247 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4248 {
4249 struct scx_sched *sch = scx_root;
4250
4251 percpu_down_read(&scx_cgroup_rwsem);
4252
4253 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
4254 tg->scx_weight != weight)
4255 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
4256 tg_cgrp(tg), weight);
4257
4258 tg->scx_weight = weight;
4259
4260 percpu_up_read(&scx_cgroup_rwsem);
4261 }
4262
4263 void scx_group_set_idle(struct task_group *tg, bool idle)
4264 {
4265 /* TODO: Implement ops->cgroup_set_idle() */
4266 }
4267
4268 static void scx_cgroup_lock(void)
4269 {
4270 percpu_down_write(&scx_cgroup_rwsem);
4271 }
4272
4273 static void scx_cgroup_unlock(void)
4274 {
4275 percpu_up_write(&scx_cgroup_rwsem);
4276 }
4277
4278 #else /* CONFIG_EXT_GROUP_SCHED */
4279
4280 static inline void scx_cgroup_lock(void) {}
4281 static inline void scx_cgroup_unlock(void) {}
4282
4283 #endif /* CONFIG_EXT_GROUP_SCHED */
4284
4285 /*
4286 * Omitted operations:
4287 *
4288 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4289 * isn't tied to the CPU at that point. Preemption is implemented by resetting
4290 * the victim task's slice to 0 and triggering reschedule on the target CPU.
4291 *
4292 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4293 *
4294 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4295 * their current sched_class. Call them directly from sched core instead.
4296 */
4297 DEFINE_SCHED_CLASS(ext) = {
4298 .enqueue_task = enqueue_task_scx,
4299 .dequeue_task = dequeue_task_scx,
4300 .yield_task = yield_task_scx,
4301 .yield_to_task = yield_to_task_scx,
4302
4303 .wakeup_preempt = wakeup_preempt_scx,
4304
4305 .balance = balance_scx,
4306 .pick_task = pick_task_scx,
4307
4308 .put_prev_task = put_prev_task_scx,
4309 .set_next_task = set_next_task_scx,
4310
4311 #ifdef CONFIG_SMP
4312 .select_task_rq = select_task_rq_scx,
4313 .task_woken = task_woken_scx,
4314 .set_cpus_allowed = set_cpus_allowed_scx,
4315
4316 .rq_online = rq_online_scx,
4317 .rq_offline = rq_offline_scx,
4318 #endif
4319
4320 .task_tick = task_tick_scx,
4321
4322 .switching_to = switching_to_scx,
4323 .switched_from = switched_from_scx,
4324 .switched_to = switched_to_scx,
4325 .reweight_task = reweight_task_scx,
4326 .prio_changed = prio_changed_scx,
4327
4328 .update_curr = update_curr_scx,
4329
4330 #ifdef CONFIG_UCLAMP_TASK
4331 .uclamp_enabled = 1,
4332 #endif
4333 };
4334
4335 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4336 {
4337 memset(dsq, 0, sizeof(*dsq));
4338
4339 raw_spin_lock_init(&dsq->lock);
4340 INIT_LIST_HEAD(&dsq->list);
4341 dsq->id = dsq_id;
4342 }
4343
4344 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4345 {
4346 struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4347 struct scx_dispatch_q *dsq, *tmp_dsq;
4348
4349 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4350 kfree_rcu(dsq, rcu);
4351 }
4352
4353 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4354
4355 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
4356 {
4357 struct scx_dispatch_q *dsq;
4358 unsigned long flags;
4359
4360 rcu_read_lock();
4361
4362 dsq = find_user_dsq(sch, dsq_id);
4363 if (!dsq)
4364 goto out_unlock_rcu;
4365
4366 raw_spin_lock_irqsave(&dsq->lock, flags);
4367
4368 if (dsq->nr) {
4369 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4370 dsq->id, dsq->nr);
4371 goto out_unlock_dsq;
4372 }
4373
4374 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
4375 dsq_hash_params))
4376 goto out_unlock_dsq;
4377
4378 /*
4379 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4380 * queueing more tasks. As this function can be called from anywhere,
4381 * freeing is bounced through an irq work to avoid nesting RCU
4382 * operations inside scheduler locks.
4383 */
4384 dsq->id = SCX_DSQ_INVALID;
4385 llist_add(&dsq->free_node, &dsqs_to_free);
4386 irq_work_queue(&free_dsq_irq_work);
4387
4388 out_unlock_dsq:
4389 raw_spin_unlock_irqrestore(&dsq->lock, flags);
4390 out_unlock_rcu:
4391 rcu_read_unlock();
4392 }
4393
4394 #ifdef CONFIG_EXT_GROUP_SCHED
4395 static void scx_cgroup_exit(struct scx_sched *sch)
4396 {
4397 struct cgroup_subsys_state *css;
4398
4399 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4400
4401 scx_cgroup_enabled = false;
4402
4403 /*
4404 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4405 * cgroups and exit all the inited ones, all online cgroups are exited.
4406 */
4407 rcu_read_lock();
4408 css_for_each_descendant_post(css, &root_task_group.css) {
4409 struct task_group *tg = css_tg(css);
4410
4411 if (!(tg->scx_flags & SCX_TG_INITED))
4412 continue;
4413 tg->scx_flags &= ~SCX_TG_INITED;
4414
4415 if (!sch->ops.cgroup_exit)
4416 continue;
4417
4418 if (WARN_ON_ONCE(!css_tryget(css)))
4419 continue;
4420 rcu_read_unlock();
4421
4422 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
4423 css->cgroup);
4424
4425 rcu_read_lock();
4426 css_put(css);
4427 }
4428 rcu_read_unlock();
4429 }
4430
4431 static int scx_cgroup_init(struct scx_sched *sch)
4432 {
4433 struct cgroup_subsys_state *css;
4434 int ret;
4435
4436 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4437
4438 /*
4439 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4440 * cgroups and init, all online cgroups are initialized.
4441 */
4442 rcu_read_lock();
4443 css_for_each_descendant_pre(css, &root_task_group.css) {
4444 struct task_group *tg = css_tg(css);
4445 struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4446
4447 if ((tg->scx_flags &
4448 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4449 continue;
4450
4451 if (!sch->ops.cgroup_init) {
4452 tg->scx_flags |= SCX_TG_INITED;
4453 continue;
4454 }
4455
4456 if (WARN_ON_ONCE(!css_tryget(css)))
4457 continue;
4458 rcu_read_unlock();
4459
4460 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
4461 css->cgroup, &args);
4462 if (ret) {
4463 css_put(css);
4464 scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
4465 return ret;
4466 }
4467 tg->scx_flags |= SCX_TG_INITED;
4468
4469 rcu_read_lock();
4470 css_put(css);
4471 }
4472 rcu_read_unlock();
4473
4474 WARN_ON_ONCE(scx_cgroup_enabled);
4475 scx_cgroup_enabled = true;
4476
4477 return 0;
4478 }
4479
4480 #else
4481 static void scx_cgroup_exit(struct scx_sched *sch) {}
4482 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
4483 #endif
4484
4485
4486 /********************************************************************************
4487 * Sysfs interface and ops enable/disable.
4488 */
4489
4490 #define SCX_ATTR(_name) \
4491 static struct kobj_attribute scx_attr_##_name = { \
4492 .attr = { .name = __stringify(_name), .mode = 0444 }, \
4493 .show = scx_attr_##_name##_show, \
4494 }
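/*
 * For example, SCX_ATTR(state) below expands to roughly:
 *
 *	static struct kobj_attribute scx_attr_state = {
 *		.attr = { .name = "state", .mode = 0444 },
 *		.show = scx_attr_state_show,
 *	};
 */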
4495
4496 static ssize_t scx_attr_state_show(struct kobject *kobj,
4497 struct kobj_attribute *ka, char *buf)
4498 {
4499 return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]);
4500 }
4501 SCX_ATTR(state);
4502
4503 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4504 struct kobj_attribute *ka, char *buf)
4505 {
4506 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4507 }
4508 SCX_ATTR(switch_all);
4509
4510 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4511 struct kobj_attribute *ka, char *buf)
4512 {
4513 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4514 }
4515 SCX_ATTR(nr_rejected);
4516
4517 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4518 struct kobj_attribute *ka, char *buf)
4519 {
4520 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4521 }
4522 SCX_ATTR(hotplug_seq);
4523
4524 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4525 struct kobj_attribute *ka, char *buf)
4526 {
4527 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4528 }
4529 SCX_ATTR(enable_seq);
4530
4531 static struct attribute *scx_global_attrs[] = {
4532 &scx_attr_state.attr,
4533 &scx_attr_switch_all.attr,
4534 &scx_attr_nr_rejected.attr,
4535 &scx_attr_hotplug_seq.attr,
4536 &scx_attr_enable_seq.attr,
4537 NULL,
4538 };
4539
4540 static const struct attribute_group scx_global_attr_group = {
4541 .attrs = scx_global_attrs,
4542 };
4543
4544 static void free_exit_info(struct scx_exit_info *ei);
4545
4546 static void scx_sched_free_rcu_work(struct work_struct *work)
4547 {
4548 struct rcu_work *rcu_work = to_rcu_work(work);
4549 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
4550 struct rhashtable_iter rht_iter;
4551 struct scx_dispatch_q *dsq;
4552 int node;
4553
4554 kthread_stop(sch->helper->task);
4555 free_percpu(sch->event_stats_cpu);
4556
4557 for_each_node_state(node, N_POSSIBLE)
4558 kfree(sch->global_dsqs[node]);
4559 kfree(sch->global_dsqs);
4560
4561 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
4562 do {
4563 rhashtable_walk_start(&rht_iter);
4564
4565 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
4566 destroy_dsq(sch, dsq->id);
4567
4568 rhashtable_walk_stop(&rht_iter);
4569 } while (dsq == ERR_PTR(-EAGAIN));
4570 rhashtable_walk_exit(&rht_iter);
4571
4572 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4573 free_exit_info(sch->exit_info);
4574 kfree(sch);
4575 }
4576
4577 static void scx_kobj_release(struct kobject *kobj)
4578 {
4579 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4580
4581 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
4582 queue_rcu_work(system_unbound_wq, &sch->rcu_work);
4583 }
4584
4585 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4586 struct kobj_attribute *ka, char *buf)
4587 {
4588 return sysfs_emit(buf, "%s\n", scx_root->ops.name);
4589 }
4590 SCX_ATTR(ops);
4591
4592 #define scx_attr_event_show(buf, at, events, kind) ({ \
4593 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \
4594 })
4595
4596 static ssize_t scx_attr_events_show(struct kobject *kobj,
4597 struct kobj_attribute *ka, char *buf)
4598 {
4599 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4600 struct scx_event_stats events;
4601 int at = 0;
4602
4603 scx_read_events(sch, &events);
4604 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
4605 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4606 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
4607 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
4608 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4609 at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL);
4610 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
4611 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
4612 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
4613 return at;
4614 }
4615 SCX_ATTR(events);
4616
4617 static struct attribute *scx_sched_attrs[] = {
4618 &scx_attr_ops.attr,
4619 &scx_attr_events.attr,
4620 NULL,
4621 };
4622 ATTRIBUTE_GROUPS(scx_sched);
4623
4624 static const struct kobj_type scx_ktype = {
4625 .release = scx_kobj_release,
4626 .sysfs_ops = &kobj_sysfs_ops,
4627 .default_groups = scx_sched_groups,
4628 };
4629
4630 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4631 {
4632 return add_uevent_var(env, "SCXOPS=%s", scx_root->ops.name);
4633 }
4634
4635 static const struct kset_uevent_ops scx_uevent_ops = {
4636 .uevent = scx_uevent,
4637 };
4638
4639 /*
4640 * Used by sched_fork() and __setscheduler_prio() to pick the matching
4641 * sched_class. dl/rt are already handled.
4642 */
4643 bool task_should_scx(int policy)
4644 {
4645 if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
4646 return false;
4647 if (READ_ONCE(scx_switching_all))
4648 return true;
4649 return policy == SCHED_EXT;
4650 }
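/*
 * Hedged sketch, not part of this file: callers pick the sched_class based on
 * the return value, roughly:
 *
 *	p->sched_class = task_should_scx(policy) ? &ext_sched_class
 *						 : &fair_sched_class;
 */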
4651
4652 bool scx_allow_ttwu_queue(const struct task_struct *p)
4653 {
4654 return !scx_enabled() ||
4655 (scx_root->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP) ||
4656 p->sched_class != &ext_sched_class;
4657 }
4658
4659 /**
4660 * scx_softlockup - sched_ext softlockup handler
4661 * @dur_s: number of seconds of CPU stuck due to soft lockup
4662 *
4663 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4664 * live-lock the system by making many CPUs target the same DSQ to the point
4665 * where soft-lockup detection triggers. This function is called from
4666 * soft-lockup watchdog when the triggering point is close and tries to unjam
4667 * the system by enabling the breather and aborting the BPF scheduler.
4668 */
4669 void scx_softlockup(u32 dur_s)
4670 {
4671 struct scx_sched *sch;
4672
4673 rcu_read_lock();
4674
4675 sch = rcu_dereference(scx_root);
4676 if (unlikely(!sch))
4677 goto out_unlock;
4678
4679 switch (scx_enable_state()) {
4680 case SCX_ENABLING:
4681 case SCX_ENABLED:
4682 break;
4683 default:
4684 goto out_unlock;
4685 }
4686
4687 /* allow only one instance, cleared at the end of scx_bypass() */
4688 if (test_and_set_bit(0, &scx_in_softlockup))
4689 goto out_unlock;
4690
4691 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4692 smp_processor_id(), dur_s, scx_root->ops.name);
4693
4694 /*
4695 * Some CPUs may be trapped in the dispatch paths. Enable breather
4696 	 * immediately; otherwise, we might not even be able to get to scx_bypass().
4697 */
4698 atomic_inc(&scx_breather_depth);
4699
4700 scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s);
4701 out_unlock:
4702 rcu_read_unlock();
4703 }
4704
4705 static void scx_clear_softlockup(void)
4706 {
4707 if (test_and_clear_bit(0, &scx_in_softlockup))
4708 atomic_dec(&scx_breather_depth);
4709 }
4710
4711 /**
4712 * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
4713 * @bypass: true for bypass, false for unbypass
4714 *
4715 * Bypassing guarantees that all runnable tasks make forward progress without
4716 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4717 * be held by tasks that the BPF scheduler is forgetting to run, which
4718 * unfortunately also excludes toggling the static branches.
4719 *
4720 * Let's work around by overriding a couple ops and modifying behaviors based on
4721 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4722 * to force global FIFO scheduling.
4723 *
4724 * - ops.select_cpu() is ignored and the default select_cpu() is used.
4725 *
4726 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4727 * %SCX_OPS_ENQ_LAST is also ignored.
4728 *
4729 * - ops.dispatch() is ignored.
4730 *
4731 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4732 * can't be trusted. Whenever a tick triggers, the running task is rotated to
4733 * the tail of the queue with core_sched_at touched.
4734 *
4735 * - pick_next_task() suppresses zero slice warning.
4736 *
4737 * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4738 * operations.
4739 *
4740 * - scx_prio_less() reverts to the default core_sched_at order.
4741 */
4742 static void scx_bypass(bool bypass)
4743 {
4744 static DEFINE_RAW_SPINLOCK(bypass_lock);
4745 static unsigned long bypass_timestamp;
4746 struct scx_sched *sch;
4747 unsigned long flags;
4748 int cpu;
4749
4750 raw_spin_lock_irqsave(&bypass_lock, flags);
4751 sch = rcu_dereference_bh(scx_root);
4752
4753 if (bypass) {
4754 scx_bypass_depth++;
4755 WARN_ON_ONCE(scx_bypass_depth <= 0);
4756 if (scx_bypass_depth != 1)
4757 goto unlock;
4758 bypass_timestamp = ktime_get_ns();
4759 if (sch)
4760 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
4761 } else {
4762 scx_bypass_depth--;
4763 WARN_ON_ONCE(scx_bypass_depth < 0);
4764 if (scx_bypass_depth != 0)
4765 goto unlock;
4766 if (sch)
4767 scx_add_event(sch, SCX_EV_BYPASS_DURATION,
4768 ktime_get_ns() - bypass_timestamp);
4769 }
4770
4771 atomic_inc(&scx_breather_depth);
4772
4773 /*
4774 * No task property is changing. We just need to make sure all currently
4775 * queued tasks are re-queued according to the new scx_rq_bypassing()
4776 * state. As an optimization, walk each rq's runnable_list instead of
4777 * the scx_tasks list.
4778 *
4779 * This function can't trust the scheduler and thus can't use
4780 * cpus_read_lock(). Walk all possible CPUs instead of online.
4781 */
4782 for_each_possible_cpu(cpu) {
4783 struct rq *rq = cpu_rq(cpu);
4784 struct task_struct *p, *n;
4785
4786 raw_spin_rq_lock(rq);
4787
4788 if (bypass) {
4789 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4790 rq->scx.flags |= SCX_RQ_BYPASSING;
4791 } else {
4792 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4793 rq->scx.flags &= ~SCX_RQ_BYPASSING;
4794 }
4795
4796 /*
4797 * We need to guarantee that no tasks are on the BPF scheduler
4798 * while bypassing. Either we see enabled or the enable path
4799 * sees scx_rq_bypassing() before moving tasks to SCX.
4800 */
4801 if (!scx_enabled()) {
4802 raw_spin_rq_unlock(rq);
4803 continue;
4804 }
4805
4806 /*
4807 * The use of list_for_each_entry_safe_reverse() is required
4808 * because each task is going to be removed from and added back
4809 * to the runnable_list during iteration. Because they're added
4810 * to the tail of the list, safe reverse iteration can still
4811 * visit all nodes.
4812 */
4813 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4814 scx.runnable_node) {
4815 struct sched_enq_and_set_ctx ctx;
4816
4817 /* cycling deq/enq is enough, see the function comment */
4818 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4819 sched_enq_and_set_task(&ctx);
4820 }
4821
4822 /* resched to restore ticks and idle state */
4823 if (cpu_online(cpu) || cpu == smp_processor_id())
4824 resched_curr(rq);
4825
4826 raw_spin_rq_unlock(rq);
4827 }
4828
4829 atomic_dec(&scx_breather_depth);
4830 unlock:
4831 raw_spin_unlock_irqrestore(&bypass_lock, flags);
4832 scx_clear_softlockup();
4833 }
4834
4835 static void free_exit_info(struct scx_exit_info *ei)
4836 {
4837 kvfree(ei->dump);
4838 kfree(ei->msg);
4839 kfree(ei->bt);
4840 kfree(ei);
4841 }
4842
4843 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4844 {
4845 struct scx_exit_info *ei;
4846
4847 ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4848 if (!ei)
4849 return NULL;
4850
4851 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4852 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4853 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
4854
4855 if (!ei->bt || !ei->msg || !ei->dump) {
4856 free_exit_info(ei);
4857 return NULL;
4858 }
4859
4860 return ei;
4861 }
4862
4863 static const char *scx_exit_reason(enum scx_exit_kind kind)
4864 {
4865 switch (kind) {
4866 case SCX_EXIT_UNREG:
4867 return "unregistered from user space";
4868 case SCX_EXIT_UNREG_BPF:
4869 return "unregistered from BPF";
4870 case SCX_EXIT_UNREG_KERN:
4871 return "unregistered from the main kernel";
4872 case SCX_EXIT_SYSRQ:
4873 return "disabled by sysrq-S";
4874 case SCX_EXIT_ERROR:
4875 return "runtime error";
4876 case SCX_EXIT_ERROR_BPF:
4877 return "scx_bpf_error";
4878 case SCX_EXIT_ERROR_STALL:
4879 return "runnable task stall";
4880 default:
4881 return "<UNKNOWN>";
4882 }
4883 }
4884
4885 static void scx_disable_workfn(struct kthread_work *work)
4886 {
4887 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
4888 struct scx_exit_info *ei = sch->exit_info;
4889 struct scx_task_iter sti;
4890 struct task_struct *p;
4891 int kind, cpu;
4892
4893 kind = atomic_read(&sch->exit_kind);
4894 while (true) {
4895 if (kind == SCX_EXIT_DONE) /* already disabled? */
4896 return;
4897 WARN_ON_ONCE(kind == SCX_EXIT_NONE);
4898 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
4899 break;
4900 }
4901 ei->kind = kind;
4902 ei->reason = scx_exit_reason(ei->kind);
4903
4904 /* guarantee forward progress by bypassing scx_ops */
4905 scx_bypass(true);
4906
4907 switch (scx_set_enable_state(SCX_DISABLING)) {
4908 case SCX_DISABLING:
4909 WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4910 break;
4911 case SCX_DISABLED:
4912 pr_warn("sched_ext: ops error detected without ops (%s)\n",
4913 sch->exit_info->msg);
4914 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
4915 goto done;
4916 default:
4917 break;
4918 }
4919
4920 /*
4921 * Here, every runnable task is guaranteed to make forward progress and
4922 * we can safely use blocking synchronization constructs. Actually
4923 * disable ops.
4924 */
4925 mutex_lock(&scx_enable_mutex);
4926
4927 static_branch_disable(&__scx_switched_all);
4928 WRITE_ONCE(scx_switching_all, false);
4929
4930 /*
4931 * Shut down cgroup support before tasks so that the cgroup attach path
4932 * doesn't race against scx_exit_task().
4933 */
4934 scx_cgroup_lock();
4935 scx_cgroup_exit(sch);
4936 scx_cgroup_unlock();
4937
4938 /*
4939 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4940 * must be switched out and exited synchronously.
4941 */
4942 percpu_down_write(&scx_fork_rwsem);
4943
4944 scx_init_task_enabled = false;
4945
4946 scx_task_iter_start(&sti);
4947 while ((p = scx_task_iter_next_locked(&sti))) {
4948 const struct sched_class *old_class = p->sched_class;
4949 const struct sched_class *new_class =
4950 __setscheduler_class(p->policy, p->prio);
4951 struct sched_enq_and_set_ctx ctx;
4952
4953 if (old_class != new_class && p->se.sched_delayed)
4954 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
4955
4956 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4957
4958 p->sched_class = new_class;
4959 check_class_changing(task_rq(p), p, old_class);
4960
4961 sched_enq_and_set_task(&ctx);
4962
4963 check_class_changed(task_rq(p), p, old_class, p->prio);
4964 scx_exit_task(p);
4965 }
4966 scx_task_iter_stop(&sti);
4967 percpu_up_write(&scx_fork_rwsem);
4968
4969 /*
4970 * Invalidate all the rq clocks to prevent getting outdated
4971 * rq clocks from a previous scx scheduler.
4972 */
4973 for_each_possible_cpu(cpu) {
4974 struct rq *rq = cpu_rq(cpu);
4975 scx_rq_clock_invalidate(rq);
4976 }
4977
4978 /* no task is on scx, turn off all the switches and flush in-progress calls */
4979 static_branch_disable(&__scx_enabled);
4980 bitmap_zero(sch->has_op, SCX_OPI_END);
4981 scx_idle_disable();
4982 synchronize_rcu();
4983
4984 if (ei->kind >= SCX_EXIT_ERROR) {
4985 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4986 sch->ops.name, ei->reason);
4987
4988 if (ei->msg[0] != '\0')
4989 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
4990 #ifdef CONFIG_STACKTRACE
4991 stack_trace_print(ei->bt, ei->bt_len, 2);
4992 #endif
4993 } else {
4994 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4995 sch->ops.name, ei->reason);
4996 }
4997
4998 if (sch->ops.exit)
4999 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
5000
5001 cancel_delayed_work_sync(&scx_watchdog_work);
5002
5003 /*
5004 * scx_root clearing must be inside cpus_read_lock(). See
5005 * handle_hotplug().
5006 */
5007 cpus_read_lock();
5008 RCU_INIT_POINTER(scx_root, NULL);
5009 cpus_read_unlock();
5010
5011 /*
5012 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs
5013 * could observe an object of the same name still in the hierarchy when
5014 * the next scheduler is loaded.
5015 */
5016 kobject_del(&sch->kobj);
5017
5018 free_percpu(scx_dsp_ctx);
5019 scx_dsp_ctx = NULL;
5020 scx_dsp_max_batch = 0;
5021
5022 mutex_unlock(&scx_enable_mutex);
5023
5024 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
5025 done:
5026 scx_bypass(false);
5027 }
5028
5029 static void scx_disable(enum scx_exit_kind kind)
5030 {
5031 int none = SCX_EXIT_NONE;
5032 struct scx_sched *sch;
5033
5034 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5035 kind = SCX_EXIT_ERROR;
5036
5037 rcu_read_lock();
5038 sch = rcu_dereference(scx_root);
5039 if (sch) {
5040 atomic_try_cmpxchg(&sch->exit_kind, &none, kind);
5041 kthread_queue_work(sch->helper, &sch->disable_work);
5042 }
5043 rcu_read_unlock();
5044 }
5045
5046 static void dump_newline(struct seq_buf *s)
5047 {
5048 trace_sched_ext_dump("");
5049
5050 /* @s may be zero sized and seq_buf triggers WARN if so */
5051 if (s->size)
5052 seq_buf_putc(s, '\n');
5053 }
5054
5055 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
5056 {
5057 va_list args;
5058
5059 #ifdef CONFIG_TRACEPOINTS
5060 if (trace_sched_ext_dump_enabled()) {
5061 /* protected by scx_dump_state()::dump_lock */
5062 static char line_buf[SCX_EXIT_MSG_LEN];
5063
5064 va_start(args, fmt);
5065 vscnprintf(line_buf, sizeof(line_buf), fmt, args);
5066 va_end(args);
5067
5068 trace_sched_ext_dump(line_buf);
5069 }
5070 #endif
5071 /* @s may be zero sized and seq_buf triggers WARN if so */
5072 if (s->size) {
5073 va_start(args, fmt);
5074 seq_buf_vprintf(s, fmt, args);
5075 va_end(args);
5076
5077 seq_buf_putc(s, '\n');
5078 }
5079 }
5080
5081 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
5082 const unsigned long *bt, unsigned int len)
5083 {
5084 unsigned int i;
5085
5086 for (i = 0; i < len; i++)
5087 dump_line(s, "%s%pS", prefix, (void *)bt[i]);
5088 }
5089
5090 static void ops_dump_init(struct seq_buf *s, const char *prefix)
5091 {
5092 struct scx_dump_data *dd = &scx_dump_data;
5093
5094 lockdep_assert_irqs_disabled();
5095
5096 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
5097 dd->first = true;
5098 dd->cursor = 0;
5099 dd->s = s;
5100 dd->prefix = prefix;
5101 }
5102
5103 static void ops_dump_flush(void)
5104 {
5105 struct scx_dump_data *dd = &scx_dump_data;
5106 char *line = dd->buf.line;
5107
5108 if (!dd->cursor)
5109 return;
5110
5111 /*
5112 * There's something to flush and this is the first line. Insert a blank
5113 * line to distinguish ops dump.
5114 */
5115 if (dd->first) {
5116 dump_newline(dd->s);
5117 dd->first = false;
5118 }
5119
5120 /*
5121 * There may be multiple lines in $line. Scan and emit each line
5122 * separately.
5123 */
5124 while (true) {
5125 char *end = line;
5126 char c;
5127
5128 while (*end != '\n' && *end != '\0')
5129 end++;
5130
5131 /*
5132 * If $line overflowed, it may not have newline at the end.
5133 * Always emit with a newline.
5134 */
5135 c = *end;
5136 *end = '\0';
5137 dump_line(dd->s, "%s%s", dd->prefix, line);
5138 if (c == '\0')
5139 break;
5140
5141 /* move to the next line */
5142 end++;
5143 if (*end == '\0')
5144 break;
5145 line = end;
5146 }
5147
5148 dd->cursor = 0;
5149 }
5150
5151 static void ops_dump_exit(void)
5152 {
5153 ops_dump_flush();
5154 scx_dump_data.cpu = -1;
5155 }
5156
5157 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
5158 struct task_struct *p, char marker)
5159 {
5160 static unsigned long bt[SCX_EXIT_BT_LEN];
5161 struct scx_sched *sch = scx_root;
5162 char dsq_id_buf[19] = "(n/a)";
5163 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
5164 unsigned int bt_len = 0;
5165
5166 if (p->scx.dsq)
5167 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
5168 (unsigned long long)p->scx.dsq->id);
5169
5170 dump_newline(s);
5171 dump_line(s, " %c%c %s[%d] %+ldms",
5172 marker, task_state_to_char(p), p->comm, p->pid,
5173 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
5174 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
5175 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
5176 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
5177 ops_state >> SCX_OPSS_QSEQ_SHIFT);
5178 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s",
5179 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
5180 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u",
5181 p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
5182 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5183
5184 if (SCX_HAS_OP(sch, dump_task)) {
5185 ops_dump_init(s, " ");
5186 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p);
5187 ops_dump_exit();
5188 }
5189
5190 #ifdef CONFIG_STACKTRACE
5191 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5192 #endif
5193 if (bt_len) {
5194 dump_newline(s);
5195 dump_stack_trace(s, " ", bt, bt_len);
5196 }
5197 }
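/*
 * Illustrative sketch (not part of this file): a BPF scheduler can append its
 * own per-task lines to the dump generated above by implementing
 * ops.dump_task(). The snippet assumes the scx BPF headers, which provide the
 * BPF_STRUCT_OPS() declaration macro and a scx_bpf_dump() convenience wrapper
 * around scx_bpf_dump_bstr(); the output is routed through
 * ops_dump_init()/ops_dump_flush() and indented under the task line.
 * "vtime_now" is a hypothetical scheduler-global virtual clock.
 *
 *	void BPF_STRUCT_OPS(example_dump_task, struct scx_dump_ctx *dctx,
 *			    struct task_struct *p)
 *	{
 *		/* extra per-task detail, e.g. how far behind the vtime clock */
 *		scx_bpf_dump("lag=%llu", vtime_now - p->scx.dsq_vtime);
 *	}
 */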
5198
5199 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5200 {
5201 static DEFINE_SPINLOCK(dump_lock);
5202 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5203 struct scx_sched *sch = scx_root;
5204 struct scx_dump_ctx dctx = {
5205 .kind = ei->kind,
5206 .exit_code = ei->exit_code,
5207 .reason = ei->reason,
5208 .at_ns = ktime_get_ns(),
5209 .at_jiffies = jiffies,
5210 };
5211 struct seq_buf s;
5212 struct scx_event_stats events;
5213 unsigned long flags;
5214 char *buf;
5215 int cpu;
5216
5217 spin_lock_irqsave(&dump_lock, flags);
5218
5219 seq_buf_init(&s, ei->dump, dump_len);
5220
5221 if (ei->kind == SCX_EXIT_NONE) {
5222 dump_line(&s, "Debug dump triggered by %s", ei->reason);
5223 } else {
5224 dump_line(&s, "%s[%d] triggered exit kind %d:",
5225 current->comm, current->pid, ei->kind);
5226 dump_line(&s, " %s (%s)", ei->reason, ei->msg);
5227 dump_newline(&s);
5228 dump_line(&s, "Backtrace:");
5229 dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
5230 }
5231
5232 if (SCX_HAS_OP(sch, dump)) {
5233 ops_dump_init(&s, "");
5234 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx);
5235 ops_dump_exit();
5236 }
5237
5238 dump_newline(&s);
5239 dump_line(&s, "CPU states");
5240 dump_line(&s, "----------");
5241
5242 for_each_possible_cpu(cpu) {
5243 struct rq *rq = cpu_rq(cpu);
5244 struct rq_flags rf;
5245 struct task_struct *p;
5246 struct seq_buf ns;
5247 size_t avail, used;
5248 bool idle;
5249
5250 rq_lock(rq, &rf);
5251
5252 idle = list_empty(&rq->scx.runnable_list) &&
5253 rq->curr->sched_class == &idle_sched_class;
5254
5255 if (idle && !SCX_HAS_OP(sch, dump_cpu))
5256 goto next;
5257
5258 /*
5259 * We don't yet know whether ops.dump_cpu() will produce output
5260 * and we may want to skip the default CPU dump if it doesn't.
5261 * Use a nested seq_buf to generate the standard dump so that we
5262 * can decide whether to commit later.
5263 */
5264 avail = seq_buf_get_buf(&s, &buf);
5265 seq_buf_init(&ns, buf, avail);
5266
5267 dump_newline(&ns);
5268 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5269 cpu, rq->scx.nr_running, rq->scx.flags,
5270 rq->scx.cpu_released, rq->scx.ops_qseq,
5271 rq->scx.pnt_seq);
5272 dump_line(&ns, " curr=%s[%d] class=%ps",
5273 rq->curr->comm, rq->curr->pid,
5274 rq->curr->sched_class);
5275 if (!cpumask_empty(rq->scx.cpus_to_kick))
5276 dump_line(&ns, " cpus_to_kick : %*pb",
5277 cpumask_pr_args(rq->scx.cpus_to_kick));
5278 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5279 dump_line(&ns, " idle_to_kick : %*pb",
5280 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5281 if (!cpumask_empty(rq->scx.cpus_to_preempt))
5282 dump_line(&ns, " cpus_to_preempt: %*pb",
5283 cpumask_pr_args(rq->scx.cpus_to_preempt));
5284 if (!cpumask_empty(rq->scx.cpus_to_wait))
5285 dump_line(&ns, " cpus_to_wait : %*pb",
5286 cpumask_pr_args(rq->scx.cpus_to_wait));
5287
5288 used = seq_buf_used(&ns);
5289 if (SCX_HAS_OP(sch, dump_cpu)) {
5290 ops_dump_init(&ns, " ");
5291 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
5292 &dctx, cpu, idle);
5293 ops_dump_exit();
5294 }
5295
5296 /*
5297 * If idle && nothing generated by ops.dump_cpu(), there's
5298 * nothing interesting. Skip.
5299 */
5300 if (idle && used == seq_buf_used(&ns))
5301 goto next;
5302
5303 /*
5304 * $s may already have overflowed when $ns was created. If so,
5305 * calling commit on it will trigger BUG.
5306 */
5307 if (avail) {
5308 seq_buf_commit(&s, seq_buf_used(&ns));
5309 if (seq_buf_has_overflowed(&ns))
5310 seq_buf_set_overflow(&s);
5311 }
5312
5313 if (rq->curr->sched_class == &ext_sched_class)
5314 scx_dump_task(&s, &dctx, rq->curr, '*');
5315
5316 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5317 scx_dump_task(&s, &dctx, p, ' ');
5318 next:
5319 rq_unlock(rq, &rf);
5320 }
5321
5322 dump_newline(&s);
5323 dump_line(&s, "Event counters");
5324 dump_line(&s, "--------------");
5325
5326 scx_read_events(sch, &events);
5327 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
5328 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
5329 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
5330 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
5331 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
5332 scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL);
5333 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
5334 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
5335 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
5336
5337 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5338 memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5339 trunc_marker, sizeof(trunc_marker));
5340
5341 spin_unlock_irqrestore(&dump_lock, flags);
5342 }
5343
5344 static void scx_error_irq_workfn(struct irq_work *irq_work)
5345 {
5346 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work);
5347 struct scx_exit_info *ei = sch->exit_info;
5348
5349 if (ei->kind >= SCX_EXIT_ERROR)
5350 scx_dump_state(ei, sch->ops.exit_dump_len);
5351
5352 kthread_queue_work(sch->helper, &sch->disable_work);
5353 }
5354
5355 static void scx_vexit(struct scx_sched *sch,
5356 enum scx_exit_kind kind, s64 exit_code,
5357 const char *fmt, va_list args)
5358 {
5359 struct scx_exit_info *ei = sch->exit_info;
5360 int none = SCX_EXIT_NONE;
5361
5362 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
5363 return;
5364
5365 ei->exit_code = exit_code;
5366 #ifdef CONFIG_STACKTRACE
5367 if (kind >= SCX_EXIT_ERROR)
5368 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5369 #endif
5370 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5371
5372 /*
5373 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5374 * in scx_disable_workfn().
5375 */
5376 ei->kind = kind;
5377 ei->reason = scx_exit_reason(ei->kind);
5378
5379 irq_work_queue(&sch->error_irq_work);
5380 }
5381
5382 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
5383 {
5384 struct scx_sched *sch;
5385 int node, ret;
5386
5387 sch = kzalloc(sizeof(*sch), GFP_KERNEL);
5388 if (!sch)
5389 return ERR_PTR(-ENOMEM);
5390
5391 sch->exit_info = alloc_exit_info(ops->exit_dump_len);
5392 if (!sch->exit_info) {
5393 ret = -ENOMEM;
5394 goto err_free_sch;
5395 }
5396
5397 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
5398 if (ret < 0)
5399 goto err_free_ei;
5400
5401 sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]),
5402 GFP_KERNEL);
5403 if (!sch->global_dsqs) {
5404 ret = -ENOMEM;
5405 goto err_free_hash;
5406 }
5407
5408 for_each_node_state(node, N_POSSIBLE) {
5409 struct scx_dispatch_q *dsq;
5410
5411 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5412 if (!dsq) {
5413 ret = -ENOMEM;
5414 goto err_free_gdsqs;
5415 }
5416
5417 init_dsq(dsq, SCX_DSQ_GLOBAL);
5418 sch->global_dsqs[node] = dsq;
5419 }
5420
5421 sch->event_stats_cpu = alloc_percpu(struct scx_event_stats);
5422 if (!sch->event_stats_cpu)
5423 goto err_free_gdsqs;
5424
5425 sch->helper = kthread_run_worker(0, "sched_ext_helper");
5426 if (!sch->helper)
5427 goto err_free_event_stats;
5428 sched_set_fifo(sch->helper->task);
5429
5430 atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
5431 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
5432 kthread_init_work(&sch->disable_work, scx_disable_workfn);
5433 sch->ops = *ops;
5434 ops->priv = sch;
5435
5436 sch->kobj.kset = scx_kset;
5437 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
5438 if (ret < 0)
5439 goto err_stop_helper;
5440
5441 return sch;
5442
5443 err_stop_helper:
5444 kthread_stop(sch->helper->task);
5445 err_free_event_stats:
5446 free_percpu(sch->event_stats_cpu);
5447 err_free_gdsqs:
5448 for_each_node_state(node, N_POSSIBLE)
5449 kfree(sch->global_dsqs[node]);
5450 kfree(sch->global_dsqs);
5451 err_free_hash:
5452 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
5453 err_free_ei:
5454 free_exit_info(sch->exit_info);
5455 err_free_sch:
5456 kfree(sch);
5457 return ERR_PTR(ret);
5458 }
5459
5460 static void check_hotplug_seq(struct scx_sched *sch,
5461 const struct sched_ext_ops *ops)
5462 {
5463 unsigned long long global_hotplug_seq;
5464
5465 /*
5466 * If a hotplug event has occurred between when a scheduler was
5467 * initialized and when we were able to attach, exit and notify user
5468 * space about it.
5469 */
5470 if (ops->hotplug_seq) {
5471 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5472 if (ops->hotplug_seq != global_hotplug_seq) {
5473 scx_exit(sch, SCX_EXIT_UNREG_KERN,
5474 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5475 "expected hotplug seq %llu did not match actual %llu",
5476 ops->hotplug_seq, global_hotplug_seq);
5477 }
5478 }
5479 }
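/*
 * Illustrative sketch (not part of this file): a typical loader samples the
 * hotplug sequence number before load (e.g. from the sched_ext sysfs
 * hierarchy, an assumption here), stores it in ops->hotplug_seq, and restarts
 * when the exit triggered above asks for it. Hypothetical loader-side check
 * on the exit code reported through ops.exit():
 *
 *	if (ecode & SCX_ECODE_ACT_RESTART) {
 *		/* e.g. SCX_ECODE_RSN_HOTPLUG: a CPU came or went during load */
 *		reload_scheduler();	/* hypothetical helper */
 *	}
 */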
5480
5481 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
5482 {
5483 /*
5484 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5485 * ops.enqueue() callback isn't implemented.
5486 */
5487 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5488 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5489 return -EINVAL;
5490 }
5491
5492 /*
5493 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
5494 * selection policy to be enabled.
5495 */
5496 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
5497 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
5498 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
5499 return -EINVAL;
5500 }
5501
5502 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
5503 pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");
5504
5505 return 0;
5506 }
5507
5508 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5509 {
5510 struct scx_sched *sch;
5511 struct scx_task_iter sti;
5512 struct task_struct *p;
5513 unsigned long timeout;
5514 int i, cpu, ret;
5515
5516 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5517 cpu_possible_mask)) {
5518 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5519 return -EINVAL;
5520 }
5521
5522 mutex_lock(&scx_enable_mutex);
5523
5524 if (scx_enable_state() != SCX_DISABLED) {
5525 ret = -EBUSY;
5526 goto err_unlock;
5527 }
5528
5529 sch = scx_alloc_and_add_sched(ops);
5530 if (IS_ERR(sch)) {
5531 ret = PTR_ERR(sch);
5532 goto err_unlock;
5533 }
5534
5535 /*
5536 * Transition to ENABLING and clear exit info to arm the disable path.
5537 * Failure triggers full disabling from here on.
5538 */
5539 WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
5540 WARN_ON_ONCE(scx_root);
5541
5542 atomic_long_set(&scx_nr_rejected, 0);
5543
5544 for_each_possible_cpu(cpu)
5545 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5546
5547 /*
5548 * Keep CPUs stable during enable so that the BPF scheduler can track
5549 * online CPUs by watching ->on/offline_cpu() after ->init().
5550 */
5551 cpus_read_lock();
5552
5553 /*
5554 * Make the scheduler instance visible. Must be inside cpus_read_lock().
5555 * See handle_hotplug().
5556 */
5557 rcu_assign_pointer(scx_root, sch);
5558
5559 scx_idle_enable(ops);
5560
5561 if (sch->ops.init) {
5562 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL);
5563 if (ret) {
5564 ret = ops_sanitize_err(sch, "init", ret);
5565 cpus_read_unlock();
5566 scx_error(sch, "ops.init() failed (%d)", ret);
5567 goto err_disable;
5568 }
5569 }
5570
5571 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5572 if (((void (**)(void))ops)[i])
5573 set_bit(i, sch->has_op);
5574
5575 check_hotplug_seq(sch, ops);
5576 scx_idle_update_selcpu_topology(ops);
5577
5578 cpus_read_unlock();
5579
5580 ret = validate_ops(sch, ops);
5581 if (ret)
5582 goto err_disable;
5583
5584 WARN_ON_ONCE(scx_dsp_ctx);
5585 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5586 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5587 scx_dsp_max_batch),
5588 __alignof__(struct scx_dsp_ctx));
5589 if (!scx_dsp_ctx) {
5590 ret = -ENOMEM;
5591 goto err_disable;
5592 }
5593
5594 if (ops->timeout_ms)
5595 timeout = msecs_to_jiffies(ops->timeout_ms);
5596 else
5597 timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5598
5599 WRITE_ONCE(scx_watchdog_timeout, timeout);
5600 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5601 queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5602 scx_watchdog_timeout / 2);
5603
5604 /*
5605 * Once __scx_enabled is set, %current can be switched to SCX anytime.
5606 * This can lead to stalls as some BPF schedulers (e.g. userspace
5607 * scheduling) may not function correctly before all tasks are switched.
5608 * Init in bypass mode to guarantee forward progress.
5609 */
5610 scx_bypass(true);
5611
5612 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5613 if (((void (**)(void))ops)[i])
5614 set_bit(i, sch->has_op);
5615
5616 if (sch->ops.cpu_acquire || sch->ops.cpu_release)
5617 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
5618
5619 /*
5620 * Lock out forks, cgroup on/offlining and moves before opening the
5621 * floodgate so that they don't wander into the operations prematurely.
5622 */
5623 percpu_down_write(&scx_fork_rwsem);
5624
5625 WARN_ON_ONCE(scx_init_task_enabled);
5626 scx_init_task_enabled = true;
5627
5628 /*
5629 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5630 * preventing new tasks from being added. No need to exclude tasks
5631 * leaving as sched_ext_free() can handle both prepped and enabled
5632 * tasks. Prep all tasks first and then enable them with preemption
5633 * disabled.
5634 *
5635 * All cgroups should be initialized before scx_init_task() so that the
5636 * BPF scheduler can reliably track each task's cgroup membership from
5637 * scx_init_task(). Lock out cgroup on/offlining and task migrations
5638 * while tasks are being initialized so that scx_cgroup_can_attach()
5639 * never sees uninitialized tasks.
5640 */
5641 scx_cgroup_lock();
5642 ret = scx_cgroup_init(sch);
5643 if (ret)
5644 goto err_disable_unlock_all;
5645
5646 scx_task_iter_start(&sti);
5647 while ((p = scx_task_iter_next_locked(&sti))) {
5648 /*
5649 * @p may already be dead, have lost all its usage counts and
5650 * be waiting for an RCU grace period before being freed. @p can't
5651 * be initialized for SCX in such cases and should be ignored.
5652 */
5653 if (!tryget_task_struct(p))
5654 continue;
5655
5656 scx_task_iter_unlock(&sti);
5657
5658 ret = scx_init_task(p, task_group(p), false);
5659 if (ret) {
5660 put_task_struct(p);
5661 scx_task_iter_relock(&sti);
5662 scx_task_iter_stop(&sti);
5663 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
5664 ret, p->comm, p->pid);
5665 goto err_disable_unlock_all;
5666 }
5667
5668 scx_set_task_state(p, SCX_TASK_READY);
5669
5670 put_task_struct(p);
5671 scx_task_iter_relock(&sti);
5672 }
5673 scx_task_iter_stop(&sti);
5674 scx_cgroup_unlock();
5675 percpu_up_write(&scx_fork_rwsem);
5676
5677 /*
5678 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5679 * all eligible tasks.
5680 */
5681 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5682 static_branch_enable(&__scx_enabled);
5683
5684 /*
5685 * We're fully committed and can't fail. The task READY -> ENABLED
5686 * transitions here are synchronized against sched_ext_free() through
5687 * scx_tasks_lock.
5688 */
5689 percpu_down_write(&scx_fork_rwsem);
5690 scx_task_iter_start(&sti);
5691 while ((p = scx_task_iter_next_locked(&sti))) {
5692 const struct sched_class *old_class = p->sched_class;
5693 const struct sched_class *new_class =
5694 __setscheduler_class(p->policy, p->prio);
5695 struct sched_enq_and_set_ctx ctx;
5696
5697 if (old_class != new_class && p->se.sched_delayed)
5698 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5699
5700 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5701
5702 p->scx.slice = SCX_SLICE_DFL;
5703 p->sched_class = new_class;
5704 check_class_changing(task_rq(p), p, old_class);
5705
5706 sched_enq_and_set_task(&ctx);
5707
5708 check_class_changed(task_rq(p), p, old_class, p->prio);
5709 }
5710 scx_task_iter_stop(&sti);
5711 percpu_up_write(&scx_fork_rwsem);
5712
5713 scx_bypass(false);
5714
5715 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
5716 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
5717 goto err_disable;
5718 }
5719
5720 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5721 static_branch_enable(&__scx_switched_all);
5722
5723 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5724 sch->ops.name, scx_switched_all() ? "" : " (partial)");
5725 kobject_uevent(&sch->kobj, KOBJ_ADD);
5726 mutex_unlock(&scx_enable_mutex);
5727
5728 atomic_long_inc(&scx_enable_seq);
5729
5730 return 0;
5731
5732 err_unlock:
5733 mutex_unlock(&scx_enable_mutex);
5734 return ret;
5735
5736 err_disable_unlock_all:
5737 scx_cgroup_unlock();
5738 percpu_up_write(&scx_fork_rwsem);
5739 scx_bypass(false);
5740 err_disable:
5741 mutex_unlock(&scx_enable_mutex);
5742 /*
5743 * Returning an error code here would not pass all the error information
5744 * to userspace. Record errno using scx_error() for cases where scx_error()
5745 * wasn't already invoked and exit indicating success so that the error
5746 * is notified through ops.exit() with all the details.
5747 *
5748 * Flush scx_disable_work to ensure that error is reported before init
5749 * completion. sch's base reference will be put by bpf_scx_unreg().
5750 */
5751 scx_error(sch, "scx_enable() failed (%d)", ret);
5752 kthread_flush_work(&sch->disable_work);
5753 return 0;
5754 }
5755
5756
5757 /********************************************************************************
5758 * bpf_struct_ops plumbing.
5759 */
5760 #include <linux/bpf_verifier.h>
5761 #include <linux/bpf.h>
5762 #include <linux/btf.h>
5763
5764 static const struct btf_type *task_struct_type;
5765
5766 static bool bpf_scx_is_valid_access(int off, int size,
5767 enum bpf_access_type type,
5768 const struct bpf_prog *prog,
5769 struct bpf_insn_access_aux *info)
5770 {
5771 if (type != BPF_READ)
5772 return false;
5773 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5774 return false;
5775 if (off % size != 0)
5776 return false;
5777
5778 return btf_ctx_access(off, size, type, prog, info);
5779 }
5780
5781 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5782 const struct bpf_reg_state *reg, int off,
5783 int size)
5784 {
5785 const struct btf_type *t;
5786
5787 t = btf_type_by_id(reg->btf, reg->btf_id);
5788 if (t == task_struct_type) {
5789 if (off >= offsetof(struct task_struct, scx.slice) &&
5790 off + size <= offsetofend(struct task_struct, scx.slice))
5791 return SCALAR_VALUE;
5792 if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5793 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5794 return SCALAR_VALUE;
5795 if (off >= offsetof(struct task_struct, scx.disallow) &&
5796 off + size <= offsetofend(struct task_struct, scx.disallow))
5797 return SCALAR_VALUE;
5798 }
5799
5800 return -EACCES;
5801 }
5802
5803 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5804 .get_func_proto = bpf_base_func_proto,
5805 .is_valid_access = bpf_scx_is_valid_access,
5806 .btf_struct_access = bpf_scx_btf_struct_access,
5807 };
5808
5809 static int bpf_scx_init_member(const struct btf_type *t,
5810 const struct btf_member *member,
5811 void *kdata, const void *udata)
5812 {
5813 const struct sched_ext_ops *uops = udata;
5814 struct sched_ext_ops *ops = kdata;
5815 u32 moff = __btf_member_bit_offset(t, member) / 8;
5816 int ret;
5817
5818 switch (moff) {
5819 case offsetof(struct sched_ext_ops, dispatch_max_batch):
5820 if (*(u32 *)(udata + moff) > INT_MAX)
5821 return -E2BIG;
5822 ops->dispatch_max_batch = *(u32 *)(udata + moff);
5823 return 1;
5824 case offsetof(struct sched_ext_ops, flags):
5825 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5826 return -EINVAL;
5827 ops->flags = *(u64 *)(udata + moff);
5828 return 1;
5829 case offsetof(struct sched_ext_ops, name):
5830 ret = bpf_obj_name_cpy(ops->name, uops->name,
5831 sizeof(ops->name));
5832 if (ret < 0)
5833 return ret;
5834 if (ret == 0)
5835 return -EINVAL;
5836 return 1;
5837 case offsetof(struct sched_ext_ops, timeout_ms):
5838 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5839 SCX_WATCHDOG_MAX_TIMEOUT)
5840 return -E2BIG;
5841 ops->timeout_ms = *(u32 *)(udata + moff);
5842 return 1;
5843 case offsetof(struct sched_ext_ops, exit_dump_len):
5844 ops->exit_dump_len =
5845 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5846 return 1;
5847 case offsetof(struct sched_ext_ops, hotplug_seq):
5848 ops->hotplug_seq = *(u64 *)(udata + moff);
5849 return 1;
5850 }
5851
5852 return 0;
5853 }
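/*
 * Illustrative sketch (not part of this file): the members handled above are
 * plain data fields which a BPF scheduler assigns directly in its struct_ops
 * declaration; they arrive here as @udata. A minimal declaration might look
 * like the following (the ops callbacks and the "example" name are
 * hypothetical):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.enqueue	= (void *)example_enqueue,
 *		.dispatch	= (void *)example_dispatch,
 *		.timeout_ms	= 5000,
 *		.exit_dump_len	= 65536,
 *		.name		= "example",
 *	};
 */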
5854
5855 static int bpf_scx_check_member(const struct btf_type *t,
5856 const struct btf_member *member,
5857 const struct bpf_prog *prog)
5858 {
5859 u32 moff = __btf_member_bit_offset(t, member) / 8;
5860
5861 switch (moff) {
5862 case offsetof(struct sched_ext_ops, init_task):
5863 #ifdef CONFIG_EXT_GROUP_SCHED
5864 case offsetof(struct sched_ext_ops, cgroup_init):
5865 case offsetof(struct sched_ext_ops, cgroup_exit):
5866 case offsetof(struct sched_ext_ops, cgroup_prep_move):
5867 #endif
5868 case offsetof(struct sched_ext_ops, cpu_online):
5869 case offsetof(struct sched_ext_ops, cpu_offline):
5870 case offsetof(struct sched_ext_ops, init):
5871 case offsetof(struct sched_ext_ops, exit):
5872 break;
5873 default:
5874 if (prog->sleepable)
5875 return -EINVAL;
5876 }
5877
5878 return 0;
5879 }
5880
5881 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5882 {
5883 return scx_enable(kdata, link);
5884 }
5885
5886 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5887 {
5888 struct sched_ext_ops *ops = kdata;
5889 struct scx_sched *sch = ops->priv;
5890
5891 scx_disable(SCX_EXIT_UNREG);
5892 kthread_flush_work(&sch->disable_work);
5893 kobject_put(&sch->kobj);
5894 }
5895
5896 static int bpf_scx_init(struct btf *btf)
5897 {
5898 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5899
5900 return 0;
5901 }
5902
5903 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5904 {
5905 /*
5906 * sched_ext does not support updating the actively-loaded BPF
5907 * scheduler, as registering a BPF scheduler can always fail if the
5908 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5909 * etc. Similarly, we can always race with unregistration happening
5910 * elsewhere, such as with sysrq.
5911 */
5912 return -EOPNOTSUPP;
5913 }
5914
5915 static int bpf_scx_validate(void *kdata)
5916 {
5917 return 0;
5918 }
5919
5920 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5921 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
5922 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
5923 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
5924 static void sched_ext_ops__tick(struct task_struct *p) {}
5925 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
5926 static void sched_ext_ops__running(struct task_struct *p) {}
5927 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
5928 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
5929 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
5930 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
5931 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
5932 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
5933 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
5934 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
5935 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
5936 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5937 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
5938 static void sched_ext_ops__enable(struct task_struct *p) {}
5939 static void sched_ext_ops__disable(struct task_struct *p) {}
5940 #ifdef CONFIG_EXT_GROUP_SCHED
5941 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5942 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
5943 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5944 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5945 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5946 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
5947 #endif
5948 static void sched_ext_ops__cpu_online(s32 cpu) {}
5949 static void sched_ext_ops__cpu_offline(s32 cpu) {}
5950 static s32 sched_ext_ops__init(void) { return -EINVAL; }
5951 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
5952 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
5953 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5954 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5955
5956 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5957 .select_cpu = sched_ext_ops__select_cpu,
5958 .enqueue = sched_ext_ops__enqueue,
5959 .dequeue = sched_ext_ops__dequeue,
5960 .dispatch = sched_ext_ops__dispatch,
5961 .tick = sched_ext_ops__tick,
5962 .runnable = sched_ext_ops__runnable,
5963 .running = sched_ext_ops__running,
5964 .stopping = sched_ext_ops__stopping,
5965 .quiescent = sched_ext_ops__quiescent,
5966 .yield = sched_ext_ops__yield,
5967 .core_sched_before = sched_ext_ops__core_sched_before,
5968 .set_weight = sched_ext_ops__set_weight,
5969 .set_cpumask = sched_ext_ops__set_cpumask,
5970 .update_idle = sched_ext_ops__update_idle,
5971 .cpu_acquire = sched_ext_ops__cpu_acquire,
5972 .cpu_release = sched_ext_ops__cpu_release,
5973 .init_task = sched_ext_ops__init_task,
5974 .exit_task = sched_ext_ops__exit_task,
5975 .enable = sched_ext_ops__enable,
5976 .disable = sched_ext_ops__disable,
5977 #ifdef CONFIG_EXT_GROUP_SCHED
5978 .cgroup_init = sched_ext_ops__cgroup_init,
5979 .cgroup_exit = sched_ext_ops__cgroup_exit,
5980 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move,
5981 .cgroup_move = sched_ext_ops__cgroup_move,
5982 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move,
5983 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight,
5984 #endif
5985 .cpu_online = sched_ext_ops__cpu_online,
5986 .cpu_offline = sched_ext_ops__cpu_offline,
5987 .init = sched_ext_ops__init,
5988 .exit = sched_ext_ops__exit,
5989 .dump = sched_ext_ops__dump,
5990 .dump_cpu = sched_ext_ops__dump_cpu,
5991 .dump_task = sched_ext_ops__dump_task,
5992 };
5993
5994 static struct bpf_struct_ops bpf_sched_ext_ops = {
5995 .verifier_ops = &bpf_scx_verifier_ops,
5996 .reg = bpf_scx_reg,
5997 .unreg = bpf_scx_unreg,
5998 .check_member = bpf_scx_check_member,
5999 .init_member = bpf_scx_init_member,
6000 .init = bpf_scx_init,
6001 .update = bpf_scx_update,
6002 .validate = bpf_scx_validate,
6003 .name = "sched_ext_ops",
6004 .owner = THIS_MODULE,
6005 .cfi_stubs = &__bpf_ops_sched_ext_ops
6006 };
6007
6008
6009 /********************************************************************************
6010 * System integration and init.
6011 */
6012
6013 static void sysrq_handle_sched_ext_reset(u8 key)
6014 {
6015 scx_disable(SCX_EXIT_SYSRQ);
6016 }
6017
6018 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
6019 .handler = sysrq_handle_sched_ext_reset,
6020 .help_msg = "reset-sched-ext(S)",
6021 .action_msg = "Disable sched_ext and revert all tasks to CFS",
6022 .enable_mask = SYSRQ_ENABLE_RTNICE,
6023 };
6024
6025 static void sysrq_handle_sched_ext_dump(u8 key)
6026 {
6027 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
6028
6029 if (scx_enabled())
6030 scx_dump_state(&ei, 0);
6031 }
6032
6033 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
6034 .handler = sysrq_handle_sched_ext_dump,
6035 .help_msg = "dump-sched-ext(D)",
6036 .action_msg = "Trigger sched_ext debug dump",
6037 .enable_mask = SYSRQ_ENABLE_RTNICE,
6038 };
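/*
 * Example: assuming sysrq is enabled, both handlers above can be exercised
 * from the command line while a scheduler is loaded:
 *
 *	# echo D > /proc/sysrq-trigger		(debug dump, scheduler keeps running)
 *	# echo S > /proc/sysrq-trigger		(disable sched_ext, revert to CFS)
 */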
6039
6040 static bool can_skip_idle_kick(struct rq *rq)
6041 {
6042 lockdep_assert_rq_held(rq);
6043
6044 /*
6045 * We can skip idle kicking if @rq is going to go through at least one
6046 * full SCX scheduling cycle before going idle. Just checking whether
6047 * curr is not idle is insufficient because we could be racing
6048 * balance_one() trying to pull the next task from a remote rq, which
6049 * may fail, and @rq may become idle afterwards.
6050 *
6051 * The race window is small and we don't and can't guarantee that @rq is
6052 * only kicked while idle anyway. Skip only when sure.
6053 */
6054 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
6055 }
6056
6057 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
6058 {
6059 struct rq *rq = cpu_rq(cpu);
6060 struct scx_rq *this_scx = &this_rq->scx;
6061 bool should_wait = false;
6062 unsigned long flags;
6063
6064 raw_spin_rq_lock_irqsave(rq, flags);
6065
6066 /*
6067 * During CPU hotplug, a CPU may depend on kicking itself to make
6068 * forward progress. Allow kicking self regardless of online state.
6069 */
6070 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
6071 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
6072 if (rq->curr->sched_class == &ext_sched_class)
6073 rq->curr->scx.slice = 0;
6074 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6075 }
6076
6077 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
6078 pseqs[cpu] = rq->scx.pnt_seq;
6079 should_wait = true;
6080 }
6081
6082 resched_curr(rq);
6083 } else {
6084 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6085 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6086 }
6087
6088 raw_spin_rq_unlock_irqrestore(rq, flags);
6089
6090 return should_wait;
6091 }
6092
6093 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
6094 {
6095 struct rq *rq = cpu_rq(cpu);
6096 unsigned long flags;
6097
6098 raw_spin_rq_lock_irqsave(rq, flags);
6099
6100 if (!can_skip_idle_kick(rq) &&
6101 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
6102 resched_curr(rq);
6103
6104 raw_spin_rq_unlock_irqrestore(rq, flags);
6105 }
6106
6107 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
6108 {
6109 struct rq *this_rq = this_rq();
6110 struct scx_rq *this_scx = &this_rq->scx;
6111 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
6112 bool should_wait = false;
6113 s32 cpu;
6114
6115 for_each_cpu(cpu, this_scx->cpus_to_kick) {
6116 should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
6117 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
6118 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6119 }
6120
6121 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
6122 kick_one_cpu_if_idle(cpu, this_rq);
6123 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6124 }
6125
6126 if (!should_wait)
6127 return;
6128
6129 for_each_cpu(cpu, this_scx->cpus_to_wait) {
6130 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
6131
6132 if (cpu != cpu_of(this_rq)) {
6133 /*
6134 * Pairs with smp_store_release() issued by this CPU in
6135 * switch_class() on the resched path.
6136 *
6137 * We busy-wait here to guarantee that no other task can
6138 * be scheduled on our core before the target CPU has
6139 * entered the resched path.
6140 */
6141 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
6142 cpu_relax();
6143 }
6144
6145 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6146 }
6147 }
6148
6149 /**
6150 * print_scx_info - print out sched_ext scheduler state
6151 * @log_lvl: the log level to use when printing
6152 * @p: target task
6153 *
6154 * If a sched_ext scheduler is enabled, print the name and state of the
6155 * scheduler. If @p is on sched_ext, print further information about the task.
6156 *
6157 * This function can be safely called on any task as long as the task_struct
6158 * itself is accessible. While safe, this function isn't synchronized and may
6159 * print out mixed-up or garbage values of limited length.
6160 */
6161 void print_scx_info(const char *log_lvl, struct task_struct *p)
6162 {
6163 struct scx_sched *sch = scx_root;
6164 enum scx_enable_state state = scx_enable_state();
6165 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
6166 char runnable_at_buf[22] = "?";
6167 struct sched_class *class;
6168 unsigned long runnable_at;
6169
6170 if (state == SCX_DISABLED)
6171 return;
6172
6173 /*
6174 * Carefully check if the task was running on sched_ext, and then
6175 * carefully copy the time it's been runnable, and its state.
6176 */
6177 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6178 class != &ext_sched_class) {
6179 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
6180 scx_enable_state_str[state], all);
6181 return;
6182 }
6183
6184 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6185 sizeof(runnable_at)))
6186 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6187 jiffies_delta_msecs(runnable_at, jiffies));
6188
6189 /* print everything onto one line to conserve console space */
6190 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6191 log_lvl, sch->ops.name, scx_enable_state_str[state], all,
6192 runnable_at_buf);
6193 }
6194
6195 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6196 {
6197 /*
6198 * SCX schedulers often have userspace components which are sometimes
6199 * involved in critical scheduling paths. PM operations involve freezing
6200 * userspace which can lead to scheduling misbehaviors including stalls.
6201 * Let's bypass while PM operations are in progress.
6202 */
6203 switch (event) {
6204 case PM_HIBERNATION_PREPARE:
6205 case PM_SUSPEND_PREPARE:
6206 case PM_RESTORE_PREPARE:
6207 scx_bypass(true);
6208 break;
6209 case PM_POST_HIBERNATION:
6210 case PM_POST_SUSPEND:
6211 case PM_POST_RESTORE:
6212 scx_bypass(false);
6213 break;
6214 }
6215
6216 return NOTIFY_OK;
6217 }
6218
6219 static struct notifier_block scx_pm_notifier = {
6220 .notifier_call = scx_pm_handler,
6221 };
6222
6223 void __init init_sched_ext_class(void)
6224 {
6225 s32 cpu, v;
6226
6227 /*
6228 * The following is to prevent the compiler from optimizing out the enum
6229 * definitions so that BPF scheduler implementations can use them
6230 * through the generated vmlinux.h.
6231 */
6232 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6233 SCX_TG_ONLINE);
6234
6235 scx_idle_init_masks();
6236
6237 scx_kick_cpus_pnt_seqs =
6238 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6239 __alignof__(scx_kick_cpus_pnt_seqs[0]));
6240 BUG_ON(!scx_kick_cpus_pnt_seqs);
6241
6242 for_each_possible_cpu(cpu) {
6243 struct rq *rq = cpu_rq(cpu);
6244 int n = cpu_to_node(cpu);
6245
6246 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6247 INIT_LIST_HEAD(&rq->scx.runnable_list);
6248 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6249
6250 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
6251 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
6252 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
6253 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
6254 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6255 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6256
6257 if (cpu_online(cpu))
6258 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6259 }
6260
6261 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6262 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6263 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6264 }
6265
6266
6267 /********************************************************************************
6268 * Helpers that can be called from the BPF scheduler.
6269 */
6270 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6271 {
6272 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6273 return false;
6274
6275 lockdep_assert_irqs_disabled();
6276
6277 if (unlikely(!p)) {
6278 scx_kf_error("called with NULL task");
6279 return false;
6280 }
6281
6282 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6283 scx_kf_error("invalid enq_flags 0x%llx", enq_flags);
6284 return false;
6285 }
6286
6287 return true;
6288 }
6289
6290 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6291 u64 enq_flags)
6292 {
6293 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6294 struct task_struct *ddsp_task;
6295
6296 ddsp_task = __this_cpu_read(direct_dispatch_task);
6297 if (ddsp_task) {
6298 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6299 return;
6300 }
6301
6302 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6303 scx_kf_error("dispatch buffer overflow");
6304 return;
6305 }
6306
6307 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6308 .task = p,
6309 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6310 .dsq_id = dsq_id,
6311 .enq_flags = enq_flags,
6312 };
6313 }
6314
6315 __bpf_kfunc_start_defs();
6316
6317 /**
6318 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6319 * @p: task_struct to insert
6320 * @dsq_id: DSQ to insert into
6321 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6322 * @enq_flags: SCX_ENQ_*
6323 *
6324 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6325 * call this function spuriously. Can be called from ops.enqueue(),
6326 * ops.select_cpu(), and ops.dispatch().
6327 *
6328 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6329 * and @p must match the task being enqueued.
6330 *
6331 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6332 * will be directly inserted into the corresponding dispatch queue after
6333 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6334 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6335 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6336 * task is inserted.
6337 *
6338 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6339 * and this function can be called up to ops.dispatch_max_batch times to insert
6340 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of
6341 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
6342 *
6343 * This function doesn't have any locking restrictions and may be called under
6344 * BPF locks (in the future when BPF introduces more flexible locking).
6345 *
6346 * @p is allowed to run for @slice. The scheduling path is triggered on slice
6347 * exhaustion. If zero, the current residual slice is maintained. If
6348 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6349 * scx_bpf_kick_cpu() to trigger scheduling.
6350 */
6351 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6352 u64 enq_flags)
6353 {
6354 if (!scx_dsq_insert_preamble(p, enq_flags))
6355 return;
6356
6357 if (slice)
6358 p->scx.slice = slice;
6359 else
6360 p->scx.slice = p->scx.slice ?: 1;
6361
6362 scx_dsq_insert_commit(p, dsq_id, enq_flags);
6363 }
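/*
 * Illustrative sketch (not part of this file): a minimal BPF-side use of this
 * kfunc from ops.enqueue(). SHARED_DSQ is a hypothetical DSQ ID the scheduler
 * created earlier with scx_bpf_create_dsq(), and BPF_STRUCT_OPS() is the
 * declaration macro from the scx BPF headers.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 */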
6364
6365 /* for backward compatibility, will be removed in v6.15 */
6366 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6367 u64 enq_flags)
6368 {
6369 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6370 scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6371 }
6372
6373 /**
6374 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6375 * @p: task_struct to insert
6376 * @dsq_id: DSQ to insert into
6377 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6378 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6379 * @enq_flags: SCX_ENQ_*
6380 *
6381 * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6382 * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6383 * are identical to scx_bpf_dsq_insert().
6384 *
6385 * @vtime ordering is according to time_before64() which considers wrapping. A
6386 * numerically larger vtime may indicate an earlier position in the ordering and
6387 * vice-versa.
6388 *
6389 * A DSQ can only be used as a FIFO or priority queue at any given time and this
6390 * function must not be called on a DSQ which already has one or more FIFO tasks
6391 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6392 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6393 */
6394 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6395 u64 slice, u64 vtime, u64 enq_flags)
6396 {
6397 if (!scx_dsq_insert_preamble(p, enq_flags))
6398 return;
6399
6400 if (slice)
6401 p->scx.slice = slice;
6402 else
6403 p->scx.slice = p->scx.slice ?: 1;
6404
6405 p->scx.dsq_vtime = vtime;
6406
6407 scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6408 }
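/*
 * Illustrative sketch (not part of this file): vtime-ordered enqueueing as a
 * BPF scheduler might do it. "vtime_now" is a hypothetical global tracking
 * the scheduler's notion of the current virtual time and SHARED_DSQ is a
 * hypothetical user DSQ; wrap-around handling is omitted for brevity.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		u64 vtime = p->scx.dsq_vtime;
 *
 *		/* don't let a long-idle task monopolize the head of the queue */
 *		if (vtime < vtime_now - SCX_SLICE_DFL)
 *			vtime = vtime_now - SCX_SLICE_DFL;
 *
 *		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
 *					 enq_flags);
 *	}
 */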
6409
6410 /* for backward compatibility, will be removed in v6.15 */
6411 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6412 u64 slice, u64 vtime, u64 enq_flags)
6413 {
6414 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6415 scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6416 }
6417
6418 __bpf_kfunc_end_defs();
6419
6420 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6421 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6422 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6423 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6424 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6425 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6426
6427 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6428 .owner = THIS_MODULE,
6429 .set = &scx_kfunc_ids_enqueue_dispatch,
6430 };
6431
6432 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6433 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6434 {
6435 struct scx_sched *sch = scx_root;
6436 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6437 struct rq *this_rq, *src_rq, *locked_rq;
6438 bool dispatched = false;
6439 bool in_balance;
6440 unsigned long flags;
6441
6442 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6443 return false;
6444
6445 /*
6446 * Can be called from either ops.dispatch() locking this_rq() or any
6447 * context where no rq lock is held. If the latter, lock @p's task_rq which
6448 * we'll likely need anyway.
6449 */
6450 src_rq = task_rq(p);
6451
6452 local_irq_save(flags);
6453 this_rq = this_rq();
6454 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6455
6456 if (in_balance) {
6457 if (this_rq != src_rq) {
6458 raw_spin_rq_unlock(this_rq);
6459 raw_spin_rq_lock(src_rq);
6460 }
6461 } else {
6462 raw_spin_rq_lock(src_rq);
6463 }
6464
6465 /*
6466 * If the BPF scheduler keeps calling this function repeatedly, it can
6467 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
6468 * breather if necessary.
6469 */
6470 scx_breather(src_rq);
6471
6472 locked_rq = src_rq;
6473 raw_spin_lock(&src_dsq->lock);
6474
6475 /*
6476 * Did someone else get to it? @p could have already left $src_dsq, got
6477 * re-enqueued, or be in the process of being consumed by someone else.
6478 */
6479 if (unlikely(p->scx.dsq != src_dsq ||
6480 u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6481 p->scx.holding_cpu >= 0) ||
6482 WARN_ON_ONCE(src_rq != task_rq(p))) {
6483 raw_spin_unlock(&src_dsq->lock);
6484 goto out;
6485 }
6486
6487 /* @p is still on $src_dsq and stable, determine the destination */
6488 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p);
6489
6490 /*
6491 * Apply vtime and slice updates before moving so that the new time is
6492 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6493 * this is safe as we're locking it.
6494 */
6495 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6496 p->scx.dsq_vtime = kit->vtime;
6497 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6498 p->scx.slice = kit->slice;
6499
6500 /* execute move */
6501 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
6502 dispatched = true;
6503 out:
6504 if (in_balance) {
6505 if (this_rq != locked_rq) {
6506 raw_spin_rq_unlock(locked_rq);
6507 raw_spin_rq_lock(this_rq);
6508 }
6509 } else {
6510 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6511 }
6512
6513 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6514 __SCX_DSQ_ITER_HAS_VTIME);
6515 return dispatched;
6516 }
6517
6518 __bpf_kfunc_start_defs();
6519
6520 /**
6521 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6522 *
6523 * Can only be called from ops.dispatch().
6524 */
6525 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6526 {
6527 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6528 return 0;
6529
6530 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6531 }
6532
6533 /**
6534 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6535 *
6536 * Cancel the latest dispatch. Can be called multiple times to cancel further
6537 * dispatches. Can only be called from ops.dispatch().
6538 */
6539 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6540 {
6541 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6542
6543 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6544 return;
6545
6546 if (dspc->cursor > 0)
6547 dspc->cursor--;
6548 else
6549 scx_kf_error("dispatch buffer underflow");
6550 }
6551
6552 /**
6553 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6554 * @dsq_id: DSQ to move task from
6555 *
6556 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6557 * local DSQ for execution. Can only be called from ops.dispatch().
6558 *
6559 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6560 * before trying to move from the specified DSQ. It may also grab rq locks and
6561 * thus can't be called under any BPF locks.
6562 *
6563 * Returns %true if a task has been moved, %false if there isn't any task to
6564 * move.
6565 */
6566 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6567 {
6568 struct scx_sched *sch = scx_root;
6569 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6570 struct scx_dispatch_q *dsq;
6571
6572 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6573 return false;
6574
6575 flush_dispatch_buf(sch, dspc->rq);
6576
6577 dsq = find_user_dsq(sch, dsq_id);
6578 if (unlikely(!dsq)) {
6579 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
6580 return false;
6581 }
6582
6583 if (consume_dispatch_q(sch, dspc->rq, dsq)) {
6584 /*
6585 * A successfully consumed task can be dequeued before it starts
6586 * running while the CPU is trying to migrate other dispatched
6587 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6588 * local DSQ.
6589 */
6590 dspc->nr_tasks++;
6591 return true;
6592 } else {
6593 return false;
6594 }
6595 }
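/*
 * Illustrative sketch (not part of this file): the usual ops.dispatch()
 * counterpart of the enqueue examples above, refilling this CPU's local DSQ
 * from a hypothetical shared user DSQ.
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */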
6596
6597 /* for backward compatibility, will be removed in v6.15 */
6598 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6599 {
6600 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6601 return scx_bpf_dsq_move_to_local(dsq_id);
6602 }
6603
6604 /**
6605 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6606 * @it__iter: DSQ iterator in progress
6607 * @slice: duration the moved task can run for in nsecs
6608 *
6609 * Override the slice of the next task that will be moved from @it__iter using
6610 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6611 * slice duration is kept.
6612 */
6613 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6614 u64 slice)
6615 {
6616 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6617
6618 kit->slice = slice;
6619 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6620 }
6621
6622 /* for backward compatibility, will be removed in v6.15 */
6623 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6624 struct bpf_iter_scx_dsq *it__iter, u64 slice)
6625 {
6626 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6627 scx_bpf_dsq_move_set_slice(it__iter, slice);
6628 }
6629
6630 /**
6631 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6632 * @it__iter: DSQ iterator in progress
6633 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6634 *
6635 * Override the vtime of the next task that will be moved from @it__iter using
6636 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
6637 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6638 * override is ignored and cleared.
6639 */
6640 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6641 u64 vtime)
6642 {
6643 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6644
6645 kit->vtime = vtime;
6646 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6647 }
6648
6649 /* for backward compatibility, will be removed in v6.15 */
6650 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6651 struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6652 {
6653 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6654 scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6655 }
6656
6657 /**
6658 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6659 * @it__iter: DSQ iterator in progress
6660 * @p: task to transfer
6661 * @dsq_id: DSQ to move @p to
6662 * @enq_flags: SCX_ENQ_*
6663 *
6664 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6665 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6666 * be the destination.
6667 *
6668 * For the transfer to be successful, @p must still be on the DSQ and have been
6669 * queued before the DSQ iteration started. This function doesn't care whether
6670 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6671 * been queued before the iteration started.
6672 *
6673 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6674 *
6675 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6676 * lock (e.g. BPF timers or SYSCALL programs).
6677 *
6678 * Returns %true if @p has been consumed, %false if @p had already been consumed
6679 * or dequeued.
6680 */
6681 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6682 struct task_struct *p, u64 dsq_id,
6683 u64 enq_flags)
6684 {
6685 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6686 p, dsq_id, enq_flags);
6687 }
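/*
 * Usage sketch (illustrative only): scan a hypothetical MY_DSQ and move the
 * first task runnable on @cpu straight to that CPU's local DSQ with a fresh
 * slice. bpf_for_each() and BPF_FOR_EACH_ITER are assumed to come from the
 * SCX BPF tooling headers.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
 *		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr))
 *			continue;
 *		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SCX_SLICE_DFL);
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
 *				     SCX_DSQ_LOCAL_ON | cpu, 0))
 *			break;
 *	}
 */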
6688
6689 /* for backward compatibility, will be removed in v6.15 */
6690 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6691 struct task_struct *p, u64 dsq_id,
6692 u64 enq_flags)
6693 {
6694 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6695 return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6696 }
6697
6698 /**
6699 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6700 * @it__iter: DSQ iterator in progress
6701 * @p: task to transfer
6702 * @dsq_id: DSQ to move @p to
6703 * @enq_flags: SCX_ENQ_*
6704 *
6705 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6706 * priority queue of the DSQ specified by @dsq_id. The destination must be a
6707 * user DSQ as only user DSQs support priority queue.
6708 *
6709 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6710 * and scx_bpf_dsq_move_set_vtime() to update.
6711 *
6712 * All other aspects are identical to scx_bpf_dsq_move(). See
6713 * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6714 */
6715 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6716 struct task_struct *p, u64 dsq_id,
6717 u64 enq_flags)
6718 {
6719 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6720 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6721 }
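/*
 * Usage sketch (illustrative only): inside a bpf_for_each() loop like the one
 * above, demote @p to a hypothetical vtime-ordered LOW_PRI_DSQ, pushing its
 * vtime back by a hypothetical "penalty" so it sorts behind its peers.
 *
 *	scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER,
 *				   p->scx.dsq_vtime + penalty);
 *	scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p, LOW_PRI_DSQ, 0);
 */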
6722
6723 /* for backward compatibility, will be removed in v6.15 */
6724 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6725 struct task_struct *p, u64 dsq_id,
6726 u64 enq_flags)
6727 {
6728 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6729 return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6730 }
6731
6732 __bpf_kfunc_end_defs();
6733
6734 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6735 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6736 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6737 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6738 BTF_ID_FLAGS(func, scx_bpf_consume)
6739 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6740 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6741 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6742 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6743 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6744 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6745 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6746 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6747 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6748
6749 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6750 .owner = THIS_MODULE,
6751 .set = &scx_kfunc_ids_dispatch,
6752 };
6753
6754 __bpf_kfunc_start_defs();
6755
6756 /**
6757 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6758 *
6759 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6760 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6761 * processed tasks. Can only be called from ops.cpu_release().
6762 */
6763 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6764 {
6765 LIST_HEAD(tasks);
6766 u32 nr_enqueued = 0;
6767 struct rq *rq;
6768 struct task_struct *p, *n;
6769
6770 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6771 return 0;
6772
6773 rq = cpu_rq(smp_processor_id());
6774 lockdep_assert_rq_held(rq);
6775
6776 /*
6777 * The BPF scheduler may choose to dispatch tasks back to
6778 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6779 * first to avoid processing the same tasks repeatedly.
6780 */
6781 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6782 scx.dsq_list.node) {
6783 /*
6784 * If @p is being migrated, @p's current CPU may not agree with
6785 * its allowed CPUs and the migration_cpu_stop is about to
6786 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6787 *
6788 * While racing sched property changes may also dequeue and
6789 * re-enqueue a migrating task while its current CPU and allowed
6790 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6791 * the current local DSQ for running tasks and thus are not
6792 * visible to the BPF scheduler.
6793 *
6794 * Also skip re-enqueueing tasks that can only run on this
6795 * CPU, as they would just be re-added to the same local
6796 * DSQ without any benefit.
6797 */
6798 if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1)
6799 continue;
6800
6801 dispatch_dequeue(rq, p);
6802 list_add_tail(&p->scx.dsq_list.node, &tasks);
6803 }
6804
6805 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6806 list_del_init(&p->scx.dsq_list.node);
6807 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6808 nr_enqueued++;
6809 }
6810
6811 return nr_enqueued;
6812 }
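/*
 * Usage sketch (illustrative only): when a higher-priority sched class takes
 * over the CPU, hand the tasks still sitting on its local DSQ back to the BPF
 * scheduler so they can be placed elsewhere, as the scx_qmap example does.
 *
 *	void BPF_STRUCT_OPS(sketch_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */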
6813
6814 __bpf_kfunc_end_defs();
6815
6816 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6817 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6818 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6819
6820 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6821 .owner = THIS_MODULE,
6822 .set = &scx_kfunc_ids_cpu_release,
6823 };
6824
6825 __bpf_kfunc_start_defs();
6826
6827 /**
6828 * scx_bpf_create_dsq - Create a custom DSQ
6829 * @dsq_id: DSQ to create
6830 * @node: NUMA node to allocate from
6831 *
6832 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6833 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6834 */
6835 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6836 {
6837 struct scx_dispatch_q *dsq;
6838 struct scx_sched *sch;
6839 s32 ret;
6840
6841 if (unlikely(node >= (int)nr_node_ids ||
6842 (node < 0 && node != NUMA_NO_NODE)))
6843 return -EINVAL;
6844
6845 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
6846 return -EINVAL;
6847
6848 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
6849 if (!dsq)
6850 return -ENOMEM;
6851
6852 init_dsq(dsq, dsq_id);
6853
6854 rcu_read_lock();
6855
6856 sch = rcu_dereference(scx_root);
6857 if (sch)
6858 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
6859 dsq_hash_params);
6860 else
6861 ret = -ENODEV;
6862
6863 rcu_read_unlock();
6864 if (ret)
6865 kfree(dsq);
6866 return ret;
6867 }
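/*
 * Usage sketch (illustrative only): create the scheduler's shared DSQ from
 * the sleepable ops.init() callback, passing -1 (NUMA_NO_NODE) for the node.
 * BPF_STRUCT_OPS_SLEEPABLE is assumed to come from the SCX BPF tooling
 * headers; MY_DSQ is hypothetical.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_init)
 *	{
 *		return scx_bpf_create_dsq(MY_DSQ, -1);
 *	}
 */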
6868
6869 __bpf_kfunc_end_defs();
6870
6871 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6872 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6873 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6874 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6875 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6876 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6877 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6878 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6879 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6880 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6881 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6882
6883 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6884 .owner = THIS_MODULE,
6885 .set = &scx_kfunc_ids_unlocked,
6886 };
6887
6888 __bpf_kfunc_start_defs();
6889
6890 /**
6891 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6892 * @cpu: cpu to kick
6893 * @flags: %SCX_KICK_* flags
6894 *
6895 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6896 * trigger rescheduling on a busy CPU. This can be called from any online
6897 * scx_ops operation and the actual kicking is performed asynchronously through
6898 * an irq work.
6899 */
6900 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6901 {
6902 struct rq *this_rq;
6903 unsigned long irq_flags;
6904
6905 if (!kf_cpu_valid(cpu, NULL))
6906 return;
6907
6908 local_irq_save(irq_flags);
6909
6910 this_rq = this_rq();
6911
6912 /*
6913 * While bypassing for PM ops, IRQ handling may not be online which can
6914 * lead to irq_work_queue() malfunction such as infinite busy wait for
6915 * IRQ status update. Suppress kicking.
6916 */
6917 if (scx_rq_bypassing(this_rq))
6918 goto out;
6919
6920 /*
6921 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6922 * rq locks. We can probably be smarter and avoid bouncing if called
6923 * from ops which don't hold a rq lock.
6924 */
6925 if (flags & SCX_KICK_IDLE) {
6926 struct rq *target_rq = cpu_rq(cpu);
6927
6928 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6929 scx_kf_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6930
6931 if (raw_spin_rq_trylock(target_rq)) {
6932 if (can_skip_idle_kick(target_rq)) {
6933 raw_spin_rq_unlock(target_rq);
6934 goto out;
6935 }
6936 raw_spin_rq_unlock(target_rq);
6937 }
6938 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6939 } else {
6940 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6941
6942 if (flags & SCX_KICK_PREEMPT)
6943 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6944 if (flags & SCX_KICK_WAIT)
6945 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6946 }
6947
6948 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6949 out:
6950 local_irq_restore(irq_flags);
6951 }
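/*
 * Usage sketch (illustrative only): after queueing work on a hypothetical
 * shared MY_DSQ from ops.enqueue(), wake up an idle CPU that is allowed to
 * run @p. scx_bpf_pick_idle_cpu() is the idle-tracking kfunc from ext_idle.c.
 *
 *	s32 cpu;
 *
 *	scx_bpf_dsq_insert(p, MY_DSQ, SCX_SLICE_DFL, enq_flags);
 *	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */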
6952
6953 /**
6954 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6955 * @dsq_id: id of the DSQ
6956 *
6957 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6958 * -%ENOENT is returned.
6959 */
6960 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6961 {
6962 struct scx_sched *sch;
6963 struct scx_dispatch_q *dsq;
6964 s32 ret;
6965
6966 preempt_disable();
6967
6968 sch = rcu_dereference_sched(scx_root);
6969 if (unlikely(!sch)) {
6970 ret = -ENODEV;
6971 goto out;
6972 }
6973
6974 if (dsq_id == SCX_DSQ_LOCAL) {
6975 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6976 goto out;
6977 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6978 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6979
6980 if (ops_cpu_valid(sch, cpu, NULL)) {
6981 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6982 goto out;
6983 }
6984 } else {
6985 dsq = find_user_dsq(sch, dsq_id);
6986 if (dsq) {
6987 ret = READ_ONCE(dsq->nr);
6988 goto out;
6989 }
6990 }
6991 ret = -ENOENT;
6992 out:
6993 preempt_enable();
6994 return ret;
6995 }
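/*
 * Usage sketch (illustrative only): treat the depth of a hypothetical MY_DSQ
 * as a load signal. Note that the return value can also be a -%ENOENT error.
 *
 *	s32 nr = scx_bpf_dsq_nr_queued(MY_DSQ);
 *
 *	if (nr > 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */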
6996
6997 /**
6998 * scx_bpf_destroy_dsq - Destroy a custom DSQ
6999 * @dsq_id: DSQ to destroy
7000 *
7001 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
7002 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
7003 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
7004 * which doesn't exist. Can be called from any online scx_ops operations.
7005 */
7006 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
7007 {
7008 struct scx_sched *sch;
7009
7010 rcu_read_lock();
7011 sch = rcu_dereference(scx_root);
7012 if (sch)
7013 destroy_dsq(sch, dsq_id);
7014 rcu_read_unlock();
7015 }
7016
7017 /**
7018 * bpf_iter_scx_dsq_new - Create a DSQ iterator
7019 * @it: iterator to initialize
7020 * @dsq_id: DSQ to iterate
7021 * @flags: %SCX_DSQ_ITER_*
7022 *
7023 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
7024 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
7025 * tasks which are already queued when this function is invoked.
7026 */
7027 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
7028 u64 flags)
7029 {
7030 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7031 struct scx_sched *sch;
7032
7033 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
7034 sizeof(struct bpf_iter_scx_dsq));
7035 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
7036 __alignof__(struct bpf_iter_scx_dsq));
7037
7038 /*
7039 * next() and destroy() will be called regardless of the return value.
7040 * Always clear $kit->dsq.
7041 */
7042 kit->dsq = NULL;
7043
7044 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held());
7045 if (unlikely(!sch))
7046 return -ENODEV;
7047
7048 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
7049 return -EINVAL;
7050
7051 kit->dsq = find_user_dsq(sch, dsq_id);
7052 if (!kit->dsq)
7053 return -ENOENT;
7054
7055 INIT_LIST_HEAD(&kit->cursor.node);
7056 kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
7057 kit->cursor.priv = READ_ONCE(kit->dsq->seq);
7058
7059 return 0;
7060 }
7061
7062 /**
7063 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
7064 * @it: iterator to progress
7065 *
7066 * Return the next task. See bpf_iter_scx_dsq_new().
7067 */
7068 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
7069 {
7070 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7071 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
7072 struct task_struct *p;
7073 unsigned long flags;
7074
7075 if (!kit->dsq)
7076 return NULL;
7077
7078 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7079
7080 if (list_empty(&kit->cursor.node))
7081 p = NULL;
7082 else
7083 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
7084
7085 /*
7086 * Only tasks which were queued before the iteration started are
7087 * visible. This bounds BPF iterations and guarantees that vtime never
7088 * jumps in the other direction while iterating.
7089 */
7090 do {
7091 p = nldsq_next_task(kit->dsq, p, rev);
7092 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
7093
7094 if (p) {
7095 if (rev)
7096 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
7097 else
7098 list_move(&kit->cursor.node, &p->scx.dsq_list.node);
7099 } else {
7100 list_del_init(&kit->cursor.node);
7101 }
7102
7103 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7104
7105 return p;
7106 }
7107
7108 /**
7109 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
7110 * @it: iterator to destroy
7111 *
7112 * Undo bpf_iter_scx_dsq_new().
7113 */
7114 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
7115 {
7116 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7117
7118 if (!kit->dsq)
7119 return;
7120
7121 if (!list_empty(&kit->cursor.node)) {
7122 unsigned long flags;
7123
7124 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7125 list_del_init(&kit->cursor.node);
7126 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7127 }
7128 kit->dsq = NULL;
7129 }
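/*
 * Usage sketch (illustrative only): the iterator trio above is normally
 * driven through the bpf_for_each() convenience macro from the SCX BPF
 * tooling headers, which expands to _new()/_next()/_destroy(). MY_DSQ is
 * hypothetical.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, SCX_DSQ_ITER_REV) {
 *		bpf_printk("pid=%d vtime=%llu", p->pid, p->scx.dsq_vtime);
 *	}
 */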
7130
7131 __bpf_kfunc_end_defs();
7132
7133 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
7134 char *fmt, unsigned long long *data, u32 data__sz)
7135 {
7136 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
7137 s32 ret;
7138
7139 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
7140 (data__sz && !data)) {
7141 scx_kf_error("invalid data=%p and data__sz=%u", (void *)data, data__sz);
7142 return -EINVAL;
7143 }
7144
7145 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
7146 if (ret < 0) {
7147 scx_kf_error("failed to read data fields (%d)", ret);
7148 return ret;
7149 }
7150
7151 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
7152 &bprintf_data);
7153 if (ret < 0) {
7154 scx_kf_error("format preparation failed (%d)", ret);
7155 return ret;
7156 }
7157
7158 ret = bstr_printf(line_buf, line_size, fmt,
7159 bprintf_data.bin_args);
7160 bpf_bprintf_cleanup(&bprintf_data);
7161 if (ret < 0) {
7162 scx_kf_error("(\"%s\", %p, %u) failed to format", fmt, data, data__sz);
7163 return ret;
7164 }
7165
7166 return ret;
7167 }
7168
7169 static s32 bstr_format(struct scx_bstr_buf *buf,
7170 char *fmt, unsigned long long *data, u32 data__sz)
7171 {
7172 return __bstr_format(buf->data, buf->line, sizeof(buf->line),
7173 fmt, data, data__sz);
7174 }
7175
7176 __bpf_kfunc_start_defs();
7177
7178 /**
7179 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
7180 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
7181 * @fmt: error message format string
7182 * @data: format string parameters packaged using ___bpf_fill() macro
7183 * @data__sz: @data len, must end in '__sz' for the verifier
7184 *
7185 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
7186 * disabling.
7187 */
7188 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
7189 unsigned long long *data, u32 data__sz)
7190 {
7191 unsigned long flags;
7192
7193 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7194 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7195 scx_kf_exit(SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line);
7196 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7197 }
7198
7199 /**
7200 * scx_bpf_error_bstr - Indicate fatal error
7201 * @fmt: error message format string
7202 * @data: format string parameters packaged using ___bpf_fill() macro
7203 * @data__sz: @data len, must end in '__sz' for the verifier
7204 *
7205 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
7206 * disabling.
7207 */
7208 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
7209 u32 data__sz)
7210 {
7211 unsigned long flags;
7212
7213 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7214 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7215 scx_kf_exit(SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line);
7216 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7217 }
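/*
 * Usage sketch (illustrative only): BPF schedulers normally reach the two
 * kfuncs above through the scx_bpf_exit() and scx_bpf_error() wrapper macros
 * in the SCX BPF tooling headers, which pack the varargs into @data and
 * @data__sz.
 *
 *	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
 *		scx_bpf_error("unexpected builtin DSQ 0x%llx", dsq_id);
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
 *		     "cpu %d going offline", cpu);
 */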
7218
7219 /**
7220 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7221 * @fmt: format string
7222 * @data: format string parameters packaged using ___bpf_fill() macro
7223 * @data__sz: @data len, must end in '__sz' for the verifier
7224 *
7225 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7226 * dump_task() to generate extra debug dump specific to the BPF scheduler.
7227 *
7228 * The extra dump may be multiple lines. A single line may be split over
7229 * multiple calls. The last line is automatically terminated.
7230 */
7231 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7232 u32 data__sz)
7233 {
7234 struct scx_dump_data *dd = &scx_dump_data;
7235 struct scx_bstr_buf *buf = &dd->buf;
7236 s32 ret;
7237
7238 if (raw_smp_processor_id() != dd->cpu) {
7239 scx_kf_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7240 return;
7241 }
7242
7243 /* append the formatted string to the line buf */
7244 ret = __bstr_format(buf->data, buf->line + dd->cursor,
7245 sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7246 if (ret < 0) {
7247 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7248 dd->prefix, fmt, data, data__sz, ret);
7249 return;
7250 }
7251
7252 dd->cursor += ret;
7253 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7254
7255 if (!dd->cursor)
7256 return;
7257
7258 /*
7259 * If the line buf overflowed or ends in a newline, flush it into the
7260 * dump. This is to allow the caller to generate a single line over
7261 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7262 * the line buf, the only case which can lead to an unexpected
7263 * truncation is when the caller keeps generating newlines in the middle of
7264 * its output instead of at the end. Don't do that.
7265 */
7266 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7267 ops_dump_flush();
7268 }
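/*
 * Usage sketch (illustrative only): from ops.dump_task(), append
 * scheduler-specific state through the scx_bpf_dump() wrapper macro from the
 * SCX BPF tooling headers; the trailing newline flushes the line.
 *
 *	void BPF_STRUCT_OPS(sketch_dump_task, struct scx_dump_ctx *dctx,
 *			    struct task_struct *p)
 *	{
 *		scx_bpf_dump("sketch: vtime=%llu\n", p->scx.dsq_vtime);
 *	}
 */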
7269
7270 /**
7271 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7272 * @cpu: CPU of interest
7273 *
7274 * Return the maximum relative capacity of @cpu in relation to the most
7275 * performant CPU in the system. The return value is in the range [1,
7276 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7277 */
7278 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7279 {
7280 if (kf_cpu_valid(cpu, NULL))
7281 return arch_scale_cpu_capacity(cpu);
7282 else
7283 return SCX_CPUPERF_ONE;
7284 }
7285
7286 /**
7287 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7288 * @cpu: CPU of interest
7289 *
7290 * Return the current relative performance of @cpu in relation to its maximum.
7291 * The return value is in the range [1, %SCX_CPUPERF_ONE].
7292 *
7293 * The current performance level of a CPU in relation to the maximum performance
7294 * available in the system can be calculated as follows:
7295 *
7296 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7297 *
7298 * The result is in the range [1, %SCX_CPUPERF_ONE].
7299 */
7300 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7301 {
7302 if (kf_cpu_valid(cpu, NULL))
7303 return arch_scale_freq_capacity(cpu);
7304 else
7305 return SCX_CPUPERF_ONE;
7306 }
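/*
 * Worked example (illustrative only): combining the two queries above per the
 * formula in the comment gives a CPU's current absolute performance. E.g. a
 * CPU with capacity 512 running at half speed (cur == 512) evaluates to
 * 512 * 512 / 1024 == 256.
 *
 *	u64 perf = (u64)scx_bpf_cpuperf_cap(cpu) * scx_bpf_cpuperf_cur(cpu) /
 *		   SCX_CPUPERF_ONE;
 */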
7307
7308 /**
7309 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7310 * @cpu: CPU of interest
7311 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7312 *
7313 * Set the target performance level of @cpu to @perf. @perf is in linear
7314 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7315 * schedutil cpufreq governor chooses the target frequency.
7316 *
7317 * The actual performance level chosen, CPU grouping, and the overhead and
7318 * latency of the operations are dependent on the hardware and cpufreq driver in
7319 * use. Consult hardware and cpufreq documentation for more information. The
7320 * current performance level can be monitored using scx_bpf_cpuperf_cur().
7321 */
7322 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7323 {
7324 if (unlikely(perf > SCX_CPUPERF_ONE)) {
7325 scx_kf_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7326 return;
7327 }
7328
7329 if (kf_cpu_valid(cpu, NULL)) {
7330 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
7331 struct rq_flags rf;
7332
7333 /*
7334 * When called with an rq lock held, restrict the operation
7335 * to the corresponding CPU to prevent ABBA deadlocks.
7336 */
7337 if (locked_rq && rq != locked_rq) {
7338 scx_kf_error("Invalid target CPU %d", cpu);
7339 return;
7340 }
7341
7342 /*
7343 * If no rq lock is held, allow to operate on any CPU by
7344 * acquiring the corresponding rq lock.
7345 */
7346 if (!locked_rq) {
7347 rq_lock_irqsave(rq, &rf);
7348 update_rq_clock(rq);
7349 }
7350
7351 rq->scx.cpuperf_target = perf;
7352 cpufreq_update_util(rq, 0);
7353
7354 if (!locked_rq)
7355 rq_unlock_irqrestore(rq, &rf);
7356 }
7357 }
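/*
 * Usage sketch (illustrative only): request half of @cpu's performance range,
 * e.g. from ops.tick() while the system is lightly loaded.
 *
 *	scx_bpf_cpuperf_set(cpu, SCX_CPUPERF_ONE / 2);
 */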
7358
7359 /**
7360 * scx_bpf_nr_node_ids - Return the number of possible node IDs
7361 *
7362 * All valid node IDs in the system are smaller than the returned value.
7363 */
7364 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
7365 {
7366 return nr_node_ids;
7367 }
7368
7369 /**
7370 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7371 *
7372 * All valid CPU IDs in the system are smaller than the returned value.
7373 */
7374 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7375 {
7376 return nr_cpu_ids;
7377 }
7378
7379 /**
7380 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7381 */
7382 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7383 {
7384 return cpu_possible_mask;
7385 }
7386
7387 /**
7388 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7389 */
7390 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7391 {
7392 return cpu_online_mask;
7393 }
7394
7395 /**
7396 * scx_bpf_put_cpumask - Release a possible/online cpumask
7397 * @cpumask: cpumask to release
7398 */
7399 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7400 {
7401 /*
7402 * Empty function body because we aren't actually acquiring or releasing
7403 * a reference to a global cpumask, which is read-only in the caller and
7404 * is never released. The acquire / release semantics here are just used
7405 * to make the cpumask a trusted pointer in the caller.
7406 */
7407 }
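/*
 * Usage sketch (illustrative only): the getters above pair with
 * scx_bpf_put_cpumask() the way KF_ACQUIRE pairs with KF_RELEASE even though
 * no reference actually changes hands.
 *
 *	const struct cpumask *online = scx_bpf_get_online_cpumask();
 *	u32 nr_online = bpf_cpumask_weight(online);
 *
 *	scx_bpf_put_cpumask(online);
 */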
7408
7409 /**
7410 * scx_bpf_task_running - Is task currently running?
7411 * @p: task of interest
7412 */
7413 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7414 {
7415 return task_rq(p)->curr == p;
7416 }
7417
7418 /**
7419 * scx_bpf_task_cpu - CPU a task is currently associated with
7420 * @p: task of interest
7421 */
7422 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7423 {
7424 return task_cpu(p);
7425 }
7426
7427 /**
7428 * scx_bpf_cpu_rq - Fetch the rq of a CPU
7429 * @cpu: CPU of the rq
7430 */
7431 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7432 {
7433 if (!kf_cpu_valid(cpu, NULL))
7434 return NULL;
7435
7436 return cpu_rq(cpu);
7437 }
7438
7439 /**
7440 * scx_bpf_task_cgroup - Return the sched cgroup of a task
7441 * @p: task of interest
7442 *
7443 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7444 * from the scheduler's POV. SCX operations should use this function to
7445 * determine @p's current cgroup as, unlike following @p->cgroups,
7446 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7447 * rq-locked operations. Can be called on the parameter tasks of rq-locked
7448 * operations. The restriction guarantees that @p's rq is locked by the caller.
7449 */
7450 #ifdef CONFIG_CGROUP_SCHED
7451 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7452 {
7453 struct task_group *tg = p->sched_task_group;
7454 struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7455
7456 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7457 goto out;
7458
7459 cgrp = tg_cgrp(tg);
7460
7461 out:
7462 cgroup_get(cgrp);
7463 return cgrp;
7464 }
7465 #endif
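/*
 * Usage sketch (illustrative only): look up @p's cgroup from an rq-locked
 * operation such as ops.enqueue(). The acquired cgroup must be released with
 * bpf_cgroup_release().
 *
 *	struct cgroup *cgrp = scx_bpf_task_cgroup(p);
 *	u64 cgid = cgrp->kn->id;
 *
 *	bpf_cgroup_release(cgrp);
 */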
7466
7467 /**
7468 * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7469 * clock for the current CPU. The clock returned is in nanoseconds.
7470 *
7471 * It provides the following properties:
7472 *
7473 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7474 * to account for execution time and track tasks' runtime properties.
7475 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
7476 * eventually reads a hardware timestamp counter -- is neither performant nor
7477 * scalable. scx_bpf_now() aims to provide a high-performance clock by
7478 * using the rq clock in the scheduler core whenever possible.
7479 *
7480 * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7481 * scheduler use cases, the required clock resolution is lower than the most
7482 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7483 * uses the rq clock in the scheduler core whenever it is valid. It considers
7484 * that the rq clock is valid from the time the rq clock is updated
7485 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7486 *
7487 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7488 * guarantees the clock never goes backward when comparing them in the same
7489 * CPU. On the other hand, when comparing clocks in different CPUs, there
7490 * is no such guarantee -- the clock can go backward. It provides a
7491 * monotonically *non-decreasing* clock so that it would provide the same
7492 * clock values in two different scx_bpf_now() calls in the same CPU
7493 * during the same period of when the rq clock is valid.
7494 */
7495 __bpf_kfunc u64 scx_bpf_now(void)
7496 {
7497 struct rq *rq;
7498 u64 clock;
7499
7500 preempt_disable();
7501
7502 rq = this_rq();
7503 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7504 /*
7505 * If the rq clock is valid, use the cached rq clock.
7506 *
7507 * Note that scx_bpf_now() is re-entrant between a process
7508 * context and an interrupt context (e.g., timer interrupt).
7509 * However, we don't need to consider the race between them
7510 * because such race is not observable from a caller.
7511 */
7512 clock = READ_ONCE(rq->scx.clock);
7513 } else {
7514 /*
7515 * Otherwise, return a fresh rq clock.
7516 *
7517 * The rq clock is updated outside of the rq lock.
7518 * In this case, keep the updated rq clock invalid so the next
7519 * kfunc call outside the rq lock gets a fresh rq clock.
7520 */
7521 clock = sched_clock_cpu(cpu_of(rq));
7522 }
7523
7524 preempt_enable();
7525
7526 return clock;
7527 }
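/*
 * Usage sketch (illustrative only): account service time with same-CPU
 * deltas, which property 3) above guarantees never go negative. "taskc" is a
 * hypothetical per-task storage.
 *
 *	u64 now = scx_bpf_now();
 *
 *	taskc->runtime += now - taskc->last_seen_at;
 *	taskc->last_seen_at = now;
 */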
7528
7529 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
7530 {
7531 struct scx_event_stats *e_cpu;
7532 int cpu;
7533
7534 /* Aggregate per-CPU event counters into @events. */
7535 memset(events, 0, sizeof(*events));
7536 for_each_possible_cpu(cpu) {
7537 e_cpu = per_cpu_ptr(sch->event_stats_cpu, cpu);
7538 scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
7539 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
7540 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
7541 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
7542 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
7543 scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
7544 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
7545 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
7546 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
7547 }
7548 }
7549
7550 /*
7551 * scx_bpf_events - Copy the aggregated system-wide event counters to @events
7552 * @events: output buffer from a BPF program
7553 * @events__sz: @events len, must end in '__sz' for the verifier
7554 */
7555 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
7556 size_t events__sz)
7557 {
7558 struct scx_sched *sch;
7559 struct scx_event_stats e_sys;
7560
7561 rcu_read_lock();
7562 sch = rcu_dereference(scx_root);
7563 if (sch)
7564 scx_read_events(sch, &e_sys);
7565 else
7566 memset(&e_sys, 0, sizeof(e_sys));
7567 rcu_read_unlock();
7568
7569 /*
7570 * We cannot entirely trust a BPF-provided size since a BPF program
7571 * might be compiled against a different vmlinux.h, of which
7572 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
7573 * (an older vmlinux.h). Hence, we use the smaller size to avoid
7574 * memory corruption.
7575 */
7576 events__sz = min(events__sz, sizeof(*events));
7577 memcpy(events, &e_sys, events__sz);
7578 }
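/*
 * Usage sketch (illustrative only): snapshot the counters from, e.g., a
 * BPF_PROG_TYPE_SYSCALL program; passing sizeof() via the __sz argument lets
 * the kernel clamp the copy as described above.
 *
 *	struct scx_event_stats events;
 *
 *	scx_bpf_events(&events, sizeof(events));
 *	bpf_printk("select_cpu fallbacks: %llu",
 *		   events.SCX_EV_SELECT_CPU_FALLBACK);
 */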
7579
7580 __bpf_kfunc_end_defs();
7581
7582 BTF_KFUNCS_START(scx_kfunc_ids_any)
7583 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7584 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7585 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7586 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7587 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7588 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7589 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7590 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7591 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7592 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7593 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7594 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7595 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
7596 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7597 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7598 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7599 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7600 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7601 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7602 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7603 #ifdef CONFIG_CGROUP_SCHED
7604 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7605 #endif
7606 BTF_ID_FLAGS(func, scx_bpf_now)
7607 BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS)
7608 BTF_KFUNCS_END(scx_kfunc_ids_any)
7609
7610 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7611 .owner = THIS_MODULE,
7612 .set = &scx_kfunc_ids_any,
7613 };
7614
7615 static int __init scx_init(void)
7616 {
7617 int ret;
7618
7619 /*
7620 * kfunc registration can't be done from init_sched_ext_class() as
7621 * register_btf_kfunc_id_set() needs most of the system to be up.
7622 *
7623 * Some kfuncs are context-sensitive and can only be called from
7624 * specific SCX ops. They are grouped into BTF sets accordingly.
7625 * Unfortunately, BPF currently doesn't have a way of enforcing such
7626 * restrictions. Eventually, the verifier should be able to enforce
7627 * them. For now, register them the same and make each kfunc explicitly
7628 * check using scx_kf_allowed().
7629 */
7630 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7631 &scx_kfunc_set_enqueue_dispatch)) ||
7632 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7633 &scx_kfunc_set_dispatch)) ||
7634 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7635 &scx_kfunc_set_cpu_release)) ||
7636 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7637 &scx_kfunc_set_unlocked)) ||
7638 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7639 &scx_kfunc_set_unlocked)) ||
7640 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7641 &scx_kfunc_set_any)) ||
7642 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7643 &scx_kfunc_set_any)) ||
7644 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7645 &scx_kfunc_set_any))) {
7646 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7647 return ret;
7648 }
7649
7650 ret = scx_idle_init();
7651 if (ret) {
7652 pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
7653 return ret;
7654 }
7655
7656 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7657 if (ret) {
7658 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7659 return ret;
7660 }
7661
7662 ret = register_pm_notifier(&scx_pm_notifier);
7663 if (ret) {
7664 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7665 return ret;
7666 }
7667
7668 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7669 if (!scx_kset) {
7670 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7671 return -ENOMEM;
7672 }
7673
7674 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7675 if (ret < 0) {
7676 pr_err("sched_ext: Failed to add global attributes\n");
7677 return ret;
7678 }
7679
7680 return 0;
7681 }
7682 __initcall(scx_init);
7683