1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11
12 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
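/*
 * For example, ops.select_cpu() is the first member of struct sched_ext_ops,
 * so SCX_OP_IDX(select_cpu) == 0. The resulting index is used with the
 * scx_opi ranges and the scx_sched.has_op bitmap defined below.
 */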
13
14 enum scx_consts {
15 SCX_DSP_DFL_MAX_BATCH = 32,
16 SCX_DSP_MAX_LOOPS = 32,
17 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
18
19 SCX_EXIT_BT_LEN = 64,
20 SCX_EXIT_MSG_LEN = 1024,
21 SCX_EXIT_DUMP_DFL_LEN = 32768,
22
23 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
24
25 /*
26 * Iterating all tasks may take a while. Periodically drop
27 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
28 */
29 SCX_TASK_ITER_BATCH = 32,
30 };
31
32 enum scx_exit_kind {
33 SCX_EXIT_NONE,
34 SCX_EXIT_DONE,
35
36 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
37 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
38 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
39 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
40
41 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
42 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
43 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
44 };
45
46 /*
47 * An exit code can be specified when exiting with scx_bpf_exit() or scx_exit(),
48 * corresponding to exit_kind UNREG_BPF and UNREG_KERN respectively. The codes
49 * are 64-bit values of the format:
50 *
51 * Bits: [63 .. 48 47 .. 32 31 .. 0]
52 * [ SYS ACT ] [ SYS RSN ] [ USR ]
53 *
54 * SYS ACT: System-defined exit actions
55 * SYS RSN: System-defined exit reasons
56 * USR : User-defined exit codes and reasons
57 *
58 * Using the above, users may communicate intention and context by ORing system
59 * actions and/or system reasons with a user-defined exit code.
60 */
61 enum scx_exit_code {
62 /* Reasons */
63 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
64
65 /* Actions */
66 SCX_ECODE_ACT_RESTART = 1LLU << 48,
67 };
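/*
 * Illustrative sketch (BPF scheduler side, not a prescribed usage): a
 * scheduler that wants to be restarted after a hotplug event could combine
 * the bits above with a private user code, e.g.:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 1,
 *		     "restarting after hotplug");
 */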
68
69 /*
70 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
71 * being disabled.
72 */
73 struct scx_exit_info {
74 /* %SCX_EXIT_* - broad category of the exit reason */
75 enum scx_exit_kind kind;
76
77 /* exit code if gracefully exiting */
78 s64 exit_code;
79
80 /* textual representation of the above */
81 const char *reason;
82
83 /* backtrace if exiting due to an error */
84 unsigned long *bt;
85 u32 bt_len;
86
87 /* informational message */
88 char *msg;
89
90 /* debug dump */
91 char *dump;
92 };
93
94 /* sched_ext_ops.flags */
95 enum scx_ops_flags {
96 /*
97 * Keep built-in idle tracking even if ops.update_idle() is implemented.
98 */
99 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
100
101 /*
102 * By default, if there are no other tasks to run on the CPU, the ext core
103 * keeps running the current task even after its slice expires. If this
104 * flag is specified, such tasks are passed to ops.enqueue() with
105 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
106 */
107 SCX_OPS_ENQ_LAST = 1LLU << 1,
108
109 /*
110 * An exiting task may schedule after PF_EXITING is set. In such cases,
111 * bpf_task_from_pid() may not be able to find the task and if the BPF
112 * scheduler depends on pid lookup for dispatching, the task will be
113 * lost leading to various issues including RCU grace period stalls.
114 *
115 * To mask this problem, by default, unhashed tasks are automatically
116 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
117 * depend on pid lookups and wants to handle these tasks directly, the
118 * following flag can be used.
119 */
120 SCX_OPS_ENQ_EXITING = 1LLU << 2,
121
122 /*
123 * If set, only tasks with policy set to SCHED_EXT are attached to
124 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
125 */
126 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
127
128 /*
129 * A migration disabled task can only execute on its current CPU. By
130 * default, such tasks are automatically put on the CPU's local DSQ with
131 * the default slice on enqueue. If this ops flag is set, they also go
132 * through ops.enqueue().
133 *
134 * A migration disabled task never invokes ops.select_cpu() as it can
135 * only select the current CPU. Also, p->cpus_ptr will only contain its
136 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
137 * and thus may disagree with cpumask_weight(p->cpus_ptr).
138 */
139 SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,
140
141 /*
142 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
143 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
144 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
145 * transfers. When this optimization is enabled, ops.select_cpu() is
146 * skipped in some cases (when racing against the wakee switching out).
147 * As the BPF scheduler may depend on ops.select_cpu() being invoked
148 * during wakeups, queued wakeup is disabled by default.
149 *
150 * If this ops flag is set, queued wakeup optimization is enabled and
151 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
152 * wakee's CPU without preceding ops.select_cpu() even for tasks which
153 * may be executed on multiple CPUs.
154 */
155 SCX_OPS_ALLOW_QUEUED_WAKEUP = 1LLU << 5,
156
157 /*
158 * If set, enable per-node idle cpumasks. If clear, use a single global
159 * flat idle cpumask.
160 */
161 SCX_OPS_BUILTIN_IDLE_PER_NODE = 1LLU << 6,
162
163 /*
164 * CPU cgroup support flags
165 */
166 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* DEPRECATED, will be removed in 6.18 */
167
168 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
169 SCX_OPS_ENQ_LAST |
170 SCX_OPS_ENQ_EXITING |
171 SCX_OPS_ENQ_MIGRATION_DISABLED |
172 SCX_OPS_ALLOW_QUEUED_WAKEUP |
173 SCX_OPS_SWITCH_PARTIAL |
174 SCX_OPS_BUILTIN_IDLE_PER_NODE |
175 SCX_OPS_HAS_CGROUP_WEIGHT,
176
177 /* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
178 __SCX_OPS_INTERNAL_MASK = 0xffLLU << 56,
179
180 SCX_OPS_HAS_CPU_PREEMPT = 1LLU << 56,
181 };
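/*
 * Illustrative sketch (BPF scheduler side): ops flags are set through the
 * .flags field of the struct_ops map; "example_ops" is a made-up name.
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.flags	= SCX_OPS_ENQ_LAST | SCX_OPS_SWITCH_PARTIAL,
 *		.name	= "example",
 *	};
 */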
182
183 /* argument container for ops.init_task() */
184 struct scx_init_task_args {
185 /*
186 * Set if ops.init_task() is being invoked on the fork path, as opposed
187 * to the scheduler transition path.
188 */
189 bool fork;
190 #ifdef CONFIG_EXT_GROUP_SCHED
191 /* the cgroup the task is joining */
192 struct cgroup *cgroup;
193 #endif
194 };
195
196 /* argument container for ops.exit_task() */
197 struct scx_exit_task_args {
198 /* Whether the task exited before running on sched_ext. */
199 bool cancelled;
200 };
201
202 /* argument container for ops->cgroup_init() */
203 struct scx_cgroup_init_args {
204 /* the weight of the cgroup [1..10000] */
205 u32 weight;
206 };
207
208 enum scx_cpu_preempt_reason {
209 /* next task is being scheduled by &sched_class_rt */
210 SCX_CPU_PREEMPT_RT,
211 /* next task is being scheduled by &sched_class_dl */
212 SCX_CPU_PREEMPT_DL,
213 /* next task is being scheduled by &sched_class_stop */
214 SCX_CPU_PREEMPT_STOP,
215 /* unknown reason for SCX being preempted */
216 SCX_CPU_PREEMPT_UNKNOWN,
217 };
218
219 /*
220 * Argument container for ops->cpu_acquire(). Currently empty, but may be
221 * expanded in the future.
222 */
223 struct scx_cpu_acquire_args {};
224
225 /* argument container for ops->cpu_release() */
226 struct scx_cpu_release_args {
227 /* the reason the CPU was preempted */
228 enum scx_cpu_preempt_reason reason;
229
230 /* the task that's going to be scheduled on the CPU */
231 struct task_struct *task;
232 };
233
234 /*
235 * Informational context provided to dump operations.
236 */
237 struct scx_dump_ctx {
238 enum scx_exit_kind kind;
239 s64 exit_code;
240 const char *reason;
241 u64 at_ns;
242 u64 at_jiffies;
243 };
244
245 /**
246 * struct sched_ext_ops - Operation table for BPF scheduler implementation
247 *
248 * A BPF scheduler can implement an arbitrary scheduling policy by
249 * implementing and loading operations in this table. Note that a userland
250 * scheduling policy can also be implemented using the BPF scheduler
251 * as a shim layer.
252 */
253 struct sched_ext_ops {
254 /**
255 * @select_cpu: Pick the target CPU for a task which is being woken up
256 * @p: task being woken up
257 * @prev_cpu: the cpu @p was on before sleeping
258 * @wake_flags: SCX_WAKE_*
259 *
260 * Decision made here isn't final. @p may be moved to any CPU while it
261 * is getting dispatched for execution later. However, as @p is not on
262 * the rq at this point, getting the eventual execution CPU right here
263 * saves a small bit of overhead down the line.
264 *
265 * If an idle CPU is returned, the CPU is kicked and will try to
266 * dispatch. While an explicit custom mechanism can be added,
267 * select_cpu() serves as the default way to wake up idle CPUs.
268 *
269 * @p may be inserted into a DSQ directly by calling
270 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
271 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
272 * of the CPU returned by this operation.
273 *
274 * Note that select_cpu() is never called for tasks that can only run
275 * on a single CPU or tasks with migration disabled, as they don't have
276 * the option to select a different CPU. See select_task_rq() for
277 * details.
278 */
279 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
280
281 /**
282 * @enqueue: Enqueue a task on the BPF scheduler
283 * @p: task being enqueued
284 * @enq_flags: %SCX_ENQ_*
285 *
286 * @p is ready to run. Insert directly into a DSQ by calling
287 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
288 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
289 * the task will stall.
290 *
291 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
292 * skipped.
293 */
294 void (*enqueue)(struct task_struct *p, u64 enq_flags);
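/*
 * A minimal ops.enqueue() sketch (BPF scheduler side, illustrative only).
 * SHARED_DSQ is an assumed custom DSQ created with scx_bpf_create_dsq() in
 * ops.init():
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 */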
295
296 /**
297 * @dequeue: Remove a task from the BPF scheduler
298 * @p: task being dequeued
299 * @deq_flags: %SCX_DEQ_*
300 *
301 * Remove @p from the BPF scheduler. This is usually called to isolate
302 * the task while updating its scheduling properties (e.g. priority).
303 *
304 * The ext core keeps track of whether the BPF side owns a given task or
305 * not and can gracefully ignore spurious dispatches from BPF side,
306 * which makes it safe to not implement this method. However, depending
307 * on the scheduling logic, this can lead to confusing behaviors - e.g.
308 * scheduling position not being updated across a priority change.
309 */
310 void (*dequeue)(struct task_struct *p, u64 deq_flags);
311
312 /**
313 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
314 * @cpu: CPU to dispatch tasks for
315 * @prev: previous task being switched out
316 *
317 * Called when a CPU's local dsq is empty. The operation should dispatch
318 * one or more tasks from the BPF scheduler into the DSQs using
319 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
320 * using scx_bpf_dsq_move_to_local().
321 *
322 * The maximum number of times scx_bpf_dsq_insert() can be called
323 * without an intervening scx_bpf_dsq_move_to_local() is specified by
324 * ops.dispatch_max_batch. See the comments on top of the two functions
325 * for more details.
326 *
327 * When not %NULL, @prev is an SCX task with its slice depleted. If
328 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
329 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
330 * ops.dispatch() returns. To keep executing @prev, return without
331 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
332 */
333 void (*dispatch)(s32 cpu, struct task_struct *prev);
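/*
 * A matching ops.dispatch() sketch (illustrative only), consuming from the
 * same assumed SHARED_DSQ as the enqueue sketch above:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */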
334
335 /**
336 * @tick: Periodic tick
337 * @p: task running currently
338 *
339 * This operation is called every 1/HZ seconds on CPUs which are
340 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
341 * immediate dispatch cycle on the CPU.
342 */
343 void (*tick)(struct task_struct *p);
344
345 /**
346 * @runnable: A task is becoming runnable on its associated CPU
347 * @p: task becoming runnable
348 * @enq_flags: %SCX_ENQ_*
349 *
350 * This and the following three functions can be used to track a task's
351 * execution state transitions. A task becomes ->runnable() on a CPU,
352 * and then goes through one or more ->running() and ->stopping() pairs
353 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
354 * done running on the CPU.
355 *
356 * @p is becoming runnable on the CPU because it's
357 *
358 * - waking up (%SCX_ENQ_WAKEUP)
359 * - being moved from another CPU
360 * - being restored after temporarily taken off the queue for an
361 * attribute change.
362 *
363 * This and ->enqueue() are related but not coupled. This operation
364 * notifies @p's state transition and may not be followed by ->enqueue()
365 * e.g. when @p is being dispatched to a remote CPU, or when @p is
366 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
367 * task may be ->enqueue()'d without being preceded by this operation
368 * e.g. after exhausting its slice.
369 */
370 void (*runnable)(struct task_struct *p, u64 enq_flags);
371
372 /**
373 * @running: A task is starting to run on its associated CPU
374 * @p: task starting to run
375 *
376 * Note that this callback may be called from a CPU other than the
377 * one the task is going to run on. This can happen when a task
378 * property is changed (e.g., affinity), since set_next_task_scx(),
379 * which triggers this callback, may run on a CPU different from
380 * the task's assigned CPU.
381 *
382 * Therefore, always use scx_bpf_task_cpu(@p) to determine the
383 * target CPU the task is going to use.
384 *
385 * See ->runnable() for explanation on the task state notifiers.
386 */
387 void (*running)(struct task_struct *p);
388
389 /**
390 * @stopping: A task is stopping execution
391 * @p: task stopping to run
392 * @runnable: is task @p still runnable?
393 *
394 * Note that this callback may be called from a CPU other than the
395 * one the task was running on. This can happen when a task
396 * property is changed (e.g., affinity), since dequeue_task_scx(),
397 * which triggers this callback, may run on a CPU different from
398 * the task's assigned CPU.
399 *
400 * Therefore, always use scx_bpf_task_cpu(@p) to retrieve the CPU
401 * the task was running on.
402 *
403 * See ->runnable() for explanation on the task state notifiers. If
404 * !@runnable, ->quiescent() will be invoked after this operation
405 * returns.
406 */
407 void (*stopping)(struct task_struct *p, bool runnable);
408
409 /**
410 * @quiescent: A task is becoming not runnable on its associated CPU
411 * @p: task becoming not runnable
412 * @deq_flags: %SCX_DEQ_*
413 *
414 * See ->runnable() for explanation on the task state notifiers.
415 *
416 * @p is becoming quiescent on the CPU because it's
417 *
418 * - sleeping (%SCX_DEQ_SLEEP)
419 * - being moved to another CPU
420 * - being temporarily taken off the queue for an attribute change
421 * (%SCX_DEQ_SAVE)
422 *
423 * This and ->dequeue() are related but not coupled. This operation
424 * notifies @p's state transition and may not be preceded by ->dequeue()
425 * e.g. when @p is being dispatched to a remote CPU.
426 */
427 void (*quiescent)(struct task_struct *p, u64 deq_flags);
428
429 /**
430 * @yield: Yield CPU
431 * @from: yielding task
432 * @to: optional yield target task
433 *
434 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
435 * The BPF scheduler should ensure that other available tasks are
436 * dispatched before the yielding task. Return value is ignored in this
437 * case.
438 *
439 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
440 * scheduler can implement the request, return %true; otherwise, %false.
441 */
442 bool (*yield)(struct task_struct *from, struct task_struct *to);
443
444 /**
445 * @core_sched_before: Task ordering for core-sched
446 * @a: task A
447 * @b: task B
448 *
449 * Used by core-sched to determine the ordering between two tasks. See
450 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
451 * core-sched.
452 *
453 * Both @a and @b are runnable and may or may not currently be queued on
454 * the BPF scheduler. Should return %true if @a should run before @b.
455 * %false if there's no required ordering or @b should run before @a.
456 *
457 * If not specified, the default is ordering them according to when they
458 * became runnable.
459 */
460 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
461
462 /**
463 * @set_weight: Set task weight
464 * @p: task to set weight for
465 * @weight: new weight [1..10000]
466 *
467 * Update @p's weight to @weight.
468 */
469 void (*set_weight)(struct task_struct *p, u32 weight);
470
471 /**
472 * @set_cpumask: Set CPU affinity
473 * @p: task to set CPU affinity for
474 * @cpumask: cpumask of cpus that @p can run on
475 *
476 * Update @p's CPU affinity to @cpumask.
477 */
478 void (*set_cpumask)(struct task_struct *p,
479 const struct cpumask *cpumask);
480
481 /**
482 * @update_idle: Update the idle state of a CPU
483 * @cpu: CPU to update the idle state for
484 * @idle: whether entering or exiting the idle state
485 *
486 * This operation is called when @cpu enters or leaves the idle
487 * state. By default, implementing this operation disables the built-in
488 * idle CPU tracking and the following helpers become unavailable:
489 *
490 * - scx_bpf_select_cpu_dfl()
491 * - scx_bpf_select_cpu_and()
492 * - scx_bpf_test_and_clear_cpu_idle()
493 * - scx_bpf_pick_idle_cpu()
494 *
495 * The user also must implement ops.select_cpu() as the default
496 * implementation relies on scx_bpf_select_cpu_dfl().
497 *
498 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
499 * tracking.
500 */
501 void (*update_idle)(s32 cpu, bool idle);
502
503 /**
504 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
505 * @cpu: The CPU being acquired by the BPF scheduler.
506 * @args: Acquire arguments, see the struct definition.
507 *
508 * A CPU that was previously released from the BPF scheduler is now once
509 * again under its control.
510 */
511 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
512
513 /**
514 * @cpu_release: A CPU is taken away from the BPF scheduler
515 * @cpu: The CPU being released by the BPF scheduler.
516 * @args: Release arguments, see the struct definition.
517 *
518 * The specified CPU is no longer under the control of the BPF
519 * scheduler. This could be because it was preempted by a higher
520 * priority sched_class, though there may be other reasons as well. The
521 * caller should consult @args->reason to determine the cause.
522 */
523 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
524
525 /**
526 * @init_task: Initialize a task to run in a BPF scheduler
527 * @p: task to initialize for BPF scheduling
528 * @args: init arguments, see the struct definition
529 *
530 * Either we're loading a BPF scheduler or a new task is being forked.
531 * Initialize @p for BPF scheduling. This operation may block and can
532 * be used for allocations, and is called exactly once for a task.
533 *
534 * Return 0 for success, -errno for failure. An error return while
535 * loading will abort loading of the BPF scheduler. During a fork, it
536 * will abort that specific fork.
537 */
538 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
539
540 /**
541 * @exit_task: Exit a previously-running task from the system
542 * @p: task to exit
543 * @args: exit arguments, see the struct definition
544 *
545 * @p is exiting or the BPF scheduler is being unloaded. Perform any
546 * necessary cleanup for @p.
547 */
548 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
549
550 /**
551 * @enable: Enable BPF scheduling for a task
552 * @p: task to enable BPF scheduling for
553 *
554 * Enable @p for BPF scheduling. enable() is called on @p any time it
555 * enters SCX, and is always paired with a matching disable().
556 */
557 void (*enable)(struct task_struct *p);
558
559 /**
560 * @disable: Disable BPF scheduling for a task
561 * @p: task to disable BPF scheduling for
562 *
563 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
564 * Disable BPF scheduling for @p. A disable() call is always matched
565 * with a prior enable() call.
566 */
567 void (*disable)(struct task_struct *p);
568
569 /**
570 * @dump: Dump BPF scheduler state on error
571 * @ctx: debug dump context
572 *
573 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
574 */
575 void (*dump)(struct scx_dump_ctx *ctx);
576
577 /**
578 * @dump_cpu: Dump BPF scheduler state for a CPU on error
579 * @ctx: debug dump context
580 * @cpu: CPU to generate debug dump for
581 * @idle: @cpu is currently idle without any runnable tasks
582 *
583 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
584 * @cpu. If @idle is %true and this operation doesn't produce any
585 * output, @cpu is skipped for dump.
586 */
587 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
588
589 /**
590 * @dump_task: Dump BPF scheduler state for a runnable task on error
591 * @ctx: debug dump context
592 * @p: runnable task to generate debug dump for
593 *
594 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
595 * @p.
596 */
597 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
598
599 #ifdef CONFIG_EXT_GROUP_SCHED
600 /**
601 * @cgroup_init: Initialize a cgroup
602 * @cgrp: cgroup being initialized
603 * @args: init arguments, see the struct definition
604 *
605 * Either the BPF scheduler is being loaded or @cgrp is being created; initialize
606 * @cgrp for sched_ext. This operation may block.
607 *
608 * Return 0 for success, -errno for failure. An error return while
609 * loading will abort loading of the BPF scheduler. During cgroup
610 * creation, it will abort the specific cgroup creation.
611 */
612 s32 (*cgroup_init)(struct cgroup *cgrp,
613 struct scx_cgroup_init_args *args);
614
615 /**
616 * @cgroup_exit: Exit a cgroup
617 * @cgrp: cgroup being exited
618 *
619 * Either the BPF scheduler is being unloaded or @cgrp is being destroyed; exit
620 * @cgrp for sched_ext. This operation may block.
621 */
622 void (*cgroup_exit)(struct cgroup *cgrp);
623
624 /**
625 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
626 * @p: task being moved
627 * @from: cgroup @p is being moved from
628 * @to: cgroup @p is being moved to
629 *
630 * Prepare @p for move from cgroup @from to @to. This operation may
631 * block and can be used for allocations.
632 *
633 * Return 0 for success, -errno for failure. An error return aborts the
634 * migration.
635 */
636 s32 (*cgroup_prep_move)(struct task_struct *p,
637 struct cgroup *from, struct cgroup *to);
638
639 /**
640 * @cgroup_move: Commit cgroup move
641 * @p: task being moved
642 * @from: cgroup @p is being moved from
643 * @to: cgroup @p is being moved to
644 *
645 * Commit the move. @p is dequeued during this operation.
646 */
647 void (*cgroup_move)(struct task_struct *p,
648 struct cgroup *from, struct cgroup *to);
649
650 /**
651 * @cgroup_cancel_move: Cancel cgroup move
652 * @p: task whose cgroup move is being canceled
653 * @from: cgroup @p was being moved from
654 * @to: cgroup @p was being moved to
655 *
656 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
657 * Undo the preparation.
658 */
659 void (*cgroup_cancel_move)(struct task_struct *p,
660 struct cgroup *from, struct cgroup *to);
661
662 /**
663 * @cgroup_set_weight: A cgroup's weight is being changed
664 * @cgrp: cgroup whose weight is being updated
665 * @weight: new weight [1..10000]
666 *
667 * Update @cgrp's weight to @weight.
668 */
669 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
670 #endif /* CONFIG_EXT_GROUP_SCHED */
671
672 /*
673 * All ops in the normal group must come before ops.cpu_online().
674 */
675
676 /**
677 * @cpu_online: A CPU became online
678 * @cpu: CPU which just came up
679 *
680 * @cpu just came online. @cpu will not call ops.enqueue() or
681 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
682 */
683 void (*cpu_online)(s32 cpu);
684
685 /**
686 * @cpu_offline: A CPU is going offline
687 * @cpu: CPU which is going offline
688 *
689 * @cpu is going offline. @cpu will not call ops.enqueue() or
690 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
691 */
692 void (*cpu_offline)(s32 cpu);
693
694 /*
695 * All CPU hotplug ops must come before ops.init().
696 */
697
698 /**
699 * @init: Initialize the BPF scheduler
700 */
701 s32 (*init)(void);
702
703 /**
704 * @exit: Clean up after the BPF scheduler
705 * @info: Exit info
706 *
707 * ops.exit() is also called on ops.init() failure, which is a bit
708 * unusual. This is to allow rich reporting through @info on how
709 * ops.init() failed.
710 */
711 void (*exit)(struct scx_exit_info *info);
712
713 /**
714 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
715 */
716 u32 dispatch_max_batch;
717
718 /**
719 * @flags: %SCX_OPS_* flags
720 */
721 u64 flags;
722
723 /**
724 * @timeout_ms: The maximum amount of time, in milliseconds, that a
725 * runnable task should be able to wait before being scheduled. The
726 * maximum timeout may not exceed the default timeout of 30 seconds.
727 *
728 * Defaults to the maximum allowed timeout value of 30 seconds.
729 */
730 u32 timeout_ms;
731
732 /**
733 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
734 * value of 32768 is used.
735 */
736 u32 exit_dump_len;
737
738 /**
739 * @hotplug_seq: A sequence number that may be set by the scheduler to
740 * detect when a hotplug event has occurred during the loading process.
741 * If 0, no detection occurs. Otherwise, the scheduler will fail to
742 * load if the sequence number does not match @scx_hotplug_seq on the
743 * enable path.
744 */
745 u64 hotplug_seq;
746
747 /**
748 * @name: BPF scheduler's name
749 *
750 * Must be a non-zero valid BPF object name including only isalnum(),
751 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
752 * BPF scheduler is enabled.
753 */
754 char name[SCX_OPS_NAME_LEN];
755
756 /* internal use only, must be NULL */
757 void *priv;
758 };
759
760 enum scx_opi {
761 SCX_OPI_BEGIN = 0,
762 SCX_OPI_NORMAL_BEGIN = 0,
763 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
764 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
765 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
766 SCX_OPI_END = SCX_OP_IDX(init),
767 };
768
769 /*
770 * Collection of event counters. Event types are placed in descending order.
771 */
772 struct scx_event_stats {
773 /*
774 * If ops.select_cpu() returns a CPU which can't be used by the task,
775 * the core scheduler code silently picks a fallback CPU.
776 */
777 s64 SCX_EV_SELECT_CPU_FALLBACK;
778
779 /*
780 * When dispatching to a local DSQ, the CPU may have gone offline in
781 * the meantime. In this case, the task is bounced to the global DSQ.
782 */
783 s64 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
784
785 /*
786 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
787 * continued to run because there were no other tasks on the CPU.
788 */
789 s64 SCX_EV_DISPATCH_KEEP_LAST;
790
791 /*
792 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
793 * is dispatched to a local DSQ when exiting.
794 */
795 s64 SCX_EV_ENQ_SKIP_EXITING;
796
797 /*
798 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
799 * migration disabled task skips ops.enqueue() and is dispatched to its
800 * local DSQ.
801 */
802 s64 SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;
803
804 /*
805 * Total number of times a task's time slice was refilled with the
806 * default value (SCX_SLICE_DFL).
807 */
808 s64 SCX_EV_REFILL_SLICE_DFL;
809
810 /*
811 * The total duration of bypass modes in nanoseconds.
812 */
813 s64 SCX_EV_BYPASS_DURATION;
814
815 /*
816 * The number of tasks dispatched in the bypassing mode.
817 */
818 s64 SCX_EV_BYPASS_DISPATCH;
819
820 /*
821 * The number of times the bypassing mode has been activated.
822 */
823 s64 SCX_EV_BYPASS_ACTIVATE;
824 };
825
826 struct scx_sched {
827 struct sched_ext_ops ops;
828 DECLARE_BITMAP(has_op, SCX_OPI_END);
829
830 /*
831 * Dispatch queues.
832 *
833 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability.
834 * This is to avoid live-locking in bypass mode where all tasks are
835 * dispatched to %SCX_DSQ_GLOBAL and all CPUs consume from it. If
836 * per-node split isn't sufficient, it can be further split.
837 */
838 struct rhashtable dsq_hash;
839 struct scx_dispatch_q **global_dsqs;
840
841 /*
842 * The event counters are in a per-CPU variable to minimize the
843 * accounting overhead. A system-wide view on the event counter is
844 * constructed when requested by scx_bpf_events().
845 */
846 struct scx_event_stats __percpu *event_stats_cpu;
847
848 bool warned_zero_slice;
849
850 atomic_t exit_kind;
851 struct scx_exit_info *exit_info;
852
853 struct kobject kobj;
854
855 struct kthread_worker *helper;
856 struct irq_work error_irq_work;
857 struct kthread_work disable_work;
858 struct rcu_work rcu_work;
859 };
860
861 enum scx_wake_flags {
862 /* expose select WF_* flags as enums */
863 SCX_WAKE_FORK = WF_FORK,
864 SCX_WAKE_TTWU = WF_TTWU,
865 SCX_WAKE_SYNC = WF_SYNC,
866 };
867
868 enum scx_enq_flags {
869 /* expose select ENQUEUE_* flags as enums */
870 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
871 SCX_ENQ_HEAD = ENQUEUE_HEAD,
872 SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED,
873
874 /* high 32bits are SCX specific */
875
876 /*
877 * Set the following to trigger preemption when calling
878 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
879 * current task is cleared to zero and the CPU is kicked into the
880 * scheduling path. Implies %SCX_ENQ_HEAD.
881 */
882 SCX_ENQ_PREEMPT = 1LLU << 32,
883
884 /*
885 * The task being enqueued was previously enqueued on the current CPU's
886 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
887 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
888 * invoked in a ->cpu_release() callback, and the task is again
889 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
890 * task will not be scheduled on the CPU until at least the next invocation
891 * of the ->cpu_acquire() callback.
892 */
893 SCX_ENQ_REENQ = 1LLU << 40,
894
895 /*
896 * The task being enqueued is the only task available for the cpu. By
897 * default, ext core keeps executing such tasks but when
898 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
899 * %SCX_ENQ_LAST flag set.
900 *
901 * The BPF scheduler is responsible for triggering a follow-up
902 * scheduling event. Otherwise, execution may stall.
903 */
904 SCX_ENQ_LAST = 1LLU << 41,
905
906 /* high 8 bits are internal */
907 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
908
909 SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
910 SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
911 };
912
913 enum scx_deq_flags {
914 /* expose select DEQUEUE_* flags as enums */
915 SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
916
917 /* high 32bits are SCX specific */
918
919 /*
920 * The generic core-sched layer decided to execute the task even though
921 * it hasn't been dispatched yet. Dequeue from the BPF side.
922 */
923 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
924 };
925
926 enum scx_pick_idle_cpu_flags {
927 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
928 SCX_PICK_IDLE_IN_NODE = 1LLU << 1, /* pick a CPU in the same target NUMA node */
929 };
930
931 enum scx_kick_flags {
932 /*
933 * Kick the target CPU if idle. Guarantees that the target CPU goes
934 * through at least one full scheduling cycle before going idle. If the
935 * target CPU can be determined to be currently not idle and going to go
936 * through a scheduling cycle before going idle, noop.
937 */
938 SCX_KICK_IDLE = 1LLU << 0,
939
940 /*
941 * Preempt the current task and execute the dispatch path. If the
942 * current task of the target CPU is an SCX task, its ->scx.slice is
943 * cleared to zero before the scheduling path is invoked so that the
944 * task expires and the dispatch path is invoked.
945 */
946 SCX_KICK_PREEMPT = 1LLU << 1,
947
948 /*
949 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
950 * return after the target CPU finishes picking the next task.
951 */
952 SCX_KICK_WAIT = 1LLU << 2,
953 };
954
955 enum scx_tg_flags {
956 SCX_TG_ONLINE = 1U << 0,
957 SCX_TG_INITED = 1U << 1,
958 };
959
960 enum scx_enable_state {
961 SCX_ENABLING,
962 SCX_ENABLED,
963 SCX_DISABLING,
964 SCX_DISABLED,
965 };
966
967 static const char *scx_enable_state_str[] = {
968 [SCX_ENABLING] = "enabling",
969 [SCX_ENABLED] = "enabled",
970 [SCX_DISABLING] = "disabling",
971 [SCX_DISABLED] = "disabled",
972 };
973
974 /*
975 * sched_ext_entity->ops_state
976 *
977 * Used to track the task ownership between the SCX core and the BPF scheduler.
978 * State transitions look as follows:
979 *
980 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
981 * ^ | |
982 * | v v
983 * \-------------------------------/
984 *
985 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
986 * sites for explanations on the conditions being waited upon and why they are
987 * safe. Transitions out of them into NONE or QUEUED must store_release and the
988 * waiters should load_acquire.
989 *
990 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
991 * any given task can be dispatched by the BPF scheduler at all times and thus
992 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
993 * to try to dispatch any task anytime regardless of its state as the SCX core
994 * can safely reject invalid dispatches.
995 */
996 enum scx_ops_state {
997 SCX_OPSS_NONE, /* owned by the SCX core */
998 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
999 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
1000 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
1001
1002 /*
1003 * QSEQ brands each QUEUED instance so that, when dispatch races
1004 * dequeue/requeue, the dispatcher can tell whether it still has a claim
1005 * on the task being dispatched.
1006 *
1007 * As some 32bit archs can't do 64bit store_release/load_acquire,
1008 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
1009 * 32bit machines. The dispatch race window QSEQ protects is very narrow
1010 * and runs with IRQ disabled. 30 bits should be sufficient.
1011 */
1012 SCX_OPSS_QSEQ_SHIFT = 2,
1013 };
1014
1015 /* Use macros to ensure that the type is unsigned long for the masks */
1016 #define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
1017 #define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
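/*
 * Illustrative encoding, derived from the definitions above: a task queued
 * with qseq N has ops_state == (N << SCX_OPSS_QSEQ_SHIFT) | SCX_OPSS_QUEUED,
 * so the state is (opss & SCX_OPSS_STATE_MASK) and the QSEQ brand is
 * (opss & SCX_OPSS_QSEQ_MASK).
 */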
1018
1019 /*
1020 * NOTE: sched_ext is in the process of growing multiple scheduler support and
1021 * scx_root usage is in a transitional state. Naked dereferences are safe if the
1022 * caller is one of the tasks attached to SCX and explicit RCU dereference is
1023 * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
1024 * are used as temporary markers to indicate that the dereferences need to be
1025 * updated to point to the associated scheduler instances rather than scx_root.
1026 */
1027 static struct scx_sched __rcu *scx_root;
1028
1029 /*
1030 * During exit, a task may schedule after losing its PIDs. When disabling the
1031 * BPF scheduler, we need to be able to iterate tasks in every state to
1032 * guarantee system safety. Maintain a dedicated task list which contains every
1033 * task between its fork and eventual free.
1034 */
1035 static DEFINE_SPINLOCK(scx_tasks_lock);
1036 static LIST_HEAD(scx_tasks);
1037
1038 /* ops enable/disable */
1039 static DEFINE_MUTEX(scx_enable_mutex);
1040 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
1041 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
1042 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
1043 static unsigned long scx_in_softlockup;
1044 static atomic_t scx_breather_depth = ATOMIC_INIT(0);
1045 static int scx_bypass_depth;
1046 static bool scx_init_task_enabled;
1047 static bool scx_switching_all;
1048 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
1049
1050 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
1051 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
1052
1053 /*
1054 * A monotonically increasing sequence number that is incremented every time a
1055 * scheduler is enabled. This can be used to check if any custom sched_ext
1056 * scheduler has ever been used in the system.
1057 */
1058 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
1059
1060 /*
1061 * The maximum amount of time in jiffies that a task may be runnable without
1062 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
1063 * scx_error().
1064 */
1065 static unsigned long scx_watchdog_timeout;
1066
1067 /*
1068 * The last time the delayed work was run. This delayed work relies on
1069 * ksoftirqd being able to run to service timer interrupts, so it's possible
1070 * that this work itself could get wedged. To account for this, we check that
1071 * it's not stalled in the timer tick, and trigger an error if it is.
1072 */
1073 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
1074
1075 static struct delayed_work scx_watchdog_work;
1076
1077 /* for %SCX_KICK_WAIT */
1078 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
1079
1080 /*
1081 * Direct dispatch marker.
1082 *
1083 * Non-NULL values are used for direct dispatch from enqueue path. A valid
1084 * pointer points to the task currently being enqueued. An ERR_PTR value is used
1085 * to indicate that direct dispatch has already happened.
1086 */
1087 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
1088
1089 static const struct rhashtable_params dsq_hash_params = {
1090 .key_len = sizeof_field(struct scx_dispatch_q, id),
1091 .key_offset = offsetof(struct scx_dispatch_q, id),
1092 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
1093 };
1094
1095 static LLIST_HEAD(dsqs_to_free);
1096
1097 /* dispatch buf */
1098 struct scx_dsp_buf_ent {
1099 struct task_struct *task;
1100 unsigned long qseq;
1101 u64 dsq_id;
1102 u64 enq_flags;
1103 };
1104
1105 static u32 scx_dsp_max_batch;
1106
1107 struct scx_dsp_ctx {
1108 struct rq *rq;
1109 u32 cursor;
1110 u32 nr_tasks;
1111 struct scx_dsp_buf_ent buf[];
1112 };
1113
1114 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
1115
1116 /* string formatting from BPF */
1117 struct scx_bstr_buf {
1118 u64 data[MAX_BPRINTF_VARARGS];
1119 char line[SCX_EXIT_MSG_LEN];
1120 };
1121
1122 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
1123 static struct scx_bstr_buf scx_exit_bstr_buf;
1124
1125 /* ops debug dump */
1126 struct scx_dump_data {
1127 s32 cpu;
1128 bool first;
1129 s32 cursor;
1130 struct seq_buf *s;
1131 const char *prefix;
1132 struct scx_bstr_buf buf;
1133 };
1134
1135 static struct scx_dump_data scx_dump_data = {
1136 .cpu = -1,
1137 };
1138
1139 /* /sys/kernel/sched_ext interface */
1140 static struct kset *scx_kset;
1141
1142 #define CREATE_TRACE_POINTS
1143 #include <trace/events/sched_ext.h>
1144
1145 static void process_ddsp_deferred_locals(struct rq *rq);
1146 static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1147 static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
1148 s64 exit_code, const char *fmt, va_list args);
1149
1150 static __printf(4, 5) void scx_exit(struct scx_sched *sch,
1151 enum scx_exit_kind kind, s64 exit_code,
1152 const char *fmt, ...)
1153 {
1154 va_list args;
1155
1156 va_start(args, fmt);
1157 scx_vexit(sch, kind, exit_code, fmt, args);
1158 va_end(args);
1159 }
1160
1161 static __printf(3, 4) void scx_kf_exit(enum scx_exit_kind kind, s64 exit_code,
1162 const char *fmt, ...)
1163 {
1164 struct scx_sched *sch;
1165 va_list args;
1166
1167 rcu_read_lock();
1168 sch = rcu_dereference(scx_root);
1169 if (sch) {
1170 va_start(args, fmt);
1171 scx_vexit(sch, kind, exit_code, fmt, args);
1172 va_end(args);
1173 }
1174 rcu_read_unlock();
1175 }
1176
1177 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
1178 #define scx_kf_error(fmt, args...) scx_kf_exit(SCX_EXIT_ERROR, 0, fmt, ##args)
1179
1180 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op)
1181
1182 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1183 {
1184 if (time_after(at, now))
1185 return jiffies_to_msecs(at - now);
1186 else
1187 return -(long)jiffies_to_msecs(now - at);
1188 }
1189
1190 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1191 static u32 higher_bits(u32 flags)
1192 {
1193 return ~((1 << fls(flags)) - 1);
1194 }
1195
1196 /* return the mask with only the highest bit set */
1197 static u32 highest_bit(u32 flags)
1198 {
1199 int bit = fls(flags);
1200 return ((u64)1 << bit) >> 1;
1201 }
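/*
 * E.g. for flags == 0x05, higher_bits() returns 0xfffffff8 (bits 3..31 set)
 * and highest_bit() returns 0x04.
 */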
1202
1203 static bool u32_before(u32 a, u32 b)
1204 {
1205 return (s32)(a - b) < 0;
1206 }
1207
1208 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1209 {
1210 struct scx_sched *sch = scx_root;
1211
1212 return sch->global_dsqs[cpu_to_node(task_cpu(p))];
1213 }
1214
1215 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
1216 {
1217 return rhashtable_lookup_fast(&sch->dsq_hash, &dsq_id, dsq_hash_params);
1218 }
1219
1220 /*
1221 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1222 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1223 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1224 * whether it's running from an allowed context.
1225 *
1226 * @mask is constant, always inline to cull the mask calculations.
1227 */
1228 static __always_inline void scx_kf_allow(u32 mask)
1229 {
1230 /* nesting is allowed only in increasing scx_kf_mask order */
1231 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1232 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1233 current->scx.kf_mask, mask);
1234 current->scx.kf_mask |= mask;
1235 barrier();
1236 }
1237
1238 static void scx_kf_disallow(u32 mask)
1239 {
1240 barrier();
1241 current->scx.kf_mask &= ~mask;
1242 }
1243
1244 /*
1245 * Track the rq currently locked.
1246 *
1247 * This allows kfuncs to safely operate on rq from any scx ops callback,
1248 * knowing which rq is already locked.
1249 */
1250 static DEFINE_PER_CPU(struct rq *, locked_rq);
1251
1252 static inline void update_locked_rq(struct rq *rq)
1253 {
1254 /*
1255 * Check whether @rq is actually locked. This can help expose bugs
1256 * or incorrect assumptions about the context in which a kfunc or
1257 * callback is executed.
1258 */
1259 if (rq)
1260 lockdep_assert_rq_held(rq);
1261 __this_cpu_write(locked_rq, rq);
1262 }
1263
1264 /*
1265 * Return the rq currently locked from an scx callback, or NULL if no rq is
1266 * locked.
1267 */
1268 static inline struct rq *scx_locked_rq(void)
1269 {
1270 return __this_cpu_read(locked_rq);
1271 }
1272
1273 #define SCX_CALL_OP(sch, mask, op, rq, args...) \
1274 do { \
1275 update_locked_rq(rq); \
1276 if (mask) { \
1277 scx_kf_allow(mask); \
1278 (sch)->ops.op(args); \
1279 scx_kf_disallow(mask); \
1280 } else { \
1281 (sch)->ops.op(args); \
1282 } \
1283 update_locked_rq(NULL); \
1284 } while (0)
1285
1286 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \
1287 ({ \
1288 __typeof__((sch)->ops.op(args)) __ret; \
1289 \
1290 update_locked_rq(rq); \
1291 if (mask) { \
1292 scx_kf_allow(mask); \
1293 __ret = (sch)->ops.op(args); \
1294 scx_kf_disallow(mask); \
1295 } else { \
1296 __ret = (sch)->ops.op(args); \
1297 } \
1298 update_locked_rq(NULL); \
1299 __ret; \
1300 })
1301
1302 /*
1303 * Some kfuncs are allowed only on the tasks that are subjects of the
1304 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1305 * restrictions, the following SCX_CALL_OP_*() variants should be used when
1306 * invoking scx_ops operations that take task arguments. These can only be used
1307 * for non-nesting operations due to the way the tasks are tracked.
1308 *
1309 * kfuncs which can only operate on such tasks can in turn use
1310 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1311 * the specific task.
1312 */
1313 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \
1314 do { \
1315 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1316 current->scx.kf_tasks[0] = task; \
1317 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
1318 current->scx.kf_tasks[0] = NULL; \
1319 } while (0)
1320
1321 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \
1322 ({ \
1323 __typeof__((sch)->ops.op(task, ##args)) __ret; \
1324 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1325 current->scx.kf_tasks[0] = task; \
1326 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
1327 current->scx.kf_tasks[0] = NULL; \
1328 __ret; \
1329 })
1330
1331 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \
1332 ({ \
1333 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
1334 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
1335 current->scx.kf_tasks[0] = task0; \
1336 current->scx.kf_tasks[1] = task1; \
1337 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
1338 current->scx.kf_tasks[0] = NULL; \
1339 current->scx.kf_tasks[1] = NULL; \
1340 __ret; \
1341 })
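/*
 * Illustrative callsite sketch (the mask/arguments are assumptions for the
 * example): invoking ops.select_cpu() on the wakeup path could look like:
 *
 *	cpu = SCX_CALL_OP_TASK_RET(sch, SCX_KF_SELECT_CPU, select_cpu, rq,
 *				   p, prev_cpu, wake_flags);
 */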
1342
1343 /* @mask is constant, always inline to cull unnecessary branches */
1344 static __always_inline bool scx_kf_allowed(u32 mask)
1345 {
1346 if (unlikely(!(current->scx.kf_mask & mask))) {
1347 scx_kf_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1348 mask, current->scx.kf_mask);
1349 return false;
1350 }
1351
1352 /*
1353 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1354 * DISPATCH must not be called if we're running DEQUEUE which is nested
1355 * inside ops.dispatch(). We don't need to check boundaries for any
1356 * blocking kfuncs as the verifier ensures they're only called from
1357 * sleepable progs.
1358 */
1359 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1360 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1361 scx_kf_error("cpu_release kfunc called from a nested operation");
1362 return false;
1363 }
1364
1365 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1366 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1367 scx_kf_error("dispatch kfunc called from a nested operation");
1368 return false;
1369 }
1370
1371 return true;
1372 }
1373
1374 /* see SCX_CALL_OP_TASK() */
1375 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1376 struct task_struct *p)
1377 {
1378 if (!scx_kf_allowed(mask))
1379 return false;
1380
1381 if (unlikely((p != current->scx.kf_tasks[0] &&
1382 p != current->scx.kf_tasks[1]))) {
1383 scx_kf_error("called on a task not being operated on");
1384 return false;
1385 }
1386
1387 return true;
1388 }
1389
1390 /**
1391 * nldsq_next_task - Iterate to the next task in a non-local DSQ
1392 * @dsq: user dsq being iterated
1393 * @cur: current position, %NULL to start iteration
1394 * @rev: walk backwards
1395 *
1396 * Returns %NULL when iteration is finished.
1397 */
1398 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1399 struct task_struct *cur, bool rev)
1400 {
1401 struct list_head *list_node;
1402 struct scx_dsq_list_node *dsq_lnode;
1403
1404 lockdep_assert_held(&dsq->lock);
1405
1406 if (cur)
1407 list_node = &cur->scx.dsq_list.node;
1408 else
1409 list_node = &dsq->list;
1410
1411 /* find the next task, need to skip BPF iteration cursors */
1412 do {
1413 if (rev)
1414 list_node = list_node->prev;
1415 else
1416 list_node = list_node->next;
1417
1418 if (list_node == &dsq->list)
1419 return NULL;
1420
1421 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1422 node);
1423 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1424
1425 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1426 }
1427
1428 #define nldsq_for_each_task(p, dsq) \
1429 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1430 (p) = nldsq_next_task((dsq), (p), false))
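/*
 * Illustrative usage sketch: walking a DSQ requires holding its lock, e.g.:
 *
 *	raw_spin_lock(&dsq->lock);
 *	nldsq_for_each_task(p, dsq)
 *		pr_info("%s[%d]\n", p->comm, p->pid);
 *	raw_spin_unlock(&dsq->lock);
 */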
1431
1432
1433 /*
1434 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1435 * dispatch order. BPF-visible iterator is opaque and larger to allow future
1436 * changes without breaking backward compatibility. Can be used with
1437 * bpf_for_each(). See bpf_iter_scx_dsq_*().
1438 */
1439 enum scx_dsq_iter_flags {
1440 /* iterate in the reverse dispatch order */
1441 SCX_DSQ_ITER_REV = 1U << 16,
1442
1443 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
1444 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
1445
1446 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
1447 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
1448 __SCX_DSQ_ITER_HAS_SLICE |
1449 __SCX_DSQ_ITER_HAS_VTIME,
1450 };
1451
1452 struct bpf_iter_scx_dsq_kern {
1453 struct scx_dsq_list_node cursor;
1454 struct scx_dispatch_q *dsq;
1455 u64 slice;
1456 u64 vtime;
1457 } __attribute__((aligned(8)));
1458
1459 struct bpf_iter_scx_dsq {
1460 u64 __opaque[6];
1461 } __attribute__((aligned(8)));
1462
1463
1464 /*
1465 * SCX task iterator.
1466 */
1467 struct scx_task_iter {
1468 struct sched_ext_entity cursor;
1469 struct task_struct *locked;
1470 struct rq *rq;
1471 struct rq_flags rf;
1472 u32 cnt;
1473 };
1474
1475 /**
1476 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1477 * @iter: iterator to init
1478 *
1479 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1480 * must eventually be stopped with scx_task_iter_stop().
1481 *
1482 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1483 * between this and the first next() call or between any two next() calls. If
1484 * the locks are released between two next() calls, the caller is responsible
1485 * for ensuring that the task being iterated remains accessible either through
1486 * RCU read lock or obtaining a reference count.
1487 *
1488 * All tasks which existed when the iteration started are guaranteed to be
1489 * visited as long as they still exist.
1490 */
1491 static void scx_task_iter_start(struct scx_task_iter *iter)
1492 {
1493 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1494 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1495
1496 spin_lock_irq(&scx_tasks_lock);
1497
1498 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1499 list_add(&iter->cursor.tasks_node, &scx_tasks);
1500 iter->locked = NULL;
1501 iter->cnt = 0;
1502 }
1503
1504 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1505 {
1506 if (iter->locked) {
1507 task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1508 iter->locked = NULL;
1509 }
1510 }
1511
1512 /**
1513 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1514 * @iter: iterator to unlock
1515 *
1516 * If @iter is in the middle of a locked iteration, it may be locking the rq of
1517 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1518 * This function can be safely called anytime during an iteration.
1519 */
1520 static void scx_task_iter_unlock(struct scx_task_iter *iter)
1521 {
1522 __scx_task_iter_rq_unlock(iter);
1523 spin_unlock_irq(&scx_tasks_lock);
1524 }
1525
1526 /**
1527 * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1528 * @iter: iterator to re-lock
1529 *
1530 * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1531 * doesn't re-lock the rq lock. Must be called before other iterator operations.
1532 */
1533 static void scx_task_iter_relock(struct scx_task_iter *iter)
1534 {
1535 spin_lock_irq(&scx_tasks_lock);
1536 }
1537
1538 /**
1539 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1540 * @iter: iterator to exit
1541 *
1542 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1543 * which is released on return. If the iterator holds a task's rq lock, that rq
1544 * lock is also released. See scx_task_iter_start() for details.
1545 */
1546 static void scx_task_iter_stop(struct scx_task_iter *iter)
1547 {
1548 list_del_init(&iter->cursor.tasks_node);
1549 scx_task_iter_unlock(iter);
1550 }
1551
1552 /**
1553 * scx_task_iter_next - Next task
1554 * @iter: iterator to walk
1555 *
1556 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1557 * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
1558 * by holding scx_tasks_lock for too long.
1559 */
1560 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1561 {
1562 struct list_head *cursor = &iter->cursor.tasks_node;
1563 struct sched_ext_entity *pos;
1564
1565 if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
1566 scx_task_iter_unlock(iter);
1567 cond_resched();
1568 scx_task_iter_relock(iter);
1569 }
1570
1571 list_for_each_entry(pos, cursor, tasks_node) {
1572 if (&pos->tasks_node == &scx_tasks)
1573 return NULL;
1574 if (!(pos->flags & SCX_TASK_CURSOR)) {
1575 list_move(cursor, &pos->tasks_node);
1576 return container_of(pos, struct task_struct, scx);
1577 }
1578 }
1579
1580 /* can't happen, should always terminate at scx_tasks above */
1581 BUG();
1582 }
1583
1584 /**
1585 * scx_task_iter_next_locked - Next non-idle task with its rq locked
1586 * @iter: iterator to walk
1587 *
1588 * Visit the next non-idle task with its rq lock held. See
1589 * scx_task_iter_start() for details.
1591 */
1592 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1593 {
1594 struct task_struct *p;
1595
1596 __scx_task_iter_rq_unlock(iter);
1597
1598 while ((p = scx_task_iter_next(iter))) {
1599 /*
1600 * scx_task_iter is used to prepare and move tasks into SCX
1601 * while loading the BPF scheduler and vice-versa while
1602 * unloading. The init_tasks ("swappers") should be excluded
1603 * from the iteration because:
1604 *
1605 * - It's unsafe to use __setscheduler_prio() on an init_task to
1606 * determine the sched_class to use as it won't preserve its
1607 * idle_sched_class.
1608 *
1609 * - ops.init/exit_task() can easily be confused if called with
1610 * init_tasks as they, e.g., share PID 0.
1611 *
1612 * As init_tasks are never scheduled through SCX, they can be
1613 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1614 * doesn't work here:
1615 *
1616 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1617 * yet been onlined.
1618 *
1619 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1620 * play_idle_precise() used by CONFIG_IDLE_INJECT.
1621 *
1622 * Test for idle_sched_class as only init_tasks are on it.
1623 */
1624 if (p->sched_class != &idle_sched_class)
1625 break;
1626 }
1627 if (!p)
1628 return NULL;
1629
1630 iter->rq = task_rq_lock(p, &iter->rf);
1631 iter->locked = p;
1632
1633 return p;
1634 }
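/*
 * Typical usage sketch, mirroring how the enable/disable paths walk all
 * tasks:
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti))) {
 *		... operate on @p with its rq locked ...
 *	}
 *	scx_task_iter_stop(&sti);
 */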
1635
1636 /**
1637 * scx_add_event - Increase an event counter for 'name' by 'cnt'
1638 * @sch: scx_sched to account events for
1639 * @name: an event name defined in struct scx_event_stats
1640 * @cnt: the number of times the event occurred
1641 *
1642 * This can be used when preemption is not disabled.
1643 */
1644 #define scx_add_event(sch, name, cnt) do { \
1645 this_cpu_add((sch)->event_stats_cpu->name, (cnt)); \
1646 trace_sched_ext_event(#name, (cnt)); \
1647 } while(0)
1648
1649 /**
1650 * __scx_add_event - Increase an event counter for 'name' by 'cnt'
1651 * @sch: scx_sched to account events for
1652 * @name: an event name defined in struct scx_event_stats
1653  * @cnt: the number of times the event occurred
1654 *
1655 * This should be used only when preemption is disabled.
1656 */
1657 #define __scx_add_event(sch, name, cnt) do { \
1658 __this_cpu_add((sch)->event_stats_cpu->name, (cnt)); \
1659 	trace_sched_ext_event(#name, (cnt));	\
1660 } while(0)
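
/*
 * Illustrative sketch (not part of the kernel source): picking between the
 * two event helpers above. From a preemptible context, scx_add_event() is
 * the one to use; from a path that already runs with preemption disabled
 * (e.g. under an rq lock), __scx_add_event() skips the extra protection:
 *
 *	scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);     preemption enabled
 *	__scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);   preemption disabled
 *
 * SCX_EV_BYPASS_DISPATCH is just one of the counters in struct
 * scx_event_stats; any other event name defined there works the same way.
 */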
1661
1662 /**
1663 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
1664 * @dst_e: destination event stats
1665 * @src_e: source event stats
1666 * @kind: a kind of event to be aggregated
1667 */
1668 #define scx_agg_event(dst_e, src_e, kind) do { \
1669 (dst_e)->kind += READ_ONCE((src_e)->kind); \
1670 } while(0)
1671
1672 /**
1673 * scx_dump_event - Dump an event 'kind' in 'events' to 's'
1674 * @s: output seq_buf
1675 * @events: event stats
1676 * @kind: a kind of event to dump
1677 */
1678 #define scx_dump_event(s, events, kind) do { \
1679 dump_line(&(s), "%40s: %16lld", #kind, (events)->kind); \
1680 } while (0)
1681
1682
1683 static void scx_read_events(struct scx_sched *sch,
1684 struct scx_event_stats *events);
1685
1686 static enum scx_enable_state scx_enable_state(void)
1687 {
1688 return atomic_read(&scx_enable_state_var);
1689 }
1690
1691 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
1692 {
1693 return atomic_xchg(&scx_enable_state_var, to);
1694 }
1695
1696 static bool scx_tryset_enable_state(enum scx_enable_state to,
1697 enum scx_enable_state from)
1698 {
1699 int from_v = from;
1700
1701 return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
1702 }
1703
1704 static bool scx_rq_bypassing(struct rq *rq)
1705 {
1706 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1707 }
1708
1709 /**
1710 * wait_ops_state - Busy-wait the specified ops state to end
1711 * @p: target task
1712 * @opss: state to wait the end of
1713 *
1714 * Busy-wait for @p to transition out of @opss. This can only be used when the
1715 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
1716 * has load_acquire semantics to ensure that the caller can see the updates made
1717 * in the enqueueing and dispatching paths.
1718 */
1719 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1720 {
1721 do {
1722 cpu_relax();
1723 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1724 }
1725
1726 static inline bool __cpu_valid(s32 cpu)
1727 {
1728 return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
1729 }
1730
1731 /**
1732 * ops_cpu_valid - Verify a cpu number, to be used on ops input args
1733 * @sch: scx_sched to abort on error
1734 * @cpu: cpu number which came from a BPF ops
1735 * @where: extra information reported on error
1736 *
1737 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1738 * Verify that it is in range and one of the possible cpus. If invalid, trigger
1739 * an ops error.
1740 */
1741 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
1742 {
1743 if (__cpu_valid(cpu)) {
1744 return true;
1745 } else {
1746 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
1747 return false;
1748 }
1749 }
1750
1751 /**
1752 * kf_cpu_valid - Verify a CPU number, to be used on kfunc input args
1753 * @cpu: cpu number which came from a BPF ops
1754 * @where: extra information reported on error
1755 *
1756 * The same as ops_cpu_valid() but @sch is implicit.
1757 */
1758 static bool kf_cpu_valid(u32 cpu, const char *where)
1759 {
1760 if (__cpu_valid(cpu)) {
1761 return true;
1762 } else {
1763 scx_kf_error("invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
1764 return false;
1765 }
1766 }
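
/*
 * A minimal usage sketch (call site hypothetical): a CPU number coming in
 * from the BPF scheduler is validated before indexing per-CPU state, and the
 * caller bails out or falls back when validation fails:
 *
 *	if (!kf_cpu_valid(cpu, "in some_kfunc()"))
 *		return;
 *	rq = cpu_rq(cpu);
 *
 * ops_cpu_valid() is used the same way where the scx_sched is explicit, as
 * in find_dsq_for_dispatch() below.
 */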
1767
1768 /**
1769 * ops_sanitize_err - Sanitize a -errno value
1770 * @sch: scx_sched to error out on error
1771 * @ops_name: operation to blame on failure
1772 * @err: -errno value to sanitize
1773 *
1774 * Verify @err is a valid -errno. If not, trigger scx_error() and return
1775  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1776  * cause misbehavior. For example, a large negative return from
1777 * ops.init_task() triggers an oops when passed up the call chain because the
1778 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1779 * handled as a pointer.
1780 */
1781 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
1782 {
1783 if (err < 0 && err >= -MAX_ERRNO)
1784 return err;
1785
1786 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
1787 return -EPROTO;
1788 }
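
/*
 * A sketch of the intended use (assuming @ret holds the raw value just
 * returned by ops.init_task(); illustrative only):
 *
 *	ret = ops_sanitize_err(sch, "init_task", ret);
 *	if (ret)
 *		return ret;
 *
 * After sanitization, @ret is either 0 or a proper -errno that can be
 * propagated or encoded with ERR_PTR() safely.
 */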
1789
1790 static void run_deferred(struct rq *rq)
1791 {
1792 process_ddsp_deferred_locals(rq);
1793 }
1794
1795 #ifdef CONFIG_SMP
1796 static void deferred_bal_cb_workfn(struct rq *rq)
1797 {
1798 run_deferred(rq);
1799 }
1800 #endif
1801
1802 static void deferred_irq_workfn(struct irq_work *irq_work)
1803 {
1804 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1805
1806 raw_spin_rq_lock(rq);
1807 run_deferred(rq);
1808 raw_spin_rq_unlock(rq);
1809 }
1810
1811 /**
1812 * schedule_deferred - Schedule execution of deferred actions on an rq
1813 * @rq: target rq
1814 *
1815 * Schedule execution of deferred actions on @rq. Must be called with @rq
1816 * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1817 * can unlock @rq to e.g. migrate tasks to other rqs.
1818 */
1819 static void schedule_deferred(struct rq *rq)
1820 {
1821 lockdep_assert_rq_held(rq);
1822
1823 #ifdef CONFIG_SMP
1824 /*
1825 * If in the middle of waking up a task, task_woken_scx() will be called
1826 * afterwards which will then run the deferred actions, no need to
1827 * schedule anything.
1828 */
1829 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1830 return;
1831
1832 /*
1833 * If in balance, the balance callbacks will be called before rq lock is
1834 * released. Schedule one.
1835 */
1836 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1837 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1838 deferred_bal_cb_workfn);
1839 return;
1840 }
1841 #endif
1842 /*
1843 * No scheduler hooks available. Queue an irq work. They are executed on
1844 * IRQ re-enable which may take a bit longer than the scheduler hooks.
1845 * The above WAKEUP and BALANCE paths should cover most of the cases and
1846 * the time to IRQ re-enable shouldn't be long.
1847 */
1848 irq_work_queue(&rq->scx.deferred_irq_work);
1849 }
1850
1851 /**
1852 * touch_core_sched - Update timestamp used for core-sched task ordering
1853 * @rq: rq to read clock from, must be locked
1854 * @p: task to update the timestamp for
1855 *
1856 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1857 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1858 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1859 * exhaustion).
1860 */
1861 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1862 {
1863 lockdep_assert_rq_held(rq);
1864
1865 #ifdef CONFIG_SCHED_CORE
1866 /*
1867 * It's okay to update the timestamp spuriously. Use
1868 * sched_core_disabled() which is cheaper than enabled().
1869 *
1870 * As this is used to determine ordering between tasks of sibling CPUs,
1871 * it may be better to use per-core dispatch sequence instead.
1872 */
1873 if (!sched_core_disabled())
1874 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1875 #endif
1876 }
1877
1878 /**
1879 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1880 * @rq: rq to read clock from, must be locked
1881 * @p: task being dispatched
1882 *
1883 * If the BPF scheduler implements custom core-sched ordering via
1884 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1885 * ordering within each local DSQ. This function is called from dispatch paths
1886 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1887 */
1888 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1889 {
1890 lockdep_assert_rq_held(rq);
1891
1892 #ifdef CONFIG_SCHED_CORE
1893 if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
1894 touch_core_sched(rq, p);
1895 #endif
1896 }
1897
1898 static void update_curr_scx(struct rq *rq)
1899 {
1900 struct task_struct *curr = rq->curr;
1901 s64 delta_exec;
1902
1903 delta_exec = update_curr_common(rq);
1904 if (unlikely(delta_exec <= 0))
1905 return;
1906
1907 if (curr->scx.slice != SCX_SLICE_INF) {
1908 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1909 if (!curr->scx.slice)
1910 touch_core_sched(rq, curr);
1911 }
1912 }
1913
1914 static bool scx_dsq_priq_less(struct rb_node *node_a,
1915 const struct rb_node *node_b)
1916 {
1917 const struct task_struct *a =
1918 container_of(node_a, struct task_struct, scx.dsq_priq);
1919 const struct task_struct *b =
1920 container_of(node_b, struct task_struct, scx.dsq_priq);
1921
1922 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1923 }
1924
1925 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
1926 {
1927 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1928 WRITE_ONCE(dsq->nr, dsq->nr + delta);
1929 }
1930
1931 static void refill_task_slice_dfl(struct task_struct *p)
1932 {
1933 p->scx.slice = SCX_SLICE_DFL;
1934 __scx_add_event(scx_root, SCX_EV_REFILL_SLICE_DFL, 1);
1935 }
1936
1937 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1938 struct task_struct *p, u64 enq_flags)
1939 {
1940 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1941
1942 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1943 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1944 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1945
1946 if (!is_local) {
1947 raw_spin_lock(&dsq->lock);
1948 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1949 scx_error(sch, "attempting to dispatch to a destroyed dsq");
1950 /* fall back to the global dsq */
1951 raw_spin_unlock(&dsq->lock);
1952 dsq = find_global_dsq(p);
1953 raw_spin_lock(&dsq->lock);
1954 }
1955 }
1956
1957 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1958 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1959 /*
1960 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1961 * their FIFO queues. To avoid confusion and accidentally
1962 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1963 * disallow any internal DSQ from doing vtime ordering of
1964 * tasks.
1965 */
1966 scx_error(sch, "cannot use vtime ordering for built-in DSQs");
1967 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1968 }
1969
1970 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1971 struct rb_node *rbp;
1972
1973 /*
1974 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1975 * linked to both the rbtree and list on PRIQs, this can only be
1976 * tested easily when adding the first task.
1977 */
1978 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1979 nldsq_next_task(dsq, NULL, false)))
1980 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1981 dsq->id);
1982
1983 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1984 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1985
1986 /*
1987 * Find the previous task and insert after it on the list so
1988 * that @dsq->list is vtime ordered.
1989 */
1990 rbp = rb_prev(&p->scx.dsq_priq);
1991 if (rbp) {
1992 struct task_struct *prev =
1993 container_of(rbp, struct task_struct,
1994 scx.dsq_priq);
1995 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1996 } else {
1997 list_add(&p->scx.dsq_list.node, &dsq->list);
1998 }
1999 } else {
2000 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
2001 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
2002 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
2003 dsq->id);
2004
2005 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2006 list_add(&p->scx.dsq_list.node, &dsq->list);
2007 else
2008 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
2009 }
2010
2011 /* seq records the order tasks are queued, used by BPF DSQ iterator */
2012 dsq->seq++;
2013 p->scx.dsq_seq = dsq->seq;
2014
2015 dsq_mod_nr(dsq, 1);
2016 p->scx.dsq = dsq;
2017
2018 /*
2019 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
2020 * direct dispatch path, but we clear them here because the direct
2021 * dispatch verdict may be overridden on the enqueue path during e.g.
2022 * bypass.
2023 */
2024 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
2025 p->scx.ddsp_enq_flags = 0;
2026
2027 /*
2028 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
2029 * match waiters' load_acquire.
2030 */
2031 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
2032 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2033
2034 if (is_local) {
2035 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
2036 bool preempt = false;
2037
2038 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
2039 rq->curr->sched_class == &ext_sched_class) {
2040 rq->curr->scx.slice = 0;
2041 preempt = true;
2042 }
2043
2044 if (preempt || sched_class_above(&ext_sched_class,
2045 rq->curr->sched_class))
2046 resched_curr(rq);
2047 } else {
2048 raw_spin_unlock(&dsq->lock);
2049 }
2050 }
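
/*
 * For reference, the vtime ordering handled above is driven from the BPF
 * side roughly like this (illustrative sketch; MY_DSQ is a made-up user DSQ
 * ID created earlier with scx_bpf_create_dsq()):
 *
 *	scx_bpf_dsq_insert_vtime(p, MY_DSQ, SCX_SLICE_DFL,
 *				 p->scx.dsq_vtime, 0);
 *
 * which reaches this function with %SCX_ENQ_DSQ_PRIQ set. Plain
 * scx_bpf_dsq_insert() takes the FIFO path instead and, per the checks
 * above, the two must not be mixed on the same user DSQ, nor may vtime
 * insertion target the built-in local/global DSQs.
 */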
2051
2052 static void task_unlink_from_dsq(struct task_struct *p,
2053 struct scx_dispatch_q *dsq)
2054 {
2055 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
2056
2057 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
2058 rb_erase(&p->scx.dsq_priq, &dsq->priq);
2059 RB_CLEAR_NODE(&p->scx.dsq_priq);
2060 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
2061 }
2062
2063 list_del_init(&p->scx.dsq_list.node);
2064 dsq_mod_nr(dsq, -1);
2065 }
2066
2067 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
2068 {
2069 struct scx_dispatch_q *dsq = p->scx.dsq;
2070 bool is_local = dsq == &rq->scx.local_dsq;
2071
2072 if (!dsq) {
2073 /*
2074 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
2075 * Unlinking is all that's needed to cancel.
2076 */
2077 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
2078 list_del_init(&p->scx.dsq_list.node);
2079
2080 /*
2081 * When dispatching directly from the BPF scheduler to a local
2082 * DSQ, the task isn't associated with any DSQ but
2083 * @p->scx.holding_cpu may be set under the protection of
2084 * %SCX_OPSS_DISPATCHING.
2085 */
2086 if (p->scx.holding_cpu >= 0)
2087 p->scx.holding_cpu = -1;
2088
2089 return;
2090 }
2091
2092 if (!is_local)
2093 raw_spin_lock(&dsq->lock);
2094
2095 /*
2096 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
2097 * change underneath us.
2098 */
2099 if (p->scx.holding_cpu < 0) {
2100 /* @p must still be on @dsq, dequeue */
2101 task_unlink_from_dsq(p, dsq);
2102 } else {
2103 /*
2104 * We're racing against dispatch_to_local_dsq() which already
2105 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
2106 * holding_cpu which tells dispatch_to_local_dsq() that it lost
2107 * the race.
2108 */
2109 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
2110 p->scx.holding_cpu = -1;
2111 }
2112 p->scx.dsq = NULL;
2113
2114 if (!is_local)
2115 raw_spin_unlock(&dsq->lock);
2116 }
2117
2118 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
2119 struct rq *rq, u64 dsq_id,
2120 struct task_struct *p)
2121 {
2122 struct scx_dispatch_q *dsq;
2123
2124 if (dsq_id == SCX_DSQ_LOCAL)
2125 return &rq->scx.local_dsq;
2126
2127 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
2128 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
2129
2130 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
2131 return find_global_dsq(p);
2132
2133 return &cpu_rq(cpu)->scx.local_dsq;
2134 }
2135
2136 if (dsq_id == SCX_DSQ_GLOBAL)
2137 dsq = find_global_dsq(p);
2138 else
2139 dsq = find_user_dsq(sch, dsq_id);
2140
2141 if (unlikely(!dsq)) {
2142 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]",
2143 dsq_id, p->comm, p->pid);
2144 return find_global_dsq(p);
2145 }
2146
2147 return dsq;
2148 }
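
/*
 * The dsq_id values decoded above correspond to what a BPF scheduler passes
 * to the insert kfuncs. An illustrative sketch (values only, not tree code):
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		insert into the local DSQ of the task's current CPU
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0);
 *		insert into the local DSQ of a specific @cpu
 *	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
 *		insert into the global DSQ for the task's node
 *	scx_bpf_dsq_insert(p, my_dsq_id, SCX_SLICE_DFL, 0);
 *		insert into a user DSQ created with scx_bpf_create_dsq()
 *
 * Invalid CPUs and non-existent user DSQs fall back to the global DSQ with
 * an ops error, as implemented above.
 */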
2149
2150 static void mark_direct_dispatch(struct task_struct *ddsp_task,
2151 struct task_struct *p, u64 dsq_id,
2152 u64 enq_flags)
2153 {
2154 /*
2155 * Mark that dispatch already happened from ops.select_cpu() or
2156 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
2157 * which can never match a valid task pointer.
2158 */
2159 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
2160
2161 /* @p must match the task on the enqueue path */
2162 if (unlikely(p != ddsp_task)) {
2163 if (IS_ERR(ddsp_task))
2164 scx_kf_error("%s[%d] already direct-dispatched",
2165 p->comm, p->pid);
2166 else
2167 scx_kf_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
2168 ddsp_task->comm, ddsp_task->pid,
2169 p->comm, p->pid);
2170 return;
2171 }
2172
2173 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
2174 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
2175
2176 p->scx.ddsp_dsq_id = dsq_id;
2177 p->scx.ddsp_enq_flags = enq_flags;
2178 }
2179
2180 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
2181 u64 enq_flags)
2182 {
2183 struct rq *rq = task_rq(p);
2184 struct scx_dispatch_q *dsq =
2185 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
2186
2187 touch_core_sched_dispatch(rq, p);
2188
2189 p->scx.ddsp_enq_flags |= enq_flags;
2190
2191 /*
2192 * We are in the enqueue path with @rq locked and pinned, and thus can't
2193 * double lock a remote rq and enqueue to its local DSQ. For
2194 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
2195 * the enqueue so that it's executed when @rq can be unlocked.
2196 */
2197 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
2198 unsigned long opss;
2199
2200 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
2201
2202 switch (opss & SCX_OPSS_STATE_MASK) {
2203 case SCX_OPSS_NONE:
2204 break;
2205 case SCX_OPSS_QUEUEING:
2206 /*
2207 * As @p was never passed to the BPF side, _release is
2208 * not strictly necessary. Still do it for consistency.
2209 */
2210 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2211 break;
2212 default:
2213 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
2214 p->comm, p->pid, opss);
2215 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2216 break;
2217 }
2218
2219 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
2220 list_add_tail(&p->scx.dsq_list.node,
2221 &rq->scx.ddsp_deferred_locals);
2222 schedule_deferred(rq);
2223 return;
2224 }
2225
2226 dispatch_enqueue(sch, dsq, p,
2227 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
2228 }
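
/*
 * Direct dispatch is typically triggered from the BPF side along the lines
 * of the following ops.select_cpu() sketch (name hypothetical, based on the
 * default idle-selection kfunc):
 *
 *	s32 my_select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 *
 * The insert call is recorded by mark_direct_dispatch() above and the actual
 * dispatch is completed here once the enqueue path runs.
 */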
2229
2230 static bool scx_rq_online(struct rq *rq)
2231 {
2232 /*
2233 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
2234 * the online state as seen from the BPF scheduler. cpu_active() test
2235 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
2236 * stay set until the current scheduling operation is complete even if
2237 * we aren't locking @rq.
2238 */
2239 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
2240 }
2241
2242 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
2243 int sticky_cpu)
2244 {
2245 struct scx_sched *sch = scx_root;
2246 struct task_struct **ddsp_taskp;
2247 unsigned long qseq;
2248
2249 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
2250
2251 /* rq migration */
2252 if (sticky_cpu == cpu_of(rq))
2253 goto local_norefill;
2254
2255 /*
2256 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
2257 * is offline and are just running the hotplug path. Don't bother the
2258 * BPF scheduler.
2259 */
2260 if (!scx_rq_online(rq))
2261 goto local;
2262
2263 if (scx_rq_bypassing(rq)) {
2264 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
2265 goto global;
2266 }
2267
2268 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2269 goto direct;
2270
2271 /* see %SCX_OPS_ENQ_EXITING */
2272 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
2273 unlikely(p->flags & PF_EXITING)) {
2274 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
2275 goto local;
2276 }
2277
2278 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
2279 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
2280 is_migration_disabled(p)) {
2281 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
2282 goto local;
2283 }
2284
2285 if (unlikely(!SCX_HAS_OP(sch, enqueue)))
2286 goto global;
2287
2288 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
2289 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
2290
2291 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2292 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
2293
2294 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2295 WARN_ON_ONCE(*ddsp_taskp);
2296 *ddsp_taskp = p;
2297
2298 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
2299
2300 *ddsp_taskp = NULL;
2301 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
2302 goto direct;
2303
2304 /*
2305 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
2306 * dequeue may be waiting. The store_release matches their load_acquire.
2307 */
2308 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
2309 return;
2310
2311 direct:
2312 direct_dispatch(sch, p, enq_flags);
2313 return;
2314
2315 local:
2316 /*
2317 * For task-ordering, slice refill must be treated as implying the end
2318 * of the current slice. Otherwise, the longer @p stays on the CPU, the
2319 * higher priority it becomes from scx_prio_less()'s POV.
2320 */
2321 touch_core_sched(rq, p);
2322 refill_task_slice_dfl(p);
2323 local_norefill:
2324 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
2325 return;
2326
2327 global:
2328 touch_core_sched(rq, p); /* see the comment in local: */
2329 refill_task_slice_dfl(p);
2330 dispatch_enqueue(sch, find_global_dsq(p), p, enq_flags);
2331 }
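
/*
 * For orientation, the ops.enqueue() invoked above is commonly a thin
 * wrapper on the BPF side; a minimal sketch (SHARED_DSQ is a made-up user
 * DSQ ID, names illustrative):
 *
 *	void my_enqueue(struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 * Any insert issued from this context, whether to a user DSQ or to
 * SCX_DSQ_LOCAL[_ON], is recorded by mark_direct_dispatch() and completed by
 * direct_dispatch() above after the callback returns.
 */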
2332
2333 static bool task_runnable(const struct task_struct *p)
2334 {
2335 return !list_empty(&p->scx.runnable_node);
2336 }
2337
2338 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2339 {
2340 lockdep_assert_rq_held(rq);
2341
2342 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2343 p->scx.runnable_at = jiffies;
2344 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2345 }
2346
2347 /*
2348 * list_add_tail() must be used. scx_bypass() depends on tasks being
2349 * appended to the runnable_list.
2350 */
2351 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2352 }
2353
2354 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2355 {
2356 list_del_init(&p->scx.runnable_node);
2357 if (reset_runnable_at)
2358 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2359 }
2360
2361 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2362 {
2363 struct scx_sched *sch = scx_root;
2364 int sticky_cpu = p->scx.sticky_cpu;
2365
2366 if (enq_flags & ENQUEUE_WAKEUP)
2367 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2368
2369 enq_flags |= rq->scx.extra_enq_flags;
2370
2371 if (sticky_cpu >= 0)
2372 p->scx.sticky_cpu = -1;
2373
2374 /*
2375 * Restoring a running task will be immediately followed by
2376 * set_next_task_scx() which expects the task to not be on the BPF
2377 * scheduler as tasks can only start running through local DSQs. Force
2378 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2379 */
2380 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2381 sticky_cpu = cpu_of(rq);
2382
2383 if (p->scx.flags & SCX_TASK_QUEUED) {
2384 WARN_ON_ONCE(!task_runnable(p));
2385 goto out;
2386 }
2387
2388 set_task_runnable(rq, p);
2389 p->scx.flags |= SCX_TASK_QUEUED;
2390 rq->scx.nr_running++;
2391 add_nr_running(rq, 1);
2392
2393 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
2394 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
2395
2396 if (enq_flags & SCX_ENQ_WAKEUP)
2397 touch_core_sched(rq, p);
2398
2399 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2400 out:
2401 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2402
2403 if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
2404 unlikely(cpu_of(rq) != p->scx.selected_cpu))
2405 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
2406 }
2407
2408 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
2409 {
2410 struct scx_sched *sch = scx_root;
2411 unsigned long opss;
2412
2413 /* dequeue is always temporary, don't reset runnable_at */
2414 clr_task_runnable(p, false);
2415
2416 /* acquire ensures that we see the preceding updates on QUEUED */
2417 opss = atomic_long_read_acquire(&p->scx.ops_state);
2418
2419 switch (opss & SCX_OPSS_STATE_MASK) {
2420 case SCX_OPSS_NONE:
2421 break;
2422 case SCX_OPSS_QUEUEING:
2423 /*
2424 * QUEUEING is started and finished while holding @p's rq lock.
2425 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2426 */
2427 BUG();
2428 case SCX_OPSS_QUEUED:
2429 if (SCX_HAS_OP(sch, dequeue))
2430 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
2431 p, deq_flags);
2432
2433 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2434 SCX_OPSS_NONE))
2435 break;
2436 fallthrough;
2437 case SCX_OPSS_DISPATCHING:
2438 /*
2439 * If @p is being dispatched from the BPF scheduler to a DSQ,
2440 * wait for the transfer to complete so that @p doesn't get
2441 * added to its DSQ after dequeueing is complete.
2442 *
2443 * As we're waiting on DISPATCHING with the rq locked, the
2444 * dispatching side shouldn't try to lock the rq while
2445 * DISPATCHING is set. See dispatch_to_local_dsq().
2446 *
2447 * DISPATCHING shouldn't have qseq set and control can reach
2448 * here with NONE @opss from the above QUEUED case block.
2449 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2450 */
2451 wait_ops_state(p, SCX_OPSS_DISPATCHING);
2452 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2453 break;
2454 }
2455 }
2456
2457 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
2458 {
2459 struct scx_sched *sch = scx_root;
2460
2461 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2462 WARN_ON_ONCE(task_runnable(p));
2463 return true;
2464 }
2465
2466 ops_dequeue(rq, p, deq_flags);
2467
2468 /*
2469 * A currently running task which is going off @rq first gets dequeued
2470 * and then stops running. As we want running <-> stopping transitions
2471 * to be contained within runnable <-> quiescent transitions, trigger
2472 * ->stopping() early here instead of in put_prev_task_scx().
2473 *
2474 * @p may go through multiple stopping <-> running transitions between
2475 * here and put_prev_task_scx() if task attribute changes occur while
2476 * balance_scx() leaves @rq unlocked. However, they don't contain any
2477 * information meaningful to the BPF scheduler and can be suppressed by
2478 * skipping the callbacks if the task is !QUEUED.
2479 */
2480 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
2481 update_curr_scx(rq);
2482 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
2483 }
2484
2485 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
2486 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);
2487
2488 if (deq_flags & SCX_DEQ_SLEEP)
2489 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2490 else
2491 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2492
2493 p->scx.flags &= ~SCX_TASK_QUEUED;
2494 rq->scx.nr_running--;
2495 sub_nr_running(rq, 1);
2496
2497 dispatch_dequeue(rq, p);
2498 return true;
2499 }
2500
2501 static void yield_task_scx(struct rq *rq)
2502 {
2503 struct scx_sched *sch = scx_root;
2504 struct task_struct *p = rq->curr;
2505
2506 if (SCX_HAS_OP(sch, yield))
2507 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
2508 else
2509 p->scx.slice = 0;
2510 }
2511
2512 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2513 {
2514 struct scx_sched *sch = scx_root;
2515 struct task_struct *from = rq->curr;
2516
2517 if (SCX_HAS_OP(sch, yield))
2518 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
2519 from, to);
2520 else
2521 return false;
2522 }
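
/*
 * An ops.yield() implementation mirroring the default behavior above might
 * look like this illustrative sketch (names hypothetical):
 *
 *	bool my_yield(struct task_struct *from, struct task_struct *to)
 *	{
 *		if (!to)
 *			from->scx.slice = 0;
 *		return false;
 *	}
 *
 * Returning %true when @to is set would indicate that the yield_to request
 * was honored by the BPF scheduler.
 */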
2523
2524 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2525 struct scx_dispatch_q *src_dsq,
2526 struct rq *dst_rq)
2527 {
2528 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2529
2530 /* @dsq is locked and @p is on @dst_rq */
2531 lockdep_assert_held(&src_dsq->lock);
2532 lockdep_assert_rq_held(dst_rq);
2533
2534 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2535
2536 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2537 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2538 else
2539 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2540
2541 dsq_mod_nr(dst_dsq, 1);
2542 p->scx.dsq = dst_dsq;
2543 }
2544
2545 #ifdef CONFIG_SMP
2546 /**
2547 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2548 * @p: task to move
2549 * @enq_flags: %SCX_ENQ_*
2550 * @src_rq: rq to move the task from, locked on entry, released on return
2551 * @dst_rq: rq to move the task into, locked on return
2552 *
2553 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2554 */
2555 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2556 struct rq *src_rq, struct rq *dst_rq)
2557 {
2558 lockdep_assert_rq_held(src_rq);
2559
2560 /* the following marks @p MIGRATING which excludes dequeue */
2561 deactivate_task(src_rq, p, 0);
2562 set_task_cpu(p, cpu_of(dst_rq));
2563 p->scx.sticky_cpu = cpu_of(dst_rq);
2564
2565 raw_spin_rq_unlock(src_rq);
2566 raw_spin_rq_lock(dst_rq);
2567
2568 /*
2569 * We want to pass scx-specific enq_flags but activate_task() will
2570  * truncate the upper 32 bits. As we own @rq, we can pass them through
2571 * @rq->scx.extra_enq_flags instead.
2572 */
2573 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2574 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2575 dst_rq->scx.extra_enq_flags = enq_flags;
2576 activate_task(dst_rq, p, 0);
2577 dst_rq->scx.extra_enq_flags = 0;
2578 }
2579
2580 /*
2581 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2582 * differences:
2583 *
2584 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2585 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2586 * this CPU?".
2587 *
2588 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2589 * must be allowed to finish on the CPU that it's currently on regardless of
2590 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2591 * BPF scheduler shouldn't attempt to migrate a task which has migration
2592 * disabled.
2593 *
2594 * - The BPF scheduler is bypassed while the rq is offline and we can always say
2595 * no to the BPF scheduler initiated migrations while offline.
2596 *
2597 * The caller must ensure that @p and @rq are on different CPUs.
2598 */
2599 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
2600 struct task_struct *p, struct rq *rq,
2601 bool enforce)
2602 {
2603 int cpu = cpu_of(rq);
2604
2605 WARN_ON_ONCE(task_cpu(p) == cpu);
2606
2607 /*
2608 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2609 * the pinned CPU in migrate_disable_switch() while @p is being switched
2610 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2611  * updated and thus another CPU may see @p on a DSQ in between, leading to
2612 * @p passing the below task_allowed_on_cpu() check while migration is
2613 * disabled.
2614 *
2615 * Test the migration disabled state first as the race window is narrow
2616 * and the BPF scheduler failing to check migration disabled state can
2617 * easily be masked if task_allowed_on_cpu() is done first.
2618 */
2619 if (unlikely(is_migration_disabled(p))) {
2620 if (enforce)
2621 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2622 p->comm, p->pid, task_cpu(p), cpu);
2623 return false;
2624 }
2625
2626 /*
2627 * We don't require the BPF scheduler to avoid dispatching to offline
2628 * CPUs mostly for convenience but also because CPUs can go offline
2629 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2630 * picked CPU is outside the allowed mask.
2631 */
2632 if (!task_allowed_on_cpu(p, cpu)) {
2633 if (enforce)
2634 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2635 cpu, p->comm, p->pid);
2636 return false;
2637 }
2638
2639 if (!scx_rq_online(rq)) {
2640 if (enforce)
2641 __scx_add_event(scx_root,
2642 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
2643 return false;
2644 }
2645
2646 return true;
2647 }
2648
2649 /**
2650 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2651 * @p: target task
2652 * @dsq: locked DSQ @p is currently on
2653 * @src_rq: rq @p is currently on, stable with @dsq locked
2654 *
2655 * Called with @dsq locked but no rq's locked. We want to move @p to a different
2656 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2657 * required when transferring into a local DSQ. Even when transferring into a
2658 * non-local DSQ, it's better to use the same mechanism to protect against
2659 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2660 * @src_rq is locked, which e.g. scx_dump_task() depends on.
2661 *
2662 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2663 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2664 * this may race with dequeue, which can't drop the rq lock or fail, do a little
2665 * dancing from our side.
2666 *
2667 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2668 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2669 * would be cleared to -1. While other cpus may have updated it to different
2670 * values afterwards, as this operation can't be preempted or recurse, the
2671 * holding_cpu can never become this CPU again before we're done. Thus, we can
2672 * tell whether we lost to dequeue by testing whether the holding_cpu still
2673 * points to this CPU. See dispatch_dequeue() for the counterpart.
2674 *
2675 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2676 * still valid. %false if lost to dequeue.
2677 */
2678 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2679 struct scx_dispatch_q *dsq,
2680 struct rq *src_rq)
2681 {
2682 s32 cpu = raw_smp_processor_id();
2683
2684 lockdep_assert_held(&dsq->lock);
2685
2686 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2687 task_unlink_from_dsq(p, dsq);
2688 p->scx.holding_cpu = cpu;
2689
2690 raw_spin_unlock(&dsq->lock);
2691 raw_spin_rq_lock(src_rq);
2692
2693 /* task_rq couldn't have changed if we're still the holding cpu */
2694 return likely(p->scx.holding_cpu == cpu) &&
2695 !WARN_ON_ONCE(src_rq != task_rq(p));
2696 }
2697
2698 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
2699 struct scx_dispatch_q *dsq, struct rq *src_rq)
2700 {
2701 raw_spin_rq_unlock(this_rq);
2702
2703 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2704 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
2705 return true;
2706 } else {
2707 raw_spin_rq_unlock(src_rq);
2708 raw_spin_rq_lock(this_rq);
2709 return false;
2710 }
2711 }
2712 #else /* CONFIG_SMP */
2713 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
2714 static inline bool task_can_run_on_remote_rq(struct scx_sched *sch, struct task_struct *p, struct rq *rq, bool enforce) { return false; }
2715 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
2716 #endif /* CONFIG_SMP */
2717
2718 /**
2719 * move_task_between_dsqs() - Move a task from one DSQ to another
2720 * @sch: scx_sched being operated on
2721 * @p: target task
2722 * @enq_flags: %SCX_ENQ_*
2723 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2724 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2725 *
2726 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2727 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2728 * will change. As @p's task_rq is locked, this function doesn't need to use the
2729 * holding_cpu mechanism.
2730 *
2731 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2732 * return value, is locked.
2733 */
2734 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
2735 struct task_struct *p, u64 enq_flags,
2736 struct scx_dispatch_q *src_dsq,
2737 struct scx_dispatch_q *dst_dsq)
2738 {
2739 struct rq *src_rq = task_rq(p), *dst_rq;
2740
2741 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2742 lockdep_assert_held(&src_dsq->lock);
2743 lockdep_assert_rq_held(src_rq);
2744
2745 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2746 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2747 if (src_rq != dst_rq &&
2748 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2749 dst_dsq = find_global_dsq(p);
2750 dst_rq = src_rq;
2751 }
2752 } else {
2753 /* no need to migrate if destination is a non-local DSQ */
2754 dst_rq = src_rq;
2755 }
2756
2757 /*
2758 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
2759 * CPU, @p will be migrated.
2760 */
2761 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2762 /* @p is going from a non-local DSQ to a local DSQ */
2763 if (src_rq == dst_rq) {
2764 task_unlink_from_dsq(p, src_dsq);
2765 move_local_task_to_local_dsq(p, enq_flags,
2766 src_dsq, dst_rq);
2767 raw_spin_unlock(&src_dsq->lock);
2768 } else {
2769 raw_spin_unlock(&src_dsq->lock);
2770 move_remote_task_to_local_dsq(p, enq_flags,
2771 src_rq, dst_rq);
2772 }
2773 } else {
2774 /*
2775 * @p is going from a non-local DSQ to a non-local DSQ. As
2776 * $src_dsq is already locked, do an abbreviated dequeue.
2777 */
2778 task_unlink_from_dsq(p, src_dsq);
2779 p->scx.dsq = NULL;
2780 raw_spin_unlock(&src_dsq->lock);
2781
2782 dispatch_enqueue(sch, dst_dsq, p, enq_flags);
2783 }
2784
2785 return dst_rq;
2786 }
2787
2788 /*
2789 * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
2790 * banging on the same DSQ on a large NUMA system to the point where switching
2791 * to the bypass mode can take a long time. Inject artificial delays while the
2792 * bypass mode is switching to guarantee timely completion.
2793 */
2794 static void scx_breather(struct rq *rq)
2795 {
2796 u64 until;
2797
2798 lockdep_assert_rq_held(rq);
2799
2800 if (likely(!atomic_read(&scx_breather_depth)))
2801 return;
2802
2803 raw_spin_rq_unlock(rq);
2804
2805 until = ktime_get_ns() + NSEC_PER_MSEC;
2806
2807 do {
2808 int cnt = 1024;
2809 while (atomic_read(&scx_breather_depth) && --cnt)
2810 cpu_relax();
2811 } while (atomic_read(&scx_breather_depth) &&
2812 time_before64(ktime_get_ns(), until));
2813
2814 raw_spin_rq_lock(rq);
2815 }
2816
2817 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
2818 struct scx_dispatch_q *dsq)
2819 {
2820 struct task_struct *p;
2821 retry:
2822 /*
2823 * This retry loop can repeatedly race against scx_bypass() dequeueing
2824 * tasks from @dsq trying to put the system into the bypass mode. On
2825 * some multi-socket machines (e.g. 2x Intel 8480c), this can live-lock
2826 * the machine into soft lockups. Give a breather.
2827 */
2828 scx_breather(rq);
2829
2830 /*
2831 * The caller can't expect to successfully consume a task if the task's
2832 * addition to @dsq isn't guaranteed to be visible somehow. Test
2833 * @dsq->list without locking and skip if it seems empty.
2834 */
2835 if (list_empty(&dsq->list))
2836 return false;
2837
2838 raw_spin_lock(&dsq->lock);
2839
2840 nldsq_for_each_task(p, dsq) {
2841 struct rq *task_rq = task_rq(p);
2842
2843 if (rq == task_rq) {
2844 task_unlink_from_dsq(p, dsq);
2845 move_local_task_to_local_dsq(p, 0, dsq, rq);
2846 raw_spin_unlock(&dsq->lock);
2847 return true;
2848 }
2849
2850 if (task_can_run_on_remote_rq(sch, p, rq, false)) {
2851 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
2852 return true;
2853 goto retry;
2854 }
2855 }
2856
2857 raw_spin_unlock(&dsq->lock);
2858 return false;
2859 }
2860
2861 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
2862 {
2863 int node = cpu_to_node(cpu_of(rq));
2864
2865 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
2866 }
2867
2868 /**
2869 * dispatch_to_local_dsq - Dispatch a task to a local dsq
2870 * @sch: scx_sched being operated on
2871 * @rq: current rq which is locked
2872 * @dst_dsq: destination DSQ
2873 * @p: task to dispatch
2874 * @enq_flags: %SCX_ENQ_*
2875 *
2876 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2877 * DSQ. This function performs all the synchronization dancing needed because
2878 * local DSQs are protected with rq locks.
2879 *
2880 * The caller must have exclusive ownership of @p (e.g. through
2881 * %SCX_OPSS_DISPATCHING).
2882 */
2883 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
2884 struct scx_dispatch_q *dst_dsq,
2885 struct task_struct *p, u64 enq_flags)
2886 {
2887 struct rq *src_rq = task_rq(p);
2888 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2889 #ifdef CONFIG_SMP
2890 struct rq *locked_rq = rq;
2891 #endif
2892
2893 /*
2894 * We're synchronized against dequeue through DISPATCHING. As @p can't
2895 * be dequeued, its task_rq and cpus_allowed are stable too.
2896 *
2897 * If dispatching to @rq that @p is already on, no lock dancing needed.
2898 */
2899 if (rq == src_rq && rq == dst_rq) {
2900 dispatch_enqueue(sch, dst_dsq, p,
2901 enq_flags | SCX_ENQ_CLEAR_OPSS);
2902 return;
2903 }
2904
2905 #ifdef CONFIG_SMP
2906 if (src_rq != dst_rq &&
2907 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2908 dispatch_enqueue(sch, find_global_dsq(p), p,
2909 enq_flags | SCX_ENQ_CLEAR_OPSS);
2910 return;
2911 }
2912
2913 /*
2914 * @p is on a possibly remote @src_rq which we need to lock to move the
2915 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2916 * on DISPATCHING, so we can't grab @src_rq lock while holding
2917 * DISPATCHING.
2918 *
2919 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2920 * we're moving from a DSQ and use the same mechanism - mark the task
2921 * under transfer with holding_cpu, release DISPATCHING and then follow
2922 * the same protocol. See unlink_dsq_and_lock_src_rq().
2923 */
2924 p->scx.holding_cpu = raw_smp_processor_id();
2925
2926 /* store_release ensures that dequeue sees the above */
2927 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2928
2929 /* switch to @src_rq lock */
2930 if (locked_rq != src_rq) {
2931 raw_spin_rq_unlock(locked_rq);
2932 locked_rq = src_rq;
2933 raw_spin_rq_lock(src_rq);
2934 }
2935
2936 /* task_rq couldn't have changed if we're still the holding cpu */
2937 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2938 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2939 /*
2940 * If @p is staying on the same rq, there's no need to go
2941 * through the full deactivate/activate cycle. Optimize by
2942 * abbreviating move_remote_task_to_local_dsq().
2943 */
2944 if (src_rq == dst_rq) {
2945 p->scx.holding_cpu = -1;
2946 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
2947 enq_flags);
2948 } else {
2949 move_remote_task_to_local_dsq(p, enq_flags,
2950 src_rq, dst_rq);
2951 /* task has been moved to dst_rq, which is now locked */
2952 locked_rq = dst_rq;
2953 }
2954
2955 /* if the destination CPU is idle, wake it up */
2956 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2957 resched_curr(dst_rq);
2958 }
2959
2960 /* switch back to @rq lock */
2961 if (locked_rq != rq) {
2962 raw_spin_rq_unlock(locked_rq);
2963 raw_spin_rq_lock(rq);
2964 }
2965 #else /* CONFIG_SMP */
2966 BUG(); /* control can not reach here on UP */
2967 #endif /* CONFIG_SMP */
2968 }
2969
2970 /**
2971 * finish_dispatch - Asynchronously finish dispatching a task
2972 * @rq: current rq which is locked
2973 * @p: task to finish dispatching
2974 * @qseq_at_dispatch: qseq when @p started getting dispatched
2975 * @dsq_id: destination DSQ ID
2976 * @enq_flags: %SCX_ENQ_*
2977 *
2978 * Dispatching to local DSQs may need to wait for queueing to complete or
2979  * require rq lock dancing. As we don't want to do either while inside
2980 * ops.dispatch() to avoid locking order inversion, we split dispatching into
2981 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2982 * task and its qseq. Once ops.dispatch() returns, this function is called to
2983 * finish up.
2984 *
2985 * There is no guarantee that @p is still valid for dispatching or even that it
2986 * was valid in the first place. Make sure that the task is still owned by the
2987 * BPF scheduler and claim the ownership before dispatching.
2988 */
2989 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
2990 struct task_struct *p,
2991 unsigned long qseq_at_dispatch,
2992 u64 dsq_id, u64 enq_flags)
2993 {
2994 struct scx_dispatch_q *dsq;
2995 unsigned long opss;
2996
2997 touch_core_sched_dispatch(rq, p);
2998 retry:
2999 /*
3000 * No need for _acquire here. @p is accessed only after a successful
3001 * try_cmpxchg to DISPATCHING.
3002 */
3003 opss = atomic_long_read(&p->scx.ops_state);
3004
3005 switch (opss & SCX_OPSS_STATE_MASK) {
3006 case SCX_OPSS_DISPATCHING:
3007 case SCX_OPSS_NONE:
3008 /* someone else already got to it */
3009 return;
3010 case SCX_OPSS_QUEUED:
3011 /*
3012 * If qseq doesn't match, @p has gone through at least one
3013 * dispatch/dequeue and re-enqueue cycle between
3014 * scx_bpf_dsq_insert() and here and we have no claim on it.
3015 */
3016 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
3017 return;
3018
3019 /*
3020 * While we know @p is accessible, we don't yet have a claim on
3021 * it - the BPF scheduler is allowed to dispatch tasks
3022 * spuriously and there can be a racing dequeue attempt. Let's
3023 * claim @p by atomically transitioning it from QUEUED to
3024 * DISPATCHING.
3025 */
3026 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
3027 SCX_OPSS_DISPATCHING)))
3028 break;
3029 goto retry;
3030 case SCX_OPSS_QUEUEING:
3031 /*
3032 * do_enqueue_task() is in the process of transferring the task
3033 * to the BPF scheduler while holding @p's rq lock. As we aren't
3034 * holding any kernel or BPF resource that the enqueue path may
3035 * depend upon, it's safe to wait.
3036 */
3037 wait_ops_state(p, opss);
3038 goto retry;
3039 }
3040
3041 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
3042
3043 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p);
3044
3045 if (dsq->id == SCX_DSQ_LOCAL)
3046 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
3047 else
3048 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
3049 }
3050
3051 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
3052 {
3053 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
3054 u32 u;
3055
3056 for (u = 0; u < dspc->cursor; u++) {
3057 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
3058
3059 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
3060 ent->enq_flags);
3061 }
3062
3063 dspc->nr_tasks += dspc->cursor;
3064 dspc->cursor = 0;
3065 }
3066
3067 static int balance_one(struct rq *rq, struct task_struct *prev)
3068 {
3069 struct scx_sched *sch = scx_root;
3070 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
3071 bool prev_on_scx = prev->sched_class == &ext_sched_class;
3072 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
3073 int nr_loops = SCX_DSP_MAX_LOOPS;
3074
3075 lockdep_assert_rq_held(rq);
3076 rq->scx.flags |= SCX_RQ_IN_BALANCE;
3077 rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
3078
3079 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
3080 unlikely(rq->scx.cpu_released)) {
3081 /*
3082 * If the previous sched_class for the current CPU was not SCX,
3083 * notify the BPF scheduler that it again has control of the
3084 * core. This callback complements ->cpu_release(), which is
3085 * emitted in switch_class().
3086 */
3087 if (SCX_HAS_OP(sch, cpu_acquire))
3088 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq,
3089 cpu_of(rq), NULL);
3090 rq->scx.cpu_released = false;
3091 }
3092
3093 if (prev_on_scx) {
3094 update_curr_scx(rq);
3095
3096 /*
3097 * If @prev is runnable & has slice left, it has priority and
3098 * fetching more just increases latency for the fetched tasks.
3099 * Tell pick_task_scx() to keep running @prev. If the BPF
3100 * scheduler wants to handle this explicitly, it should
3101 * implement ->cpu_release().
3102 *
3103 * See scx_disable_workfn() for the explanation on the bypassing
3104 * test.
3105 */
3106 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
3107 rq->scx.flags |= SCX_RQ_BAL_KEEP;
3108 goto has_tasks;
3109 }
3110 }
3111
3112 /* if there already are tasks to run, nothing to do */
3113 if (rq->scx.local_dsq.nr)
3114 goto has_tasks;
3115
3116 if (consume_global_dsq(sch, rq))
3117 goto has_tasks;
3118
3119 if (unlikely(!SCX_HAS_OP(sch, dispatch)) ||
3120 scx_rq_bypassing(rq) || !scx_rq_online(rq))
3121 goto no_tasks;
3122
3123 dspc->rq = rq;
3124
3125 /*
3126 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
3127 * the local DSQ might still end up empty after a successful
3128 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
3129 * produced some tasks, retry. The BPF scheduler may depend on this
3130 * looping behavior to simplify its implementation.
3131 */
3132 do {
3133 dspc->nr_tasks = 0;
3134
3135 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq,
3136 cpu_of(rq), prev_on_scx ? prev : NULL);
3137
3138 flush_dispatch_buf(sch, rq);
3139
3140 if (prev_on_rq && prev->scx.slice) {
3141 rq->scx.flags |= SCX_RQ_BAL_KEEP;
3142 goto has_tasks;
3143 }
3144 if (rq->scx.local_dsq.nr)
3145 goto has_tasks;
3146 if (consume_global_dsq(sch, rq))
3147 goto has_tasks;
3148
3149 /*
3150 * ops.dispatch() can trap us in this loop by repeatedly
3151 * dispatching ineligible tasks. Break out once in a while to
3152 * allow the watchdog to run. As IRQ can't be enabled in
3153 * balance(), we want to complete this scheduling cycle and then
3154 * start a new one. IOW, we want to call resched_curr() on the
3155 * next, most likely idle, task, not the current one. Use
3156 * scx_bpf_kick_cpu() for deferred kicking.
3157 */
3158 if (unlikely(!--nr_loops)) {
3159 scx_bpf_kick_cpu(cpu_of(rq), 0);
3160 break;
3161 }
3162 } while (dspc->nr_tasks);
3163
3164 no_tasks:
3165 /*
3166 * Didn't find another task to run. Keep running @prev unless
3167 * %SCX_OPS_ENQ_LAST is in effect.
3168 */
3169 if (prev_on_rq &&
3170 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) {
3171 rq->scx.flags |= SCX_RQ_BAL_KEEP;
3172 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
3173 goto has_tasks;
3174 }
3175 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
3176 return false;
3177
3178 has_tasks:
3179 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
3180 return true;
3181 }
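
/*
 * The ops.dispatch() driven by the loop above is often a thin wrapper on the
 * BPF side; an illustrative sketch (SHARED_DSQ is a made-up user DSQ ID):
 *
 *	void my_dispatch(s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 *
 * scx_bpf_dsq_move_to_local() pulls a task from SHARED_DSQ into the local
 * DSQ of the dispatching CPU. Tasks may also be buffered with
 * scx_bpf_dsq_insert() from this context, which is what flush_dispatch_buf()
 * above finishes once the callback returns.
 */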
3182
3183 static int balance_scx(struct rq *rq, struct task_struct *prev,
3184 struct rq_flags *rf)
3185 {
3186 int ret;
3187
3188 rq_unpin_lock(rq, rf);
3189
3190 ret = balance_one(rq, prev);
3191
3192 #ifdef CONFIG_SCHED_SMT
3193 /*
3194 * When core-sched is enabled, this ops.balance() call will be followed
3195 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
3196 * siblings too.
3197 */
3198 if (sched_core_enabled(rq)) {
3199 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
3200 int scpu;
3201
3202 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
3203 struct rq *srq = cpu_rq(scpu);
3204 struct task_struct *sprev = srq->curr;
3205
3206 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
3207 update_rq_clock(srq);
3208 balance_one(srq, sprev);
3209 }
3210 }
3211 #endif
3212 rq_repin_lock(rq, rf);
3213
3214 return ret;
3215 }
3216
3217 static void process_ddsp_deferred_locals(struct rq *rq)
3218 {
3219 struct task_struct *p;
3220
3221 lockdep_assert_rq_held(rq);
3222
3223 /*
3224 * Now that @rq can be unlocked, execute the deferred enqueueing of
3225 * tasks directly dispatched to the local DSQs of other CPUs. See
3226 * direct_dispatch(). Keep popping from the head instead of using
3227 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
3228 * temporarily.
3229 */
3230 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
3231 struct task_struct, scx.dsq_list.node))) {
3232 struct scx_sched *sch = scx_root;
3233 struct scx_dispatch_q *dsq;
3234
3235 list_del_init(&p->scx.dsq_list.node);
3236
3237 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
3238 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
3239 dispatch_to_local_dsq(sch, rq, dsq, p,
3240 p->scx.ddsp_enq_flags);
3241 }
3242 }
3243
3244 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
3245 {
3246 struct scx_sched *sch = scx_root;
3247
3248 if (p->scx.flags & SCX_TASK_QUEUED) {
3249 /*
3250 * Core-sched might decide to execute @p before it is
3251 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
3252 */
3253 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
3254 dispatch_dequeue(rq, p);
3255 }
3256
3257 p->se.exec_start = rq_clock_task(rq);
3258
3259 /* see dequeue_task_scx() on why we skip when !QUEUED */
3260 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
3261 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p);
3262
3263 clr_task_runnable(p, true);
3264
3265 /*
3266 * @p is getting newly scheduled or got kicked after someone updated its
3267 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
3268 */
3269 if ((p->scx.slice == SCX_SLICE_INF) !=
3270 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
3271 if (p->scx.slice == SCX_SLICE_INF)
3272 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
3273 else
3274 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
3275
3276 sched_update_tick_dependency(rq);
3277
3278 /*
3279 * For now, let's refresh the load_avgs just when transitioning
3280 * in and out of nohz. In the future, we might want to add a
3281 * mechanism which calls the following periodically on
3282 * tick-stopped CPUs.
3283 */
3284 update_other_load_avgs(rq);
3285 }
3286 }
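/*
 * Illustrative note (not from the original source): a BPF scheduler that
 * wants a NO_HZ_FULL CPU to run tickless can set p->scx.slice to
 * %SCX_SLICE_INF for the task it keeps there. The check above then sets
 * %SCX_RQ_CAN_STOP_TICK and scx_can_stop_tick() permits stopping the tick;
 * assigning a finite slice again restores the tick dependency the next time
 * set_next_task_scx() runs for that task.
 */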
3287
3288 static enum scx_cpu_preempt_reason
3289 preempt_reason_from_class(const struct sched_class *class)
3290 {
3291 #ifdef CONFIG_SMP
3292 if (class == &stop_sched_class)
3293 return SCX_CPU_PREEMPT_STOP;
3294 #endif
3295 if (class == &dl_sched_class)
3296 return SCX_CPU_PREEMPT_DL;
3297 if (class == &rt_sched_class)
3298 return SCX_CPU_PREEMPT_RT;
3299 return SCX_CPU_PREEMPT_UNKNOWN;
3300 }
3301
3302 static void switch_class(struct rq *rq, struct task_struct *next)
3303 {
3304 struct scx_sched *sch = scx_root;
3305 const struct sched_class *next_class = next->sched_class;
3306
3307 #ifdef CONFIG_SMP
3308 /*
3309 * Pairs with the smp_load_acquire() issued by a CPU in
3310 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
3311 * resched.
3312 */
3313 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
3314 #endif
3315 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
3316 return;
3317
3318 /*
3319 * The callback is conceptually meant to convey that the CPU is no
3320 * longer under the control of SCX. Therefore, don't invoke the callback
3321 * if the next class is below SCX (in which case the BPF scheduler has
3322 * actively decided not to schedule any tasks on the CPU).
3323 */
3324 if (sched_class_above(&ext_sched_class, next_class))
3325 return;
3326
3327 /*
3328 * At this point we know that SCX was preempted by a higher priority
3329 * sched_class, so invoke the ->cpu_release() callback if we have not
3330 * done so already. We only send the callback once between SCX being
3331 * preempted, and it regaining control of the CPU.
3332 *
3333 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
3334 * next time that balance_scx() is invoked.
3335 */
3336 if (!rq->scx.cpu_released) {
3337 if (SCX_HAS_OP(sch, cpu_release)) {
3338 struct scx_cpu_release_args args = {
3339 .reason = preempt_reason_from_class(next_class),
3340 .task = next,
3341 };
3342
3343 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq,
3344 cpu_of(rq), &args);
3345 }
3346 rq->scx.cpu_released = true;
3347 }
3348 }
3349
3350 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3351 struct task_struct *next)
3352 {
3353 struct scx_sched *sch = scx_root;
3354 update_curr_scx(rq);
3355
3356 /* see dequeue_task_scx() on why we skip when !QUEUED */
3357 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3358 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true);
3359
3360 if (p->scx.flags & SCX_TASK_QUEUED) {
3361 set_task_runnable(rq, p);
3362
3363 /*
3364 * If @p has slice left and is being put, @p is getting
3365 * preempted by a higher priority scheduler class or core-sched
3366 * forcing a different task. Leave it at the head of the local
3367 * DSQ.
3368 */
3369 if (p->scx.slice && !scx_rq_bypassing(rq)) {
3370 dispatch_enqueue(sch, &rq->scx.local_dsq, p,
3371 SCX_ENQ_HEAD);
3372 goto switch_class;
3373 }
3374
3375 /*
3376 * If @p is runnable but we're about to enter a lower
3377 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3378 * ops.enqueue() that @p is the only one available for this cpu,
3379 * which should trigger an explicit follow-up scheduling event.
3380 */
3381 if (sched_class_above(&ext_sched_class, next->sched_class)) {
3382 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
3383 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3384 } else {
3385 do_enqueue_task(rq, p, 0, -1);
3386 }
3387 }
3388
3389 switch_class:
3390 if (next && next->sched_class != &ext_sched_class)
3391 switch_class(rq, next);
3392 }
3393
3394 static struct task_struct *first_local_task(struct rq *rq)
3395 {
3396 return list_first_entry_or_null(&rq->scx.local_dsq.list,
3397 struct task_struct, scx.dsq_list.node);
3398 }
3399
3400 static struct task_struct *pick_task_scx(struct rq *rq)
3401 {
3402 struct task_struct *prev = rq->curr;
3403 struct task_struct *p;
3404 bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3405 bool kick_idle = false;
3406
3407 /*
3408 * WORKAROUND:
3409 *
3410 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
3411 * have gone through balance_scx(). Unfortunately, there currently is a
3412 * bug where fair could say yes on balance() but no on pick_task(),
3413 * which then ends up calling pick_task_scx() without preceding
3414 * balance_scx().
3415 *
3416 * Keep running @prev if possible and avoid stalling from entering idle
3417 * without balancing.
3418 *
3419 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
3420 * if pick_task_scx() is called without preceding balance_scx().
3421 */
3422 if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3423 if (prev->scx.flags & SCX_TASK_QUEUED) {
3424 keep_prev = true;
3425 } else {
3426 keep_prev = false;
3427 kick_idle = true;
3428 }
3429 } else if (unlikely(keep_prev &&
3430 prev->sched_class != &ext_sched_class)) {
3431 /*
3432 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
3433 * conditional on scx_enabled() and may have been skipped.
3434 */
3435 WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
3436 keep_prev = false;
3437 }
3438
3439 /*
3440 * If balance_scx() is telling us to keep running @prev, replenish slice
3441 * if necessary and keep running @prev. Otherwise, pop the first one
3442 * from the local DSQ.
3443 */
3444 if (keep_prev) {
3445 p = prev;
3446 if (!p->scx.slice)
3447 refill_task_slice_dfl(p);
3448 } else {
3449 p = first_local_task(rq);
3450 if (!p) {
3451 if (kick_idle)
3452 scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
3453 return NULL;
3454 }
3455
3456 if (unlikely(!p->scx.slice)) {
3457 struct scx_sched *sch = scx_root;
3458
3459 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) {
3460 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3461 p->comm, p->pid, __func__);
3462 sch->warned_zero_slice = true;
3463 }
3464 refill_task_slice_dfl(p);
3465 }
3466 }
3467
3468 return p;
3469 }
3470
3471 #ifdef CONFIG_SCHED_CORE
3472 /**
3473 * scx_prio_less - Task ordering for core-sched
3474 * @a: task A
3475 * @b: task B
3476 * @in_fi: in forced idle state
3477 *
3478 * Core-sched is implemented as an additional scheduling layer on top of the
3479 * usual sched_class'es and needs to find out the expected task ordering. For
3480 * SCX, core-sched calls this function to interrogate the task ordering.
3481 *
3482 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3483 * to implement the default task ordering. The older the timestamp, the higher
3484 * priority the task - the global FIFO ordering matching the default scheduling
3485 * behavior.
3486 *
3487 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3488 * implement FIFO ordering within each local DSQ. See pick_task_scx().
3489 */
3490 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3491 bool in_fi)
3492 {
3493 struct scx_sched *sch = scx_root;
3494
3495 /*
3496 * The const qualifiers are dropped from task_struct pointers when
3497 * calling ops.core_sched_before(). Accesses are controlled by the
3498 * verifier.
3499 */
3500 if (SCX_HAS_OP(sch, core_sched_before) &&
3501 !scx_rq_bypassing(task_rq(a)))
3502 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before,
3503 NULL,
3504 (struct task_struct *)a,
3505 (struct task_struct *)b);
3506 else
3507 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3508 }
3509 #endif /* CONFIG_SCHED_CORE */
3510
3511 #ifdef CONFIG_SMP
3512
3513 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3514 {
3515 struct scx_sched *sch = scx_root;
3516 bool rq_bypass;
3517
3518 /*
3519 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3520 * can be a good migration opportunity with low cache and memory
3521 * footprint. Returning a CPU different than @prev_cpu triggers
3522 * immediate rq migration. However, for SCX, as the current rq
3523 * association doesn't dictate where the task is going to run, this
3524 * doesn't fit well. If necessary, we can later add a dedicated method
3525 * which can decide to preempt self to force it through the regular
3526 * scheduling path.
3527 */
3528 if (unlikely(wake_flags & WF_EXEC))
3529 return prev_cpu;
3530
3531 rq_bypass = scx_rq_bypassing(task_rq(p));
3532 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) {
3533 s32 cpu;
3534 struct task_struct **ddsp_taskp;
3535
3536 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3537 WARN_ON_ONCE(*ddsp_taskp);
3538 *ddsp_taskp = p;
3539
3540 cpu = SCX_CALL_OP_TASK_RET(sch,
3541 SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3542 select_cpu, NULL, p, prev_cpu,
3543 wake_flags);
3544 p->scx.selected_cpu = cpu;
3545 *ddsp_taskp = NULL;
3546 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
3547 return cpu;
3548 else
3549 return prev_cpu;
3550 } else {
3551 s32 cpu;
3552
3553 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
3554 if (cpu >= 0) {
3555 refill_task_slice_dfl(p);
3556 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3557 } else {
3558 cpu = prev_cpu;
3559 }
3560 p->scx.selected_cpu = cpu;
3561
3562 if (rq_bypass)
3563 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
3564 return cpu;
3565 }
3566 }
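/*
 * Hedged sketch (not part of this file): a minimal BPF-side ops.select_cpu()
 * that mirrors the default path above - pick an idle CPU via the built-in
 * helper and direct-dispatch to the local DSQ when one is found. The macro
 * and kfunc names (BPF_STRUCT_OPS, scx_bpf_select_cpu_dfl, scx_bpf_dsq_insert)
 * come from the BPF-side sched_ext API and may differ across versions.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */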
3567
3568 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3569 {
3570 run_deferred(rq);
3571 }
3572
3573 static void set_cpus_allowed_scx(struct task_struct *p,
3574 struct affinity_context *ac)
3575 {
3576 struct scx_sched *sch = scx_root;
3577
3578 set_cpus_allowed_common(p, ac);
3579
3580 /*
3581 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3582 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3583 * scheduler the effective one.
3584 *
3585 * Fine-grained memory write control is enforced by BPF making the const
3586 * designation pointless. Cast it away when calling the operation.
3587 */
3588 if (SCX_HAS_OP(sch, set_cpumask))
3589 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL,
3590 p, (struct cpumask *)p->cpus_ptr);
3591 }
3592
3593 static void handle_hotplug(struct rq *rq, bool online)
3594 {
3595 struct scx_sched *sch = scx_root;
3596 int cpu = cpu_of(rq);
3597
3598 atomic_long_inc(&scx_hotplug_seq);
3599
3600 /*
3601 * scx_root updates are protected by cpus_read_lock() and will stay
3602 * stable here. Note that we can't depend on scx_enabled() test as the
3603 * hotplug ops need to be enabled before __scx_enabled is set.
3604 */
3605 if (unlikely(!sch))
3606 return;
3607
3608 if (scx_enabled())
3609 scx_idle_update_selcpu_topology(&sch->ops);
3610
3611 if (online && SCX_HAS_OP(sch, cpu_online))
3612 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);
3613 else if (!online && SCX_HAS_OP(sch, cpu_offline))
3614 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu);
3615 else
3616 scx_exit(sch, SCX_EXIT_UNREG_KERN,
3617 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3618 "cpu %d going %s, exiting scheduler", cpu,
3619 online ? "online" : "offline");
3620 }
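/*
 * Note (illustrative): the exit code composed above lets the user-space
 * loader distinguish a hotplug-triggered unregistration from other
 * kernel-initiated exits. A loader that finds %SCX_ECODE_ACT_RESTART set in
 * the reported exit code may simply reload the same scheduler once the CPU
 * change has settled.
 */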
3621
3622 void scx_rq_activate(struct rq *rq)
3623 {
3624 handle_hotplug(rq, true);
3625 }
3626
3627 void scx_rq_deactivate(struct rq *rq)
3628 {
3629 handle_hotplug(rq, false);
3630 }
3631
3632 static void rq_online_scx(struct rq *rq)
3633 {
3634 rq->scx.flags |= SCX_RQ_ONLINE;
3635 }
3636
3637 static void rq_offline_scx(struct rq *rq)
3638 {
3639 rq->scx.flags &= ~SCX_RQ_ONLINE;
3640 }
3641
3642 #endif /* CONFIG_SMP */
3643
3644 static bool check_rq_for_timeouts(struct rq *rq)
3645 {
3646 struct scx_sched *sch;
3647 struct task_struct *p;
3648 struct rq_flags rf;
3649 bool timed_out = false;
3650
3651 rq_lock_irqsave(rq, &rf);
3652 sch = rcu_dereference_bh(scx_root);
3653 if (unlikely(!sch))
3654 goto out_unlock;
3655
3656 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3657 unsigned long last_runnable = p->scx.runnable_at;
3658
3659 if (unlikely(time_after(jiffies,
3660 last_runnable + scx_watchdog_timeout))) {
3661 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3662
3663 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3664 "%s[%d] failed to run for %u.%03us",
3665 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000);
3666 timed_out = true;
3667 break;
3668 }
3669 }
3670 out_unlock:
3671 rq_unlock_irqrestore(rq, &rf);
3672 return timed_out;
3673 }
3674
3675 static void scx_watchdog_workfn(struct work_struct *work)
3676 {
3677 int cpu;
3678
3679 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3680
3681 for_each_online_cpu(cpu) {
3682 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3683 break;
3684
3685 cond_resched();
3686 }
3687 queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3688 scx_watchdog_timeout / 2);
3689 }
3690
3691 void scx_tick(struct rq *rq)
3692 {
3693 struct scx_sched *sch;
3694 unsigned long last_check;
3695
3696 if (!scx_enabled())
3697 return;
3698
3699 sch = rcu_dereference_bh(scx_root);
3700 if (unlikely(!sch))
3701 return;
3702
3703 last_check = READ_ONCE(scx_watchdog_timestamp);
3704 if (unlikely(time_after(jiffies,
3705 last_check + READ_ONCE(scx_watchdog_timeout)))) {
3706 u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3707
3708 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3709 "watchdog failed to check in for %u.%03us",
3710 dur_ms / 1000, dur_ms % 1000);
3711 }
3712
3713 update_other_load_avgs(rq);
3714 }
3715
3716 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3717 {
3718 struct scx_sched *sch = scx_root;
3719
3720 update_curr_scx(rq);
3721
3722 /*
3723 * While disabling, always resched and refresh core-sched timestamp as
3724 * we can't trust the slice management or ops.core_sched_before().
3725 */
3726 if (scx_rq_bypassing(rq)) {
3727 curr->scx.slice = 0;
3728 touch_core_sched(rq, curr);
3729 } else if (SCX_HAS_OP(sch, tick)) {
3730 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr);
3731 }
3732
3733 if (!curr->scx.slice)
3734 resched_curr(rq);
3735 }
3736
3737 #ifdef CONFIG_EXT_GROUP_SCHED
3738 static struct cgroup *tg_cgrp(struct task_group *tg)
3739 {
3740 /*
3741 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3742 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3743 * root cgroup.
3744 */
3745 if (tg && tg->css.cgroup)
3746 return tg->css.cgroup;
3747 else
3748 return &cgrp_dfl_root.cgrp;
3749 }
3750
3751 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
3752
3753 #else /* CONFIG_EXT_GROUP_SCHED */
3754
3755 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3756
3757 #endif /* CONFIG_EXT_GROUP_SCHED */
3758
3759 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3760 {
3761 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3762 }
3763
3764 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3765 {
3766 enum scx_task_state prev_state = scx_get_task_state(p);
3767 bool warn = false;
3768
3769 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3770
3771 switch (state) {
3772 case SCX_TASK_NONE:
3773 break;
3774 case SCX_TASK_INIT:
3775 warn = prev_state != SCX_TASK_NONE;
3776 break;
3777 case SCX_TASK_READY:
3778 warn = prev_state == SCX_TASK_NONE;
3779 break;
3780 case SCX_TASK_ENABLED:
3781 warn = prev_state != SCX_TASK_READY;
3782 break;
3783 default:
3784 warn = true;
3785 return;
3786 }
3787
3788 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3789 prev_state, state, p->comm, p->pid);
3790
3791 p->scx.flags &= ~SCX_TASK_STATE_MASK;
3792 p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3793 }
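/*
 * For reference, the transitions driven by the helpers below are:
 *
 *   NONE    -> INIT     scx_init_task()    (after ops.init_task() succeeds)
 *   INIT    -> READY    scx_post_fork() and the scheduler enable path
 *   READY   -> ENABLED  scx_enable_task()  (task starts running on sched_ext)
 *   ENABLED -> READY    scx_disable_task() (task switches away from sched_ext)
 *   any     -> NONE     scx_exit_task()
 */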
3794
3795 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3796 {
3797 struct scx_sched *sch = scx_root;
3798 int ret;
3799
3800 p->scx.disallow = false;
3801
3802 if (SCX_HAS_OP(sch, init_task)) {
3803 struct scx_init_task_args args = {
3804 SCX_INIT_TASK_ARGS_CGROUP(tg)
3805 .fork = fork,
3806 };
3807
3808 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL,
3809 p, &args);
3810 if (unlikely(ret)) {
3811 ret = ops_sanitize_err(sch, "init_task", ret);
3812 return ret;
3813 }
3814 }
3815
3816 scx_set_task_state(p, SCX_TASK_INIT);
3817
3818 if (p->scx.disallow) {
3819 if (!fork) {
3820 struct rq *rq;
3821 struct rq_flags rf;
3822
3823 rq = task_rq_lock(p, &rf);
3824
3825 /*
3826 * We're in the load path and @p->policy will be applied
3827 * right after. Reverting @p->policy here and rejecting
3828 * %SCHED_EXT transitions from scx_check_setscheduler()
3829 * guarantees that if ops.init_task() sets @p->disallow,
3830 * @p can never be in SCX.
3831 */
3832 if (p->policy == SCHED_EXT) {
3833 p->policy = SCHED_NORMAL;
3834 atomic_long_inc(&scx_nr_rejected);
3835 }
3836
3837 task_rq_unlock(rq, p, &rf);
3838 } else if (p->policy == SCHED_EXT) {
3839 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
3840 p->comm, p->pid);
3841 }
3842 }
3843
3844 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3845 return 0;
3846 }
3847
3848 static void scx_enable_task(struct task_struct *p)
3849 {
3850 struct scx_sched *sch = scx_root;
3851 struct rq *rq = task_rq(p);
3852 u32 weight;
3853
3854 lockdep_assert_rq_held(rq);
3855
3856 /*
3857 * Set the weight before calling ops.enable() so that the scheduler
3858 * doesn't see a stale value if they inspect the task struct.
3859 */
3860 if (task_has_idle_policy(p))
3861 weight = WEIGHT_IDLEPRIO;
3862 else
3863 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3864
3865 p->scx.weight = sched_weight_to_cgroup(weight);
3866
3867 if (SCX_HAS_OP(sch, enable))
3868 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p);
3869 scx_set_task_state(p, SCX_TASK_ENABLED);
3870
3871 if (SCX_HAS_OP(sch, set_weight))
3872 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
3873 p, p->scx.weight);
3874 }
3875
3876 static void scx_disable_task(struct task_struct *p)
3877 {
3878 struct scx_sched *sch = scx_root;
3879 struct rq *rq = task_rq(p);
3880
3881 lockdep_assert_rq_held(rq);
3882 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3883
3884 if (SCX_HAS_OP(sch, disable))
3885 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
3886 scx_set_task_state(p, SCX_TASK_READY);
3887 }
3888
3889 static void scx_exit_task(struct task_struct *p)
3890 {
3891 struct scx_sched *sch = scx_root;
3892 struct scx_exit_task_args args = {
3893 .cancelled = false,
3894 };
3895
3896 lockdep_assert_rq_held(task_rq(p));
3897
3898 switch (scx_get_task_state(p)) {
3899 case SCX_TASK_NONE:
3900 return;
3901 case SCX_TASK_INIT:
3902 args.cancelled = true;
3903 break;
3904 case SCX_TASK_READY:
3905 break;
3906 case SCX_TASK_ENABLED:
3907 scx_disable_task(p);
3908 break;
3909 default:
3910 WARN_ON_ONCE(true);
3911 return;
3912 }
3913
3914 if (SCX_HAS_OP(sch, exit_task))
3915 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
3916 p, &args);
3917 scx_set_task_state(p, SCX_TASK_NONE);
3918 }
3919
3920 void init_scx_entity(struct sched_ext_entity *scx)
3921 {
3922 memset(scx, 0, sizeof(*scx));
3923 INIT_LIST_HEAD(&scx->dsq_list.node);
3924 RB_CLEAR_NODE(&scx->dsq_priq);
3925 scx->sticky_cpu = -1;
3926 scx->holding_cpu = -1;
3927 INIT_LIST_HEAD(&scx->runnable_node);
3928 scx->runnable_at = jiffies;
3929 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3930 scx->slice = SCX_SLICE_DFL;
3931 }
3932
3933 void scx_pre_fork(struct task_struct *p)
3934 {
3935 /*
3936 * BPF scheduler enable/disable paths want to be able to iterate and
3937 * update all tasks which can become complex when racing forks. As
3938 * enable/disable are very cold paths, let's use a percpu_rwsem to
3939 * exclude forks.
3940 */
3941 percpu_down_read(&scx_fork_rwsem);
3942 }
3943
3944 int scx_fork(struct task_struct *p)
3945 {
3946 percpu_rwsem_assert_held(&scx_fork_rwsem);
3947
3948 if (scx_init_task_enabled)
3949 return scx_init_task(p, task_group(p), true);
3950 else
3951 return 0;
3952 }
3953
3954 void scx_post_fork(struct task_struct *p)
3955 {
3956 if (scx_init_task_enabled) {
3957 scx_set_task_state(p, SCX_TASK_READY);
3958
3959 /*
3960 * Enable the task immediately if it's running on sched_ext.
3961 * Otherwise, it'll be enabled in switching_to_scx() if and
3962 * when it's ever configured to run with a SCHED_EXT policy.
3963 */
3964 if (p->sched_class == &ext_sched_class) {
3965 struct rq_flags rf;
3966 struct rq *rq;
3967
3968 rq = task_rq_lock(p, &rf);
3969 scx_enable_task(p);
3970 task_rq_unlock(rq, p, &rf);
3971 }
3972 }
3973
3974 spin_lock_irq(&scx_tasks_lock);
3975 list_add_tail(&p->scx.tasks_node, &scx_tasks);
3976 spin_unlock_irq(&scx_tasks_lock);
3977
3978 percpu_up_read(&scx_fork_rwsem);
3979 }
3980
3981 void scx_cancel_fork(struct task_struct *p)
3982 {
3983 if (scx_enabled()) {
3984 struct rq *rq;
3985 struct rq_flags rf;
3986
3987 rq = task_rq_lock(p, &rf);
3988 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3989 scx_exit_task(p);
3990 task_rq_unlock(rq, p, &rf);
3991 }
3992
3993 percpu_up_read(&scx_fork_rwsem);
3994 }
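/*
 * Fork-path summary: scx_pre_fork() takes scx_fork_rwsem for reading,
 * scx_fork() calls ops.init_task() while it is held, and scx_post_fork() or
 * scx_cancel_fork() drops it. Because the enable/disable paths take the same
 * rwsem for writing, every fork completes either wholly before or wholly
 * after a scheduler (un)load.
 */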
3995
3996 void sched_ext_free(struct task_struct *p)
3997 {
3998 unsigned long flags;
3999
4000 spin_lock_irqsave(&scx_tasks_lock, flags);
4001 list_del_init(&p->scx.tasks_node);
4002 spin_unlock_irqrestore(&scx_tasks_lock, flags);
4003
4004 /*
4005 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
4006 * transitions can't race us. Disable ops for @p.
4007 */
4008 if (scx_get_task_state(p) != SCX_TASK_NONE) {
4009 struct rq_flags rf;
4010 struct rq *rq;
4011
4012 rq = task_rq_lock(p, &rf);
4013 scx_exit_task(p);
4014 task_rq_unlock(rq, p, &rf);
4015 }
4016 }
4017
4018 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
4019 const struct load_weight *lw)
4020 {
4021 struct scx_sched *sch = scx_root;
4022
4023 lockdep_assert_rq_held(task_rq(p));
4024
4025 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
4026 if (SCX_HAS_OP(sch, set_weight))
4027 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
4028 p, p->scx.weight);
4029 }
4030
4031 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
4032 {
4033 }
4034
4035 static void switching_to_scx(struct rq *rq, struct task_struct *p)
4036 {
4037 struct scx_sched *sch = scx_root;
4038
4039 scx_enable_task(p);
4040
4041 /*
4042 * set_cpus_allowed_scx() is not called while @p is associated with a
4043 * different scheduler class. Keep the BPF scheduler up-to-date.
4044 */
4045 if (SCX_HAS_OP(sch, set_cpumask))
4046 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq,
4047 p, (struct cpumask *)p->cpus_ptr);
4048 }
4049
4050 static void switched_from_scx(struct rq *rq, struct task_struct *p)
4051 {
4052 scx_disable_task(p);
4053 }
4054
4055 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
4056 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
4057
4058 int scx_check_setscheduler(struct task_struct *p, int policy)
4059 {
4060 lockdep_assert_rq_held(task_rq(p));
4061
4062 /* if disallow, reject transitioning into SCX */
4063 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
4064 p->policy != policy && policy == SCHED_EXT)
4065 return -EACCES;
4066
4067 return 0;
4068 }
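/*
 * Example (illustrative): if ops.init_task() sets @p->scx.disallow, a later
 * sched_setscheduler() call requesting SCHED_EXT for that task fails here
 * with -EACCES while requests for other policies are unaffected.
 */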
4069
4070 #ifdef CONFIG_NO_HZ_FULL
4071 bool scx_can_stop_tick(struct rq *rq)
4072 {
4073 struct task_struct *p = rq->curr;
4074
4075 if (scx_rq_bypassing(rq))
4076 return false;
4077
4078 if (p->sched_class != &ext_sched_class)
4079 return true;
4080
4081 /*
4082 * @rq can dispatch from different DSQs, so we can't tell whether it
4083 * needs the tick or not by looking at nr_running. Allow stopping ticks
4084 * iff the BPF scheduler indicated so. See set_next_task_scx().
4085 */
4086 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4087 }
4088 #endif
4089
4090 #ifdef CONFIG_EXT_GROUP_SCHED
4091
4092 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
4093 static bool scx_cgroup_enabled;
4094
4095 void scx_tg_init(struct task_group *tg)
4096 {
4097 tg->scx_weight = CGROUP_WEIGHT_DFL;
4098 }
4099
4100 int scx_tg_online(struct task_group *tg)
4101 {
4102 struct scx_sched *sch = scx_root;
4103 int ret = 0;
4104
4105 WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4106
4107 percpu_down_read(&scx_cgroup_rwsem);
4108
4109 if (scx_cgroup_enabled) {
4110 if (SCX_HAS_OP(sch, cgroup_init)) {
4111 struct scx_cgroup_init_args args =
4112 { .weight = tg->scx_weight };
4113
4114 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init,
4115 NULL, tg->css.cgroup, &args);
4116 if (ret)
4117 ret = ops_sanitize_err(sch, "cgroup_init", ret);
4118 }
4119 if (ret == 0)
4120 tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4121 } else {
4122 tg->scx_flags |= SCX_TG_ONLINE;
4123 }
4124
4125 percpu_up_read(&scx_cgroup_rwsem);
4126 return ret;
4127 }
4128
4129 void scx_tg_offline(struct task_group *tg)
4130 {
4131 struct scx_sched *sch = scx_root;
4132
4133 WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
4134
4135 percpu_down_read(&scx_cgroup_rwsem);
4136
4137 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
4138 (tg->scx_flags & SCX_TG_INITED))
4139 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
4140 tg->css.cgroup);
4141 tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4142
4143 percpu_up_read(&scx_cgroup_rwsem);
4144 }
4145
4146 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4147 {
4148 struct scx_sched *sch = scx_root;
4149 struct cgroup_subsys_state *css;
4150 struct task_struct *p;
4151 int ret;
4152
4153 /* released in scx_finish/cancel_attach() */
4154 percpu_down_read(&scx_cgroup_rwsem);
4155
4156 if (!scx_cgroup_enabled)
4157 return 0;
4158
4159 cgroup_taskset_for_each(p, css, tset) {
4160 struct cgroup *from = tg_cgrp(task_group(p));
4161 struct cgroup *to = tg_cgrp(css_tg(css));
4162
4163 WARN_ON_ONCE(p->scx.cgrp_moving_from);
4164
4165 /*
4166 * sched_move_task() omits identity migrations. Let's match the
4167 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4168 * always match one-to-one.
4169 */
4170 if (from == to)
4171 continue;
4172
4173 if (SCX_HAS_OP(sch, cgroup_prep_move)) {
4174 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED,
4175 cgroup_prep_move, NULL,
4176 p, from, css->cgroup);
4177 if (ret)
4178 goto err;
4179 }
4180
4181 p->scx.cgrp_moving_from = from;
4182 }
4183
4184 return 0;
4185
4186 err:
4187 cgroup_taskset_for_each(p, css, tset) {
4188 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4189 p->scx.cgrp_moving_from)
4190 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
4191 p, p->scx.cgrp_moving_from, css->cgroup);
4192 p->scx.cgrp_moving_from = NULL;
4193 }
4194
4195 percpu_up_read(&scx_cgroup_rwsem);
4196 return ops_sanitize_err(sch, "cgroup_prep_move", ret);
4197 }
4198
4199 void scx_cgroup_move_task(struct task_struct *p)
4200 {
4201 struct scx_sched *sch = scx_root;
4202
4203 if (!scx_cgroup_enabled)
4204 return;
4205
4206 /*
4207 * @p must have ops.cgroup_prep_move() called on it and thus
4208 * cgrp_moving_from set.
4209 */
4210 if (SCX_HAS_OP(sch, cgroup_move) &&
4211 !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4212 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL,
4213 p, p->scx.cgrp_moving_from,
4214 tg_cgrp(task_group(p)));
4215 p->scx.cgrp_moving_from = NULL;
4216 }
4217
4218 void scx_cgroup_finish_attach(void)
4219 {
4220 percpu_up_read(&scx_cgroup_rwsem);
4221 }
4222
4223 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4224 {
4225 struct scx_sched *sch = scx_root;
4226 struct cgroup_subsys_state *css;
4227 struct task_struct *p;
4228
4229 if (!scx_cgroup_enabled)
4230 goto out_unlock;
4231
4232 cgroup_taskset_for_each(p, css, tset) {
4233 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4234 p->scx.cgrp_moving_from)
4235 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
4236 p, p->scx.cgrp_moving_from, css->cgroup);
4237 p->scx.cgrp_moving_from = NULL;
4238 }
4239 out_unlock:
4240 percpu_up_read(&scx_cgroup_rwsem);
4241 }
4242
4243 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4244 {
4245 struct scx_sched *sch = scx_root;
4246
4247 percpu_down_read(&scx_cgroup_rwsem);
4248
4249 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
4250 tg->scx_weight != weight)
4251 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
4252 tg_cgrp(tg), weight);
4253
4254 tg->scx_weight = weight;
4255
4256 percpu_up_read(&scx_cgroup_rwsem);
4257 }
4258
4259 void scx_group_set_idle(struct task_group *tg, bool idle)
4260 {
4261 /* TODO: Implement ops->cgroup_set_idle() */
4262 }
4263
4264 static void scx_cgroup_lock(void)
4265 {
4266 percpu_down_write(&scx_cgroup_rwsem);
4267 }
4268
4269 static void scx_cgroup_unlock(void)
4270 {
4271 percpu_up_write(&scx_cgroup_rwsem);
4272 }
4273
4274 #else /* CONFIG_EXT_GROUP_SCHED */
4275
4276 static inline void scx_cgroup_lock(void) {}
4277 static inline void scx_cgroup_unlock(void) {}
4278
4279 #endif /* CONFIG_EXT_GROUP_SCHED */
4280
4281 /*
4282 * Omitted operations:
4283 *
4284 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
4285 * isn't tied to the CPU at that point. Preemption is implemented by resetting
4286 * the victim task's slice to 0 and triggering reschedule on the target CPU.
4287 *
4288 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4289 *
4290 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4291 * their current sched_class. Call them directly from sched core instead.
4292 */
4293 DEFINE_SCHED_CLASS(ext) = {
4294 .enqueue_task = enqueue_task_scx,
4295 .dequeue_task = dequeue_task_scx,
4296 .yield_task = yield_task_scx,
4297 .yield_to_task = yield_to_task_scx,
4298
4299 .wakeup_preempt = wakeup_preempt_scx,
4300
4301 .balance = balance_scx,
4302 .pick_task = pick_task_scx,
4303
4304 .put_prev_task = put_prev_task_scx,
4305 .set_next_task = set_next_task_scx,
4306
4307 #ifdef CONFIG_SMP
4308 .select_task_rq = select_task_rq_scx,
4309 .task_woken = task_woken_scx,
4310 .set_cpus_allowed = set_cpus_allowed_scx,
4311
4312 .rq_online = rq_online_scx,
4313 .rq_offline = rq_offline_scx,
4314 #endif
4315
4316 .task_tick = task_tick_scx,
4317
4318 .switching_to = switching_to_scx,
4319 .switched_from = switched_from_scx,
4320 .switched_to = switched_to_scx,
4321 .reweight_task = reweight_task_scx,
4322 .prio_changed = prio_changed_scx,
4323
4324 .update_curr = update_curr_scx,
4325
4326 #ifdef CONFIG_UCLAMP_TASK
4327 .uclamp_enabled = 1,
4328 #endif
4329 };
4330
4331 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4332 {
4333 memset(dsq, 0, sizeof(*dsq));
4334
4335 raw_spin_lock_init(&dsq->lock);
4336 INIT_LIST_HEAD(&dsq->list);
4337 dsq->id = dsq_id;
4338 }
4339
4340 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4341 {
4342 struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4343 struct scx_dispatch_q *dsq, *tmp_dsq;
4344
4345 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4346 kfree_rcu(dsq, rcu);
4347 }
4348
4349 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4350
4351 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
4352 {
4353 struct scx_dispatch_q *dsq;
4354 unsigned long flags;
4355
4356 rcu_read_lock();
4357
4358 dsq = find_user_dsq(sch, dsq_id);
4359 if (!dsq)
4360 goto out_unlock_rcu;
4361
4362 raw_spin_lock_irqsave(&dsq->lock, flags);
4363
4364 if (dsq->nr) {
4365 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4366 dsq->id, dsq->nr);
4367 goto out_unlock_dsq;
4368 }
4369
4370 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
4371 dsq_hash_params))
4372 goto out_unlock_dsq;
4373
4374 /*
4375 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4376 * queueing more tasks. As this function can be called from anywhere,
4377 * freeing is bounced through an irq work to avoid nesting RCU
4378 * operations inside scheduler locks.
4379 */
4380 dsq->id = SCX_DSQ_INVALID;
4381 llist_add(&dsq->free_node, &dsqs_to_free);
4382 irq_work_queue(&free_dsq_irq_work);
4383
4384 out_unlock_dsq:
4385 raw_spin_unlock_irqrestore(&dsq->lock, flags);
4386 out_unlock_rcu:
4387 rcu_read_unlock();
4388 }
4389
4390 #ifdef CONFIG_EXT_GROUP_SCHED
4391 static void scx_cgroup_exit(struct scx_sched *sch)
4392 {
4393 struct cgroup_subsys_state *css;
4394
4395 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4396
4397 scx_cgroup_enabled = false;
4398
4399 /*
4400 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4401 * cgroups and exit all the inited ones, all online cgroups are exited.
4402 */
4403 rcu_read_lock();
4404 css_for_each_descendant_post(css, &root_task_group.css) {
4405 struct task_group *tg = css_tg(css);
4406
4407 if (!(tg->scx_flags & SCX_TG_INITED))
4408 continue;
4409 tg->scx_flags &= ~SCX_TG_INITED;
4410
4411 if (!sch->ops.cgroup_exit)
4412 continue;
4413
4414 if (WARN_ON_ONCE(!css_tryget(css)))
4415 continue;
4416 rcu_read_unlock();
4417
4418 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
4419 css->cgroup);
4420
4421 rcu_read_lock();
4422 css_put(css);
4423 }
4424 rcu_read_unlock();
4425 }
4426
4427 static int scx_cgroup_init(struct scx_sched *sch)
4428 {
4429 struct cgroup_subsys_state *css;
4430 int ret;
4431
4432 percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4433
4434 /*
4435 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4436 * cgroups and init, all online cgroups are initialized.
4437 */
4438 rcu_read_lock();
4439 css_for_each_descendant_pre(css, &root_task_group.css) {
4440 struct task_group *tg = css_tg(css);
4441 struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4442
4443 if ((tg->scx_flags &
4444 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4445 continue;
4446
4447 if (!sch->ops.cgroup_init) {
4448 tg->scx_flags |= SCX_TG_INITED;
4449 continue;
4450 }
4451
4452 if (WARN_ON_ONCE(!css_tryget(css)))
4453 continue;
4454 rcu_read_unlock();
4455
4456 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
4457 css->cgroup, &args);
4458 if (ret) {
4459 css_put(css);
4460 scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
4461 return ret;
4462 }
4463 tg->scx_flags |= SCX_TG_INITED;
4464
4465 rcu_read_lock();
4466 css_put(css);
4467 }
4468 rcu_read_unlock();
4469
4470 WARN_ON_ONCE(scx_cgroup_enabled);
4471 scx_cgroup_enabled = true;
4472
4473 return 0;
4474 }
4475
4476 #else
4477 static void scx_cgroup_exit(struct scx_sched *sch) {}
4478 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
4479 #endif
4480
4481
4482 /********************************************************************************
4483 * Sysfs interface and ops enable/disable.
4484 */
4485
4486 #define SCX_ATTR(_name) \
4487 static struct kobj_attribute scx_attr_##_name = { \
4488 .attr = { .name = __stringify(_name), .mode = 0444 }, \
4489 .show = scx_attr_##_name##_show, \
4490 }
4491
4492 static ssize_t scx_attr_state_show(struct kobject *kobj,
4493 struct kobj_attribute *ka, char *buf)
4494 {
4495 return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]);
4496 }
4497 SCX_ATTR(state);
4498
4499 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4500 struct kobj_attribute *ka, char *buf)
4501 {
4502 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4503 }
4504 SCX_ATTR(switch_all);
4505
4506 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4507 struct kobj_attribute *ka, char *buf)
4508 {
4509 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4510 }
4511 SCX_ATTR(nr_rejected);
4512
4513 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4514 struct kobj_attribute *ka, char *buf)
4515 {
4516 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4517 }
4518 SCX_ATTR(hotplug_seq);
4519
4520 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4521 struct kobj_attribute *ka, char *buf)
4522 {
4523 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4524 }
4525 SCX_ATTR(enable_seq);
4526
4527 static struct attribute *scx_global_attrs[] = {
4528 &scx_attr_state.attr,
4529 &scx_attr_switch_all.attr,
4530 &scx_attr_nr_rejected.attr,
4531 &scx_attr_hotplug_seq.attr,
4532 &scx_attr_enable_seq.attr,
4533 NULL,
4534 };
4535
4536 static const struct attribute_group scx_global_attr_group = {
4537 .attrs = scx_global_attrs,
4538 };
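/*
 * Hedged usage note: these attributes are typically visible under
 * /sys/kernel/sched_ext/ once the kset is registered, e.g.
 *
 *	$ cat /sys/kernel/sched_ext/state
 *	enabled
 *
 * The exact path and the value shown are illustrative; they depend on where
 * the kset is created and on the currently loaded scheduler.
 */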
4539
4540 static void free_exit_info(struct scx_exit_info *ei);
4541
4542 static void scx_sched_free_rcu_work(struct work_struct *work)
4543 {
4544 struct rcu_work *rcu_work = to_rcu_work(work);
4545 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
4546 struct rhashtable_iter rht_iter;
4547 struct scx_dispatch_q *dsq;
4548 int node;
4549
4550 kthread_stop(sch->helper->task);
4551 free_percpu(sch->event_stats_cpu);
4552
4553 for_each_node_state(node, N_POSSIBLE)
4554 kfree(sch->global_dsqs[node]);
4555 kfree(sch->global_dsqs);
4556
4557 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
4558 do {
4559 rhashtable_walk_start(&rht_iter);
4560
4561 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
4562 destroy_dsq(sch, dsq->id);
4563
4564 rhashtable_walk_stop(&rht_iter);
4565 } while (dsq == ERR_PTR(-EAGAIN));
4566 rhashtable_walk_exit(&rht_iter);
4567
4568 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4569 free_exit_info(sch->exit_info);
4570 kfree(sch);
4571 }
4572
4573 static void scx_kobj_release(struct kobject *kobj)
4574 {
4575 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4576
4577 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
4578 queue_rcu_work(system_unbound_wq, &sch->rcu_work);
4579 }
4580
4581 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4582 struct kobj_attribute *ka, char *buf)
4583 {
4584 return sysfs_emit(buf, "%s\n", scx_root->ops.name);
4585 }
4586 SCX_ATTR(ops);
4587
4588 #define scx_attr_event_show(buf, at, events, kind) ({ \
4589 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \
4590 })
4591
4592 static ssize_t scx_attr_events_show(struct kobject *kobj,
4593 struct kobj_attribute *ka, char *buf)
4594 {
4595 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4596 struct scx_event_stats events;
4597 int at = 0;
4598
4599 scx_read_events(sch, &events);
4600 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
4601 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4602 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
4603 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
4604 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4605 at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL);
4606 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
4607 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
4608 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
4609 return at;
4610 }
4611 SCX_ATTR(events);
4612
4613 static struct attribute *scx_sched_attrs[] = {
4614 &scx_attr_ops.attr,
4615 &scx_attr_events.attr,
4616 NULL,
4617 };
4618 ATTRIBUTE_GROUPS(scx_sched);
4619
4620 static const struct kobj_type scx_ktype = {
4621 .release = scx_kobj_release,
4622 .sysfs_ops = &kobj_sysfs_ops,
4623 .default_groups = scx_sched_groups,
4624 };
4625
4626 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4627 {
4628 return add_uevent_var(env, "SCXOPS=%s", scx_root->ops.name);
4629 }
4630
4631 static const struct kset_uevent_ops scx_uevent_ops = {
4632 .uevent = scx_uevent,
4633 };
4634
4635 /*
4636 * Used by sched_fork() and __setscheduler_prio() to pick the matching
4637 * sched_class. dl/rt are already handled.
4638 */
4639 bool task_should_scx(int policy)
4640 {
4641 if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
4642 return false;
4643 if (READ_ONCE(scx_switching_all))
4644 return true;
4645 return policy == SCHED_EXT;
4646 }
4647
4648 bool scx_allow_ttwu_queue(const struct task_struct *p)
4649 {
4650 return !scx_enabled() ||
4651 (scx_root->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP) ||
4652 p->sched_class != &ext_sched_class;
4653 }
4654
4655 /**
4656 * scx_softlockup - sched_ext softlockup handler
4657 * @dur_s: number of seconds of CPU stuck due to soft lockup
4658 *
4659 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4660 * live-lock the system by making many CPUs target the same DSQ to the point
4661 * where soft-lockup detection triggers. This function is called from
4662 * soft-lockup watchdog when the triggering point is close and tries to unjam
4663 * the system by enabling the breather and aborting the BPF scheduler.
4664 */
4665 void scx_softlockup(u32 dur_s)
4666 {
4667 struct scx_sched *sch;
4668
4669 rcu_read_lock();
4670
4671 sch = rcu_dereference(scx_root);
4672 if (unlikely(!sch))
4673 goto out_unlock;
4674
4675 switch (scx_enable_state()) {
4676 case SCX_ENABLING:
4677 case SCX_ENABLED:
4678 break;
4679 default:
4680 goto out_unlock;
4681 }
4682
4683 /* allow only one instance, cleared at the end of scx_bypass() */
4684 if (test_and_set_bit(0, &scx_in_softlockup))
4685 goto out_unlock;
4686
4687 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4688 smp_processor_id(), dur_s, scx_root->ops.name);
4689
4690 /*
4691 * Some CPUs may be trapped in the dispatch paths. Enable breather
4692 * immediately; otherwise, we might not even be able to get to scx_bypass().
4693 */
4694 atomic_inc(&scx_breather_depth);
4695
4696 scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s);
4697 out_unlock:
4698 rcu_read_unlock();
4699 }
4700
4701 static void scx_clear_softlockup(void)
4702 {
4703 if (test_and_clear_bit(0, &scx_in_softlockup))
4704 atomic_dec(&scx_breather_depth);
4705 }
4706
4707 /**
4708 * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
4709 * @bypass: true for bypass, false for unbypass
4710 *
4711 * Bypassing guarantees that all runnable tasks make forward progress without
4712 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4713 * be held by tasks that the BPF scheduler is forgetting to run, which
4714 * unfortunately also excludes toggling the static branches.
4715 *
4716 * Let's work around by overriding a couple ops and modifying behaviors based on
4717 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4718 * to force global FIFO scheduling.
4719 *
4720 * - ops.select_cpu() is ignored and the default select_cpu() is used.
4721 *
4722 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4723 * %SCX_OPS_ENQ_LAST is also ignored.
4724 *
4725 * - ops.dispatch() is ignored.
4726 *
4727 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4728 * can't be trusted. Whenever a tick triggers, the running task is rotated to
4729 * the tail of the queue with core_sched_at touched.
4730 *
4731 * - pick_next_task() suppresses zero slice warning.
4732 *
4733 * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4734 * operations.
4735 *
4736 * - scx_prio_less() reverts to the default core_sched_at order.
4737 */
4738 static void scx_bypass(bool bypass)
4739 {
4740 static DEFINE_RAW_SPINLOCK(bypass_lock);
4741 static unsigned long bypass_timestamp;
4742 struct scx_sched *sch;
4743 unsigned long flags;
4744 int cpu;
4745
4746 raw_spin_lock_irqsave(&bypass_lock, flags);
4747 sch = rcu_dereference_bh(scx_root);
4748
4749 if (bypass) {
4750 scx_bypass_depth++;
4751 WARN_ON_ONCE(scx_bypass_depth <= 0);
4752 if (scx_bypass_depth != 1)
4753 goto unlock;
4754 bypass_timestamp = ktime_get_ns();
4755 if (sch)
4756 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
4757 } else {
4758 scx_bypass_depth--;
4759 WARN_ON_ONCE(scx_bypass_depth < 0);
4760 if (scx_bypass_depth != 0)
4761 goto unlock;
4762 if (sch)
4763 scx_add_event(sch, SCX_EV_BYPASS_DURATION,
4764 ktime_get_ns() - bypass_timestamp);
4765 }
4766
4767 atomic_inc(&scx_breather_depth);
4768
4769 /*
4770 * No task property is changing. We just need to make sure all currently
4771 * queued tasks are re-queued according to the new scx_rq_bypassing()
4772 * state. As an optimization, walk each rq's runnable_list instead of
4773 * the scx_tasks list.
4774 *
4775 * This function can't trust the scheduler and thus can't use
4776 * cpus_read_lock(). Walk all possible CPUs instead of online.
4777 */
4778 for_each_possible_cpu(cpu) {
4779 struct rq *rq = cpu_rq(cpu);
4780 struct task_struct *p, *n;
4781
4782 raw_spin_rq_lock(rq);
4783
4784 if (bypass) {
4785 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4786 rq->scx.flags |= SCX_RQ_BYPASSING;
4787 } else {
4788 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4789 rq->scx.flags &= ~SCX_RQ_BYPASSING;
4790 }
4791
4792 /*
4793 * We need to guarantee that no tasks are on the BPF scheduler
4794 * while bypassing. Either we see enabled or the enable path
4795 * sees scx_rq_bypassing() before moving tasks to SCX.
4796 */
4797 if (!scx_enabled()) {
4798 raw_spin_rq_unlock(rq);
4799 continue;
4800 }
4801
4802 /*
4803 * The use of list_for_each_entry_safe_reverse() is required
4804 * because each task is going to be removed from and added back
4805 * to the runnable_list during iteration. Because they're added
4806 * to the tail of the list, safe reverse iteration can still
4807 * visit all nodes.
4808 */
4809 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4810 scx.runnable_node) {
4811 struct sched_enq_and_set_ctx ctx;
4812
4813 /* cycling deq/enq is enough, see the function comment */
4814 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4815 sched_enq_and_set_task(&ctx);
4816 }
4817
4818 /* resched to restore ticks and idle state */
4819 if (cpu_online(cpu) || cpu == smp_processor_id())
4820 resched_curr(rq);
4821
4822 raw_spin_rq_unlock(rq);
4823 }
4824
4825 atomic_dec(&scx_breather_depth);
4826 unlock:
4827 raw_spin_unlock_irqrestore(&bypass_lock, flags);
4828 scx_clear_softlockup();
4829 }
4830
4831 static void free_exit_info(struct scx_exit_info *ei)
4832 {
4833 kvfree(ei->dump);
4834 kfree(ei->msg);
4835 kfree(ei->bt);
4836 kfree(ei);
4837 }
4838
4839 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4840 {
4841 struct scx_exit_info *ei;
4842
4843 ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4844 if (!ei)
4845 return NULL;
4846
4847 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4848 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4849 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
4850
4851 if (!ei->bt || !ei->msg || !ei->dump) {
4852 free_exit_info(ei);
4853 return NULL;
4854 }
4855
4856 return ei;
4857 }
4858
4859 static const char *scx_exit_reason(enum scx_exit_kind kind)
4860 {
4861 switch (kind) {
4862 case SCX_EXIT_UNREG:
4863 return "unregistered from user space";
4864 case SCX_EXIT_UNREG_BPF:
4865 return "unregistered from BPF";
4866 case SCX_EXIT_UNREG_KERN:
4867 return "unregistered from the main kernel";
4868 case SCX_EXIT_SYSRQ:
4869 return "disabled by sysrq-S";
4870 case SCX_EXIT_ERROR:
4871 return "runtime error";
4872 case SCX_EXIT_ERROR_BPF:
4873 return "scx_bpf_error";
4874 case SCX_EXIT_ERROR_STALL:
4875 return "runnable task stall";
4876 default:
4877 return "<UNKNOWN>";
4878 }
4879 }
4880
4881 static void scx_disable_workfn(struct kthread_work *work)
4882 {
4883 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
4884 struct scx_exit_info *ei = sch->exit_info;
4885 struct scx_task_iter sti;
4886 struct task_struct *p;
4887 int kind, cpu;
4888
4889 kind = atomic_read(&sch->exit_kind);
4890 while (true) {
4891 if (kind == SCX_EXIT_DONE) /* already disabled? */
4892 return;
4893 WARN_ON_ONCE(kind == SCX_EXIT_NONE);
4894 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
4895 break;
4896 }
4897 ei->kind = kind;
4898 ei->reason = scx_exit_reason(ei->kind);
4899
4900 /* guarantee forward progress by bypassing scx_ops */
4901 scx_bypass(true);
4902
4903 switch (scx_set_enable_state(SCX_DISABLING)) {
4904 case SCX_DISABLING:
4905 WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4906 break;
4907 case SCX_DISABLED:
4908 pr_warn("sched_ext: ops error detected without ops (%s)\n",
4909 sch->exit_info->msg);
4910 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
4911 goto done;
4912 default:
4913 break;
4914 }
4915
4916 /*
4917 * Here, every runnable task is guaranteed to make forward progress and
4918 * we can safely use blocking synchronization constructs. Actually
4919 * disable ops.
4920 */
4921 mutex_lock(&scx_enable_mutex);
4922
4923 static_branch_disable(&__scx_switched_all);
4924 WRITE_ONCE(scx_switching_all, false);
4925
4926 /*
4927 * Shut down cgroup support before tasks so that the cgroup attach path
4928 * doesn't race against scx_exit_task().
4929 */
4930 scx_cgroup_lock();
4931 scx_cgroup_exit(sch);
4932 scx_cgroup_unlock();
4933
4934 /*
4935 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4936 * must be switched out and exited synchronously.
4937 */
4938 percpu_down_write(&scx_fork_rwsem);
4939
4940 scx_init_task_enabled = false;
4941
4942 scx_task_iter_start(&sti);
4943 while ((p = scx_task_iter_next_locked(&sti))) {
4944 const struct sched_class *old_class = p->sched_class;
4945 const struct sched_class *new_class =
4946 __setscheduler_class(p->policy, p->prio);
4947 struct sched_enq_and_set_ctx ctx;
4948
4949 if (old_class != new_class && p->se.sched_delayed)
4950 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
4951
4952 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4953
4954 p->sched_class = new_class;
4955 check_class_changing(task_rq(p), p, old_class);
4956
4957 sched_enq_and_set_task(&ctx);
4958
4959 check_class_changed(task_rq(p), p, old_class, p->prio);
4960 scx_exit_task(p);
4961 }
4962 scx_task_iter_stop(&sti);
4963 percpu_up_write(&scx_fork_rwsem);
4964
4965 /*
4966 * Invalidate all the rq clocks to prevent getting outdated
4967 * rq clocks from a previous scx scheduler.
4968 */
4969 for_each_possible_cpu(cpu) {
4970 struct rq *rq = cpu_rq(cpu);
4971 scx_rq_clock_invalidate(rq);
4972 }
4973
4974 /* no task is on scx, turn off all the switches and flush in-progress calls */
4975 static_branch_disable(&__scx_enabled);
4976 bitmap_zero(sch->has_op, SCX_OPI_END);
4977 scx_idle_disable();
4978 synchronize_rcu();
4979
4980 if (ei->kind >= SCX_EXIT_ERROR) {
4981 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4982 sch->ops.name, ei->reason);
4983
4984 if (ei->msg[0] != '\0')
4985 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
4986 #ifdef CONFIG_STACKTRACE
4987 stack_trace_print(ei->bt, ei->bt_len, 2);
4988 #endif
4989 } else {
4990 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4991 sch->ops.name, ei->reason);
4992 }
4993
4994 if (sch->ops.exit)
4995 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
4996
4997 cancel_delayed_work_sync(&scx_watchdog_work);
4998
4999 /*
5000 * scx_root clearing must be inside cpus_read_lock(). See
5001 * handle_hotplug().
5002 */
5003 cpus_read_lock();
5004 RCU_INIT_POINTER(scx_root, NULL);
5005 cpus_read_unlock();
5006
5007 /*
5008 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs
5009 * could observe an object of the same name still in the hierarchy when
5010 * the next scheduler is loaded.
5011 */
5012 kobject_del(&sch->kobj);
5013
5014 free_percpu(scx_dsp_ctx);
5015 scx_dsp_ctx = NULL;
5016 scx_dsp_max_batch = 0;
5017
5018 mutex_unlock(&scx_enable_mutex);
5019
5020 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
5021 done:
5022 scx_bypass(false);
5023 }
5024
5025 static void scx_disable(enum scx_exit_kind kind)
5026 {
5027 int none = SCX_EXIT_NONE;
5028 struct scx_sched *sch;
5029
5030 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5031 kind = SCX_EXIT_ERROR;
5032
5033 rcu_read_lock();
5034 sch = rcu_dereference(scx_root);
5035 if (sch) {
5036 atomic_try_cmpxchg(&sch->exit_kind, &none, kind);
5037 kthread_queue_work(sch->helper, &sch->disable_work);
5038 }
5039 rcu_read_unlock();
5040 }
5041
5042 static void dump_newline(struct seq_buf *s)
5043 {
5044 trace_sched_ext_dump("");
5045
5046 /* @s may be zero sized and seq_buf triggers WARN if so */
5047 if (s->size)
5048 seq_buf_putc(s, '\n');
5049 }
5050
5051 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
5052 {
5053 va_list args;
5054
5055 #ifdef CONFIG_TRACEPOINTS
5056 if (trace_sched_ext_dump_enabled()) {
5057 /* protected by scx_dump_state()::dump_lock */
5058 static char line_buf[SCX_EXIT_MSG_LEN];
5059
5060 va_start(args, fmt);
5061 vscnprintf(line_buf, sizeof(line_buf), fmt, args);
5062 va_end(args);
5063
5064 trace_sched_ext_dump(line_buf);
5065 }
5066 #endif
5067 /* @s may be zero sized and seq_buf triggers WARN if so */
5068 if (s->size) {
5069 va_start(args, fmt);
5070 seq_buf_vprintf(s, fmt, args);
5071 va_end(args);
5072
5073 seq_buf_putc(s, '\n');
5074 }
5075 }
5076
5077 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
5078 const unsigned long *bt, unsigned int len)
5079 {
5080 unsigned int i;
5081
5082 for (i = 0; i < len; i++)
5083 dump_line(s, "%s%pS", prefix, (void *)bt[i]);
5084 }
5085
5086 static void ops_dump_init(struct seq_buf *s, const char *prefix)
5087 {
5088 struct scx_dump_data *dd = &scx_dump_data;
5089
5090 lockdep_assert_irqs_disabled();
5091
5092 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
5093 dd->first = true;
5094 dd->cursor = 0;
5095 dd->s = s;
5096 dd->prefix = prefix;
5097 }
5098
5099 static void ops_dump_flush(void)
5100 {
5101 struct scx_dump_data *dd = &scx_dump_data;
5102 char *line = dd->buf.line;
5103
5104 if (!dd->cursor)
5105 return;
5106
5107 /*
5108 * There's something to flush and this is the first line. Insert a blank
5109 * line to distinguish ops dump.
5110 */
5111 if (dd->first) {
5112 dump_newline(dd->s);
5113 dd->first = false;
5114 }
5115
5116 /*
5117 * There may be multiple lines in $line. Scan and emit each line
5118 * separately.
5119 */
5120 while (true) {
5121 char *end = line;
5122 char c;
5123
5124 while (*end != '\n' && *end != '\0')
5125 end++;
5126
5127 /*
5128 * If $line overflowed, it may not have a newline at the end.
5129 * Always emit with a newline.
5130 */
5131 c = *end;
5132 *end = '\0';
5133 dump_line(dd->s, "%s%s", dd->prefix, line);
5134 if (c == '\0')
5135 break;
5136
5137 /* move to the next line */
5138 end++;
5139 if (*end == '\0')
5140 break;
5141 line = end;
5142 }
5143
5144 dd->cursor = 0;
5145 }
5146
5147 static void ops_dump_exit(void)
5148 {
5149 ops_dump_flush();
5150 scx_dump_data.cpu = -1;
5151 }
5152
5153 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
5154 struct task_struct *p, char marker)
5155 {
5156 static unsigned long bt[SCX_EXIT_BT_LEN];
5157 struct scx_sched *sch = scx_root;
5158 char dsq_id_buf[19] = "(n/a)";
5159 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
5160 unsigned int bt_len = 0;
5161
5162 if (p->scx.dsq)
5163 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
5164 (unsigned long long)p->scx.dsq->id);
5165
5166 dump_newline(s);
5167 dump_line(s, " %c%c %s[%d] %+ldms",
5168 marker, task_state_to_char(p), p->comm, p->pid,
5169 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
5170 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
5171 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
5172 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
5173 ops_state >> SCX_OPSS_QSEQ_SHIFT);
5174 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s",
5175 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
5176 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u",
5177 p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
5178 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
5179
5180 if (SCX_HAS_OP(sch, dump_task)) {
5181 ops_dump_init(s, " ");
5182 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p);
5183 ops_dump_exit();
5184 }
5185
5186 #ifdef CONFIG_STACKTRACE
5187 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
5188 #endif
5189 if (bt_len) {
5190 dump_newline(s);
5191 dump_stack_trace(s, " ", bt, bt_len);
5192 }
5193 }
5194
5195 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
5196 {
5197 static DEFINE_SPINLOCK(dump_lock);
5198 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
5199 struct scx_sched *sch = scx_root;
5200 struct scx_dump_ctx dctx = {
5201 .kind = ei->kind,
5202 .exit_code = ei->exit_code,
5203 .reason = ei->reason,
5204 .at_ns = ktime_get_ns(),
5205 .at_jiffies = jiffies,
5206 };
5207 struct seq_buf s;
5208 struct scx_event_stats events;
5209 unsigned long flags;
5210 char *buf;
5211 int cpu;
5212
5213 spin_lock_irqsave(&dump_lock, flags);
5214
5215 seq_buf_init(&s, ei->dump, dump_len);
5216
5217 if (ei->kind == SCX_EXIT_NONE) {
5218 dump_line(&s, "Debug dump triggered by %s", ei->reason);
5219 } else {
5220 dump_line(&s, "%s[%d] triggered exit kind %d:",
5221 current->comm, current->pid, ei->kind);
5222 dump_line(&s, " %s (%s)", ei->reason, ei->msg);
5223 dump_newline(&s);
5224 dump_line(&s, "Backtrace:");
5225 dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
5226 }
5227
5228 if (SCX_HAS_OP(sch, dump)) {
5229 ops_dump_init(&s, "");
5230 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx);
5231 ops_dump_exit();
5232 }
5233
5234 dump_newline(&s);
5235 dump_line(&s, "CPU states");
5236 dump_line(&s, "----------");
5237
5238 for_each_possible_cpu(cpu) {
5239 struct rq *rq = cpu_rq(cpu);
5240 struct rq_flags rf;
5241 struct task_struct *p;
5242 struct seq_buf ns;
5243 size_t avail, used;
5244 bool idle;
5245
5246 rq_lock(rq, &rf);
5247
5248 idle = list_empty(&rq->scx.runnable_list) &&
5249 rq->curr->sched_class == &idle_sched_class;
5250
5251 if (idle && !SCX_HAS_OP(sch, dump_cpu))
5252 goto next;
5253
5254 /*
5255 * We don't yet know whether ops.dump_cpu() will produce output
5256 * and we may want to skip the default CPU dump if it doesn't.
5257 * Use a nested seq_buf to generate the standard dump so that we
5258 * can decide whether to commit later.
5259 */
5260 avail = seq_buf_get_buf(&s, &buf);
5261 seq_buf_init(&ns, buf, avail);
5262
5263 dump_newline(&ns);
5264 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
5265 cpu, rq->scx.nr_running, rq->scx.flags,
5266 rq->scx.cpu_released, rq->scx.ops_qseq,
5267 rq->scx.pnt_seq);
5268 dump_line(&ns, " curr=%s[%d] class=%ps",
5269 rq->curr->comm, rq->curr->pid,
5270 rq->curr->sched_class);
5271 if (!cpumask_empty(rq->scx.cpus_to_kick))
5272 dump_line(&ns, " cpus_to_kick : %*pb",
5273 cpumask_pr_args(rq->scx.cpus_to_kick));
5274 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
5275 dump_line(&ns, " idle_to_kick : %*pb",
5276 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
5277 if (!cpumask_empty(rq->scx.cpus_to_preempt))
5278 dump_line(&ns, " cpus_to_preempt: %*pb",
5279 cpumask_pr_args(rq->scx.cpus_to_preempt));
5280 if (!cpumask_empty(rq->scx.cpus_to_wait))
5281 dump_line(&ns, " cpus_to_wait : %*pb",
5282 cpumask_pr_args(rq->scx.cpus_to_wait));
5283
5284 used = seq_buf_used(&ns);
5285 if (SCX_HAS_OP(sch, dump_cpu)) {
5286 ops_dump_init(&ns, " ");
5287 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
5288 &dctx, cpu, idle);
5289 ops_dump_exit();
5290 }
5291
5292 /*
5293 * If idle && nothing generated by ops.dump_cpu(), there's
5294 * nothing interesting. Skip.
5295 */
5296 if (idle && used == seq_buf_used(&ns))
5297 goto next;
5298
5299 /*
5300 * $s may already have overflowed when $ns was created. If so,
5301 * calling commit on it will trigger BUG.
5302 */
5303 if (avail) {
5304 seq_buf_commit(&s, seq_buf_used(&ns));
5305 if (seq_buf_has_overflowed(&ns))
5306 seq_buf_set_overflow(&s);
5307 }
5308
5309 if (rq->curr->sched_class == &ext_sched_class)
5310 scx_dump_task(&s, &dctx, rq->curr, '*');
5311
5312 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
5313 scx_dump_task(&s, &dctx, p, ' ');
5314 next:
5315 rq_unlock(rq, &rf);
5316 }
5317
5318 dump_newline(&s);
5319 dump_line(&s, "Event counters");
5320 dump_line(&s, "--------------");
5321
5322 scx_read_events(sch, &events);
5323 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
5324 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
5325 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
5326 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
5327 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
5328 scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL);
5329 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
5330 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
5331 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
5332
5333 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
5334 memcpy(ei->dump + dump_len - sizeof(trunc_marker),
5335 trunc_marker, sizeof(trunc_marker));
5336
5337 spin_unlock_irqrestore(&dump_lock, flags);
5338 }
5339
5340 static void scx_error_irq_workfn(struct irq_work *irq_work)
5341 {
5342 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work);
5343 struct scx_exit_info *ei = sch->exit_info;
5344
5345 if (ei->kind >= SCX_EXIT_ERROR)
5346 scx_dump_state(ei, sch->ops.exit_dump_len);
5347
5348 kthread_queue_work(sch->helper, &sch->disable_work);
5349 }
5350
5351 static void scx_vexit(struct scx_sched *sch,
5352 enum scx_exit_kind kind, s64 exit_code,
5353 const char *fmt, va_list args)
5354 {
5355 struct scx_exit_info *ei = sch->exit_info;
5356 int none = SCX_EXIT_NONE;
5357
5358 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
5359 return;
5360
5361 ei->exit_code = exit_code;
5362 #ifdef CONFIG_STACKTRACE
5363 if (kind >= SCX_EXIT_ERROR)
5364 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
5365 #endif
5366 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
5367
5368 /*
5369 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
5370 * in scx_disable_workfn().
5371 */
5372 ei->kind = kind;
5373 ei->reason = scx_exit_reason(ei->kind);
5374
5375 irq_work_queue(&sch->error_irq_work);
5376 }
5377
5378 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
5379 {
5380 struct scx_sched *sch;
5381 int node, ret;
5382
5383 sch = kzalloc(sizeof(*sch), GFP_KERNEL);
5384 if (!sch)
5385 return ERR_PTR(-ENOMEM);
5386
5387 sch->exit_info = alloc_exit_info(ops->exit_dump_len);
5388 if (!sch->exit_info) {
5389 ret = -ENOMEM;
5390 goto err_free_sch;
5391 }
5392
5393 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
5394 if (ret < 0)
5395 goto err_free_ei;
5396
5397 sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]),
5398 GFP_KERNEL);
5399 if (!sch->global_dsqs) {
5400 ret = -ENOMEM;
5401 goto err_free_hash;
5402 }
5403
5404 for_each_node_state(node, N_POSSIBLE) {
5405 struct scx_dispatch_q *dsq;
5406
5407 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5408 if (!dsq) {
5409 ret = -ENOMEM;
5410 goto err_free_gdsqs;
5411 }
5412
5413 init_dsq(dsq, SCX_DSQ_GLOBAL);
5414 sch->global_dsqs[node] = dsq;
5415 }
5416
5417 sch->event_stats_cpu = alloc_percpu(struct scx_event_stats);
5418 if (!sch->event_stats_cpu)
5419 goto err_free_gdsqs;
5420
5421 sch->helper = kthread_run_worker(0, "sched_ext_helper");
5422 if (!sch->helper)
5423 goto err_free_event_stats;
5424 sched_set_fifo(sch->helper->task);
5425
5426 atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
5427 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
5428 kthread_init_work(&sch->disable_work, scx_disable_workfn);
5429 sch->ops = *ops;
5430 ops->priv = sch;
5431
5432 sch->kobj.kset = scx_kset;
5433 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
5434 if (ret < 0)
5435 goto err_stop_helper;
5436
5437 return sch;
5438
5439 err_stop_helper:
5440 kthread_stop(sch->helper->task);
5441 err_free_event_stats:
5442 free_percpu(sch->event_stats_cpu);
5443 err_free_gdsqs:
5444 for_each_node_state(node, N_POSSIBLE)
5445 kfree(sch->global_dsqs[node]);
5446 kfree(sch->global_dsqs);
5447 err_free_hash:
5448 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
5449 err_free_ei:
5450 free_exit_info(sch->exit_info);
5451 err_free_sch:
5452 kfree(sch);
5453 return ERR_PTR(ret);
5454 }
5455
5456 static void check_hotplug_seq(struct scx_sched *sch,
5457 const struct sched_ext_ops *ops)
5458 {
5459 unsigned long long global_hotplug_seq;
5460
5461 /*
5462 * If a hotplug event has occurred between when a scheduler was
5463 * initialized and when we were able to attach, exit and notify user
5464 * space about it.
5465 */
5466 if (ops->hotplug_seq) {
5467 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
5468 if (ops->hotplug_seq != global_hotplug_seq) {
5469 scx_exit(sch, SCX_EXIT_UNREG_KERN,
5470 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
5471 "expected hotplug seq %llu did not match actual %llu",
5472 ops->hotplug_seq, global_hotplug_seq);
5473 }
5474 }
5475 }
5476
5477 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
5478 {
5479 /*
5480 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5481 * ops.enqueue() callback isn't implemented.
5482 */
5483 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5484 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5485 return -EINVAL;
5486 }
5487
5488 /*
5489 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
5490 * selection policy to be enabled.
5491 */
5492 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
5493 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
5494 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
5495 return -EINVAL;
5496 }
5497
5498 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
5499 pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");
5500
5501 return 0;
5502 }
5503
5504 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5505 {
5506 struct scx_sched *sch;
5507 struct scx_task_iter sti;
5508 struct task_struct *p;
5509 unsigned long timeout;
5510 int i, cpu, ret;
5511
5512 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5513 cpu_possible_mask)) {
5514 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5515 return -EINVAL;
5516 }
5517
5518 mutex_lock(&scx_enable_mutex);
5519
5520 if (scx_enable_state() != SCX_DISABLED) {
5521 ret = -EBUSY;
5522 goto err_unlock;
5523 }
5524
5525 sch = scx_alloc_and_add_sched(ops);
5526 if (IS_ERR(sch)) {
5527 ret = PTR_ERR(sch);
5528 goto err_unlock;
5529 }
5530
5531 /*
5532 * Transition to ENABLING and clear exit info to arm the disable path.
5533 * Failure triggers full disabling from here on.
5534 */
5535 WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
5536 WARN_ON_ONCE(scx_root);
5537
5538 atomic_long_set(&scx_nr_rejected, 0);
5539
5540 for_each_possible_cpu(cpu)
5541 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5542
5543 /*
5544 * Keep CPUs stable during enable so that the BPF scheduler can track
5545 * online CPUs by watching ->on/offline_cpu() after ->init().
5546 */
5547 cpus_read_lock();
5548
5549 /*
5550 * Make the scheduler instance visible. Must be inside cpus_read_lock().
5551 * See handle_hotplug().
5552 */
5553 rcu_assign_pointer(scx_root, sch);
5554
5555 scx_idle_enable(ops);
5556
5557 if (sch->ops.init) {
5558 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL);
5559 if (ret) {
5560 ret = ops_sanitize_err(sch, "init", ret);
5561 cpus_read_unlock();
5562 scx_error(sch, "ops.init() failed (%d)", ret);
5563 goto err_disable;
5564 }
5565 }
5566
5567 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5568 if (((void (**)(void))ops)[i])
5569 set_bit(i, sch->has_op);
5570
5571 check_hotplug_seq(sch, ops);
5572 scx_idle_update_selcpu_topology(ops);
5573
5574 cpus_read_unlock();
5575
5576 ret = validate_ops(sch, ops);
5577 if (ret)
5578 goto err_disable;
5579
5580 WARN_ON_ONCE(scx_dsp_ctx);
5581 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5582 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5583 scx_dsp_max_batch),
5584 __alignof__(struct scx_dsp_ctx));
5585 if (!scx_dsp_ctx) {
5586 ret = -ENOMEM;
5587 goto err_disable;
5588 }
5589
5590 if (ops->timeout_ms)
5591 timeout = msecs_to_jiffies(ops->timeout_ms);
5592 else
5593 timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5594
5595 WRITE_ONCE(scx_watchdog_timeout, timeout);
5596 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5597 queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5598 scx_watchdog_timeout / 2);
5599
5600 /*
5601 * Once __scx_enabled is set, %current can be switched to SCX anytime.
5602 * This can lead to stalls as some BPF schedulers (e.g. userspace
5603 * scheduling) may not function correctly before all tasks are switched.
5604 * Init in bypass mode to guarantee forward progress.
5605 */
5606 scx_bypass(true);
5607
5608 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5609 if (((void (**)(void))ops)[i])
5610 set_bit(i, sch->has_op);
5611
5612 if (sch->ops.cpu_acquire || sch->ops.cpu_release)
5613 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
5614
5615 /*
5616 * Lock out forks, cgroup on/offlining and moves before opening the
5617 * floodgate so that they don't wander into the operations prematurely.
5618 */
5619 percpu_down_write(&scx_fork_rwsem);
5620
5621 WARN_ON_ONCE(scx_init_task_enabled);
5622 scx_init_task_enabled = true;
5623
5624 /*
5625 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5626 * preventing new tasks from being added. No need to exclude tasks
5627 * leaving as sched_ext_free() can handle both prepped and enabled
5628 * tasks. Prep all tasks first and then enable them with preemption
5629 * disabled.
5630 *
5631 * All cgroups should be initialized before scx_init_task() so that the
5632 * BPF scheduler can reliably track each task's cgroup membership from
5633 * scx_init_task(). Lock out cgroup on/offlining and task migrations
5634 * while tasks are being initialized so that scx_cgroup_can_attach()
5635 * never sees uninitialized tasks.
5636 */
5637 scx_cgroup_lock();
5638 ret = scx_cgroup_init(sch);
5639 if (ret)
5640 goto err_disable_unlock_all;
5641
5642 scx_task_iter_start(&sti);
5643 while ((p = scx_task_iter_next_locked(&sti))) {
5644 /*
5645 * @p may already be dead, have lost all its usage counts and
5646 * be waiting for an RCU grace period before being freed. @p can't
5647 * be initialized for SCX in such cases and should be ignored.
5648 */
5649 if (!tryget_task_struct(p))
5650 continue;
5651
5652 scx_task_iter_unlock(&sti);
5653
5654 ret = scx_init_task(p, task_group(p), false);
5655 if (ret) {
5656 put_task_struct(p);
5657 scx_task_iter_relock(&sti);
5658 scx_task_iter_stop(&sti);
5659 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
5660 ret, p->comm, p->pid);
5661 goto err_disable_unlock_all;
5662 }
5663
5664 scx_set_task_state(p, SCX_TASK_READY);
5665
5666 put_task_struct(p);
5667 scx_task_iter_relock(&sti);
5668 }
5669 scx_task_iter_stop(&sti);
5670 scx_cgroup_unlock();
5671 percpu_up_write(&scx_fork_rwsem);
5672
5673 /*
5674 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5675 * all eligible tasks.
5676 */
5677 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5678 static_branch_enable(&__scx_enabled);
5679
5680 /*
5681 * We're fully committed and can't fail. The task READY -> ENABLED
5682 * transitions here are synchronized against sched_ext_free() through
5683 * scx_tasks_lock.
5684 */
5685 percpu_down_write(&scx_fork_rwsem);
5686 scx_task_iter_start(&sti);
5687 while ((p = scx_task_iter_next_locked(&sti))) {
5688 const struct sched_class *old_class = p->sched_class;
5689 const struct sched_class *new_class =
5690 __setscheduler_class(p->policy, p->prio);
5691 struct sched_enq_and_set_ctx ctx;
5692
5693 if (old_class != new_class && p->se.sched_delayed)
5694 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5695
5696 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5697
5698 p->scx.slice = SCX_SLICE_DFL;
5699 p->sched_class = new_class;
5700 check_class_changing(task_rq(p), p, old_class);
5701
5702 sched_enq_and_set_task(&ctx);
5703
5704 check_class_changed(task_rq(p), p, old_class, p->prio);
5705 }
5706 scx_task_iter_stop(&sti);
5707 percpu_up_write(&scx_fork_rwsem);
5708
5709 scx_bypass(false);
5710
5711 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
5712 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
5713 goto err_disable;
5714 }
5715
5716 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5717 static_branch_enable(&__scx_switched_all);
5718
5719 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5720 sch->ops.name, scx_switched_all() ? "" : " (partial)");
5721 kobject_uevent(&sch->kobj, KOBJ_ADD);
5722 mutex_unlock(&scx_enable_mutex);
5723
5724 atomic_long_inc(&scx_enable_seq);
5725
5726 return 0;
5727
5728 err_unlock:
5729 mutex_unlock(&scx_enable_mutex);
5730 return ret;
5731
5732 err_disable_unlock_all:
5733 scx_cgroup_unlock();
5734 percpu_up_write(&scx_fork_rwsem);
5735 scx_bypass(false);
5736 err_disable:
5737 mutex_unlock(&scx_enable_mutex);
5738 /*
5739 * Returning an error code here would not pass all the error information
5740 * to userspace. Record errno using scx_error() for cases where scx_error()
5741 * wasn't already invoked, and exit indicating success so that the error
5742 * is reported through ops.exit() with all the details.
5743 *
5744 * Flush scx_disable_work to ensure that the error is reported before init
5745 * completion. sch's base reference will be put by bpf_scx_unreg().
5746 */
5747 scx_error(sch, "scx_enable() failed (%d)", ret);
5748 kthread_flush_work(&sch->disable_work);
5749 return 0;
5750 }
5751
5752
5753 /********************************************************************************
5754 * bpf_struct_ops plumbing.
5755 */
5756 #include <linux/bpf_verifier.h>
5757 #include <linux/bpf.h>
5758 #include <linux/btf.h>
5759
5760 static const struct btf_type *task_struct_type;
5761
5762 static bool bpf_scx_is_valid_access(int off, int size,
5763 enum bpf_access_type type,
5764 const struct bpf_prog *prog,
5765 struct bpf_insn_access_aux *info)
5766 {
5767 if (type != BPF_READ)
5768 return false;
5769 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5770 return false;
5771 if (off % size != 0)
5772 return false;
5773
5774 return btf_ctx_access(off, size, type, prog, info);
5775 }
5776
5777 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5778 const struct bpf_reg_state *reg, int off,
5779 int size)
5780 {
5781 const struct btf_type *t;
5782
5783 t = btf_type_by_id(reg->btf, reg->btf_id);
5784 if (t == task_struct_type) {
5785 if (off >= offsetof(struct task_struct, scx.slice) &&
5786 off + size <= offsetofend(struct task_struct, scx.slice))
5787 return SCALAR_VALUE;
5788 if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5789 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5790 return SCALAR_VALUE;
5791 if (off >= offsetof(struct task_struct, scx.disallow) &&
5792 off + size <= offsetofend(struct task_struct, scx.disallow))
5793 return SCALAR_VALUE;
5794 }
5795
5796 return -EACCES;
5797 }
5798
5799 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5800 .get_func_proto = bpf_base_func_proto,
5801 .is_valid_access = bpf_scx_is_valid_access,
5802 .btf_struct_access = bpf_scx_btf_struct_access,
5803 };
5804
5805 static int bpf_scx_init_member(const struct btf_type *t,
5806 const struct btf_member *member,
5807 void *kdata, const void *udata)
5808 {
5809 const struct sched_ext_ops *uops = udata;
5810 struct sched_ext_ops *ops = kdata;
5811 u32 moff = __btf_member_bit_offset(t, member) / 8;
5812 int ret;
5813
5814 switch (moff) {
5815 case offsetof(struct sched_ext_ops, dispatch_max_batch):
5816 if (*(u32 *)(udata + moff) > INT_MAX)
5817 return -E2BIG;
5818 ops->dispatch_max_batch = *(u32 *)(udata + moff);
5819 return 1;
5820 case offsetof(struct sched_ext_ops, flags):
5821 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5822 return -EINVAL;
5823 ops->flags = *(u64 *)(udata + moff);
5824 return 1;
5825 case offsetof(struct sched_ext_ops, name):
5826 ret = bpf_obj_name_cpy(ops->name, uops->name,
5827 sizeof(ops->name));
5828 if (ret < 0)
5829 return ret;
5830 if (ret == 0)
5831 return -EINVAL;
5832 return 1;
5833 case offsetof(struct sched_ext_ops, timeout_ms):
5834 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5835 SCX_WATCHDOG_MAX_TIMEOUT)
5836 return -E2BIG;
5837 ops->timeout_ms = *(u32 *)(udata + moff);
5838 return 1;
5839 case offsetof(struct sched_ext_ops, exit_dump_len):
5840 ops->exit_dump_len =
5841 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5842 return 1;
5843 case offsetof(struct sched_ext_ops, hotplug_seq):
5844 ops->hotplug_seq = *(u64 *)(udata + moff);
5845 return 1;
5846 }
5847
5848 return 0;
5849 }
5850
5851 static int bpf_scx_check_member(const struct btf_type *t,
5852 const struct btf_member *member,
5853 const struct bpf_prog *prog)
5854 {
5855 u32 moff = __btf_member_bit_offset(t, member) / 8;
5856
5857 switch (moff) {
5858 case offsetof(struct sched_ext_ops, init_task):
5859 #ifdef CONFIG_EXT_GROUP_SCHED
5860 case offsetof(struct sched_ext_ops, cgroup_init):
5861 case offsetof(struct sched_ext_ops, cgroup_exit):
5862 case offsetof(struct sched_ext_ops, cgroup_prep_move):
5863 #endif
5864 case offsetof(struct sched_ext_ops, cpu_online):
5865 case offsetof(struct sched_ext_ops, cpu_offline):
5866 case offsetof(struct sched_ext_ops, init):
5867 case offsetof(struct sched_ext_ops, exit):
5868 break;
5869 default:
5870 if (prog->sleepable)
5871 return -EINVAL;
5872 }
5873
5874 return 0;
5875 }
5876
5877 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5878 {
5879 return scx_enable(kdata, link);
5880 }
5881
5882 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5883 {
5884 struct sched_ext_ops *ops = kdata;
5885 struct scx_sched *sch = ops->priv;
5886
5887 scx_disable(SCX_EXIT_UNREG);
5888 kthread_flush_work(&sch->disable_work);
5889 kobject_put(&sch->kobj);
5890 }
5891
5892 static int bpf_scx_init(struct btf *btf)
5893 {
5894 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5895
5896 return 0;
5897 }
5898
5899 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5900 {
5901 /*
5902 * sched_ext does not support updating the actively-loaded BPF
5903 * scheduler, as registering a BPF scheduler can always fail if the
5904 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5905 * etc. Similarly, we can always race with unregistration happening
5906 * elsewhere, such as with sysrq.
5907 */
5908 return -EOPNOTSUPP;
5909 }
5910
5911 static int bpf_scx_validate(void *kdata)
5912 {
5913 return 0;
5914 }
5915
5916 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5917 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
5918 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
5919 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
5920 static void sched_ext_ops__tick(struct task_struct *p) {}
5921 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
5922 static void sched_ext_ops__running(struct task_struct *p) {}
5923 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
5924 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
5925 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
5926 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
5927 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
5928 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
5929 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
5930 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
5931 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
5932 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5933 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
5934 static void sched_ext_ops__enable(struct task_struct *p) {}
5935 static void sched_ext_ops__disable(struct task_struct *p) {}
5936 #ifdef CONFIG_EXT_GROUP_SCHED
5937 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5938 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
5939 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5940 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5941 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5942 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
5943 #endif
5944 static void sched_ext_ops__cpu_online(s32 cpu) {}
5945 static void sched_ext_ops__cpu_offline(s32 cpu) {}
5946 static s32 sched_ext_ops__init(void) { return -EINVAL; }
5947 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
5948 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
5949 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5950 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5951
5952 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5953 .select_cpu = sched_ext_ops__select_cpu,
5954 .enqueue = sched_ext_ops__enqueue,
5955 .dequeue = sched_ext_ops__dequeue,
5956 .dispatch = sched_ext_ops__dispatch,
5957 .tick = sched_ext_ops__tick,
5958 .runnable = sched_ext_ops__runnable,
5959 .running = sched_ext_ops__running,
5960 .stopping = sched_ext_ops__stopping,
5961 .quiescent = sched_ext_ops__quiescent,
5962 .yield = sched_ext_ops__yield,
5963 .core_sched_before = sched_ext_ops__core_sched_before,
5964 .set_weight = sched_ext_ops__set_weight,
5965 .set_cpumask = sched_ext_ops__set_cpumask,
5966 .update_idle = sched_ext_ops__update_idle,
5967 .cpu_acquire = sched_ext_ops__cpu_acquire,
5968 .cpu_release = sched_ext_ops__cpu_release,
5969 .init_task = sched_ext_ops__init_task,
5970 .exit_task = sched_ext_ops__exit_task,
5971 .enable = sched_ext_ops__enable,
5972 .disable = sched_ext_ops__disable,
5973 #ifdef CONFIG_EXT_GROUP_SCHED
5974 .cgroup_init = sched_ext_ops__cgroup_init,
5975 .cgroup_exit = sched_ext_ops__cgroup_exit,
5976 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move,
5977 .cgroup_move = sched_ext_ops__cgroup_move,
5978 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move,
5979 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight,
5980 #endif
5981 .cpu_online = sched_ext_ops__cpu_online,
5982 .cpu_offline = sched_ext_ops__cpu_offline,
5983 .init = sched_ext_ops__init,
5984 .exit = sched_ext_ops__exit,
5985 .dump = sched_ext_ops__dump,
5986 .dump_cpu = sched_ext_ops__dump_cpu,
5987 .dump_task = sched_ext_ops__dump_task,
5988 };
5989
5990 static struct bpf_struct_ops bpf_sched_ext_ops = {
5991 .verifier_ops = &bpf_scx_verifier_ops,
5992 .reg = bpf_scx_reg,
5993 .unreg = bpf_scx_unreg,
5994 .check_member = bpf_scx_check_member,
5995 .init_member = bpf_scx_init_member,
5996 .init = bpf_scx_init,
5997 .update = bpf_scx_update,
5998 .validate = bpf_scx_validate,
5999 .name = "sched_ext_ops",
6000 .owner = THIS_MODULE,
6001 .cfi_stubs = &__bpf_ops_sched_ext_ops
6002 };
6003
6004
6005 /********************************************************************************
6006 * System integration and init.
6007 */
6008
6009 static void sysrq_handle_sched_ext_reset(u8 key)
6010 {
6011 scx_disable(SCX_EXIT_SYSRQ);
6012 }
6013
6014 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
6015 .handler = sysrq_handle_sched_ext_reset,
6016 .help_msg = "reset-sched-ext(S)",
6017 .action_msg = "Disable sched_ext and revert all tasks to CFS",
6018 .enable_mask = SYSRQ_ENABLE_RTNICE,
6019 };
6020
6021 static void sysrq_handle_sched_ext_dump(u8 key)
6022 {
6023 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
6024
6025 if (scx_enabled())
6026 scx_dump_state(&ei, 0);
6027 }
6028
6029 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
6030 .handler = sysrq_handle_sched_ext_dump,
6031 .help_msg = "dump-sched-ext(D)",
6032 .action_msg = "Trigger sched_ext debug dump",
6033 .enable_mask = SYSRQ_ENABLE_RTNICE,
6034 };
6035
6036 static bool can_skip_idle_kick(struct rq *rq)
6037 {
6038 lockdep_assert_rq_held(rq);
6039
6040 /*
6041 * We can skip idle kicking if @rq is going to go through at least one
6042 * full SCX scheduling cycle before going idle. Just checking whether
6043 * curr is not idle is insufficient because we could be racing
6044 * balance_one() trying to pull the next task from a remote rq, which
6045 * may fail, and @rq may become idle afterwards.
6046 *
6047 * The race window is small and we don't and can't guarantee that @rq is
6048 * only kicked while idle anyway. Skip only when sure.
6049 */
6050 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
6051 }
6052
6053 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
6054 {
6055 struct rq *rq = cpu_rq(cpu);
6056 struct scx_rq *this_scx = &this_rq->scx;
6057 bool should_wait = false;
6058 unsigned long flags;
6059
6060 raw_spin_rq_lock_irqsave(rq, flags);
6061
6062 /*
6063 * During CPU hotplug, a CPU may depend on kicking itself to make
6064 * forward progress. Allow kicking self regardless of online state.
6065 */
6066 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
6067 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
6068 if (rq->curr->sched_class == &ext_sched_class)
6069 rq->curr->scx.slice = 0;
6070 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6071 }
6072
6073 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
6074 pseqs[cpu] = rq->scx.pnt_seq;
6075 should_wait = true;
6076 }
6077
6078 resched_curr(rq);
6079 } else {
6080 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
6081 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6082 }
6083
6084 raw_spin_rq_unlock_irqrestore(rq, flags);
6085
6086 return should_wait;
6087 }
6088
6089 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
6090 {
6091 struct rq *rq = cpu_rq(cpu);
6092 unsigned long flags;
6093
6094 raw_spin_rq_lock_irqsave(rq, flags);
6095
6096 if (!can_skip_idle_kick(rq) &&
6097 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
6098 resched_curr(rq);
6099
6100 raw_spin_rq_unlock_irqrestore(rq, flags);
6101 }
6102
6103 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
6104 {
6105 struct rq *this_rq = this_rq();
6106 struct scx_rq *this_scx = &this_rq->scx;
6107 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
6108 bool should_wait = false;
6109 s32 cpu;
6110
6111 for_each_cpu(cpu, this_scx->cpus_to_kick) {
6112 should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
6113 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
6114 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6115 }
6116
6117 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
6118 kick_one_cpu_if_idle(cpu, this_rq);
6119 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
6120 }
6121
6122 if (!should_wait)
6123 return;
6124
6125 for_each_cpu(cpu, this_scx->cpus_to_wait) {
6126 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
6127
6128 if (cpu != cpu_of(this_rq)) {
6129 /*
6130 * Pairs with smp_store_release() issued by this CPU in
6131 * switch_class() on the resched path.
6132 *
6133 * We busy-wait here to guarantee that no other task can
6134 * be scheduled on our core before the target CPU has
6135 * entered the resched path.
6136 */
6137 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
6138 cpu_relax();
6139 }
6140
6141 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
6142 }
6143 }
6144
6145 /**
6146 * print_scx_info - print out sched_ext scheduler state
6147 * @log_lvl: the log level to use when printing
6148 * @p: target task
6149 *
6150 * If a sched_ext scheduler is enabled, print the name and state of the
6151 * scheduler. If @p is on sched_ext, print further information about the task.
6152 *
6153 * This function can be safely called on any task as long as the task_struct
6154 * itself is accessible. While safe, this function isn't synchronized and may
6155 * print out mixed-up or garbage output of limited length.
6156 */
6157 void print_scx_info(const char *log_lvl, struct task_struct *p)
6158 {
6159 struct scx_sched *sch = scx_root;
6160 enum scx_enable_state state = scx_enable_state();
6161 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
6162 char runnable_at_buf[22] = "?";
6163 struct sched_class *class;
6164 unsigned long runnable_at;
6165
6166 if (state == SCX_DISABLED)
6167 return;
6168
6169 /*
6170 * Carefully check if the task was running on sched_ext, and then
6171 * carefully copy the time it's been runnable, and its state.
6172 */
6173 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
6174 class != &ext_sched_class) {
6175 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
6176 scx_enable_state_str[state], all);
6177 return;
6178 }
6179
6180 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
6181 sizeof(runnable_at)))
6182 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
6183 jiffies_delta_msecs(runnable_at, jiffies));
6184
6185 /* print everything onto one line to conserve console space */
6186 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
6187 log_lvl, sch->ops.name, scx_enable_state_str[state], all,
6188 runnable_at_buf);
6189 }
6190
6191 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
6192 {
6193 /*
6194 * SCX schedulers often have userspace components which are sometimes
6195 * involved in critical scheduling paths. PM operations involve freezing
6196 * userspace, which can lead to scheduling misbehaviors including stalls.
6197 * Let's bypass while PM operations are in progress.
6198 */
6199 switch (event) {
6200 case PM_HIBERNATION_PREPARE:
6201 case PM_SUSPEND_PREPARE:
6202 case PM_RESTORE_PREPARE:
6203 scx_bypass(true);
6204 break;
6205 case PM_POST_HIBERNATION:
6206 case PM_POST_SUSPEND:
6207 case PM_POST_RESTORE:
6208 scx_bypass(false);
6209 break;
6210 }
6211
6212 return NOTIFY_OK;
6213 }
6214
6215 static struct notifier_block scx_pm_notifier = {
6216 .notifier_call = scx_pm_handler,
6217 };
6218
6219 void __init init_sched_ext_class(void)
6220 {
6221 s32 cpu, v;
6222
6223 /*
6224 * The following is to prevent the compiler from optimizing out the enum
6225 * definitions so that BPF scheduler implementations can use them
6226 * through the generated vmlinux.h.
6227 */
6228 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
6229 SCX_TG_ONLINE);
6230
6231 scx_idle_init_masks();
6232
6233 scx_kick_cpus_pnt_seqs =
6234 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
6235 __alignof__(scx_kick_cpus_pnt_seqs[0]));
6236 BUG_ON(!scx_kick_cpus_pnt_seqs);
6237
6238 for_each_possible_cpu(cpu) {
6239 struct rq *rq = cpu_rq(cpu);
6240 int n = cpu_to_node(cpu);
6241
6242 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
6243 INIT_LIST_HEAD(&rq->scx.runnable_list);
6244 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
6245
6246 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
6247 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
6248 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
6249 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
6250 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
6251 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
6252
6253 if (cpu_online(cpu))
6254 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
6255 }
6256
6257 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
6258 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
6259 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
6260 }
6261
6262
6263 /********************************************************************************
6264 * Helpers that can be called from the BPF scheduler.
6265 */
6266 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
6267 {
6268 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
6269 return false;
6270
6271 lockdep_assert_irqs_disabled();
6272
6273 if (unlikely(!p)) {
6274 scx_kf_error("called with NULL task");
6275 return false;
6276 }
6277
6278 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
6279 scx_kf_error("invalid enq_flags 0x%llx", enq_flags);
6280 return false;
6281 }
6282
6283 return true;
6284 }
6285
6286 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
6287 u64 enq_flags)
6288 {
6289 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6290 struct task_struct *ddsp_task;
6291
6292 ddsp_task = __this_cpu_read(direct_dispatch_task);
6293 if (ddsp_task) {
6294 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
6295 return;
6296 }
6297
6298 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
6299 scx_kf_error("dispatch buffer overflow");
6300 return;
6301 }
6302
6303 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
6304 .task = p,
6305 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
6306 .dsq_id = dsq_id,
6307 .enq_flags = enq_flags,
6308 };
6309 }
6310
6311 __bpf_kfunc_start_defs();
6312
6313 /**
6314 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
6315 * @p: task_struct to insert
6316 * @dsq_id: DSQ to insert into
6317 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6318 * @enq_flags: SCX_ENQ_*
6319 *
6320 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
6321 * call this function spuriously. Can be called from ops.enqueue(),
6322 * ops.select_cpu(), and ops.dispatch().
6323 *
6324 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6325 * and @p must match the task being enqueued.
6326 *
6327 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6328 * will be directly inserted into the corresponding dispatch queue after
6329 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
6330 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
6331 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6332 * task is inserted.
6333 *
6334 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6335 * and this function can be called up to ops.dispatch_max_batch times to insert
6336 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of remaining
6337 * slots. scx_bpf_consume() flushes the batch and resets the counter.
6338 *
6339 * This function doesn't have any locking restrictions and may be called under
6340 * BPF locks (in the future when BPF introduces more flexible locking).
6341 *
6342 * @p is allowed to run for @slice. The scheduling path is triggered on slice
6343 * exhaustion. If zero, the current residual slice is maintained. If
6344 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6345 * scx_bpf_kick_cpu() to trigger scheduling.
6346 */
6347 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
6348 u64 enq_flags)
6349 {
6350 if (!scx_dsq_insert_preamble(p, enq_flags))
6351 return;
6352
6353 if (slice)
6354 p->scx.slice = slice;
6355 else
6356 p->scx.slice = p->scx.slice ?: 1;
6357
6358 scx_dsq_insert_commit(p, dsq_id, enq_flags);
6359 }
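/*
 * Illustrative sketch (editorial, not part of the kernel build): a minimal
 * BPF-side ops.enqueue() using scx_bpf_dsq_insert(). Assumes the scx BPF
 * helpers from the sched_ext tooling headers (e.g. the BPF_STRUCT_OPS macro);
 * the scheduler name is hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		// Queue every task on the shared global DSQ with the default slice.
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */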
6360
6361 /* for backward compatibility, will be removed in v6.15 */
6362 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6363 u64 enq_flags)
6364 {
6365 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
6366 scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
6367 }
6368
6369 /**
6370 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
6371 * @p: task_struct to insert
6372 * @dsq_id: DSQ to insert into
6373 * @slice: duration @p can run for in nsecs, 0 to keep the current value
6374 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6375 * @enq_flags: SCX_ENQ_*
6376 *
6377 * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
6378 * Tasks queued into the priority queue are ordered by @vtime. All other aspects
6379 * are identical to scx_bpf_dsq_insert().
6380 *
6381 * @vtime ordering is according to time_before64() which considers wrapping. A
6382 * numerically larger vtime may indicate an earlier position in the ordering and
6383 * vice-versa.
6384 *
6385 * A DSQ can only be used as a FIFO or priority queue at any given time and this
6386 * function must not be called on a DSQ which already has one or more FIFO tasks
6387 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6388 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6389 */
6390 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6391 u64 slice, u64 vtime, u64 enq_flags)
6392 {
6393 if (!scx_dsq_insert_preamble(p, enq_flags))
6394 return;
6395
6396 if (slice)
6397 p->scx.slice = slice;
6398 else
6399 p->scx.slice = p->scx.slice ?: 1;
6400
6401 p->scx.dsq_vtime = vtime;
6402
6403 scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6404 }
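/*
 * Illustrative sketch (editorial, not part of the kernel build): vtime-ordered
 * enqueueing into a custom DSQ. EXAMPLE_DSQ and the vtime bookkeeping are
 * hypothetical; such a DSQ would typically be created with scx_bpf_create_dsq()
 * from ops.init().
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		// Order runnable tasks by their accumulated virtual time.
 *		scx_bpf_dsq_insert_vtime(p, EXAMPLE_DSQ, SCX_SLICE_DFL,
 *					 p->scx.dsq_vtime, enq_flags);
 *	}
 */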
6405
6406 /* for backward compatibility, will be removed in v6.15 */
6407 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6408 u64 slice, u64 vtime, u64 enq_flags)
6409 {
6410 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
6411 scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
6412 }
6413
6414 __bpf_kfunc_end_defs();
6415
6416 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6417 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6418 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6419 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6420 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6421 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6422
6423 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6424 .owner = THIS_MODULE,
6425 .set = &scx_kfunc_ids_enqueue_dispatch,
6426 };
6427
6428 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6429 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6430 {
6431 struct scx_sched *sch = scx_root;
6432 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6433 struct rq *this_rq, *src_rq, *locked_rq;
6434 bool dispatched = false;
6435 bool in_balance;
6436 unsigned long flags;
6437
6438 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6439 return false;
6440
6441 /*
6442 * Can be called from either ops.dispatch() locking this_rq() or any
6443 * context where no rq lock is held. In the latter case, lock @p's task_rq which
6444 * we'll likely need anyway.
6445 */
6446 src_rq = task_rq(p);
6447
6448 local_irq_save(flags);
6449 this_rq = this_rq();
6450 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6451
6452 if (in_balance) {
6453 if (this_rq != src_rq) {
6454 raw_spin_rq_unlock(this_rq);
6455 raw_spin_rq_lock(src_rq);
6456 }
6457 } else {
6458 raw_spin_rq_lock(src_rq);
6459 }
6460
6461 /*
6462 * If the BPF scheduler keeps calling this function repeatedly, it can
6463 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
6464 * breather if necessary.
6465 */
6466 scx_breather(src_rq);
6467
6468 locked_rq = src_rq;
6469 raw_spin_lock(&src_dsq->lock);
6470
6471 /*
6472 * Did someone else get to it? @p could have already left $src_dsq, been
6473 * re-enqueued, or be in the process of being consumed by someone else.
6474 */
6475 if (unlikely(p->scx.dsq != src_dsq ||
6476 u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6477 p->scx.holding_cpu >= 0) ||
6478 WARN_ON_ONCE(src_rq != task_rq(p))) {
6479 raw_spin_unlock(&src_dsq->lock);
6480 goto out;
6481 }
6482
6483 /* @p is still on $src_dsq and stable, determine the destination */
6484 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p);
6485
6486 /*
6487 * Apply vtime and slice updates before moving so that the new time is
6488 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6489 * this is safe as we're locking it.
6490 */
6491 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6492 p->scx.dsq_vtime = kit->vtime;
6493 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6494 p->scx.slice = kit->slice;
6495
6496 /* execute move */
6497 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
6498 dispatched = true;
6499 out:
6500 if (in_balance) {
6501 if (this_rq != locked_rq) {
6502 raw_spin_rq_unlock(locked_rq);
6503 raw_spin_rq_lock(this_rq);
6504 }
6505 } else {
6506 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6507 }
6508
6509 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6510 __SCX_DSQ_ITER_HAS_VTIME);
6511 return dispatched;
6512 }
6513
6514 __bpf_kfunc_start_defs();
6515
6516 /**
6517 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6518 *
6519 * Can only be called from ops.dispatch().
6520 */
6521 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6522 {
6523 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6524 return 0;
6525
6526 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6527 }
6528
6529 /**
6530 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6531 *
6532 * Cancel the latest dispatch. Can be called multiple times to cancel further
6533 * dispatches. Can only be called from ops.dispatch().
6534 */
6535 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6536 {
6537 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6538
6539 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6540 return;
6541
6542 if (dspc->cursor > 0)
6543 dspc->cursor--;
6544 else
6545 scx_kf_error("dispatch buffer underflow");
6546 }
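/*
 * Illustrative sketch (editorial, not part of the kernel build): bounding a
 * batched ops.dispatch() with scx_bpf_dispatch_nr_slots() and undoing the
 * latest insertion with scx_bpf_dispatch_cancel() when a later check fails.
 * example_pick_task() and example_still_wants() are hypothetical BPF-side
 * helpers.
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		while (scx_bpf_dispatch_nr_slots()) {
 *			struct task_struct *p = example_pick_task();
 *
 *			if (!p)
 *				break;
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			if (!example_still_wants(p))
 *				scx_bpf_dispatch_cancel();
 *		}
 *	}
 */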
6547
6548 /**
6549 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6550 * @dsq_id: DSQ to move task from
6551 *
6552 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6553 * local DSQ for execution. Can only be called from ops.dispatch().
6554 *
6555 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6556 * before trying to move from the specified DSQ. It may also grab rq locks and
6557 * thus can't be called under any BPF locks.
6558 *
6559 * Returns %true if a task has been moved, %false if there isn't any task to
6560 * move.
6561 */
6562 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6563 {
6564 struct scx_sched *sch = scx_root;
6565 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6566 struct scx_dispatch_q *dsq;
6567
6568 if (!scx_kf_allowed(SCX_KF_DISPATCH))
6569 return false;
6570
6571 flush_dispatch_buf(sch, dspc->rq);
6572
6573 dsq = find_user_dsq(sch, dsq_id);
6574 if (unlikely(!dsq)) {
6575 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
6576 return false;
6577 }
6578
6579 if (consume_dispatch_q(sch, dspc->rq, dsq)) {
6580 /*
6581 * A successfully consumed task can be dequeued before it starts
6582 * running while the CPU is trying to migrate other dispatched
6583 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6584 * local DSQ.
6585 */
6586 dspc->nr_tasks++;
6587 return true;
6588 } else {
6589 return false;
6590 }
6591 }
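/*
 * Illustrative sketch (editorial, not part of the kernel build): the consuming
 * half of a single shared-DSQ scheduler, refilling the local DSQ from
 * ops.dispatch(). EXAMPLE_DSQ is hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		// Pull one task queued by ops.enqueue() onto this CPU.
 *		scx_bpf_dsq_move_to_local(EXAMPLE_DSQ);
 *	}
 */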
6592
6593 /* for backward compatibility, will be removed in v6.15 */
6594 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6595 {
6596 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()");
6597 return scx_bpf_dsq_move_to_local(dsq_id);
6598 }
6599
6600 /**
6601 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6602 * @it__iter: DSQ iterator in progress
6603 * @slice: duration the moved task can run for in nsecs
6604 *
6605 * Override the slice of the next task that will be moved from @it__iter using
6606 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6607 * slice duration is kept.
6608 */
6609 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6610 u64 slice)
6611 {
6612 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6613
6614 kit->slice = slice;
6615 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6616 }
6617
6618 /* for backward compatibility, will be removed in v6.15 */
6619 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6620 struct bpf_iter_scx_dsq *it__iter, u64 slice)
6621 {
6622 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6623 scx_bpf_dsq_move_set_slice(it__iter, slice);
6624 }
6625
6626 /**
6627 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6628 * @it__iter: DSQ iterator in progress
6629 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6630 *
6631 * Override the vtime of the next task that will be moved from @it__iter using
6632 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
6633 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6634 * override is ignored and cleared.
6635 */
6636 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6637 u64 vtime)
6638 {
6639 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6640
6641 kit->vtime = vtime;
6642 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6643 }
6644
6645 /* for backward compatibility, will be removed in v6.15 */
6646 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6647 struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6648 {
6649 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6650 scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6651 }
6652
6653 /**
6654 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6655 * @it__iter: DSQ iterator in progress
6656 * @p: task to transfer
6657 * @dsq_id: DSQ to move @p to
6658 * @enq_flags: SCX_ENQ_*
6659 *
6660 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6661 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6662 * be the destination.
6663 *
6664 * For the transfer to be successful, @p must still be on the DSQ and have been
6665 * queued before the DSQ iteration started. This function doesn't care whether
6666 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6667 * been queued before the iteration started.
6668 *
6669 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6670 *
6671 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6672 * lock (e.g. BPF timers or SYSCALL programs).
6673 *
6674 * Returns %true if @p has been consumed, %false if @p had already been consumed
6675 * or dequeued.
6676 */
6677 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6678 struct task_struct *p, u64 dsq_id,
6679 u64 enq_flags)
6680 {
6681 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6682 p, dsq_id, enq_flags);
6683 }
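/*
 * A minimal usage sketch: iterate a user DSQ and move a matching task to a
 * specific CPU's local DSQ, overriding its slice first. Assumes the scx
 * tooling headers' bpf_for_each() and BPF_FOR_EACH_ITER helpers; SHARED_DSQ
 * and target_cpu are hypothetical:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
 *		if (!bpf_cpumask_test_cpu(target_cpu, p->cpus_ptr))
 *			continue;
 *		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, 5 * 1000 * 1000);
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
 *				     SCX_DSQ_LOCAL_ON | target_cpu, 0))
 *			break;
 *	}
 */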
6684
6685 /* for backward compatibility, will be removed in v6.15 */
6686 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6687 struct task_struct *p, u64 dsq_id,
6688 u64 enq_flags)
6689 {
6690 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6691 return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
6692 }
6693
6694 /**
6695 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6696 * @it__iter: DSQ iterator in progress
6697 * @p: task to transfer
6698 * @dsq_id: DSQ to move @p to
6699 * @enq_flags: SCX_ENQ_*
6700 *
6701 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6702 * priority queue of the DSQ specified by @dsq_id. The destination must be a
6703 * user DSQ as only user DSQs support priority queue.
6704 *
6705 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6706 * and scx_bpf_dsq_move_set_vtime() to update.
6707 *
6708 * All other aspects are identical to scx_bpf_dsq_move(). See
6709 * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6710 */
6711 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6712 struct task_struct *p, u64 dsq_id,
6713 u64 enq_flags)
6714 {
6715 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6716 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6717 }
6718
6719 /* for backward compatibility, will be removed in v6.15 */
6720 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6721 struct task_struct *p, u64 dsq_id,
6722 u64 enq_flags)
6723 {
6724 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6725 return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
6726 }
6727
6728 __bpf_kfunc_end_defs();
6729
6730 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6731 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6732 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6733 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6734 BTF_ID_FLAGS(func, scx_bpf_consume)
6735 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6736 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6737 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6738 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6739 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6740 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6741 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6742 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6743 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6744
6745 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6746 .owner = THIS_MODULE,
6747 .set = &scx_kfunc_ids_dispatch,
6748 };
6749
6750 __bpf_kfunc_start_defs();
6751
6752 /**
6753 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6754 *
6755 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6756 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6757 * processed tasks. Can only be called from ops.cpu_release().
6758 */
6759 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6760 {
6761 LIST_HEAD(tasks);
6762 u32 nr_enqueued = 0;
6763 struct rq *rq;
6764 struct task_struct *p, *n;
6765
6766 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6767 return 0;
6768
6769 rq = cpu_rq(smp_processor_id());
6770 lockdep_assert_rq_held(rq);
6771
6772 /*
6773 * The BPF scheduler may choose to dispatch tasks back to
6774 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6775 * first to avoid processing the same tasks repeatedly.
6776 */
6777 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6778 scx.dsq_list.node) {
6779 /*
6780 * If @p is being migrated, @p's current CPU may not agree with
6781 * its allowed CPUs and the migration_cpu_stop is about to
6782 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6783 *
6784 * While racing sched property changes may also dequeue and
6785 * re-enqueue a migrating task while its current CPU and allowed
6786 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6787 * the current local DSQ for running tasks and thus are not
6788 * visible to the BPF scheduler.
6789 *
6790 * Also skip re-enqueueing tasks that can only run on this
6791 * CPU, as they would just be re-added to the same local
6792 * DSQ without any benefit.
6793 */
6794 if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1)
6795 continue;
6796
6797 dispatch_dequeue(rq, p);
6798 list_add_tail(&p->scx.dsq_list.node, &tasks);
6799 }
6800
6801 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6802 list_del_init(&p->scx.dsq_list.node);
6803 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6804 nr_enqueued++;
6805 }
6806
6807 return nr_enqueued;
6808 }
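/*
 * A minimal usage sketch, assuming the scx tooling headers: when the CPU is
 * taken over by a higher-priority sched class, hand the locally queued tasks
 * back to the BPF scheduler so they can be placed elsewhere:
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */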
6809
6810 __bpf_kfunc_end_defs();
6811
6812 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6813 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6814 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6815
6816 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6817 .owner = THIS_MODULE,
6818 .set = &scx_kfunc_ids_cpu_release,
6819 };
6820
6821 __bpf_kfunc_start_defs();
6822
6823 /**
6824 * scx_bpf_create_dsq - Create a custom DSQ
6825 * @dsq_id: DSQ to create
6826 * @node: NUMA node to allocate from
6827 *
6828 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6829 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6830 */
6831 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6832 {
6833 struct scx_dispatch_q *dsq;
6834 struct scx_sched *sch;
6835 s32 ret;
6836
6837 if (unlikely(node >= (int)nr_node_ids ||
6838 (node < 0 && node != NUMA_NO_NODE)))
6839 return -EINVAL;
6840
6841 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
6842 return -EINVAL;
6843
6844 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
6845 if (!dsq)
6846 return -ENOMEM;
6847
6848 init_dsq(dsq, dsq_id);
6849
6850 rcu_read_lock();
6851
6852 sch = rcu_dereference(scx_root);
6853 if (sch)
6854 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
6855 dsq_hash_params);
6856 else
6857 ret = -ENODEV;
6858
6859 rcu_read_unlock();
6860 if (ret)
6861 kfree(dsq);
6862 return ret;
6863 }
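/*
 * A minimal usage sketch: create a shared DSQ from the sleepable ops.init()
 * callback. SHARED_DSQ is a hypothetical ID chosen by the scheduler and
 * -1 (NUMA_NO_NODE) lets the kernel pick the allocation node:
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	}
 */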
6864
6865 __bpf_kfunc_end_defs();
6866
6867 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6868 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6869 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6870 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6871 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6872 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6873 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6874 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6875 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6876 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6877 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6878
6879 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6880 .owner = THIS_MODULE,
6881 .set = &scx_kfunc_ids_unlocked,
6882 };
6883
6884 __bpf_kfunc_start_defs();
6885
6886 /**
6887 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6888 * @cpu: cpu to kick
6889 * @flags: %SCX_KICK_* flags
6890 *
6891 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6892 * trigger rescheduling on a busy CPU. This can be called from any online
6893 * scx_ops operation and the actual kicking is performed asynchronously through
6894 * an irq work.
6895 */
6896 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6897 {
6898 struct rq *this_rq;
6899 unsigned long irq_flags;
6900
6901 if (!kf_cpu_valid(cpu, NULL))
6902 return;
6903
6904 local_irq_save(irq_flags);
6905
6906 this_rq = this_rq();
6907
6908 /*
6909 * While bypassing for PM ops, IRQ handling may not be online which can
6910 * lead to irq_work_queue() malfunction such as infinite busy wait for
6911 * IRQ status update. Suppress kicking.
6912 */
6913 if (scx_rq_bypassing(this_rq))
6914 goto out;
6915
6916 /*
6917 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6918 * rq locks. We can probably be smarter and avoid bouncing if called
6919 * from ops which don't hold a rq lock.
6920 */
6921 if (flags & SCX_KICK_IDLE) {
6922 struct rq *target_rq = cpu_rq(cpu);
6923
6924 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6925 scx_kf_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6926
6927 if (raw_spin_rq_trylock(target_rq)) {
6928 if (can_skip_idle_kick(target_rq)) {
6929 raw_spin_rq_unlock(target_rq);
6930 goto out;
6931 }
6932 raw_spin_rq_unlock(target_rq);
6933 }
6934 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6935 } else {
6936 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6937
6938 if (flags & SCX_KICK_PREEMPT)
6939 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6940 if (flags & SCX_KICK_WAIT)
6941 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6942 }
6943
6944 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6945 out:
6946 local_irq_restore(irq_flags);
6947 }
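/*
 * A minimal usage sketch, assuming the scx tooling headers: after queueing
 * work on a shared DSQ from ops.enqueue(), wake an idle CPU so the task
 * doesn't wait for the next tick. SHARED_DSQ is hypothetical:
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		s32 idle_cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *		if (idle_cpu >= 0)
 *			scx_bpf_kick_cpu(idle_cpu, SCX_KICK_IDLE);
 *	}
 */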
6948
6949 /**
6950 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6951 * @dsq_id: id of the DSQ
6952 *
6953 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6954 * -%ENOENT is returned.
6955 */
6956 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6957 {
6958 struct scx_sched *sch;
6959 struct scx_dispatch_q *dsq;
6960 s32 ret;
6961
6962 preempt_disable();
6963
6964 sch = rcu_dereference_sched(scx_root);
6965 if (unlikely(!sch)) {
6966 ret = -ENODEV;
6967 goto out;
6968 }
6969
6970 if (dsq_id == SCX_DSQ_LOCAL) {
6971 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6972 goto out;
6973 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6974 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6975
6976 if (ops_cpu_valid(sch, cpu, NULL)) {
6977 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6978 goto out;
6979 }
6980 } else {
6981 dsq = find_user_dsq(sch, dsq_id);
6982 if (dsq) {
6983 ret = READ_ONCE(dsq->nr);
6984 goto out;
6985 }
6986 }
6987 ret = -ENOENT;
6988 out:
6989 preempt_enable();
6990 return ret;
6991 }
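/*
 * A minimal usage sketch: poll the depth of a shared DSQ, e.g. from a periodic
 * BPF timer callback, and kick a CPU when it starts backing up. SHARED_DSQ and
 * the threshold are hypothetical:
 *
 *	if (scx_bpf_dsq_nr_queued(SHARED_DSQ) > 16)
 *		scx_bpf_kick_cpu(0, SCX_KICK_PREEMPT);
 */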
6992
6993 /**
6994 * scx_bpf_destroy_dsq - Destroy a custom DSQ
6995 * @dsq_id: DSQ to destroy
6996 *
6997 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
6998 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
6999 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
7000 * which doesn't exist. Can be called from any online scx_ops operation.
7001 */
7002 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
7003 {
7004 struct scx_sched *sch;
7005
7006 rcu_read_lock();
7007 sch = rcu_dereference(scx_root);
7008 if (sch)
7009 destroy_dsq(sch, dsq_id);
7010 rcu_read_unlock();
7011 }
7012
7013 /**
7014 * bpf_iter_scx_dsq_new - Create a DSQ iterator
7015 * @it: iterator to initialize
7016 * @dsq_id: DSQ to iterate
7017 * @flags: %SCX_DSQ_ITER_*
7018 *
7019 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
7020 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
7021 * tasks which are already queued when this function is invoked.
7022 */
7023 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
7024 u64 flags)
7025 {
7026 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7027 struct scx_sched *sch;
7028
7029 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
7030 sizeof(struct bpf_iter_scx_dsq));
7031 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
7032 __alignof__(struct bpf_iter_scx_dsq));
7033
7034 /*
7035 * next() and destroy() will be called regardless of the return value.
7036 * Always clear $kit->dsq.
7037 */
7038 kit->dsq = NULL;
7039
7040 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held());
7041 if (unlikely(!sch))
7042 return -ENODEV;
7043
7044 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
7045 return -EINVAL;
7046
7047 kit->dsq = find_user_dsq(sch, dsq_id);
7048 if (!kit->dsq)
7049 return -ENOENT;
7050
7051 INIT_LIST_HEAD(&kit->cursor.node);
7052 kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
7053 kit->cursor.priv = READ_ONCE(kit->dsq->seq);
7054
7055 return 0;
7056 }
7057
7058 /**
7059 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
7060 * @it: iterator to progress
7061 *
7062 * Return the next task. See bpf_iter_scx_dsq_new().
7063 */
7064 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
7065 {
7066 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7067 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
7068 struct task_struct *p;
7069 unsigned long flags;
7070
7071 if (!kit->dsq)
7072 return NULL;
7073
7074 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7075
7076 if (list_empty(&kit->cursor.node))
7077 p = NULL;
7078 else
7079 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
7080
7081 /*
7082 * Only tasks which were queued before the iteration started are
7083 * visible. This bounds BPF iterations and guarantees that vtime never
7084 * jumps in the other direction while iterating.
7085 */
7086 do {
7087 p = nldsq_next_task(kit->dsq, p, rev);
7088 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
7089
7090 if (p) {
7091 if (rev)
7092 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
7093 else
7094 list_move(&kit->cursor.node, &p->scx.dsq_list.node);
7095 } else {
7096 list_del_init(&kit->cursor.node);
7097 }
7098
7099 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7100
7101 return p;
7102 }
7103
7104 /**
7105 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
7106 * @it: iterator to destroy
7107 *
7108 * Undo bpf_iter_scx_dsq_new().
7109 */
7110 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
7111 {
7112 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
7113
7114 if (!kit->dsq)
7115 return;
7116
7117 if (!list_empty(&kit->cursor.node)) {
7118 unsigned long flags;
7119
7120 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
7121 list_del_init(&kit->cursor.node);
7122 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
7123 }
7124 kit->dsq = NULL;
7125 }
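/*
 * A minimal usage sketch of the open-coded iteration that the scx tooling's
 * bpf_for_each(scx_dsq, ...) macro wraps; next() and destroy() are called
 * regardless of new()'s return value. SHARED_DSQ is hypothetical:
 *
 *	struct bpf_iter_scx_dsq it;
 *	struct task_struct *p;
 *
 *	bpf_iter_scx_dsq_new(&it, SHARED_DSQ, 0);
 *	while ((p = bpf_iter_scx_dsq_next(&it)))
 *		bpf_printk("queued: %s[%d]", p->comm, p->pid);
 *	bpf_iter_scx_dsq_destroy(&it);
 */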
7126
7127 __bpf_kfunc_end_defs();
7128
7129 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
7130 char *fmt, unsigned long long *data, u32 data__sz)
7131 {
7132 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
7133 s32 ret;
7134
7135 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
7136 (data__sz && !data)) {
7137 scx_kf_error("invalid data=%p and data__sz=%u", (void *)data, data__sz);
7138 return -EINVAL;
7139 }
7140
7141 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
7142 if (ret < 0) {
7143 scx_kf_error("failed to read data fields (%d)", ret);
7144 return ret;
7145 }
7146
7147 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
7148 &bprintf_data);
7149 if (ret < 0) {
7150 scx_kf_error("format preparation failed (%d)", ret);
7151 return ret;
7152 }
7153
7154 ret = bstr_printf(line_buf, line_size, fmt,
7155 bprintf_data.bin_args);
7156 bpf_bprintf_cleanup(&bprintf_data);
7157 if (ret < 0) {
7158 scx_kf_error("(\"%s\", %p, %u) failed to format", fmt, data, data__sz);
7159 return ret;
7160 }
7161
7162 return ret;
7163 }
7164
7165 static s32 bstr_format(struct scx_bstr_buf *buf,
7166 char *fmt, unsigned long long *data, u32 data__sz)
7167 {
7168 return __bstr_format(buf->data, buf->line, sizeof(buf->line),
7169 fmt, data, data__sz);
7170 }
7171
7172 __bpf_kfunc_start_defs();
7173
7174 /**
7175 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
7176 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
7177 * @fmt: error message format string
7178 * @data: format string parameters packaged using ___bpf_fill() macro
7179 * @data__sz: @data len, must end in '__sz' for the verifier
7180 *
7181 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
7182 * disabling.
7183 */
7184 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
7185 unsigned long long *data, u32 data__sz)
7186 {
7187 unsigned long flags;
7188
7189 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7190 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7191 scx_kf_exit(SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line);
7192 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7193 }
7194
7195 /**
7196 * scx_bpf_error_bstr - Indicate fatal error
7197 * @fmt: error message format string
7198 * @data: format string parameters packaged using ___bpf_fill() macro
7199 * @data__sz: @data len, must end in '__sz' for the verifier
7200 *
7201 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
7202 * disabling.
7203 */
7204 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
7205 u32 data__sz)
7206 {
7207 unsigned long flags;
7208
7209 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
7210 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
7211 scx_kf_exit(SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line);
7212 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
7213 }
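/*
 * A minimal usage sketch: BPF schedulers normally reach these two kfuncs
 * through the scx_bpf_error() and scx_bpf_exit() wrappers in the scx tooling
 * headers, which pack the varargs into the data/data__sz pair for the
 * verifier. nr_cpus and nr_dispatches are hypothetical scheduler state:
 *
 *	if (cpu < 0 || cpu >= nr_cpus)
 *		scx_bpf_error("invalid CPU %d", cpu);
 *
 *	scx_bpf_exit(0, "shutting down after %llu dispatches", nr_dispatches);
 */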
7214
7215 /**
7216 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
7217 * @fmt: format string
7218 * @data: format string parameters packaged using ___bpf_fill() macro
7219 * @data__sz: @data len, must end in '__sz' for the verifier
7220 *
7221 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
7222 * dump_task() to generate extra debug dump specific to the BPF scheduler.
7223 *
7224 * The extra dump may be multiple lines. A single line may be split over
7225 * multiple calls. The last line is automatically terminated.
7226 */
7227 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
7228 u32 data__sz)
7229 {
7230 struct scx_dump_data *dd = &scx_dump_data;
7231 struct scx_bstr_buf *buf = &dd->buf;
7232 s32 ret;
7233
7234 if (raw_smp_processor_id() != dd->cpu) {
7235 scx_kf_error("scx_bpf_dump() must only be called from ops.dump() and friends");
7236 return;
7237 }
7238
7239 /* append the formatted string to the line buf */
7240 ret = __bstr_format(buf->data, buf->line + dd->cursor,
7241 sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
7242 if (ret < 0) {
7243 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
7244 dd->prefix, fmt, data, data__sz, ret);
7245 return;
7246 }
7247
7248 dd->cursor += ret;
7249 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
7250
7251 if (!dd->cursor)
7252 return;
7253
7254 /*
7255 * If the line buf overflowed or ends in a newline, flush it into the
7256 * dump. This is to allow the caller to generate a single line over
7257 * multiple calls. As ops_dump_flush() can also handle multiple lines in
7258 * the line buf, the only case which can lead to an unexpected
7259 * truncation is when the caller keeps generating newlines in the middle
7260 * instead of the end consecutively. Don't do that.
7261 */
7262 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
7263 ops_dump_flush();
7264 }
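/*
 * A minimal usage sketch: emit scheduler-specific state into the debug dump
 * via the scx_bpf_dump() wrapper from the scx tooling headers. SHARED_DSQ is
 * hypothetical:
 *
 *	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
 *	{
 *		scx_bpf_dump("shared DSQ depth=%d\n",
 *			     scx_bpf_dsq_nr_queued(SHARED_DSQ));
 *	}
 */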
7265
7266 /**
7267 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7268 * @cpu: CPU of interest
7269 *
7270 * Return the maximum relative capacity of @cpu in relation to the most
7271 * performant CPU in the system. The return value is in the range [1,
7272 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7273 */
7274 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7275 {
7276 if (kf_cpu_valid(cpu, NULL))
7277 return arch_scale_cpu_capacity(cpu);
7278 else
7279 return SCX_CPUPERF_ONE;
7280 }
7281
7282 /**
7283 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7284 * @cpu: CPU of interest
7285 *
7286 * Return the current relative performance of @cpu in relation to its maximum.
7287 * The return value is in the range [1, %SCX_CPUPERF_ONE].
7288 *
7289 * The current performance level of a CPU in relation to the maximum performance
7290 * available in the system can be calculated as follows:
7291 *
7292 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7293 *
7294 * The result is in the range [1, %SCX_CPUPERF_ONE].
7295 */
7296 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7297 {
7298 if (kf_cpu_valid(cpu, NULL))
7299 return arch_scale_freq_capacity(cpu);
7300 else
7301 return SCX_CPUPERF_ONE;
7302 }
7303
7304 /**
7305 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7306 * @cpu: CPU of interest
7307 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7308 *
7309 * Set the target performance level of @cpu to @perf. @perf is in linear
7310 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7311 * schedutil cpufreq governor chooses the target frequency.
7312 *
7313 * The actual performance level chosen, CPU grouping, and the overhead and
7314 * latency of the operations are dependent on the hardware and cpufreq driver in
7315 * use. Consult hardware and cpufreq documentation for more information. The
7316 * current performance level can be monitored using scx_bpf_cpuperf_cur().
7317 */
7318 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7319 {
7320 if (unlikely(perf > SCX_CPUPERF_ONE)) {
7321 scx_kf_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
7322 return;
7323 }
7324
7325 if (kf_cpu_valid(cpu, NULL)) {
7326 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
7327 struct rq_flags rf;
7328
7329 /*
7330 * When called with an rq lock held, restrict the operation
7331 * to the corresponding CPU to prevent ABBA deadlocks.
7332 */
7333 if (locked_rq && rq != locked_rq) {
7334 scx_kf_error("Invalid target CPU %d", cpu);
7335 return;
7336 }
7337
7338 /*
7339 * If no rq lock is held, allow to operate on any CPU by
7340 * acquiring the corresponding rq lock.
7341 */
7342 if (!locked_rq) {
7343 rq_lock_irqsave(rq, &rf);
7344 update_rq_clock(rq);
7345 }
7346
7347 rq->scx.cpuperf_target = perf;
7348 cpufreq_update_util(rq, 0);
7349
7350 if (!locked_rq)
7351 rq_unlock_irqrestore(rq, &rf);
7352 }
7353 }
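/*
 * A minimal usage sketch: drive the frequency target of the current task's CPU
 * from ops.tick(), where that CPU's rq lock is already held. util_of() is a
 * hypothetical helper returning a value in [0, SCX_CPUPERF_ONE]:
 *
 *	void BPF_STRUCT_OPS(example_tick, struct task_struct *p)
 *	{
 *		s32 cpu = scx_bpf_task_cpu(p);
 *
 *		scx_bpf_cpuperf_set(cpu, util_of(cpu));
 *	}
 */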
7354
7355 /**
7356 * scx_bpf_nr_node_ids - Return the number of possible node IDs
7357 *
7358 * All valid node IDs in the system are smaller than the returned value.
7359 */
7360 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
7361 {
7362 return nr_node_ids;
7363 }
7364
7365 /**
7366 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7367 *
7368 * All valid CPU IDs in the system are smaller than the returned value.
7369 */
7370 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7371 {
7372 return nr_cpu_ids;
7373 }
7374
7375 /**
7376 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7377 */
7378 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7379 {
7380 return cpu_possible_mask;
7381 }
7382
7383 /**
7384 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7385 */
7386 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7387 {
7388 return cpu_online_mask;
7389 }
7390
7391 /**
7392 * scx_bpf_put_cpumask - Release a possible/online cpumask
7393 * @cpumask: cpumask to release
7394 */
7395 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7396 {
7397 /*
7398 * Empty function body because we aren't actually acquiring or releasing
7399 * a reference to a global cpumask, which is read-only in the caller and
7400 * is never released. The acquire / release semantics here are just used
7401 * to make the cpumask a trusted pointer in the caller.
7402 */
7403 }
7404
7405 /**
7406 * scx_bpf_task_running - Is task currently running?
7407 * @p: task of interest
7408 */
7409 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7410 {
7411 return task_rq(p)->curr == p;
7412 }
7413
7414 /**
7415 * scx_bpf_task_cpu - CPU a task is currently associated with
7416 * @p: task of interest
7417 */
7418 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7419 {
7420 return task_cpu(p);
7421 }
7422
7423 /**
7424 * scx_bpf_cpu_rq - Fetch the rq of a CPU
7425 * @cpu: CPU of the rq
7426 */
7427 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7428 {
7429 if (!kf_cpu_valid(cpu, NULL))
7430 return NULL;
7431
7432 return cpu_rq(cpu);
7433 }
7434
7435 /**
7436 * scx_bpf_task_cgroup - Return the sched cgroup of a task
7437 * @p: task of interest
7438 *
7439 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7440 * from the scheduler's POV. SCX operations should use this function to
7441 * determine @p's current cgroup as, unlike following @p->cgroups,
7442 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7443 * rq-locked operations. Can be called on the parameter tasks of rq-locked
7444 * operations. The restriction guarantees that @p's rq is locked by the caller.
7445 */
7446 #ifdef CONFIG_CGROUP_SCHED
7447 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7448 {
7449 struct task_group *tg = p->sched_task_group;
7450 struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7451
7452 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7453 goto out;
7454
7455 cgrp = tg_cgrp(tg);
7456
7457 out:
7458 cgroup_get(cgrp);
7459 return cgrp;
7460 }
7461 #endif
7462
7463 /**
7464 * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7465 * clock for the current CPU. The clock returned is in nanoseconds.
7466 *
7467 * It provides the following properties:
7468 *
7469 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7470 * to account for execution time and track tasks' runtime properties.
7471 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
7472 * eventually reads a hardware timestamp counter -- is neither performant nor
7473 * scalable. scx_bpf_now() aims to provide a high-performance clock by
7474 * using the rq clock in the scheduler core whenever possible.
7475 *
7476 * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7477 * scheduler use cases, the required clock resolution is lower than the most
7478 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7479 * uses the rq clock in the scheduler core whenever it is valid. It considers
7480 * that the rq clock is valid from the time the rq clock is updated
7481 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7482 *
7483 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7484 * guarantees the clock never goes backward when comparing them in the same
7485 * CPU. On the other hand, when comparing clocks in different CPUs, there
7486 * is no such guarantee -- the clock can go backward. It provides a
7487 * monotonically *non-decreasing* clock so that it would provide the same
7488 * clock values in two different scx_bpf_now() calls in the same CPU
7489 * during the same period of when the rq clock is valid.
7490 */
7491 __bpf_kfunc u64 scx_bpf_now(void)
7492 {
7493 struct rq *rq;
7494 u64 clock;
7495
7496 preempt_disable();
7497
7498 rq = this_rq();
7499 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7500 /*
7501 * If the rq clock is valid, use the cached rq clock.
7502 *
7503 * Note that scx_bpf_now() is re-entrant between a process
7504 * context and an interrupt context (e.g., timer interrupt).
7505 * However, we don't need to consider the race between them
7506 * because such race is not observable from a caller.
7507 */
7508 clock = READ_ONCE(rq->scx.clock);
7509 } else {
7510 /*
7511 * Otherwise, return a fresh rq clock.
7512 *
7513 * The rq clock is updated outside of the rq lock.
7514 * In this case, keep the updated rq clock invalid so the next
7515 * kfunc call outside the rq lock gets a fresh rq clock.
7516 */
7517 clock = sched_clock_cpu(cpu_of(rq));
7518 }
7519
7520 preempt_enable();
7521
7522 return clock;
7523 }
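/*
 * A minimal usage sketch: account per-task CPU time cheaply by sampling
 * scx_bpf_now() from ops.running() and ops.stopping(), which run on the same
 * CPU. started_at and total_runtime are hypothetical per-task fields (e.g.
 * kept in task local storage):
 *
 *	void BPF_STRUCT_OPS(example_running, struct task_struct *p)
 *	{
 *		started_at = scx_bpf_now();
 *	}
 *
 *	void BPF_STRUCT_OPS(example_stopping, struct task_struct *p, bool runnable)
 *	{
 *		total_runtime += scx_bpf_now() - started_at;
 *	}
 */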
7524
7525 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
7526 {
7527 struct scx_event_stats *e_cpu;
7528 int cpu;
7529
7530 /* Aggregate per-CPU event counters into @events. */
7531 memset(events, 0, sizeof(*events));
7532 for_each_possible_cpu(cpu) {
7533 e_cpu = per_cpu_ptr(sch->event_stats_cpu, cpu);
7534 scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
7535 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
7536 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
7537 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
7538 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
7539 scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
7540 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
7541 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
7542 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
7543 }
7544 }
7545
7546 /*
7547 * scx_bpf_events - Copy the system-wide event counters to a BPF buffer
7548 * @events: output buffer from a BPF program
7549 * @events__sz: @events len, must end in '__sz' for the verifier
7550 */
7551 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
7552 size_t events__sz)
7553 {
7554 struct scx_sched *sch;
7555 struct scx_event_stats e_sys;
7556
7557 rcu_read_lock();
7558 sch = rcu_dereference(scx_root);
7559 if (sch)
7560 scx_read_events(sch, &e_sys);
7561 else
7562 memset(&e_sys, 0, sizeof(e_sys));
7563 rcu_read_unlock();
7564
7565 /*
7566 * We cannot entirely trust a BPF-provided size since a BPF program
7567 * might be compiled against a different vmlinux.h, of which
7568 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
7569 * (an older vmlinux.h). Hence, we use the smaller size to avoid
7570 * memory corruption.
7571 */
7572 events__sz = min(events__sz, sizeof(*events));
7573 memcpy(events, &e_sys, events__sz);
7574 }
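/*
 * A minimal usage sketch: snapshot the aggregated counters, e.g. from a
 * BPF_PROG_TYPE_SYSCALL program, and report one of them. The member name
 * follows the struct scx_event_stats definition used by scx_read_events():
 *
 *	struct scx_event_stats events;
 *
 *	scx_bpf_events(&events, sizeof(events));
 *	bpf_printk("fallback CPU selections: %llu",
 *		   events.SCX_EV_SELECT_CPU_FALLBACK);
 */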
7575
7576 __bpf_kfunc_end_defs();
7577
7578 BTF_KFUNCS_START(scx_kfunc_ids_any)
7579 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7580 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7581 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7582 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7583 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7584 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7585 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7586 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7587 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7588 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7589 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7590 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7591 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
7592 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7593 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7594 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7595 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7596 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7597 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7598 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7599 #ifdef CONFIG_CGROUP_SCHED
7600 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7601 #endif
7602 BTF_ID_FLAGS(func, scx_bpf_now)
7603 BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS)
7604 BTF_KFUNCS_END(scx_kfunc_ids_any)
7605
7606 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7607 .owner = THIS_MODULE,
7608 .set = &scx_kfunc_ids_any,
7609 };
7610
7611 static int __init scx_init(void)
7612 {
7613 int ret;
7614
7615 /*
7616 * kfunc registration can't be done from init_sched_ext_class() as
7617 * register_btf_kfunc_id_set() needs most of the system to be up.
7618 *
7619 * Some kfuncs are context-sensitive and can only be called from
7620 * specific SCX ops. They are grouped into BTF sets accordingly.
7621 * Unfortunately, BPF currently doesn't have a way of enforcing such
7622 * restrictions. Eventually, the verifier should be able to enforce
7623 * them. For now, register them the same and make each kfunc explicitly
7624 * check using scx_kf_allowed().
7625 */
7626 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7627 &scx_kfunc_set_enqueue_dispatch)) ||
7628 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7629 &scx_kfunc_set_dispatch)) ||
7630 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7631 &scx_kfunc_set_cpu_release)) ||
7632 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7633 &scx_kfunc_set_unlocked)) ||
7634 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7635 &scx_kfunc_set_unlocked)) ||
7636 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7637 &scx_kfunc_set_any)) ||
7638 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7639 &scx_kfunc_set_any)) ||
7640 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7641 &scx_kfunc_set_any))) {
7642 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7643 return ret;
7644 }
7645
7646 ret = scx_idle_init();
7647 if (ret) {
7648 pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
7649 return ret;
7650 }
7651
7652 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7653 if (ret) {
7654 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7655 return ret;
7656 }
7657
7658 ret = register_pm_notifier(&scx_pm_notifier);
7659 if (ret) {
7660 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7661 return ret;
7662 }
7663
7664 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7665 if (!scx_kset) {
7666 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7667 return -ENOMEM;
7668 }
7669
7670 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7671 if (ret < 0) {
7672 pr_err("sched_ext: Failed to add global attributes\n");
7673 return ret;
7674 }
7675
7676 return 0;
7677 }
7678 __initcall(scx_init);
7679