1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
7 */
8 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
9
10 enum scx_consts {
11 SCX_DSP_DFL_MAX_BATCH = 32,
12 SCX_DSP_MAX_LOOPS = 32,
13 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
14
15 SCX_EXIT_BT_LEN = 64,
16 SCX_EXIT_MSG_LEN = 1024,
17 SCX_EXIT_DUMP_DFL_LEN = 32768,
18
19 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
20
21 /*
22 * Iterating all tasks may take a while. Periodically drop
23 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
24 */
25 SCX_TASK_ITER_BATCH = 32,
26
27 SCX_BYPASS_LB_DFL_INTV_US = 500 * USEC_PER_MSEC,
28 SCX_BYPASS_LB_DONOR_PCT = 125,
29 SCX_BYPASS_LB_MIN_DELTA_DIV = 4,
30 SCX_BYPASS_LB_BATCH = 256,
31 };
32
33 enum scx_exit_kind {
34 SCX_EXIT_NONE,
35 SCX_EXIT_DONE,
36
37 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
38 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
39 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
40 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
41
42 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
43 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
44 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
45 };
46
47 /*
48 * An exit code can be specified when exiting with scx_bpf_exit() or scx_exit(),
49 * corresponding to exit_kind UNREG_BPF and UNREG_KERN respectively. The codes
50 * are 64-bit values of the following format:
51 *
52 * Bits: [63 .. 48 47 .. 32 31 .. 0]
53 * [ SYS ACT ] [ SYS RSN ] [ USR ]
54 *
55 * SYS ACT: System-defined exit actions
56 * SYS RSN: System-defined exit reasons
57 * USR : User-defined exit codes and reasons
58 *
59 * Using the above, users may communicate intention and context by ORing system
60 * actions and/or system reasons with a user-defined exit code.
61 */
62 enum scx_exit_code {
63 /* Reasons */
64 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
65
66 /* Actions */
67 SCX_ECODE_ACT_RESTART = 1LLU << 48,
68 };
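/*
 * For illustration only (a sketch, not part of this header): a BPF scheduler
 * that wants its user-space loader to restart it after a CPU hotplug event
 * might combine a system action, a system reason and a user-defined code when
 * exiting. The example assumes the scx BPF-side tooling (BPF_STRUCT_OPS() and
 * the scx_bpf_exit() wrapper around scx_bpf_exit_bstr()); EXAMPLE_ECODE_HOTPLUG
 * is a made-up user code.
 *
 *	void BPF_STRUCT_OPS(example_cpu_online, s32 cpu)
 *	{
 *		scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG |
 *			     EXAMPLE_ECODE_HOTPLUG,
 *			     "CPU %d came online, restarting", cpu);
 *	}
 */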
69
70 enum scx_exit_flags {
71 /*
72 * ops.exit() may be called even if the loading failed before ops.init()
73 * finishes successfully. This is because ops.exit() allows rich exit
74 * info communication. The following flag indicates whether ops.init()
75 * finished successfully.
76 */
77 SCX_EFLAG_INITIALIZED = 1LLU << 0,
78 };
79
80 /*
81 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
82 * being disabled.
83 */
84 struct scx_exit_info {
85 /* %SCX_EXIT_* - broad category of the exit reason */
86 enum scx_exit_kind kind;
87
88 /* exit code if gracefully exiting */
89 s64 exit_code;
90
91 /* %SCX_EFLAG_* */
92 u64 flags;
93
94 /* textual representation of the above */
95 const char *reason;
96
97 /* backtrace if exiting due to an error */
98 unsigned long *bt;
99 u32 bt_len;
100
101 /* informational message */
102 char *msg;
103
104 /* debug dump */
105 char *dump;
106 };
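/*
 * Illustrative sketch: ops.exit() implementations typically just record the
 * exit info so the user-space loader can print the reason, message and dump.
 * The example assumes the UEI_DEFINE()/UEI_RECORD() helpers from the scx
 * BPF-side tooling; those names are not defined in this file.
 *
 *	UEI_DEFINE(uei);
 *
 *	void BPF_STRUCT_OPS(example_exit, struct scx_exit_info *ei)
 *	{
 *		UEI_RECORD(uei, ei);	// copies kind, exit_code, reason and msg
 *	}
 */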
107
108 /* sched_ext_ops.flags */
109 enum scx_ops_flags {
110 /*
111 * Keep built-in idle tracking even if ops.update_idle() is implemented.
112 */
113 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
114
115 /*
116 * By default, if there are no other tasks to run on the CPU, the ext core
117 * keeps running the current task even after its slice expires. If this
118 * flag is specified, such tasks are passed to ops.enqueue() with
119 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
120 */
121 SCX_OPS_ENQ_LAST = 1LLU << 1,
122
123 /*
124 * An exiting task may schedule after PF_EXITING is set. In such cases,
125 * bpf_task_from_pid() may not be able to find the task and if the BPF
126 * scheduler depends on pid lookup for dispatching, the task will be
127 * lost, leading to various issues including RCU grace period stalls.
128 *
129 * To mask this problem, by default, unhashed tasks are automatically
130 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
131 * depend on pid lookups and wants to handle these tasks directly, the
132 * following flag can be used.
133 */
134 SCX_OPS_ENQ_EXITING = 1LLU << 2,
135
136 /*
137 * If set, only tasks with policy set to SCHED_EXT are attached to
138 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
139 */
140 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
141
142 /*
143 * A migration disabled task can only execute on its current CPU. By
144 * default, such tasks are automatically put on the CPU's local DSQ with
145 * the default slice on enqueue. If this ops flag is set, they also go
146 * through ops.enqueue().
147 *
148 * A migration disabled task never invokes ops.select_cpu() as it can
149 * only select the current CPU. Also, p->cpus_ptr will only contain its
150 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
151 * and thus may disagree with cpumask_weight(p->cpus_ptr).
152 */
153 SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,
154
155 /*
156 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
157 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
158 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
159 * transfers. When this optimization is enabled, ops.select_cpu() is
160 * skipped in some cases (when racing against the wakee switching out).
161 * As the BPF scheduler may depend on ops.select_cpu() being invoked
162 * during wakeups, queued wakeup is disabled by default.
163 *
164 * If this ops flag is set, queued wakeup optimization is enabled and
165 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
166 * wakee's CPU without a preceding ops.select_cpu() call even for tasks which
167 * may be executed on multiple CPUs.
168 */
169 SCX_OPS_ALLOW_QUEUED_WAKEUP = 1LLU << 5,
170
171 /*
172 * If set, enable per-node idle cpumasks. If clear, use a single global
173 * flat idle cpumask.
174 */
175 SCX_OPS_BUILTIN_IDLE_PER_NODE = 1LLU << 6,
176
177 /*
178 * CPU cgroup support flags
179 */
180 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* DEPRECATED, will be removed in 6.18 */
181
182 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
183 SCX_OPS_ENQ_LAST |
184 SCX_OPS_ENQ_EXITING |
185 SCX_OPS_ENQ_MIGRATION_DISABLED |
186 SCX_OPS_ALLOW_QUEUED_WAKEUP |
187 SCX_OPS_SWITCH_PARTIAL |
188 SCX_OPS_BUILTIN_IDLE_PER_NODE |
189 SCX_OPS_HAS_CGROUP_WEIGHT,
190
191 /* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
192 __SCX_OPS_INTERNAL_MASK = 0xffLLU << 56,
193
194 SCX_OPS_HAS_CPU_PREEMPT = 1LLU << 56,
195 };
196
197 /* argument container for ops.init_task() */
198 struct scx_init_task_args {
199 /*
200 * Set if ops.init_task() is being invoked on the fork path, as opposed
201 * to the scheduler transition path.
202 */
203 bool fork;
204 #ifdef CONFIG_EXT_GROUP_SCHED
205 /* the cgroup the task is joining */
206 struct cgroup *cgroup;
207 #endif
208 };
209
210 /* argument container for ops.exit_task() */
211 struct scx_exit_task_args {
212 /* Whether the task exited before running on sched_ext. */
213 bool cancelled;
214 };
215
216 /* argument container for ops->cgroup_init() */
217 struct scx_cgroup_init_args {
218 /* the weight of the cgroup [1..10000] */
219 u32 weight;
220
221 /* bandwidth control parameters from cpu.max and cpu.max.burst */
222 u64 bw_period_us;
223 u64 bw_quota_us;
224 u64 bw_burst_us;
225 };
226
227 enum scx_cpu_preempt_reason {
228 /* next task is being scheduled by &sched_class_rt */
229 SCX_CPU_PREEMPT_RT,
230 /* next task is being scheduled by &sched_class_dl */
231 SCX_CPU_PREEMPT_DL,
232 /* next task is being scheduled by &sched_class_stop */
233 SCX_CPU_PREEMPT_STOP,
234 /* unknown reason for SCX being preempted */
235 SCX_CPU_PREEMPT_UNKNOWN,
236 };
237
238 /*
239 * Argument container for ops->cpu_acquire(). Currently empty, but may be
240 * expanded in the future.
241 */
242 struct scx_cpu_acquire_args {};
243
244 /* argument container for ops->cpu_release() */
245 struct scx_cpu_release_args {
246 /* the reason the CPU was preempted */
247 enum scx_cpu_preempt_reason reason;
248
249 /* the task that's going to be scheduled on the CPU */
250 struct task_struct *task;
251 };
252
253 /*
254 * Informational context provided to dump operations.
255 */
256 struct scx_dump_ctx {
257 enum scx_exit_kind kind;
258 s64 exit_code;
259 const char *reason;
260 u64 at_ns;
261 u64 at_jiffies;
262 };
263
264 /**
265 * struct sched_ext_ops - Operation table for BPF scheduler implementation
266 *
267 * A BPF scheduler can implement an arbitrary scheduling policy by
268 * implementing and loading operations in this table. Note that a userland
269 * scheduling policy can also be implemented using the BPF scheduler
270 * as a shim layer.
271 */
272 struct sched_ext_ops {
273 /**
274 * @select_cpu: Pick the target CPU for a task which is being woken up
275 * @p: task being woken up
276 * @prev_cpu: the cpu @p was on before sleeping
277 * @wake_flags: SCX_WAKE_*
278 *
279 * The decision made here isn't final. @p may be moved to any CPU while it
280 * is getting dispatched for execution later. However, as @p is not on
281 * the rq at this point, getting the eventual execution CPU right here
282 * saves a small bit of overhead down the line.
283 *
284 * If an idle CPU is returned, the CPU is kicked and will try to
285 * dispatch. While an explicit custom mechanism can be added,
286 * select_cpu() serves as the default way to wake up idle CPUs.
287 *
288 * @p may be inserted into a DSQ directly by calling
289 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
290 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
291 * of the CPU returned by this operation.
292 *
293 * Note that select_cpu() is never called for tasks that can only run
294 * on a single CPU or tasks with migration disabled, as they don't have
295 * the option to select a different CPU. See select_task_rq() for
296 * details.
297 */
298 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
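/*
 * A minimal sketch (assuming the scx BPF-side tooling and the
 * scx_bpf_select_cpu_dfl() / scx_bpf_dsq_insert() kfuncs) of an
 * ops.select_cpu() that uses the built-in idle tracking and dispatches
 * directly when an idle CPU is found, skipping ops.enqueue():
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *
 *		if (is_idle)	// idle CPU found, insert into its local DSQ now
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */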
299
300 /**
301 * @enqueue: Enqueue a task on the BPF scheduler
302 * @p: task being enqueued
303 * @enq_flags: %SCX_ENQ_*
304 *
305 * @p is ready to run. Insert directly into a DSQ by calling
306 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
307 * inserted, the BPF scheduler owns @p and if it fails to dispatch @p,
308 * the task will stall.
309 *
310 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
311 * skipped.
312 */
313 void (*enqueue)(struct task_struct *p, u64 enq_flags);
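/*
 * A minimal ops.enqueue() sketch assuming the scx BPF-side tooling. SHARED_DSQ
 * is a hypothetical DSQ id the scheduler would have created with
 * scx_bpf_create_dsq() in ops.init(); tasks placed there are later moved to a
 * local DSQ by ops.dispatch(). Passing %SCX_ENQ_PREEMPT in @enq_flags when
 * inserting into a local DSQ would instead trigger preemption as described in
 * enum scx_enq_flags below.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 */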
314
315 /**
316 * @dequeue: Remove a task from the BPF scheduler
317 * @p: task being dequeued
318 * @deq_flags: %SCX_DEQ_*
319 *
320 * Remove @p from the BPF scheduler. This is usually called to isolate
321 * the task while updating its scheduling properties (e.g. priority).
322 *
323 * The ext core keeps track of whether the BPF side owns a given task or
324 * not and can gracefully ignore spurious dispatches from BPF side,
325 * which makes it safe to not implement this method. However, depending
326 * on the scheduling logic, this can lead to confusing behaviors - e.g.
327 * scheduling position not being updated across a priority change.
328 */
329 void (*dequeue)(struct task_struct *p, u64 deq_flags);
330
331 /**
332 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
333 * @cpu: CPU to dispatch tasks for
334 * @prev: previous task being switched out
335 *
336 * Called when a CPU's local DSQ is empty. The operation should dispatch
337 * one or more tasks from the BPF scheduler into the DSQs using
338 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
339 * using scx_bpf_dsq_move_to_local().
340 *
341 * The maximum number of times scx_bpf_dsq_insert() can be called
342 * without an intervening scx_bpf_dsq_move_to_local() is specified by
343 * ops.dispatch_max_batch. See the comments on top of the two functions
344 * for more details.
345 *
346 * When not %NULL, @prev is an SCX task with its slice depleted. If
347 * @prev is still runnable, as indicated by %SCX_TASK_QUEUED being set in
348 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
349 * ops.dispatch() returns. To keep executing @prev, return without
350 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
351 */
352 void (*dispatch)(s32 cpu, struct task_struct *prev);
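/*
 * A matching ops.dispatch() sketch for the enqueue example above, again
 * assuming the scx BPF-side tooling and the hypothetical SHARED_DSQ:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		// refill this CPU's local DSQ from the shared queue, if possible
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */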
353
354 /**
355 * @tick: Periodic tick
356 * @p: task running currently
357 *
358 * This operation is called every 1/HZ seconds on CPUs which are
359 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
360 * immediate dispatch cycle on the CPU.
361 */
362 void (*tick)(struct task_struct *p);
363
364 /**
365 * @runnable: A task is becoming runnable on its associated CPU
366 * @p: task becoming runnable
367 * @enq_flags: %SCX_ENQ_*
368 *
369 * This and the following three functions can be used to track a task's
370 * execution state transitions. A task becomes ->runnable() on a CPU,
371 * and then goes through one or more ->running() and ->stopping() pairs
372 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
373 * done running on the CPU.
374 *
375 * @p is becoming runnable on the CPU because it's
376 *
377 * - waking up (%SCX_ENQ_WAKEUP)
378 * - being moved from another CPU
379 * - being restored after being temporarily taken off the queue for an
380 * attribute change.
381 *
382 * This and ->enqueue() are related but not coupled. This operation
383 * notifies @p's state transition and may not be followed by ->enqueue()
384 * e.g. when @p is being dispatched to a remote CPU, or when @p is
385 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
386 * task may be ->enqueue()'d without being preceded by this operation
387 * e.g. after exhausting its slice.
388 */
389 void (*runnable)(struct task_struct *p, u64 enq_flags);
390
391 /**
392 * @running: A task is starting to run on its associated CPU
393 * @p: task starting to run
394 *
395 * Note that this callback may be called from a CPU other than the
396 * one the task is going to run on. This can happen when a task
397 * property is changed (e.g., affinity), since set_next_task_scx(),
398 * which triggers this callback, may run on a CPU different from
399 * the task's assigned CPU.
400 *
401 * Therefore, always use scx_bpf_task_cpu(@p) to determine the
402 * target CPU the task is going to use.
403 *
404 * See ->runnable() for explanation on the task state notifiers.
405 */
406 void (*running)(struct task_struct *p);
407
408 /**
409 * @stopping: A task is stopping execution
410 * @p: task stopping to run
411 * @runnable: is task @p still runnable?
412 *
413 * Note that this callback may be called from a CPU other than the
414 * one the task was running on. This can happen when a task
415 * property is changed (e.g., affinity), since dequeue_task_scx(),
416 * which triggers this callback, may run on a CPU different from
417 * the task's assigned CPU.
418 *
419 * Therefore, always use scx_bpf_task_cpu(@p) to retrieve the CPU
420 * the task was running on.
421 *
422 * See ->runnable() for explanation on the task state notifiers. If
423 * !@runnable, ->quiescent() will be invoked after this operation
424 * returns.
425 */
426 void (*stopping)(struct task_struct *p, bool runnable);
427
428 /**
429 * @quiescent: A task is becoming not runnable on its associated CPU
430 * @p: task becoming not runnable
431 * @deq_flags: %SCX_DEQ_*
432 *
433 * See ->runnable() for explanation on the task state notifiers.
434 *
435 * @p is becoming quiescent on the CPU because it's
436 *
437 * - sleeping (%SCX_DEQ_SLEEP)
438 * - being moved to another CPU
439 * - being temporarily taken off the queue for an attribute change
440 * (%SCX_DEQ_SAVE)
441 *
442 * This and ->dequeue() are related but not coupled. This operation
443 * notifies @p's state transition and may not be preceded by ->dequeue()
444 * e.g. when @p is being dispatched to a remote CPU.
445 */
446 void (*quiescent)(struct task_struct *p, u64 deq_flags);
447
448 /**
449 * @yield: Yield CPU
450 * @from: yielding task
451 * @to: optional yield target task
452 *
453 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
454 * The BPF scheduler should ensure that other available tasks are
455 * dispatched before the yielding task. Return value is ignored in this
456 * case.
457 *
458 * If @to is non-NULL, @from wants to yield the CPU to @to. If the BPF
459 * scheduler can implement the request, return %true; otherwise, %false.
460 */
461 bool (*yield)(struct task_struct *from, struct task_struct *to);
462
463 /**
464 * @core_sched_before: Task ordering for core-sched
465 * @a: task A
466 * @b: task B
467 *
468 * Used by core-sched to determine the ordering between two tasks. See
469 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
470 * core-sched.
471 *
472 * Both @a and @b are runnable and may or may not currently be queued on
473 * the BPF scheduler. Should return %true if @a should run before @b.
474 * %false if there's no required ordering or @b should run before @a.
475 *
476 * If not specified, the default is ordering them according to when they
477 * became runnable.
478 */
479 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
480
481 /**
482 * @set_weight: Set task weight
483 * @p: task to set weight for
484 * @weight: new weight [1..10000]
485 *
486 * Update @p's weight to @weight.
487 */
488 void (*set_weight)(struct task_struct *p, u32 weight);
489
490 /**
491 * @set_cpumask: Set CPU affinity
492 * @p: task to set CPU affinity for
493 * @cpumask: cpumask of cpus that @p can run on
494 *
495 * Update @p's CPU affinity to @cpumask.
496 */
497 void (*set_cpumask)(struct task_struct *p,
498 const struct cpumask *cpumask);
499
500 /**
501 * @update_idle: Update the idle state of a CPU
502 * @cpu: CPU to update the idle state for
503 * @idle: whether entering or exiting the idle state
504 *
505 * This operation is called when @cpu enters or leaves the idle
506 * state. By default, implementing this operation disables the built-in
507 * idle CPU tracking and the following helpers become unavailable:
508 *
509 * - scx_bpf_select_cpu_dfl()
510 * - scx_bpf_select_cpu_and()
511 * - scx_bpf_test_and_clear_cpu_idle()
512 * - scx_bpf_pick_idle_cpu()
513 *
514 * The user must also implement ops.select_cpu() as the default
515 * implementation relies on scx_bpf_select_cpu_dfl().
516 *
517 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
518 * tracking.
519 */
520 void (*update_idle)(s32 cpu, bool idle);
521
522 /**
523 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
524 * @cpu: The CPU being acquired by the BPF scheduler.
525 * @args: Acquire arguments, see the struct definition.
526 *
527 * A CPU that was previously released from the BPF scheduler is now once
528 * again under its control.
529 */
530 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
531
532 /**
533 * @cpu_release: A CPU is taken away from the BPF scheduler
534 * @cpu: The CPU being released by the BPF scheduler.
535 * @args: Release arguments, see the struct definition.
536 *
537 * The specified CPU is no longer under the control of the BPF
538 * scheduler. This could be because it was preempted by a higher
539 * priority sched_class, though there may be other reasons as well. The
540 * caller should consult @args->reason to determine the cause.
541 */
542 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
543
544 /**
545 * @init_task: Initialize a task to run in a BPF scheduler
546 * @p: task to initialize for BPF scheduling
547 * @args: init arguments, see the struct definition
548 *
549 * Either we're loading a BPF scheduler or a new task is being forked.
550 * Initialize @p for BPF scheduling. This operation may block and can
551 * be used for allocations, and is called exactly once for a task.
552 *
553 * Return 0 for success, -errno for failure. An error return while
554 * loading will abort loading of the BPF scheduler. During a fork, it
555 * will abort that specific fork.
556 */
557 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
558
559 /**
560 * @exit_task: Exit a previously-running task from the system
561 * @p: task to exit
562 * @args: exit arguments, see the struct definition
563 *
564 * @p is exiting or the BPF scheduler is being unloaded. Perform any
565 * necessary cleanup for @p.
566 */
567 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
568
569 /**
570 * @enable: Enable BPF scheduling for a task
571 * @p: task to enable BPF scheduling for
572 *
573 * Enable @p for BPF scheduling. enable() is called on @p any time it
574 * enters SCX, and is always paired with a matching disable().
575 */
576 void (*enable)(struct task_struct *p);
577
578 /**
579 * @disable: Disable BPF scheduling for a task
580 * @p: task to disable BPF scheduling for
581 *
582 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
583 * Disable BPF scheduling for @p. A disable() call is always matched
584 * with a prior enable() call.
585 */
586 void (*disable)(struct task_struct *p);
587
588 /**
589 * @dump: Dump BPF scheduler state on error
590 * @ctx: debug dump context
591 *
592 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
593 */
594 void (*dump)(struct scx_dump_ctx *ctx);
595
596 /**
597 * @dump_cpu: Dump BPF scheduler state for a CPU on error
598 * @ctx: debug dump context
599 * @cpu: CPU to generate debug dump for
600 * @idle: @cpu is currently idle without any runnable tasks
601 *
602 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
603 * @cpu. If @idle is %true and this operation doesn't produce any
604 * output, @cpu is skipped for dump.
605 */
606 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
607
608 /**
609 * @dump_task: Dump BPF scheduler state for a runnable task on error
610 * @ctx: debug dump context
611 * @p: runnable task to generate debug dump for
612 *
613 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
614 * @p.
615 */
616 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
617
618 #ifdef CONFIG_EXT_GROUP_SCHED
619 /**
620 * @cgroup_init: Initialize a cgroup
621 * @cgrp: cgroup being initialized
622 * @args: init arguments, see the struct definition
623 *
624 * Either the BPF scheduler is being loaded or @cgrp created, initialize
625 * @cgrp for sched_ext. This operation may block.
626 *
627 * Return 0 for success, -errno for failure. An error return while
628 * loading will abort loading of the BPF scheduler. During cgroup
629 * creation, it will abort the specific cgroup creation.
630 */
631 s32 (*cgroup_init)(struct cgroup *cgrp,
632 struct scx_cgroup_init_args *args);
633
634 /**
635 * @cgroup_exit: Exit a cgroup
636 * @cgrp: cgroup being exited
637 *
638 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
639 * @cgrp for sched_ext. This operation may block.
640 */
641 void (*cgroup_exit)(struct cgroup *cgrp);
642
643 /**
644 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
645 * @p: task being moved
646 * @from: cgroup @p is being moved from
647 * @to: cgroup @p is being moved to
648 *
649 * Prepare @p for move from cgroup @from to @to. This operation may
650 * block and can be used for allocations.
651 *
652 * Return 0 for success, -errno for failure. An error return aborts the
653 * migration.
654 */
655 s32 (*cgroup_prep_move)(struct task_struct *p,
656 struct cgroup *from, struct cgroup *to);
657
658 /**
659 * @cgroup_move: Commit cgroup move
660 * @p: task being moved
661 * @from: cgroup @p is being moved from
662 * @to: cgroup @p is being moved to
663 *
664 * Commit the move. @p is dequeued during this operation.
665 */
666 void (*cgroup_move)(struct task_struct *p,
667 struct cgroup *from, struct cgroup *to);
668
669 /**
670 * @cgroup_cancel_move: Cancel cgroup move
671 * @p: task whose cgroup move is being canceled
672 * @from: cgroup @p was being moved from
673 * @to: cgroup @p was being moved to
674 *
675 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
676 * Undo the preparation.
677 */
678 void (*cgroup_cancel_move)(struct task_struct *p,
679 struct cgroup *from, struct cgroup *to);
680
681 /**
682 * @cgroup_set_weight: A cgroup's weight is being changed
683 * @cgrp: cgroup whose weight is being updated
684 * @weight: new weight [1..10000]
685 *
686 * Update @cgrp's weight to @weight.
687 */
688 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
689
690 /**
691 * @cgroup_set_bandwidth: A cgroup's bandwidth is being changed
692 * @cgrp: cgroup whose bandwidth is being updated
693 * @period_us: bandwidth control period
694 * @quota_us: bandwidth control quota
695 * @burst_us: bandwidth control burst
696 *
697 * Update @cgrp's bandwidth control parameters. This is from the cpu.max
698 * cgroup interface.
699 *
700 * @quota_us / @period_us determines the CPU bandwidth @cgrp is entitled
701 * to. For example, if @period_us is 1_000_000 and @quota_us is
702 * 2_500_000, @cgrp is entitled to 2.5 CPUs. @burst_us can be
703 * interpreted in the same fashion and specifies how much @cgrp can
704 * burst temporarily. The specific control mechanism and thus the
705 * interpretation of @period_us and burstiness is up to the BPF
706 * scheduler.
707 */
708 void (*cgroup_set_bandwidth)(struct cgroup *cgrp,
709 u64 period_us, u64 quota_us, u64 burst_us);
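/*
 * A sketch of the arithmetic above, assuming the scx BPF-side tooling;
 * example_cgrp_set_budget() is a made-up helper standing in for whatever
 * per-cgroup storage the scheduler maintains. Handling of an unlimited quota
 * is omitted.
 *
 *	void BPF_STRUCT_OPS(example_cgroup_set_bandwidth, struct cgroup *cgrp,
 *			    u64 period_us, u64 quota_us, u64 burst_us)
 *	{
 *		if (!period_us)
 *			return;
 *		// e.g. period 1_000_000, quota 2_500_000 -> 2500 milli-CPUs
 *		example_cgrp_set_budget(cgrp, quota_us * 1000 / period_us);
 *	}
 */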
710
711 /**
712 * @cgroup_set_idle: A cgroup's idle state is being changed
713 * @cgrp: cgroup whose idle state is being updated
714 * @idle: whether the cgroup is entering or exiting idle state
715 *
716 * Update @cgrp's idle state to @idle. This callback is invoked when
717 * a cgroup transitions between idle and non-idle states, allowing the
718 * BPF scheduler to adjust its behavior accordingly.
719 */
720 void (*cgroup_set_idle)(struct cgroup *cgrp, bool idle);
721
722 #endif /* CONFIG_EXT_GROUP_SCHED */
723
724 /*
725 * All online ops must come before ops.cpu_online().
726 */
727
728 /**
729 * @cpu_online: A CPU became online
730 * @cpu: CPU which just came up
731 *
732 * @cpu just came online. @cpu will not call ops.enqueue() or
733 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
734 */
735 void (*cpu_online)(s32 cpu);
736
737 /**
738 * @cpu_offline: A CPU is going offline
739 * @cpu: CPU which is going offline
740 *
741 * @cpu is going offline. @cpu will not call ops.enqueue() or
742 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
743 */
744 void (*cpu_offline)(s32 cpu);
745
746 /*
747 * All CPU hotplug ops must come before ops.init().
748 */
749
750 /**
751 * @init: Initialize the BPF scheduler
752 */
753 s32 (*init)(void);
754
755 /**
756 * @exit: Clean up after the BPF scheduler
757 * @info: Exit info
758 *
759 * ops.exit() is also called on ops.init() failure, which is a bit
760 * unusual. This is to allow rich reporting through @info on how
761 * ops.init() failed.
762 */
763 void (*exit)(struct scx_exit_info *info);
764
765 /**
766 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
767 */
768 u32 dispatch_max_batch;
769
770 /**
771 * @flags: %SCX_OPS_* flags
772 */
773 u64 flags;
774
775 /**
776 * @timeout_ms: The maximum amount of time, in milliseconds, that a
777 * runnable task should be able to wait before being scheduled. The
778 * maximum timeout may not exceed the default timeout of 30 seconds.
779 *
780 * Defaults to the maximum allowed timeout value of 30 seconds.
781 */
782 u32 timeout_ms;
783
784 /**
785 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
786 * value of 32768 is used.
787 */
788 u32 exit_dump_len;
789
790 /**
791 * @hotplug_seq: A sequence number that may be set by the scheduler to
792 * detect when a hotplug event has occurred during the loading process.
793 * If 0, no detection occurs. Otherwise, the scheduler will fail to
794 * load if the sequence number does not match @scx_hotplug_seq on the
795 * enable path.
796 */
797 u64 hotplug_seq;
798
799 /**
800 * @name: BPF scheduler's name
801 *
802 * Must be a non-zero valid BPF object name including only isalnum(),
803 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
804 * BPF scheduler is enabled.
805 */
806 char name[SCX_OPS_NAME_LEN];
807
808 /* internal use only, must be NULL */
809 void *priv;
810 };
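/*
 * Putting the table together - an illustrative sketch rather than a reference
 * implementation: a loadable BPF scheduler only needs to fill in the ops it
 * cares about plus a name. The example assumes the SCX_OPS_DEFINE() macro from
 * the scx BPF-side tooling and the hypothetical example_*() callbacks, some of
 * which are sketched in the comments above.
 *
 *	SCX_OPS_DEFINE(example_ops,
 *		       .select_cpu	= (void *)example_select_cpu,
 *		       .enqueue		= (void *)example_enqueue,
 *		       .dispatch	= (void *)example_dispatch,
 *		       .init		= (void *)example_init,
 *		       .exit		= (void *)example_exit,
 *		       .flags		= SCX_OPS_ENQ_LAST,
 *		       .timeout_ms	= 5000,
 *		       .name		= "example");
 */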
811
812 enum scx_opi {
813 SCX_OPI_BEGIN = 0,
814 SCX_OPI_NORMAL_BEGIN = 0,
815 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
816 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
817 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
818 SCX_OPI_END = SCX_OP_IDX(init),
819 };
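/*
 * For reference, SCX_OP_IDX() maps an op to its index in the function-pointer
 * area at the start of struct sched_ext_ops, e.g. SCX_OP_IDX(select_cpu) == 0
 * and SCX_OP_IDX(enqueue) == 1, and the scx_opi ranges above delimit the
 * normal and CPU-hotplug groups. A minimal sketch of how such an index is
 * typically used against the has_op bitmap of struct scx_sched defined below
 * (the helper name is made up):
 *
 *	static bool example_has_op_dispatch(struct scx_sched *sch)
 *	{
 *		return test_bit(SCX_OP_IDX(dispatch), sch->has_op);
 *	}
 */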
820
821 /*
822 * Collection of event counters. Event types are placed in descending order.
823 */
824 struct scx_event_stats {
825 /*
826 * If ops.select_cpu() returns a CPU which can't be used by the task,
827 * the core scheduler code silently picks a fallback CPU.
828 */
829 s64 SCX_EV_SELECT_CPU_FALLBACK;
830
831 /*
832 * When dispatching to a local DSQ, the CPU may have gone offline in
833 * the meantime. In this case, the task is bounced to the global DSQ.
834 */
835 s64 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
836
837 /*
838 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
839 * continued to run because there were no other tasks on the CPU.
840 */
841 s64 SCX_EV_DISPATCH_KEEP_LAST;
842
843 /*
844 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
845 * is dispatched to a local DSQ when exiting.
846 */
847 s64 SCX_EV_ENQ_SKIP_EXITING;
848
849 /*
850 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
851 * migration disabled task skips ops.enqueue() and is dispatched to its
852 * local DSQ.
853 */
854 s64 SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;
855
856 /*
857 * Total number of times a task's time slice was refilled with the
858 * default value (SCX_SLICE_DFL).
859 */
860 s64 SCX_EV_REFILL_SLICE_DFL;
861
862 /*
863 * The total duration of bypass modes in nanoseconds.
864 */
865 s64 SCX_EV_BYPASS_DURATION;
866
867 /*
868 * The number of tasks dispatched in the bypassing mode.
869 */
870 s64 SCX_EV_BYPASS_DISPATCH;
871
872 /*
873 * The number of times the bypassing mode has been activated.
874 */
875 s64 SCX_EV_BYPASS_ACTIVATE;
876 };
877
878 struct scx_sched_pcpu {
879 /*
880 * The event counters are in a per-CPU variable to minimize the
881 * accounting overhead. A system-wide view on the event counter is
882 * constructed when requested by scx_bpf_events().
883 */
884 struct scx_event_stats event_stats;
885 };
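/*
 * A minimal aggregation sketch (kernel-side, not the BPF API; the helper name
 * is made up and the real read-out path may differ): summing the per-CPU
 * counters yields the system-wide view mentioned above.
 *
 *	static void example_sum_events(struct scx_sched_pcpu __percpu *pcpu,
 *				       struct scx_event_stats *sum)
 *	{
 *		int cpu;
 *
 *		memset(sum, 0, sizeof(*sum));
 *		for_each_possible_cpu(cpu) {
 *			struct scx_event_stats *e = &per_cpu_ptr(pcpu, cpu)->event_stats;
 *
 *			sum->SCX_EV_SELECT_CPU_FALLBACK += e->SCX_EV_SELECT_CPU_FALLBACK;
 *			sum->SCX_EV_BYPASS_DISPATCH     += e->SCX_EV_BYPASS_DISPATCH;
 *			// ... remaining counters are summed the same way
 *		}
 *	}
 */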
886
887 struct scx_sched {
888 struct sched_ext_ops ops;
889 DECLARE_BITMAP(has_op, SCX_OPI_END);
890
891 /*
892 * Dispatch queues.
893 *
894 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability.
895 * This is to avoid live-locking in bypass mode where all tasks are
896 * dispatched to %SCX_DSQ_GLOBAL and all CPUs consume from it. If
897 * per-node split isn't sufficient, it can be further split.
898 */
899 struct rhashtable dsq_hash;
900 struct scx_dispatch_q **global_dsqs;
901 struct scx_sched_pcpu __percpu *pcpu;
902
903 /*
904 * Updates to the following warned bitfields can race causing RMW issues
905 * but it doesn't really matter.
906 */
907 bool warned_zero_slice:1;
908 bool warned_deprecated_rq:1;
909
910 atomic_t exit_kind;
911 struct scx_exit_info *exit_info;
912
913 struct kobject kobj;
914
915 struct kthread_worker *helper;
916 struct irq_work error_irq_work;
917 struct kthread_work disable_work;
918 struct rcu_work rcu_work;
919 };
920
921 enum scx_wake_flags {
922 /* expose select WF_* flags as enums */
923 SCX_WAKE_FORK = WF_FORK,
924 SCX_WAKE_TTWU = WF_TTWU,
925 SCX_WAKE_SYNC = WF_SYNC,
926 };
927
928 enum scx_enq_flags {
929 /* expose select ENQUEUE_* flags as enums */
930 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
931 SCX_ENQ_HEAD = ENQUEUE_HEAD,
932 SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED,
933
934 /* high 32bits are SCX specific */
935
936 /*
937 * Set the following to trigger preemption when calling
938 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
939 * current task is cleared to zero and the CPU is kicked into the
940 * scheduling path. Implies %SCX_ENQ_HEAD.
941 */
942 SCX_ENQ_PREEMPT = 1LLU << 32,
943
944 /*
945 * The task being enqueued was previously enqueued on the current CPU's
946 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
947 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
948 * invoked in a ->cpu_release() callback, and the task is again
949 * dispatched back to %SCX_DSQ_LOCAL by the current ->enqueue(), the
950 * task will not be scheduled on the CPU until at least the next invocation
951 * of the ->cpu_acquire() callback.
952 */
953 SCX_ENQ_REENQ = 1LLU << 40,
954
955 /*
956 * The task being enqueued is the only task available for the CPU. By
957 * default, the ext core keeps executing such tasks but when
958 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
959 * %SCX_ENQ_LAST flag set.
960 *
961 * The BPF scheduler is responsible for triggering a follow-up
962 * scheduling event. Otherwise, execution may stall.
963 */
964 SCX_ENQ_LAST = 1LLU << 41,
965
966 /* high 8 bits are internal */
967 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
968
969 SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
970 SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
971 SCX_ENQ_NESTED = 1LLU << 58,
972 };
973
974 enum scx_deq_flags {
975 /* expose select DEQUEUE_* flags as enums */
976 SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
977
978 /* high 32bits are SCX specific */
979
980 /*
981 * The generic core-sched layer decided to execute the task even though
982 * it hasn't been dispatched yet. Dequeue from the BPF side.
983 */
984 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
985 };
986
987 enum scx_pick_idle_cpu_flags {
988 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
989 SCX_PICK_IDLE_IN_NODE = 1LLU << 1, /* pick a CPU in the same target NUMA node */
990 };
991
992 enum scx_kick_flags {
993 /*
994 * Kick the target CPU if idle. Guarantees that the target CPU goes
995 * through at least one full scheduling cycle before going idle. If the
996 * target CPU can be determined to be currently non-idle and about to go
997 * through a scheduling cycle before going idle, this is a noop.
998 */
999 SCX_KICK_IDLE = 1LLU << 0,
1000
1001 /*
1002 * Preempt the current task and execute the dispatch path. If the
1003 * current task of the target CPU is an SCX task, its ->scx.slice is
1004 * cleared to zero before the scheduling path is invoked so that the
1005 * task expires and the dispatch path is invoked.
1006 */
1007 SCX_KICK_PREEMPT = 1LLU << 1,
1008
1009 /*
1010 * The scx_bpf_kick_cpu() call will return after the current SCX task of
1011 * the target CPU switches out. This can be used to implement e.g. core
1012 * scheduling. This has no effect if the current task on the target CPU
1013 * is not on SCX.
1014 */
1015 SCX_KICK_WAIT = 1LLU << 2,
1016 };
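/*
 * Illustrative sketch, assuming the scx BPF-side kfuncs: a common pattern is
 * to insert a task into another CPU's local DSQ from ops.enqueue() and then
 * kick that CPU so the new work is noticed. @target is a hypothetical CPU
 * chosen by the scheduler and %SCX_DSQ_LOCAL_ON is the DSQ id prefix defined
 * in the sched_ext UAPI header.
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
 *	scx_bpf_kick_cpu(target, SCX_KICK_IDLE);
 */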
1017
1018 enum scx_tg_flags {
1019 SCX_TG_ONLINE = 1U << 0,
1020 SCX_TG_INITED = 1U << 1,
1021 };
1022
1023 enum scx_enable_state {
1024 SCX_ENABLING,
1025 SCX_ENABLED,
1026 SCX_DISABLING,
1027 SCX_DISABLED,
1028 };
1029
1030 static const char *scx_enable_state_str[] = {
1031 [SCX_ENABLING] = "enabling",
1032 [SCX_ENABLED] = "enabled",
1033 [SCX_DISABLING] = "disabling",
1034 [SCX_DISABLED] = "disabled",
1035 };
1036
1037 /*
1038 * sched_ext_entity->ops_state
1039 *
1040 * Used to track the task ownership between the SCX core and the BPF scheduler.
1041 * State transitions look as follows:
1042 *
1043 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
1044 *   ^              |                 |
1045 *   |              v                 v
1046 *   \-------------------------------/
1047 *
1048 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
1049 * sites for explanations on the conditions being waited upon and why they are
1050 * safe. Transitions out of them into NONE or QUEUED must store_release and the
1051 * waiters should load_acquire.
1052 *
1053 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
1054 * any given task can be dispatched by the BPF scheduler at all times and thus
1055 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
1056 * to try to dispatch any task anytime regardless of its state as the SCX core
1057 * can safely reject invalid dispatches.
1058 */
1059 enum scx_ops_state {
1060 SCX_OPSS_NONE, /* owned by the SCX core */
1061 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
1062 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
1063 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
1064
1065 /*
1066 * QSEQ brands each QUEUED instance so that, when dispatch races
1067 * dequeue/requeue, the dispatcher can tell whether it still has a claim
1068 * on the task being dispatched.
1069 *
1070 * As some 32bit archs can't do 64bit store_release/load_acquire,
1071 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
1072 * 32bit machines. The dispatch race window QSEQ protects is very narrow
1073 * and runs with IRQ disabled. 30 bits should be sufficient.
1074 */
1075 SCX_OPSS_QSEQ_SHIFT = 2,
1076 };
1077
1078 /* Use macros to ensure that the type is unsigned long for the masks */
1079 #define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
1080 #define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
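/*
 * A minimal decoding sketch: the low SCX_OPSS_QSEQ_SHIFT bits carry the state
 * and the rest carry the QSEQ brand, which is how a dispatcher detects that it
 * lost a race against dequeue/requeue. @expected_qseq is a hypothetical value
 * captured when the task was claimed.
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	enum scx_ops_state state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 *
 *	if (state == SCX_OPSS_QUEUED && qseq == expected_qseq)
 *		;	// the claim on @p is still valid
 */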
1081
1082 DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);
1083
1084 /*
1085 * Return the rq currently locked from an scx callback, or NULL if no rq is
1086 * locked.
1087 */
1088 static inline struct rq *scx_locked_rq(void)
1089 {
1090 return __this_cpu_read(scx_locked_rq_state);
1091 }
1092
1093 static inline bool scx_kf_allowed_if_unlocked(void)
1094 {
1095 return !current->scx.kf_mask;
1096 }
1097
1098 static inline bool scx_rq_bypassing(struct rq *rq)
1099 {
1100 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1101 }
1102