/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
 */
#define SCX_OP_IDX(op)	(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))

enum scx_consts {
	SCX_DSP_DFL_MAX_BATCH = 32,
	SCX_DSP_MAX_LOOPS = 32,
	SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,

	SCX_EXIT_BT_LEN = 64,
	SCX_EXIT_MSG_LEN = 1024,
	SCX_EXIT_DUMP_DFL_LEN = 32768,

	SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,

	/*
	 * Iterating all tasks may take a while. Periodically drop
	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
	 */
	SCX_TASK_ITER_BATCH = 32,
};

enum scx_exit_kind {
	SCX_EXIT_NONE,
	SCX_EXIT_DONE,

	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */

	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
};

/*
 * An exit code can be specified when exiting with scx_bpf_exit() or scx_exit(),
 * corresponding to exit_kind UNREG_BPF and UNREG_KERN respectively. The codes
 * are 64bit and use the following format:
 *
 *   Bits: [63  ..  48 47   ..  32 31 .. 0]
 *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
 *
 *   SYS ACT: System-defined exit actions
 *   SYS RSN: System-defined exit reasons
 *   USR    : User-defined exit codes and reasons
 *
 * Using the above, users may communicate intention and context by ORing system
 * actions and/or system reasons with a user-defined exit code.
 */
enum scx_exit_code {
	/* Reasons */
	SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,

	/* Actions */
	SCX_ECODE_ACT_RESTART = 1LLU << 48,
};
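
/*
 * For illustration only (not part of the definitions in this file): a BPF
 * scheduler could combine a system-defined action with a scheduler-private
 * code when exiting. MYSCHED_ECODE_RELOAD is a made-up user-defined code and
 * scx_bpf_exit() refers to the helper macro provided by the BPF-side tooling
 * headers; treat this as a sketch rather than a canonical usage.
 *
 *	#define MYSCHED_ECODE_RELOAD	1	// hypothetical user-defined code
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | MYSCHED_ECODE_RELOAD,
 *		     "restarting after configuration change");
 */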

enum scx_exit_flags {
	/*
	 * ops.exit() may be called even if the loading failed before ops.init()
	 * finishes successfully. This is because ops.exit() allows rich exit
	 * info communication. The following flag indicates whether ops.init()
	 * finished successfully.
	 */
	SCX_EFLAG_INITIALIZED = 1LLU << 0,
};
74*f3aec2adSTejun Heo
750c2b8356STejun Heo /*
760c2b8356STejun Heo * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
770c2b8356STejun Heo * being disabled.
780c2b8356STejun Heo */
790c2b8356STejun Heo struct scx_exit_info {
800c2b8356STejun Heo /* %SCX_EXIT_* - broad category of the exit reason */
810c2b8356STejun Heo enum scx_exit_kind kind;
820c2b8356STejun Heo
830c2b8356STejun Heo /* exit code if gracefully exiting */
840c2b8356STejun Heo s64 exit_code;
850c2b8356STejun Heo
86*f3aec2adSTejun Heo /* %SCX_EFLAG_* */
87*f3aec2adSTejun Heo u64 flags;
88*f3aec2adSTejun Heo
890c2b8356STejun Heo /* textual representation of the above */
900c2b8356STejun Heo const char *reason;
910c2b8356STejun Heo
920c2b8356STejun Heo /* backtrace if exiting due to an error */
930c2b8356STejun Heo unsigned long *bt;
940c2b8356STejun Heo u32 bt_len;
950c2b8356STejun Heo
960c2b8356STejun Heo /* informational message */
970c2b8356STejun Heo char *msg;
980c2b8356STejun Heo
990c2b8356STejun Heo /* debug dump */
1000c2b8356STejun Heo char *dump;
1010c2b8356STejun Heo };
1020c2b8356STejun Heo
1030c2b8356STejun Heo /* sched_ext_ops.flags */
1040c2b8356STejun Heo enum scx_ops_flags {
1050c2b8356STejun Heo /*
1060c2b8356STejun Heo * Keep built-in idle tracking even if ops.update_idle() is implemented.
1070c2b8356STejun Heo */
1080c2b8356STejun Heo SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,

	/*
	 * By default, if there are no other tasks to run on the CPU, the ext
	 * core keeps running the current task even after its slice expires. If
	 * this flag is specified, such tasks are passed to ops.enqueue() with
	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
	 */
	SCX_OPS_ENQ_LAST = 1LLU << 1,

	/*
	 * An exiting task may schedule after PF_EXITING is set. In such cases,
	 * bpf_task_from_pid() may not be able to find the task and if the BPF
	 * scheduler depends on pid lookup for dispatching, the task will be
	 * lost leading to various issues including RCU grace period stalls.
	 *
	 * To mask this problem, by default, unhashed tasks are automatically
	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
	 * depend on pid lookups and wants to handle these tasks directly, the
	 * following flag can be used.
	 */
	SCX_OPS_ENQ_EXITING = 1LLU << 2,

	/*
	 * If set, only tasks with policy set to SCHED_EXT are attached to
	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
	 */
	SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,

	/*
	 * A migration disabled task can only execute on its current CPU. By
	 * default, such tasks are automatically put on the CPU's local DSQ with
	 * the default slice on enqueue. If this ops flag is set, they also go
	 * through ops.enqueue().
	 *
	 * A migration disabled task never invokes ops.select_cpu() as it can
	 * only select the current CPU. Also, p->cpus_ptr will only contain its
	 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
	 * and thus may disagree with cpumask_weight(p->cpus_ptr).
	 */
	SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,

	/*
	 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
	 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
	 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
	 * transfers. When this optimization is enabled, ops.select_cpu() is
	 * skipped in some cases (when racing against the wakee switching out).
	 * As the BPF scheduler may depend on ops.select_cpu() being invoked
	 * during wakeups, queued wakeup is disabled by default.
	 *
	 * If this ops flag is set, queued wakeup optimization is enabled and
	 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
	 * wakee's CPU without preceding ops.select_cpu() even for tasks which
	 * may be executed on multiple CPUs.
	 */
	SCX_OPS_ALLOW_QUEUED_WAKEUP = 1LLU << 5,

	/*
	 * If set, enable per-node idle cpumasks. If clear, use a single global
	 * flat idle cpumask.
	 */
	SCX_OPS_BUILTIN_IDLE_PER_NODE = 1LLU << 6,

	/*
	 * CPU cgroup support flags
	 */
	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* DEPRECATED, will be removed in 6.18 */

	SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
			    SCX_OPS_ENQ_LAST |
			    SCX_OPS_ENQ_EXITING |
			    SCX_OPS_ENQ_MIGRATION_DISABLED |
			    SCX_OPS_ALLOW_QUEUED_WAKEUP |
			    SCX_OPS_SWITCH_PARTIAL |
			    SCX_OPS_BUILTIN_IDLE_PER_NODE |
			    SCX_OPS_HAS_CGROUP_WEIGHT,

	/* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
	__SCX_OPS_INTERNAL_MASK = 0xffLLU << 56,

	SCX_OPS_HAS_CPU_PREEMPT = 1LLU << 56,
};

/* argument container for ops.init_task() */
struct scx_init_task_args {
	/*
	 * Set if ops.init_task() is being invoked on the fork path, as opposed
	 * to the scheduler transition path.
	 */
	bool fork;
#ifdef CONFIG_EXT_GROUP_SCHED
	/* the cgroup the task is joining */
	struct cgroup *cgroup;
#endif
};

/* argument container for ops.exit_task() */
struct scx_exit_task_args {
	/* Whether the task exited before running on sched_ext. */
	bool cancelled;
};

/* argument container for ops->cgroup_init() */
struct scx_cgroup_init_args {
	/* the weight of the cgroup [1..10000] */
	u32 weight;

	/* bandwidth control parameters from cpu.max and cpu.max.burst */
	u64 bw_period_us;
	u64 bw_quota_us;
	u64 bw_burst_us;
};

enum scx_cpu_preempt_reason {
	/* next task is being scheduled by &sched_class_rt */
	SCX_CPU_PREEMPT_RT,
	/* next task is being scheduled by &sched_class_dl */
	SCX_CPU_PREEMPT_DL,
	/* next task is being scheduled by &sched_class_stop */
	SCX_CPU_PREEMPT_STOP,
	/* unknown reason for SCX being preempted */
	SCX_CPU_PREEMPT_UNKNOWN,
};

/*
 * Argument container for ops->cpu_acquire(). Currently empty, but may be
 * expanded in the future.
 */
struct scx_cpu_acquire_args {};

/* argument container for ops->cpu_release() */
struct scx_cpu_release_args {
	/* the reason the CPU was preempted */
	enum scx_cpu_preempt_reason reason;

	/* the task that's going to be scheduled on the CPU */
	struct task_struct *task;
};

/*
 * Informational context provided to dump operations.
 */
struct scx_dump_ctx {
	enum scx_exit_kind kind;
	s64 exit_code;
	const char *reason;
	u64 at_ns;
	u64 at_jiffies;
};

/**
 * struct sched_ext_ops - Operation table for BPF scheduler implementation
 *
 * A BPF scheduler can implement an arbitrary scheduling policy by
 * implementing and loading operations in this table. Note that a userland
 * scheduling policy can also be implemented using the BPF scheduler
 * as a shim layer.
 */
struct sched_ext_ops {
	/**
	 * @select_cpu: Pick the target CPU for a task which is being woken up
	 * @p: task being woken up
	 * @prev_cpu: the cpu @p was on before sleeping
	 * @wake_flags: SCX_WAKE_*
	 *
	 * Decision made here isn't final. @p may be moved to any CPU while it
	 * is getting dispatched for execution later. However, as @p is not on
	 * the rq at this point, getting the eventual execution CPU right here
	 * saves a small bit of overhead down the line.
	 *
	 * If an idle CPU is returned, the CPU is kicked and will try to
	 * dispatch. While an explicit custom mechanism can be added,
	 * select_cpu() serves as the default way to wake up idle CPUs.
	 *
	 * @p may be inserted into a DSQ directly by calling
	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
	 * of the CPU returned by this operation.
	 *
	 * Note that select_cpu() is never called for tasks that can only run
	 * on a single CPU or tasks with migration disabled, as they don't have
	 * the option to select a different CPU. See select_task_rq() for
	 * details.
	 */
	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
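
	/*
	 * Illustrative sketch only (the "sketch_" names are made up): a
	 * minimal ops.select_cpu() that relies on the built-in idle tracking
	 * and direct-dispatches to the local DSQ when an idle CPU is found,
	 * which skips the subsequent ops.enqueue() as described above.
	 *
	 *	s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */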

	/**
	 * @enqueue: Enqueue a task on the BPF scheduler
	 * @p: task being enqueued
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * @p is ready to run. Insert directly into a DSQ by calling
	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
	 * inserted, the BPF scheduler owns @p and if it fails to dispatch @p,
	 * the task will stall.
	 *
	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
	 * skipped.
	 */
	void (*enqueue)(struct task_struct *p, u64 enq_flags);
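
	/*
	 * Illustrative sketch only: an ops.enqueue() that funnels every task
	 * into a single shared DSQ. SKETCH_DSQ is a made-up DSQ ID assumed to
	 * have been created in ops.init() (see the init sketch further down).
	 *
	 *	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
	 *	{
	 *		scx_bpf_dsq_insert(p, SKETCH_DSQ, SCX_SLICE_DFL, enq_flags);
	 *	}
	 */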

	/**
	 * @dequeue: Remove a task from the BPF scheduler
	 * @p: task being dequeued
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * Remove @p from the BPF scheduler. This is usually called to isolate
	 * the task while updating its scheduling properties (e.g. priority).
	 *
	 * The ext core keeps track of whether the BPF side owns a given task or
	 * not and can gracefully ignore spurious dispatches from BPF side,
	 * which makes it safe to not implement this method. However, depending
	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
	 * scheduling position not being updated across a priority change.
	 */
	void (*dequeue)(struct task_struct *p, u64 deq_flags);

	/**
	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
	 * @cpu: CPU to dispatch tasks for
	 * @prev: previous task being switched out
	 *
	 * Called when a CPU's local dsq is empty. The operation should dispatch
	 * one or more tasks from the BPF scheduler into the DSQs using
	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
	 * using scx_bpf_dsq_move_to_local().
	 *
	 * The maximum number of times scx_bpf_dsq_insert() can be called
	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
	 * ops.dispatch_max_batch. See the comments on top of the two functions
	 * for more details.
	 *
	 * When not %NULL, @prev is an SCX task with its slice depleted. If
	 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
	 * ops.dispatch() returns. To keep executing @prev, return without
	 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
	 */
	void (*dispatch)(s32 cpu, struct task_struct *prev);
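
	/*
	 * Illustrative sketch only: an ops.dispatch() that refills the local
	 * DSQ from the same shared, made-up SKETCH_DSQ used by the enqueue
	 * sketch above.
	 *
	 *	void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(SKETCH_DSQ);
	 *	}
	 */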

	/**
	 * @tick: Periodic tick
	 * @p: task running currently
	 *
	 * This operation is called every 1/HZ seconds on CPUs which are
	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
	 * immediate dispatch cycle on the CPU.
	 */
	void (*tick)(struct task_struct *p);
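
	/*
	 * Illustrative sketch only: a tick handler that forces an immediate
	 * dispatch cycle for low-weight tasks by zeroing the remaining slice.
	 * The weight threshold is arbitrary and purely for demonstration.
	 *
	 *	void BPF_STRUCT_OPS(sketch_tick, struct task_struct *p)
	 *	{
	 *		if (p->scx.weight < 100)
	 *			p->scx.slice = 0;
	 *	}
	 */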

	/**
	 * @runnable: A task is becoming runnable on its associated CPU
	 * @p: task becoming runnable
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * This and the following three functions can be used to track a task's
	 * execution state transitions. A task becomes ->runnable() on a CPU,
	 * and then goes through one or more ->running() and ->stopping() pairs
	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
	 * done running on the CPU.
	 *
	 * @p is becoming runnable on the CPU because it's
	 *
	 * - waking up (%SCX_ENQ_WAKEUP)
	 * - being moved from another CPU
	 * - being restored after temporarily taken off the queue for an
	 *   attribute change.
	 *
	 * This and ->enqueue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be followed by ->enqueue()
	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
	 * task may be ->enqueue()'d without being preceded by this operation
	 * e.g. after exhausting its slice.
	 */
	void (*runnable)(struct task_struct *p, u64 enq_flags);

	/**
	 * @running: A task is starting to run on its associated CPU
	 * @p: task starting to run
	 *
	 * Note that this callback may be called from a CPU other than the
	 * one the task is going to run on. This can happen when a task
	 * property is changed (i.e., affinity), since scx_next_task_scx(),
	 * which triggers this callback, may run on a CPU different from
	 * the task's assigned CPU.
	 *
	 * Therefore, always use scx_bpf_task_cpu(@p) to determine the
	 * target CPU the task is going to use.
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 */
	void (*running)(struct task_struct *p);

	/**
	 * @stopping: A task is stopping execution
	 * @p: task stopping to run
	 * @runnable: is task @p still runnable?
	 *
	 * Note that this callback may be called from a CPU other than the
	 * one the task was running on. This can happen when a task
	 * property is changed (i.e., affinity), since dequeue_task_scx(),
	 * which triggers this callback, may run on a CPU different from
	 * the task's assigned CPU.
	 *
	 * Therefore, always use scx_bpf_task_cpu(@p) to retrieve the CPU
	 * the task was running on.
	 *
	 * See ->runnable() for explanation on the task state notifiers. If
	 * !@runnable, ->quiescent() will be invoked after this operation
	 * returns.
	 */
	void (*stopping)(struct task_struct *p, bool runnable);

	/**
	 * @quiescent: A task is becoming not runnable on its associated CPU
	 * @p: task becoming not runnable
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 *
	 * @p is becoming quiescent on the CPU because it's
	 *
	 * - sleeping (%SCX_DEQ_SLEEP)
	 * - being moved to another CPU
	 * - being temporarily taken off the queue for an attribute change
	 *   (%SCX_DEQ_SAVE)
	 *
	 * This and ->dequeue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be preceded by ->dequeue()
	 * e.g. when @p is being dispatched to a remote CPU.
	 */
	void (*quiescent)(struct task_struct *p, u64 deq_flags);

	/**
	 * @yield: Yield CPU
	 * @from: yielding task
	 * @to: optional yield target task
	 *
	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
	 * The BPF scheduler should ensure that other available tasks are
	 * dispatched before the yielding task. Return value is ignored in this
	 * case.
	 *
	 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
	 * scheduler can implement the request, return %true; otherwise, %false.
	 */
	bool (*yield)(struct task_struct *from, struct task_struct *to);

	/**
	 * @core_sched_before: Task ordering for core-sched
	 * @a: task A
	 * @b: task B
	 *
	 * Used by core-sched to determine the ordering between two tasks. See
	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
	 * core-sched.
	 *
	 * Both @a and @b are runnable and may or may not currently be queued on
	 * the BPF scheduler. Should return %true if @a should run before @b.
	 * %false if there's no required ordering or @b should run before @a.
	 *
	 * If not specified, the default is ordering them according to when they
	 * became runnable.
	 */
	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);

	/**
	 * @set_weight: Set task weight
	 * @p: task to set weight for
	 * @weight: new weight [1..10000]
	 *
	 * Update @p's weight to @weight.
	 */
	void (*set_weight)(struct task_struct *p, u32 weight);

	/**
	 * @set_cpumask: Set CPU affinity
	 * @p: task to set CPU affinity for
	 * @cpumask: cpumask of cpus that @p can run on
	 *
	 * Update @p's CPU affinity to @cpumask.
	 */
	void (*set_cpumask)(struct task_struct *p,
			    const struct cpumask *cpumask);

	/**
	 * @update_idle: Update the idle state of a CPU
	 * @cpu: CPU to update the idle state for
	 * @idle: whether entering or exiting the idle state
	 *
	 * This operation is called when @cpu enters or exits the idle state.
	 * By default, implementing this operation disables the built-in
	 * idle CPU tracking and the following helpers become unavailable:
	 *
	 * - scx_bpf_select_cpu_dfl()
	 * - scx_bpf_select_cpu_and()
	 * - scx_bpf_test_and_clear_cpu_idle()
	 * - scx_bpf_pick_idle_cpu()
	 *
	 * The user also must implement ops.select_cpu() as the default
	 * implementation relies on scx_bpf_select_cpu_dfl().
	 *
	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
	 * tracking.
	 */
	void (*update_idle)(s32 cpu, bool idle);
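
	/*
	 * Illustrative sketch only: an ops.update_idle() that merely counts
	 * idle entries. Note that implementing this without
	 * %SCX_OPS_KEEP_BUILTIN_IDLE disables the built-in idle tracking as
	 * described above.
	 *
	 *	static u64 nr_idle_entries;
	 *
	 *	void BPF_STRUCT_OPS(sketch_update_idle, s32 cpu, bool idle)
	 *	{
	 *		if (idle)
	 *			__sync_fetch_and_add(&nr_idle_entries, 1);
	 *	}
	 */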

	/**
	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
	 * @cpu: The CPU being acquired by the BPF scheduler.
	 * @args: Acquire arguments, see the struct definition.
	 *
	 * A CPU that was previously released from the BPF scheduler is now once
	 * again under its control.
	 */
	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);

	/**
	 * @cpu_release: A CPU is taken away from the BPF scheduler
	 * @cpu: The CPU being released by the BPF scheduler.
	 * @args: Release arguments, see the struct definition.
	 *
	 * The specified CPU is no longer under the control of the BPF
	 * scheduler. This could be because it was preempted by a higher
	 * priority sched_class, though there may be other reasons as well. The
	 * caller should consult @args->reason to determine the cause.
	 */
	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);

	/**
	 * @init_task: Initialize a task to run in a BPF scheduler
	 * @p: task to initialize for BPF scheduling
	 * @args: init arguments, see the struct definition
	 *
	 * Either we're loading a BPF scheduler or a new task is being forked.
	 * Initialize @p for BPF scheduling. This operation may block and can
	 * be used for allocations, and is called exactly once for a task.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During a fork, it
	 * will abort that specific fork.
	 */
	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);

	/**
	 * @exit_task: Exit a previously-running task from the system
	 * @p: task to exit
	 * @args: exit arguments, see the struct definition
	 *
	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
	 * necessary cleanup for @p.
	 */
	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);

	/**
	 * @enable: Enable BPF scheduling for a task
	 * @p: task to enable BPF scheduling for
	 *
	 * Enable @p for BPF scheduling. enable() is called on @p any time it
	 * enters SCX, and is always paired with a matching disable().
	 */
	void (*enable)(struct task_struct *p);

	/**
	 * @disable: Disable BPF scheduling for a task
	 * @p: task to disable BPF scheduling for
	 *
	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
	 * Disable BPF scheduling for @p. A disable() call is always matched
	 * with a prior enable() call.
	 */
	void (*disable)(struct task_struct *p);

	/**
	 * @dump: Dump BPF scheduler state on error
	 * @ctx: debug dump context
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
	 */
	void (*dump)(struct scx_dump_ctx *ctx);

	/**
	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
	 * @ctx: debug dump context
	 * @cpu: CPU to generate debug dump for
	 * @idle: @cpu is currently idle without any runnable tasks
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @cpu. If @idle is %true and this operation doesn't produce any
	 * output, @cpu is skipped for dump.
	 */
	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);

	/**
	 * @dump_task: Dump BPF scheduler state for a runnable task on error
	 * @ctx: debug dump context
	 * @p: runnable task to generate debug dump for
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @p.
	 */
	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);

#ifdef CONFIG_EXT_GROUP_SCHED
	/**
	 * @cgroup_init: Initialize a cgroup
	 * @cgrp: cgroup being initialized
	 * @args: init arguments, see the struct definition
	 *
	 * Either the BPF scheduler is being loaded or @cgrp is being created.
	 * Initialize @cgrp for sched_ext. This operation may block.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During cgroup
	 * creation, it will abort the specific cgroup creation.
	 */
	s32 (*cgroup_init)(struct cgroup *cgrp,
			   struct scx_cgroup_init_args *args);

	/**
	 * @cgroup_exit: Exit a cgroup
	 * @cgrp: cgroup being exited
	 *
	 * Either the BPF scheduler is being unloaded or @cgrp is being
	 * destroyed. Exit @cgrp for sched_ext. This operation may block.
	 */
	void (*cgroup_exit)(struct cgroup *cgrp);

	/**
	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Prepare @p for move from cgroup @from to @to. This operation may
	 * block and can be used for allocations.
	 *
	 * Return 0 for success, -errno for failure. An error return aborts the
	 * migration.
	 */
	s32 (*cgroup_prep_move)(struct task_struct *p,
				struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_move: Commit cgroup move
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Commit the move. @p is dequeued during this operation.
	 */
	void (*cgroup_move)(struct task_struct *p,
			    struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_cancel_move: Cancel cgroup move
	 * @p: task whose cgroup move is being canceled
	 * @from: cgroup @p was being moved from
	 * @to: cgroup @p was being moved to
	 *
	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
	 * Undo the preparation.
	 */
	void (*cgroup_cancel_move)(struct task_struct *p,
				   struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_set_weight: A cgroup's weight is being changed
	 * @cgrp: cgroup whose weight is being updated
	 * @weight: new weight [1..10000]
	 *
	 * Update @cgrp's weight to @weight.
	 */
	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);

	/**
	 * @cgroup_set_bandwidth: A cgroup's bandwidth is being changed
	 * @cgrp: cgroup whose bandwidth is being updated
	 * @period_us: bandwidth control period
	 * @quota_us: bandwidth control quota
	 * @burst_us: bandwidth control burst
	 *
	 * Update @cgrp's bandwidth control parameters. This is from the cpu.max
	 * cgroup interface.
	 *
	 * @quota_us / @period_us determines the CPU bandwidth @cgrp is entitled
	 * to. For example, if @period_us is 1_000_000 and @quota_us is
	 * 2_500_000, @cgrp is entitled to 2.5 CPUs. @burst_us can be
	 * interpreted in the same fashion and specifies how much @cgrp can
	 * burst temporarily. The specific control mechanism and thus the
	 * interpretation of @period_us and burstiness is up to the BPF
	 * scheduler.
	 */
	void (*cgroup_set_bandwidth)(struct cgroup *cgrp,
				     u64 period_us, u64 quota_us, u64 burst_us);

#endif	/* CONFIG_EXT_GROUP_SCHED */

	/*
	 * All online ops must come before ops.cpu_online().
	 */

	/**
	 * @cpu_online: A CPU became online
	 * @cpu: CPU which just came up
	 *
	 * @cpu just came online. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
	 */
	void (*cpu_online)(s32 cpu);

	/**
	 * @cpu_offline: A CPU is going offline
	 * @cpu: CPU which is going offline
	 *
	 * @cpu is going offline. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
	 */
	void (*cpu_offline)(s32 cpu);

	/*
	 * All CPU hotplug ops must come before ops.init().
	 */

	/**
	 * @init: Initialize the BPF scheduler
	 */
	s32 (*init)(void);
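
	/*
	 * Illustrative sketch only: an ops.init() that creates the shared,
	 * made-up SKETCH_DSQ used by the enqueue/dispatch sketches above.
	 * BPF_STRUCT_OPS_SLEEPABLE() is assumed to come from the BPF-side
	 * tooling headers.
	 *
	 *	s32 BPF_STRUCT_OPS_SLEEPABLE(sketch_init)
	 *	{
	 *		return scx_bpf_create_dsq(SKETCH_DSQ, -1);
	 *	}
	 */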

	/**
	 * @exit: Clean up after the BPF scheduler
	 * @info: Exit info
	 *
	 * ops.exit() is also called on ops.init() failure, which is a bit
	 * unusual. This is to allow rich reporting through @info on how
	 * ops.init() failed.
	 */
	void (*exit)(struct scx_exit_info *info);

	/**
	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
	 */
	u32 dispatch_max_batch;

	/**
	 * @flags: %SCX_OPS_* flags
	 */
	u64 flags;

	/**
	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
	 * runnable task should be able to wait before being scheduled. The
	 * maximum timeout may not exceed the default timeout of 30 seconds.
	 *
	 * Defaults to the maximum allowed timeout value of 30 seconds.
	 */
	u32 timeout_ms;

	/**
	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
	 * value of 32768 is used.
	 */
	u32 exit_dump_len;

	/**
	 * @hotplug_seq: A sequence number that may be set by the scheduler to
	 * detect when a hotplug event has occurred during the loading process.
	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
	 * load if the sequence number does not match @scx_hotplug_seq on the
	 * enable path.
	 */
	u64 hotplug_seq;

	/**
	 * @name: BPF scheduler's name
	 *
	 * Must be a non-zero valid BPF object name including only isalnum(),
	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
	 * BPF scheduler is enabled.
	 */
	char name[SCX_OPS_NAME_LEN];

	/* internal use only, must be NULL */
	void *priv;
};
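
/*
 * Illustrative sketch only: how a BPF scheduler might tie the hypothetical
 * "sketch_*" callbacks from the comments above together. SCX_OPS_DEFINE() is
 * assumed to come from the sched_ext BPF tooling headers
 * (tools/sched_ext/include/scx/common.bpf.h); the flag and timeout values are
 * arbitrary.
 *
 *	SCX_OPS_DEFINE(sketch_ops,
 *		       .select_cpu	= (void *)sketch_select_cpu,
 *		       .enqueue		= (void *)sketch_enqueue,
 *		       .dispatch	= (void *)sketch_dispatch,
 *		       .init		= (void *)sketch_init,
 *		       .flags		= SCX_OPS_ENQ_LAST,
 *		       .timeout_ms	= 5000,
 *		       .name		= "sketch");
 */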

enum scx_opi {
	SCX_OPI_BEGIN = 0,
	SCX_OPI_NORMAL_BEGIN = 0,
	SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
	SCX_OPI_END = SCX_OP_IDX(init),
};

/*
 * Collection of event counters. Event types are placed in descending order.
 */
struct scx_event_stats {
	/*
	 * If ops.select_cpu() returns a CPU which can't be used by the task,
	 * the core scheduler code silently picks a fallback CPU.
	 */
	s64 SCX_EV_SELECT_CPU_FALLBACK;

	/*
	 * When dispatching to a local DSQ, the CPU may have gone offline in
	 * the meantime. In this case, the task is bounced to the global DSQ.
	 */
	s64 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;

	/*
	 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
	 * continued to run because there were no other tasks on the CPU.
	 */
	s64 SCX_EV_DISPATCH_KEEP_LAST;

	/*
	 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
	 * is dispatched to a local DSQ when exiting.
	 */
	s64 SCX_EV_ENQ_SKIP_EXITING;

	/*
	 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
	 * migration disabled task skips ops.enqueue() and is dispatched to its
	 * local DSQ.
	 */
	s64 SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;

	/*
	 * Total number of times a task's time slice was refilled with the
	 * default value (SCX_SLICE_DFL).
	 */
	s64 SCX_EV_REFILL_SLICE_DFL;

	/*
	 * The total duration of bypass modes in nanoseconds.
	 */
	s64 SCX_EV_BYPASS_DURATION;

	/*
	 * The number of tasks dispatched in the bypassing mode.
	 */
	s64 SCX_EV_BYPASS_DISPATCH;

	/*
	 * The number of times the bypassing mode has been activated.
	 */
	s64 SCX_EV_BYPASS_ACTIVATE;
};

struct scx_sched_pcpu {
	/*
	 * The event counters are in a per-CPU variable to minimize the
	 * accounting overhead. A system-wide view on the event counter is
	 * constructed when requested by scx_bpf_events().
	 */
	struct scx_event_stats event_stats;
};

struct scx_sched {
	struct sched_ext_ops ops;
	DECLARE_BITMAP(has_op, SCX_OPI_END);

	/*
	 * Dispatch queues.
	 *
	 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability.
	 * This is to avoid live-locking in bypass mode where all tasks are
	 * dispatched to %SCX_DSQ_GLOBAL and all CPUs consume from it. If
	 * per-node split isn't sufficient, it can be further split.
	 */
	struct rhashtable dsq_hash;
	struct scx_dispatch_q **global_dsqs;
	struct scx_sched_pcpu __percpu *pcpu;

	bool warned_zero_slice:1;
	bool warned_deprecated_rq:1;

	atomic_t exit_kind;
	struct scx_exit_info *exit_info;

	struct kobject kobj;

	struct kthread_worker *helper;
	struct irq_work error_irq_work;
	struct kthread_work disable_work;
	struct rcu_work rcu_work;
};

enum scx_wake_flags {
	/* expose select WF_* flags as enums */
	SCX_WAKE_FORK = WF_FORK,
	SCX_WAKE_TTWU = WF_TTWU,
	SCX_WAKE_SYNC = WF_SYNC,
};

enum scx_enq_flags {
	/* expose select ENQUEUE_* flags as enums */
	SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
	SCX_ENQ_HEAD = ENQUEUE_HEAD,
	SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED,

	/* high 32bits are SCX specific */

	/*
	 * Set the following to trigger preemption when calling
	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
	 * current task is cleared to zero and the CPU is kicked into the
	 * scheduling path. Implies %SCX_ENQ_HEAD.
	 */
	SCX_ENQ_PREEMPT = 1LLU << 32,

	/*
	 * The task being enqueued was previously enqueued on the current CPU's
	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
	 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
	 * invoked in a ->cpu_release() callback, and the task is again
	 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
	 * task will not be scheduled on the CPU until at least the next invocation
	 * of the ->cpu_acquire() callback.
	 */
	SCX_ENQ_REENQ = 1LLU << 40,

	/*
	 * The task being enqueued is the only task available for the cpu. By
	 * default, ext core keeps executing such tasks but when
	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
	 * %SCX_ENQ_LAST flag set.
	 *
	 * The BPF scheduler is responsible for triggering a follow-up
	 * scheduling event. Otherwise, execution may stall.
	 */
	SCX_ENQ_LAST = 1LLU << 41,

	/* high 8 bits are internal */
	__SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,

	SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
	SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
};
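
/*
 * Illustrative sketch only: requesting preemption from a BPF ops.enqueue()
 * by inserting into the local DSQ with %SCX_ENQ_PREEMPT set. @p and
 * @enq_flags are assumed to be the enqueue callback arguments.
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
 *			   enq_flags | SCX_ENQ_PREEMPT);
 */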

enum scx_deq_flags {
	/* expose select DEQUEUE_* flags as enums */
	SCX_DEQ_SLEEP = DEQUEUE_SLEEP,

	/* high 32bits are SCX specific */

	/*
	 * The generic core-sched layer decided to execute the task even though
	 * it hasn't been dispatched yet. Dequeue from the BPF side.
	 */
	SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
};

enum scx_pick_idle_cpu_flags {
	SCX_PICK_IDLE_CORE = 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
	SCX_PICK_IDLE_IN_NODE = 1LLU << 1,	/* pick a CPU in the same target NUMA node */
};

enum scx_kick_flags {
	/*
	 * Kick the target CPU if idle. Guarantees that the target CPU goes
	 * through at least one full scheduling cycle before going idle. If the
	 * target CPU can be determined to be currently not idle and going to go
	 * through a scheduling cycle before going idle, noop.
	 */
	SCX_KICK_IDLE = 1LLU << 0,

	/*
	 * Preempt the current task and execute the dispatch path. If the
	 * current task of the target CPU is an SCX task, its ->scx.slice is
	 * cleared to zero before the scheduling path is invoked so that the
	 * task expires and the dispatch path is invoked.
	 */
	SCX_KICK_PREEMPT = 1LLU << 1,

	/*
	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
	 * return after the target CPU finishes picking the next task.
	 */
	SCX_KICK_WAIT = 1LLU << 2,
};
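
/*
 * Illustrative sketch only: after queueing a task on a shared DSQ, a BPF
 * scheduler may kick an idle CPU allowed for the task so it runs through
 * ops.dispatch() and picks the task up. @p is assumed to be a task_struct
 * pointer in scope.
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */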

enum scx_tg_flags {
	SCX_TG_ONLINE = 1U << 0,
	SCX_TG_INITED = 1U << 1,
};

enum scx_enable_state {
	SCX_ENABLING,
	SCX_ENABLED,
	SCX_DISABLING,
	SCX_DISABLED,
};

static const char *scx_enable_state_str[] = {
	[SCX_ENABLING]	= "enabling",
	[SCX_ENABLED]	= "enabled",
	[SCX_DISABLING]	= "disabling",
	[SCX_DISABLED]	= "disabled",
};

/*
 * sched_ext_entity->ops_state
 *
 * Used to track the task ownership between the SCX core and the BPF scheduler.
 * State transitions look as follows:
 *
 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
 *   ^              |                 |
 *   |              v                 v
 *   \-------------------------------/
 *
 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
 * sites for explanations on the conditions being waited upon and why they are
 * safe. Transitions out of them into NONE or QUEUED must store_release and the
 * waiters should load_acquire.
 *
 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
 * any given task can be dispatched by the BPF scheduler at all times and thus
 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
 * to try to dispatch any task anytime regardless of its state as the SCX core
 * can safely reject invalid dispatches.
 */
enum scx_ops_state {
	SCX_OPSS_NONE,		/* owned by the SCX core */
	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */

	/*
	 * QSEQ brands each QUEUED instance so that, when dispatch races
	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
	 * on the task being dispatched.
	 *
	 * As some 32bit archs can't do 64bit store_release/load_acquire,
	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
	 * and runs with IRQ disabled. 30 bits should be sufficient.
	 */
	SCX_OPSS_QSEQ_SHIFT = 2,
};

/* Use macros to ensure that the type is unsigned long for the masks */
#define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
#define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
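
/*
 * For illustration, a sketch (not a quote of any in-tree helper) of how a
 * reader could decompose p->scx.ops_state using the masks above:
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	unsigned long state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 */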

DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);

/*
 * Return the rq currently locked from an scx callback, or NULL if no rq is
 * locked.
 */
static inline struct rq *scx_locked_rq(void)
{
	return __this_cpu_read(scx_locked_rq_state);
}

static inline bool scx_kf_allowed_if_unlocked(void)
{
	return !current->scx.kf_mask;
}

static inline bool scx_rq_bypassing(struct rq *rq)
{
	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
}