/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
 */
#define SCX_OP_IDX(op)	(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))

enum scx_consts {
	SCX_DSP_DFL_MAX_BATCH		= 32,
	SCX_DSP_MAX_LOOPS		= 32,
	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,

	SCX_EXIT_BT_LEN			= 64,
	SCX_EXIT_MSG_LEN		= 1024,
	SCX_EXIT_DUMP_DFL_LEN		= 32768,

	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,

	/*
	 * Iterating all tasks may take a while. Periodically drop
	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
	 */
	SCX_TASK_ITER_BATCH		= 32,
};

enum scx_exit_kind {
	SCX_EXIT_NONE,
	SCX_EXIT_DONE,

	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */

	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
};

/*
 * An exit code can be specified when exiting with scx_bpf_exit() or scx_exit(),
 * corresponding to exit_kind UNREG_BPF and UNREG_KERN respectively. The codes
 * are 64-bit values in the following format:
 *
 *	Bits: [63  ..  48 47   ..  32 31 .. 0]
 *	      [   SYS ACT ] [ SYS RSN ] [ USR ]
 *
 *	SYS ACT: System-defined exit actions
 *	SYS RSN: System-defined exit reasons
 *	USR    : User-defined exit codes and reasons
 *
 * Using the above, users may communicate intention and context by ORing system
 * actions and/or system reasons with a user-defined exit code.
 */
enum scx_exit_code {
	/* Reasons */
	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,

	/* Actions */
	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
};

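/*
 * For example, a scheduler that wants its user-space loader to restart it
 * after a hotplug-triggered exit could do something like the following sketch.
 * SCX_ECODE_USR_RESTARTABLE is a hypothetical scheduler-defined code and
 * scx_bpf_exit() is the BPF-side exit helper from the scx tooling headers:
 *
 *	#define SCX_ECODE_USR_RESTARTABLE	(1LLU << 0)
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG |
 *		     SCX_ECODE_USR_RESTARTABLE, "cpu%d went offline", cpu);
 *
 * The loader can then test the action bits in scx_exit_info.exit_code to
 * decide whether to reload the scheduler.
 */
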
/*
 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
 * being disabled.
 */
struct scx_exit_info {
	/* %SCX_EXIT_* - broad category of the exit reason */
	enum scx_exit_kind	kind;

	/* exit code if gracefully exiting */
	s64			exit_code;

	/* textual representation of the above */
	const char		*reason;

	/* backtrace if exiting due to an error */
	unsigned long		*bt;
	u32			bt_len;

	/* informational message */
	char			*msg;

	/* debug dump */
	char			*dump;
};

/* sched_ext_ops.flags */
enum scx_ops_flags {
	/*
	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
	 */
	SCX_OPS_KEEP_BUILTIN_IDLE	= 1LLU << 0,

	/*
	 * By default, if there are no other tasks to run on the CPU, the ext
	 * core keeps running the current task even after its slice expires. If
	 * this flag is specified, such tasks are passed to ops.enqueue() with
	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
	 */
	SCX_OPS_ENQ_LAST		= 1LLU << 1,

	/*
	 * An exiting task may schedule after PF_EXITING is set. In such cases,
	 * bpf_task_from_pid() may not be able to find the task and if the BPF
	 * scheduler depends on pid lookup for dispatching, the task will be
	 * lost leading to various issues including RCU grace period stalls.
	 *
	 * To mask this problem, by default, unhashed tasks are automatically
	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
	 * depend on pid lookups and wants to handle these tasks directly, the
	 * following flag can be used.
	 */
	SCX_OPS_ENQ_EXITING		= 1LLU << 2,

	/*
	 * If set, only tasks with policy set to SCHED_EXT are attached to
	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
	 */
	SCX_OPS_SWITCH_PARTIAL		= 1LLU << 3,

	/*
	 * A migration disabled task can only execute on its current CPU. By
	 * default, such tasks are automatically put on the CPU's local DSQ with
	 * the default slice on enqueue. If this ops flag is set, they also go
	 * through ops.enqueue().
	 *
	 * A migration disabled task never invokes ops.select_cpu() as it can
	 * only select the current CPU. Also, p->cpus_ptr will only contain its
	 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
	 * and thus may disagree with cpumask_weight(p->cpus_ptr).
	 */
	SCX_OPS_ENQ_MIGRATION_DISABLED	= 1LLU << 4,

	/*
	 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
	 * ops.enqueue() on the CPU selected by ops.select_cpu() or the wakee's
	 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
	 * transfers. When this optimization is enabled, ops.select_cpu() is
	 * skipped in some cases (when racing against the wakee switching out).
	 * As the BPF scheduler may depend on ops.select_cpu() being invoked
	 * during wakeups, queued wakeup is disabled by default.
	 *
	 * If this ops flag is set, queued wakeup optimization is enabled and
	 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
	 * wakee's CPU without preceding ops.select_cpu() even for tasks which
	 * may be executed on multiple CPUs.
	 */
	SCX_OPS_ALLOW_QUEUED_WAKEUP	= 1LLU << 5,

	/*
	 * If set, enable per-node idle cpumasks. If clear, use a single global
	 * flat idle cpumask.
	 */
	SCX_OPS_BUILTIN_IDLE_PER_NODE	= 1LLU << 6,

	/*
	 * CPU cgroup support flags
	 */
	SCX_OPS_HAS_CGROUP_WEIGHT	= 1LLU << 16,	/* DEPRECATED, will be removed in 6.18 */

	SCX_OPS_ALL_FLAGS		= SCX_OPS_KEEP_BUILTIN_IDLE |
					  SCX_OPS_ENQ_LAST |
					  SCX_OPS_ENQ_EXITING |
					  SCX_OPS_ENQ_MIGRATION_DISABLED |
					  SCX_OPS_ALLOW_QUEUED_WAKEUP |
					  SCX_OPS_SWITCH_PARTIAL |
					  SCX_OPS_BUILTIN_IDLE_PER_NODE |
					  SCX_OPS_HAS_CGROUP_WEIGHT,

	/* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
	__SCX_OPS_INTERNAL_MASK		= 0xffLLU << 56,

	SCX_OPS_HAS_CPU_PREEMPT		= 1LLU << 56,
};

/* argument container for ops.init_task() */
struct scx_init_task_args {
	/*
	 * Set if ops.init_task() is being invoked on the fork path, as opposed
	 * to the scheduler transition path.
	 */
	bool			fork;
#ifdef CONFIG_EXT_GROUP_SCHED
	/* the cgroup the task is joining */
	struct cgroup		*cgroup;
#endif
};

/* argument container for ops.exit_task() */
struct scx_exit_task_args {
	/* Whether the task exited before running on sched_ext. */
	bool			cancelled;
};

/* argument container for ops->cgroup_init() */
struct scx_cgroup_init_args {
	/* the weight of the cgroup [1..10000] */
	u32			weight;

	/* bandwidth control parameters from cpu.max and cpu.max.burst */
	u64			bw_period_us;
	u64			bw_quota_us;
	u64			bw_burst_us;
};

enum scx_cpu_preempt_reason {
	/* next task is being scheduled by &sched_class_rt */
	SCX_CPU_PREEMPT_RT,
	/* next task is being scheduled by &sched_class_dl */
	SCX_CPU_PREEMPT_DL,
	/* next task is being scheduled by &sched_class_stop */
	SCX_CPU_PREEMPT_STOP,
	/* unknown reason for SCX being preempted */
	SCX_CPU_PREEMPT_UNKNOWN,
};

/*
 * Argument container for ops->cpu_acquire(). Currently empty, but may be
 * expanded in the future.
 */
struct scx_cpu_acquire_args {};

/* argument container for ops->cpu_release() */
struct scx_cpu_release_args {
	/* the reason the CPU was preempted */
	enum scx_cpu_preempt_reason reason;

	/* the task that's going to be scheduled on the CPU */
	struct task_struct	*task;
};

/*
 * Informational context provided to dump operations.
 */
struct scx_dump_ctx {
	enum scx_exit_kind	kind;
	s64			exit_code;
	const char		*reason;
	u64			at_ns;
	u64			at_jiffies;
};

/**
 * struct sched_ext_ops - Operation table for BPF scheduler implementation
 *
 * A BPF scheduler can implement an arbitrary scheduling policy by
 * implementing and loading operations in this table. Note that a userland
 * scheduling policy can also be implemented using the BPF scheduler
 * as a shim layer.
 */
struct sched_ext_ops {
	/**
	 * @select_cpu: Pick the target CPU for a task which is being woken up
	 * @p: task being woken up
	 * @prev_cpu: the cpu @p was on before sleeping
	 * @wake_flags: SCX_WAKE_*
	 *
	 * Decision made here isn't final. @p may be moved to any CPU while it
	 * is getting dispatched for execution later. However, as @p is not on
	 * the rq at this point, getting the eventual execution CPU right here
	 * saves a small bit of overhead down the line.
	 *
	 * If an idle CPU is returned, the CPU is kicked and will try to
	 * dispatch. While an explicit custom mechanism can be added,
	 * select_cpu() serves as the default way to wake up idle CPUs.
	 *
	 * @p may be inserted into a DSQ directly by calling
	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
	 * of the CPU returned by this operation.
	 *
	 * Note that select_cpu() is never called for tasks that can only run
	 * on a single CPU or tasks with migration disabled, as they don't have
	 * the option to select a different CPU. See select_task_rq() for
	 * details.
	 */
	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);

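	/*
	 * A minimal sketch of a select_cpu() implementation (not part of this
	 * header; BPF_STRUCT_OPS() is a helper macro from the scx BPF-side
	 * tooling and the myscx_ prefix is hypothetical): pick an idle CPU
	 * with the built-in helper and, if one was found, insert @p directly
	 * into that CPU's local DSQ so ops.enqueue() is skipped.
	 *
	 *	s32 BPF_STRUCT_OPS(myscx_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */
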
	/**
	 * @enqueue: Enqueue a task on the BPF scheduler
	 * @p: task being enqueued
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * @p is ready to run. Insert directly into a DSQ by calling
	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
	 * inserted, the BPF scheduler owns @p and if it fails to dispatch @p,
	 * the task will stall.
	 *
	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
	 * skipped.
	 */
	void (*enqueue)(struct task_struct *p, u64 enq_flags);

	/**
	 * @dequeue: Remove a task from the BPF scheduler
	 * @p: task being dequeued
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * Remove @p from the BPF scheduler. This is usually called to isolate
	 * the task while updating its scheduling properties (e.g. priority).
	 *
	 * The ext core keeps track of whether the BPF side owns a given task or
	 * not and can gracefully ignore spurious dispatches from BPF side,
	 * which makes it safe to not implement this method. However, depending
	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
	 * scheduling position not being updated across a priority change.
	 */
	void (*dequeue)(struct task_struct *p, u64 deq_flags);

	/**
	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
	 * @cpu: CPU to dispatch tasks for
	 * @prev: previous task being switched out
	 *
	 * Called when a CPU's local DSQ is empty. The operation should dispatch
	 * one or more tasks from the BPF scheduler into the DSQs using
	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
	 * using scx_bpf_dsq_move_to_local().
	 *
	 * The maximum number of times scx_bpf_dsq_insert() can be called
	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
	 * ops.dispatch_max_batch. See the comments on top of the two functions
	 * for more details.
	 *
	 * When not %NULL, @prev is an SCX task with its slice depleted. If
	 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
	 * ops.dispatch() returns. To keep executing @prev, return without
	 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
	 */
	void (*dispatch)(s32 cpu, struct task_struct *prev);

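	/*
	 * An illustrative dispatch path (not part of this header; the myscx_
	 * names and MYSCX_DSQ id are hypothetical, BPF_STRUCT_OPS() comes from
	 * the scx BPF-side tooling): the scheduler keeps tasks on a custom DSQ
	 * created with scx_bpf_create_dsq() and moves one to the local DSQ
	 * when the CPU runs dry.
	 *
	 *	void BPF_STRUCT_OPS(myscx_dispatch, s32 cpu, struct task_struct *prev)
	 *	{
	 *		// Move the first task queued on MYSCX_DSQ to @cpu's local
	 *		// DSQ. If MYSCX_DSQ is empty and @prev is still runnable,
	 *		// returning without moving anything keeps running @prev.
	 *		scx_bpf_dsq_move_to_local(MYSCX_DSQ);
	 *	}
	 */
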
	/**
	 * @tick: Periodic tick
	 * @p: task running currently
	 *
	 * This operation is called every 1/HZ seconds on CPUs which are
	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
	 * immediate dispatch cycle on the CPU.
	 */
	void (*tick)(struct task_struct *p);

	/**
	 * @runnable: A task is becoming runnable on its associated CPU
	 * @p: task becoming runnable
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * This and the following three functions can be used to track a task's
	 * execution state transitions. A task becomes ->runnable() on a CPU,
	 * and then goes through one or more ->running() and ->stopping() pairs
	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
	 * done running on the CPU.
	 *
	 * @p is becoming runnable on the CPU because it's
	 *
	 * - waking up (%SCX_ENQ_WAKEUP)
	 * - being moved from another CPU
	 * - being restored after temporarily taken off the queue for an
	 *   attribute change.
	 *
	 * This and ->enqueue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be followed by ->enqueue()
	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
	 * task may be ->enqueue()'d without being preceded by this operation
	 * e.g. after exhausting its slice.
	 */
	void (*runnable)(struct task_struct *p, u64 enq_flags);

	/**
	 * @running: A task is starting to run on its associated CPU
	 * @p: task starting to run
	 *
	 * Note that this callback may be called from a CPU other than the
	 * one the task is going to run on. This can happen when a task
	 * property is changed (e.g., affinity), since scx_next_task_scx(),
	 * which triggers this callback, may run on a CPU different from
	 * the task's assigned CPU.
	 *
	 * Therefore, always use scx_bpf_task_cpu(@p) to determine the
	 * target CPU the task is going to use.
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 */
	void (*running)(struct task_struct *p);

	/**
	 * @stopping: A task is stopping execution
	 * @p: task stopping to run
	 * @runnable: is task @p still runnable?
	 *
	 * Note that this callback may be called from a CPU other than the
	 * one the task was running on. This can happen when a task
	 * property is changed (e.g., affinity), since dequeue_task_scx(),
	 * which triggers this callback, may run on a CPU different from
	 * the task's assigned CPU.
	 *
	 * Therefore, always use scx_bpf_task_cpu(@p) to retrieve the CPU
	 * the task was running on.
	 *
	 * See ->runnable() for explanation on the task state notifiers. If
	 * !@runnable, ->quiescent() will be invoked after this operation
	 * returns.
	 */
	void (*stopping)(struct task_struct *p, bool runnable);

	/**
	 * @quiescent: A task is becoming not runnable on its associated CPU
	 * @p: task becoming not runnable
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 *
	 * @p is becoming quiescent on the CPU because it's
	 *
	 * - sleeping (%SCX_DEQ_SLEEP)
	 * - being moved to another CPU
	 * - being temporarily taken off the queue for an attribute change
	 *   (%SCX_DEQ_SAVE)
	 *
	 * This and ->dequeue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be preceded by ->dequeue()
	 * e.g. when @p is being dispatched to a remote CPU.
	 */
	void (*quiescent)(struct task_struct *p, u64 deq_flags);

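	/*
	 * A sketch of using the state notifiers for accounting (not part of
	 * this header; the myscx_ name is hypothetical): charge the consumed
	 * part of the slice to a weighted vtime on each ->stopping()
	 * transition, similar to what the scx_simple example scheduler does in
	 * its weighted-vtime mode.
	 *
	 *	void BPF_STRUCT_OPS(myscx_stopping, struct task_struct *p, bool runnable)
	 *	{
	 *		// Scale the consumed slice inversely with the task weight.
	 *		p->scx.dsq_vtime +=
	 *			(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
	 *	}
	 */
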
	/**
	 * @yield: Yield CPU
	 * @from: yielding task
	 * @to: optional yield target task
	 *
	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
	 * The BPF scheduler should ensure that other available tasks are
	 * dispatched before the yielding task. Return value is ignored in this
	 * case.
	 *
	 * If @to is not-NULL, @from wants to yield the CPU to @to. If the BPF
	 * scheduler can implement the request, return %true; otherwise, %false.
	 */
	bool (*yield)(struct task_struct *from, struct task_struct *to);

	/**
	 * @core_sched_before: Task ordering for core-sched
	 * @a: task A
	 * @b: task B
	 *
	 * Used by core-sched to determine the ordering between two tasks. See
	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
	 * core-sched.
	 *
	 * Both @a and @b are runnable and may or may not currently be queued on
	 * the BPF scheduler. Should return %true if @a should run before @b and
	 * %false if there's no required ordering or @b should run before @a.
	 *
	 * If not specified, the default is ordering them according to when they
	 * became runnable.
	 */
	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);

	/**
	 * @set_weight: Set task weight
	 * @p: task to set weight for
	 * @weight: new weight [1..10000]
	 *
	 * Update @p's weight to @weight.
	 */
	void (*set_weight)(struct task_struct *p, u32 weight);

	/**
	 * @set_cpumask: Set CPU affinity
	 * @p: task to set CPU affinity for
	 * @cpumask: cpumask of cpus that @p can run on
	 *
	 * Update @p's CPU affinity to @cpumask.
	 */
	void (*set_cpumask)(struct task_struct *p,
			    const struct cpumask *cpumask);

	/**
	 * @update_idle: Update the idle state of a CPU
	 * @cpu: CPU to update the idle state for
	 * @idle: whether entering or exiting the idle state
	 *
	 * This operation is called when @cpu enters or leaves the idle
	 * state. By default, implementing this operation disables the built-in
	 * idle CPU tracking and the following helpers become unavailable:
	 *
	 * - scx_bpf_select_cpu_dfl()
	 * - scx_bpf_select_cpu_and()
	 * - scx_bpf_test_and_clear_cpu_idle()
	 * - scx_bpf_pick_idle_cpu()
	 *
	 * The user also must implement ops.select_cpu() as the default
	 * implementation relies on scx_bpf_select_cpu_dfl().
	 *
	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
	 * tracking.
	 */
	void (*update_idle)(s32 cpu, bool idle);

	/**
	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
	 * @cpu: The CPU being acquired by the BPF scheduler.
	 * @args: Acquire arguments, see the struct definition.
	 *
	 * A CPU that was previously released from the BPF scheduler is now once
	 * again under its control.
	 */
	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);

	/**
	 * @cpu_release: A CPU is taken away from the BPF scheduler
	 * @cpu: The CPU being released by the BPF scheduler.
	 * @args: Release arguments, see the struct definition.
	 *
	 * The specified CPU is no longer under the control of the BPF
	 * scheduler. This could be because it was preempted by a higher
	 * priority sched_class, though there may be other reasons as well. The
	 * caller should consult @args->reason to determine the cause.
	 */
	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);

	/**
	 * @init_task: Initialize a task to run in a BPF scheduler
	 * @p: task to initialize for BPF scheduling
	 * @args: init arguments, see the struct definition
	 *
	 * Either we're loading a BPF scheduler or a new task is being forked.
	 * Initialize @p for BPF scheduling. This operation may block and can
	 * be used for allocations, and is called exactly once for a task.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During a fork, it
	 * will abort that specific fork.
	 */
	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);

	/**
	 * @exit_task: Exit a previously-running task from the system
	 * @p: task to exit
	 * @args: exit arguments, see the struct definition
	 *
	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
	 * necessary cleanup for @p.
	 */
	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);

	/**
	 * @enable: Enable BPF scheduling for a task
	 * @p: task to enable BPF scheduling for
	 *
	 * Enable @p for BPF scheduling. enable() is called on @p any time it
	 * enters SCX, and is always paired with a matching disable().
	 */
	void (*enable)(struct task_struct *p);

	/**
	 * @disable: Disable BPF scheduling for a task
	 * @p: task to disable BPF scheduling for
	 *
	 * @p is exiting, leaving SCX, or the BPF scheduler is being unloaded.
	 * Disable BPF scheduling for @p. A disable() call is always matched
	 * with a prior enable() call.
	 */
	void (*disable)(struct task_struct *p);

	/**
	 * @dump: Dump BPF scheduler state on error
	 * @ctx: debug dump context
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
	 */
	void (*dump)(struct scx_dump_ctx *ctx);

	/**
	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
	 * @ctx: debug dump context
	 * @cpu: CPU to generate debug dump for
	 * @idle: @cpu is currently idle without any runnable tasks
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @cpu. If @idle is %true and this operation doesn't produce any
	 * output, @cpu is skipped for dump.
	 */
	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);

	/**
	 * @dump_task: Dump BPF scheduler state for a runnable task on error
	 * @ctx: debug dump context
	 * @p: runnable task to generate debug dump for
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @p.
	 */
	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);

#ifdef CONFIG_EXT_GROUP_SCHED
	/**
	 * @cgroup_init: Initialize a cgroup
	 * @cgrp: cgroup being initialized
	 * @args: init arguments, see the struct definition
	 *
	 * Either the BPF scheduler is being loaded or @cgrp created, initialize
	 * @cgrp for sched_ext. This operation may block.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During cgroup
	 * creation, it will abort the specific cgroup creation.
	 */
	s32 (*cgroup_init)(struct cgroup *cgrp,
			   struct scx_cgroup_init_args *args);

	/**
	 * @cgroup_exit: Exit a cgroup
	 * @cgrp: cgroup being exited
	 *
	 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
	 * @cgrp for sched_ext. This operation may block.
	 */
	void (*cgroup_exit)(struct cgroup *cgrp);

	/**
	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Prepare @p for move from cgroup @from to @to. This operation may
	 * block and can be used for allocations.
	 *
	 * Return 0 for success, -errno for failure. An error return aborts the
	 * migration.
	 */
	s32 (*cgroup_prep_move)(struct task_struct *p,
				struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_move: Commit cgroup move
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Commit the move. @p is dequeued during this operation.
	 */
	void (*cgroup_move)(struct task_struct *p,
			    struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_cancel_move: Cancel cgroup move
	 * @p: task whose cgroup move is being canceled
	 * @from: cgroup @p was being moved from
	 * @to: cgroup @p was being moved to
	 *
	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
	 * Undo the preparation.
	 */
	void (*cgroup_cancel_move)(struct task_struct *p,
				   struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_set_weight: A cgroup's weight is being changed
	 * @cgrp: cgroup whose weight is being updated
	 * @weight: new weight [1..10000]
	 *
	 * Update @cgrp's weight to @weight.
	 */
	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);

	/**
	 * @cgroup_set_bandwidth: A cgroup's bandwidth is being changed
	 * @cgrp: cgroup whose bandwidth is being updated
	 * @period_us: bandwidth control period
	 * @quota_us: bandwidth control quota
	 * @burst_us: bandwidth control burst
	 *
	 * Update @cgrp's bandwidth control parameters. This is from the cpu.max
	 * cgroup interface.
	 *
	 * @quota_us / @period_us determines the CPU bandwidth @cgrp is entitled
	 * to. For example, if @period_us is 1_000_000 and @quota_us is
	 * 2_500_000, @cgrp is entitled to 2.5 CPUs. @burst_us can be
	 * interpreted in the same fashion and specifies how much @cgrp can
	 * burst temporarily. The specific control mechanism and thus the
	 * interpretation of @period_us and burstiness is up to the BPF
	 * scheduler.
	 */
	void (*cgroup_set_bandwidth)(struct cgroup *cgrp,
				     u64 period_us, u64 quota_us, u64 burst_us);

#endif	/* CONFIG_EXT_GROUP_SCHED */

	/*
	 * All online ops must come before ops.cpu_online().
	 */

	/**
	 * @cpu_online: A CPU became online
	 * @cpu: CPU which just came up
	 *
	 * @cpu just came online. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
	 */
	void (*cpu_online)(s32 cpu);

	/**
	 * @cpu_offline: A CPU is going offline
	 * @cpu: CPU which is going offline
	 *
	 * @cpu is going offline. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
	 */
	void (*cpu_offline)(s32 cpu);

	/*
	 * All CPU hotplug ops must come before ops.init().
	 */

	/**
	 * @init: Initialize the BPF scheduler
	 */
	s32 (*init)(void);

	/**
	 * @exit: Clean up after the BPF scheduler
	 * @info: Exit info
	 *
	 * ops.exit() is also called on ops.init() failure, which is a bit
	 * unusual. This is to allow rich reporting through @info on how
	 * ops.init() failed.
	 */
	void (*exit)(struct scx_exit_info *info);

	/**
	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
	 */
	u32 dispatch_max_batch;

	/**
	 * @flags: %SCX_OPS_* flags
	 */
	u64 flags;

	/**
	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
	 * runnable task should be able to wait before being scheduled. The
	 * maximum timeout may not exceed the default timeout of 30 seconds.
	 *
	 * Defaults to the maximum allowed timeout value of 30 seconds.
	 */
	u32 timeout_ms;

	/**
	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
	 * value of 32768 is used.
	 */
	u32 exit_dump_len;

	/**
	 * @hotplug_seq: A sequence number that may be set by the scheduler to
	 * detect when a hotplug event has occurred during the loading process.
	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
	 * load if the sequence number does not match @scx_hotplug_seq on the
	 * enable path.
	 */
	u64 hotplug_seq;

	/**
	 * @name: BPF scheduler's name
	 *
	 * Must be a non-zero valid BPF object name including only isalnum(),
	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
	 * BPF scheduler is enabled.
	 */
	char name[SCX_OPS_NAME_LEN];

	/* internal use only, must be NULL */
	void *priv;
};

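/*
 * On the BPF side, this operation table is typically declared as a struct_ops
 * map. A minimal sketch (the myscx_ names are hypothetical; the scx tooling
 * also provides wrapper macros such as SCX_OPS_DEFINE() for this):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops myscx_ops = {
 *		.select_cpu	= (void *)myscx_select_cpu,
 *		.enqueue	= (void *)myscx_enqueue,
 *		.dispatch	= (void *)myscx_dispatch,
 *		.name		= "myscx",
 *	};
 */
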
enum scx_opi {
	SCX_OPI_BEGIN			= 0,
	SCX_OPI_NORMAL_BEGIN		= 0,
	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
	SCX_OPI_END			= SCX_OP_IDX(init),
};

/*
 * Collection of event counters. Event types are placed in descending order.
 */
struct scx_event_stats {
	/*
	 * If ops.select_cpu() returns a CPU which can't be used by the task,
	 * the core scheduler code silently picks a fallback CPU.
	 */
	s64		SCX_EV_SELECT_CPU_FALLBACK;

	/*
	 * When dispatching to a local DSQ, the CPU may have gone offline in
	 * the meantime. In this case, the task is bounced to the global DSQ.
	 */
	s64		SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;

	/*
	 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
	 * continued to run because there were no other tasks on the CPU.
	 */
	s64		SCX_EV_DISPATCH_KEEP_LAST;

	/*
	 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
	 * is dispatched to a local DSQ when exiting.
	 */
	s64		SCX_EV_ENQ_SKIP_EXITING;

	/*
	 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
	 * migration disabled task skips ops.enqueue() and is dispatched to its
	 * local DSQ.
	 */
	s64		SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;

	/*
	 * Total number of times a task's time slice was refilled with the
	 * default value (SCX_SLICE_DFL).
	 */
	s64		SCX_EV_REFILL_SLICE_DFL;

	/*
	 * The total duration of bypass modes in nanoseconds.
	 */
	s64		SCX_EV_BYPASS_DURATION;

	/*
	 * The number of tasks dispatched in the bypassing mode.
	 */
	s64		SCX_EV_BYPASS_DISPATCH;

	/*
	 * The number of times the bypassing mode has been activated.
	 */
	s64		SCX_EV_BYPASS_ACTIVATE;
};

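/*
 * A sketch of how a BPF scheduler might snapshot these counters (assuming the
 * scx_bpf_events() kfunc mentioned below; not part of this header):
 *
 *	struct scx_event_stats events;
 *
 *	scx_bpf_events(&events, sizeof(events));
 *	bpf_printk("fallback selections: %lld",
 *		   events.SCX_EV_SELECT_CPU_FALLBACK);
 */
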
struct scx_sched_pcpu {
	/*
	 * The event counters are in a per-CPU variable to minimize the
	 * accounting overhead. A system-wide view of the event counters is
	 * constructed when requested by scx_bpf_events().
	 */
	struct scx_event_stats	event_stats;
};

struct scx_sched {
	struct sched_ext_ops	ops;
	DECLARE_BITMAP(has_op, SCX_OPI_END);

	/*
	 * Dispatch queues.
	 *
	 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability.
	 * This is to avoid live-locking in bypass mode where all tasks are
	 * dispatched to %SCX_DSQ_GLOBAL and all CPUs consume from it. If
	 * per-node split isn't sufficient, it can be further split.
	 */
	struct rhashtable	dsq_hash;
	struct scx_dispatch_q	**global_dsqs;
	struct scx_sched_pcpu __percpu *pcpu;

	bool			warned_zero_slice:1;
	bool			warned_deprecated_rq:1;

	atomic_t		exit_kind;
	struct scx_exit_info	*exit_info;

	struct kobject		kobj;

	struct kthread_worker	*helper;
	struct irq_work		error_irq_work;
	struct kthread_work	disable_work;
	struct rcu_work		rcu_work;
};

enum scx_wake_flags {
	/* expose select WF_* flags as enums */
	SCX_WAKE_FORK		= WF_FORK,
	SCX_WAKE_TTWU		= WF_TTWU,
	SCX_WAKE_SYNC		= WF_SYNC,
};

enum scx_enq_flags {
	/* expose select ENQUEUE_* flags as enums */
	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,

	/* high 32bits are SCX specific */

	/*
	 * Set the following to trigger preemption when calling
	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
	 * current task is cleared to zero and the CPU is kicked into the
	 * scheduling path. Implies %SCX_ENQ_HEAD.
	 */
	SCX_ENQ_PREEMPT		= 1LLU << 32,

	/*
	 * The task being enqueued was previously enqueued on the current CPU's
	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
	 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
	 * invoked in a ->cpu_release() callback, and the task is again
	 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
	 * task will not be scheduled on the CPU until at least the next
	 * invocation of the ->cpu_acquire() callback.
	 */
	SCX_ENQ_REENQ		= 1LLU << 40,

	/*
	 * The task being enqueued is the only task available for the CPU. By
	 * default, the ext core keeps executing such tasks but when
	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
	 * %SCX_ENQ_LAST flag set.
	 *
	 * The BPF scheduler is responsible for triggering a follow-up
	 * scheduling event. Otherwise, execution may stall.
	 */
	SCX_ENQ_LAST		= 1LLU << 41,

	/* high 8 bits are internal */
	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,

	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
};

enum scx_deq_flags {
	/* expose select DEQUEUE_* flags as enums */
	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,

	/* high 32bits are SCX specific */

	/*
	 * The generic core-sched layer decided to execute the task even though
	 * it hasn't been dispatched yet. Dequeue from the BPF side.
	 */
	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
};

enum scx_pick_idle_cpu_flags {
	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
	SCX_PICK_IDLE_IN_NODE	= 1LLU << 1,	/* pick a CPU in the same target NUMA node */
};

enum scx_kick_flags {
	/*
	 * Kick the target CPU if idle. Guarantees that the target CPU goes
	 * through at least one full scheduling cycle before going idle. If the
	 * target CPU can be determined to be currently not idle and going to go
	 * through a scheduling cycle before going idle, noop.
	 */
	SCX_KICK_IDLE		= 1LLU << 0,

	/*
	 * Preempt the current task and execute the dispatch path. If the
	 * current task of the target CPU is an SCX task, its ->scx.slice is
	 * cleared to zero before the scheduling path is invoked so that the
	 * task expires and the dispatch path is invoked.
	 */
	SCX_KICK_PREEMPT	= 1LLU << 1,

	/*
	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
	 * return after the target CPU finishes picking the next task.
	 */
	SCX_KICK_WAIT		= 1LLU << 2,
};

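/*
 * For example, after inserting a task into a shared DSQ from ops.enqueue(), a
 * scheduler may wake an idle CPU to consume it (sketch; MYSCX_DSQ is a
 * hypothetical DSQ id and @cpu a CPU the scheduler picked, e.g. via
 * scx_bpf_pick_idle_cpu()):
 *
 *	scx_bpf_dsq_insert(p, MYSCX_DSQ, SCX_SLICE_DFL, enq_flags);
 *	scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */
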
enum scx_tg_flags {
	SCX_TG_ONLINE		= 1U << 0,
	SCX_TG_INITED		= 1U << 1,
};

enum scx_enable_state {
	SCX_ENABLING,
	SCX_ENABLED,
	SCX_DISABLING,
	SCX_DISABLED,
};

static const char *scx_enable_state_str[] = {
	[SCX_ENABLING]		= "enabling",
	[SCX_ENABLED]		= "enabled",
	[SCX_DISABLING]		= "disabling",
	[SCX_DISABLED]		= "disabled",
};

/*
 * sched_ext_entity->ops_state
 *
 * Used to track the task ownership between the SCX core and the BPF scheduler.
 * State transitions look as follows:
 *
 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
 *   ^              |                 |
 *   |              v                 v
 *   \-------------------------------/
 *
 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
 * sites for explanations on the conditions being waited upon and why they are
 * safe. Transitions out of them into NONE or QUEUED must store_release and the
 * waiters should load_acquire.
 *
 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
 * any given task can be dispatched by the BPF scheduler at all times and thus
 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
 * to try to dispatch any task anytime regardless of its state as the SCX core
 * can safely reject invalid dispatches.
 */
enum scx_ops_state {
	SCX_OPSS_NONE,		/* owned by the SCX core */
	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */

	/*
	 * QSEQ brands each QUEUED instance so that, when dispatch races
	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
	 * on the task being dispatched.
	 *
	 * As some 32bit archs can't do 64bit store_release/load_acquire,
	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
	 * and runs with IRQ disabled. 30 bits should be sufficient.
	 */
	SCX_OPSS_QSEQ_SHIFT	= 2,
};

/* Use macros to ensure that the type is unsigned long for the masks */
#define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
#define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)

DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);

/*
 * Return the rq currently locked from an scx callback, or NULL if no rq is
 * locked.
 */
static inline struct rq *scx_locked_rq(void)
{
	return __this_cpu_read(scx_locked_rq_state);
}

static inline bool scx_kf_allowed_if_unlocked(void)
{
	return !current->scx.kf_mask;
}

static inline bool scx_rq_bypassing(struct rq *rq)
{
	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
}