/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <linux/btf_ids.h>
#include "ext_idle.h"

#define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))

enum scx_consts {
	SCX_DSP_DFL_MAX_BATCH		= 32,
	SCX_DSP_MAX_LOOPS		= 32,
	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,

	SCX_EXIT_BT_LEN			= 64,
	SCX_EXIT_MSG_LEN		= 1024,
	SCX_EXIT_DUMP_DFL_LEN		= 32768,

	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,

	/*
	 * Iterating all tasks may take a while. Periodically drop
	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
	 */
	SCX_OPS_TASK_ITER_BATCH		= 32,
};

enum scx_exit_kind {
	SCX_EXIT_NONE,
	SCX_EXIT_DONE,

	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */

	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
};

/*
 * An exit code can be specified when exiting with scx_bpf_exit() or
 * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
 * respectively. The code is a 64-bit value with the following format:
 *
 *   Bits: [63  ..  48 47   ..  32 31 .. 0]
 *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
 *
 *   SYS ACT: System-defined exit actions
 *   SYS RSN: System-defined exit reasons
 *   USR    : User-defined exit codes and reasons
 *
 * Using the above, users may communicate intention and context by ORing system
 * actions and/or system reasons with a user-defined exit code.
 */
enum scx_exit_code {
	/* Reasons */
	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,

	/* Actions */
	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
};
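/*
 * For example, a BPF scheduler that wants its user-space counterpart to
 * restart it after a CPU hotplug event might exit as below. This is an
 * illustrative sketch from the BPF scheduler side; MYSCHED_ECODE_CPU_ADDED
 * is a hypothetical user-defined code, not something defined in this file:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG |
 *		     MYSCHED_ECODE_CPU_ADDED,
 *		     "cpu %d came online, restarting", cpu);
 */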
/*
 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
 * being disabled.
 */
struct scx_exit_info {
	/* %SCX_EXIT_* - broad category of the exit reason */
	enum scx_exit_kind	kind;

	/* exit code if gracefully exiting */
	s64			exit_code;

	/* textual representation of the above */
	const char		*reason;

	/* backtrace if exiting due to an error */
	unsigned long		*bt;
	u32			bt_len;

	/* informational message */
	char			*msg;

	/* debug dump */
	char			*dump;
};

/* sched_ext_ops.flags */
enum scx_ops_flags {
	/*
	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
	 */
	SCX_OPS_KEEP_BUILTIN_IDLE	= 1LLU << 0,

	/*
	 * By default, if there are no other tasks to run on the CPU, ext core
	 * keeps running the current task even after its slice expires. If this
	 * flag is specified, such tasks are passed to ops.enqueue() with
	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
	 */
	SCX_OPS_ENQ_LAST		= 1LLU << 1,

	/*
	 * An exiting task may schedule after PF_EXITING is set. In such cases,
	 * bpf_task_from_pid() may not be able to find the task and if the BPF
	 * scheduler depends on pid lookup for dispatching, the task will be
	 * lost leading to various issues including RCU grace period stalls.
	 *
	 * To mask this problem, by default, unhashed tasks are automatically
	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
	 * depend on pid lookups and wants to handle these tasks directly, the
	 * following flag can be used.
	 */
	SCX_OPS_ENQ_EXITING		= 1LLU << 2,

	/*
	 * If set, only tasks with policy set to SCHED_EXT are attached to
	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
	 */
	SCX_OPS_SWITCH_PARTIAL		= 1LLU << 3,

	/*
	 * A migration disabled task can only execute on its current CPU. By
	 * default, such tasks are automatically put on the CPU's local DSQ with
	 * the default slice on enqueue. If this ops flag is set, they also go
	 * through ops.enqueue().
	 *
	 * A migration disabled task never invokes ops.select_cpu() as it can
	 * only select the current CPU. Also, p->cpus_ptr will only contain its
	 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
	 * and thus may disagree with cpumask_weight(p->cpus_ptr).
	 */
	SCX_OPS_ENQ_MIGRATION_DISABLED	= 1LLU << 4,

	/*
	 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
	 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
	 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
	 * transfers. When this optimization is enabled, ops.select_cpu() is
	 * skipped in some cases (when racing against the wakee switching out).
	 * As the BPF scheduler may depend on ops.select_cpu() being invoked
	 * during wakeups, queued wakeup is disabled by default.
	 *
	 * If this ops flag is set, queued wakeup optimization is enabled and
	 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
	 * wakee's CPU without preceding ops.select_cpu() even for tasks which
	 * may be executed on multiple CPUs.
	 */
	SCX_OPS_ALLOW_QUEUED_WAKEUP	= 1LLU << 5,

	/*
	 * If set, enable per-node idle cpumasks. If clear, use a single global
	 * flat idle cpumask.
	 */
	SCX_OPS_BUILTIN_IDLE_PER_NODE	= 1LLU << 6,

	/*
	 * CPU cgroup support flags
	 */
	SCX_OPS_HAS_CGROUP_WEIGHT	= 1LLU << 16,	/* DEPRECATED, will be removed in 6.18 */

	SCX_OPS_ALL_FLAGS		= SCX_OPS_KEEP_BUILTIN_IDLE |
					  SCX_OPS_ENQ_LAST |
					  SCX_OPS_ENQ_EXITING |
					  SCX_OPS_ENQ_MIGRATION_DISABLED |
					  SCX_OPS_ALLOW_QUEUED_WAKEUP |
					  SCX_OPS_SWITCH_PARTIAL |
					  SCX_OPS_BUILTIN_IDLE_PER_NODE |
					  SCX_OPS_HAS_CGROUP_WEIGHT,
};
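/*
 * From the BPF scheduler side, these are specified in the .flags field of the
 * struct sched_ext_ops map, e.g. (an illustrative sketch, not a complete ops
 * table):
 *
 *	.flags = SCX_OPS_KEEP_BUILTIN_IDLE | SCX_OPS_ENQ_LAST,
 */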
/* argument container for ops.init_task() */
struct scx_init_task_args {
	/*
	 * Set if ops.init_task() is being invoked on the fork path, as opposed
	 * to the scheduler transition path.
	 */
	bool			fork;
#ifdef CONFIG_EXT_GROUP_SCHED
	/* the cgroup the task is joining */
	struct cgroup		*cgroup;
#endif
};

/* argument container for ops.exit_task() */
struct scx_exit_task_args {
	/* Whether the task exited before running on sched_ext. */
	bool cancelled;
};

/* argument container for ops->cgroup_init() */
struct scx_cgroup_init_args {
	/* the weight of the cgroup [1..10000] */
	u32			weight;
};

enum scx_cpu_preempt_reason {
	/* next task is being scheduled by &sched_class_rt */
	SCX_CPU_PREEMPT_RT,
	/* next task is being scheduled by &sched_class_dl */
	SCX_CPU_PREEMPT_DL,
	/* next task is being scheduled by &sched_class_stop */
	SCX_CPU_PREEMPT_STOP,
	/* unknown reason for SCX being preempted */
	SCX_CPU_PREEMPT_UNKNOWN,
};

/*
 * Argument container for ops->cpu_acquire(). Currently empty, but may be
 * expanded in the future.
 */
struct scx_cpu_acquire_args {};

/* argument container for ops->cpu_release() */
struct scx_cpu_release_args {
	/* the reason the CPU was preempted */
	enum scx_cpu_preempt_reason reason;

	/* the task that's going to be scheduled on the CPU */
	struct task_struct	*task;
};
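/*
 * A typical ops.cpu_release() implementation re-enqueues the tasks left on
 * the released CPU's local DSQ so that they can be rescheduled elsewhere
 * while the CPU runs a higher priority sched_class. An illustrative BPF-side
 * sketch, assuming the BPF_STRUCT_OPS() convenience macro from the scx
 * tooling (the mysched_* name is hypothetical):
 *
 *	void BPF_STRUCT_OPS(mysched_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */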
/*
 * Informational context provided to dump operations.
 */
struct scx_dump_ctx {
	enum scx_exit_kind	kind;
	s64			exit_code;
	const char		*reason;
	u64			at_ns;
	u64			at_jiffies;
};

/**
 * struct sched_ext_ops - Operation table for BPF scheduler implementation
 *
 * A BPF scheduler can implement an arbitrary scheduling policy by
 * implementing and loading operations in this table. Note that a userland
 * scheduling policy can also be implemented using the BPF scheduler
 * as a shim layer.
 */
struct sched_ext_ops {
	/**
	 * @select_cpu: Pick the target CPU for a task which is being woken up
	 * @p: task being woken up
	 * @prev_cpu: the cpu @p was on before sleeping
	 * @wake_flags: SCX_WAKE_*
	 *
	 * Decision made here isn't final. @p may be moved to any CPU while it
	 * is getting dispatched for execution later. However, as @p is not on
	 * the rq at this point, getting the eventual execution CPU right here
	 * saves a small bit of overhead down the line.
	 *
	 * If an idle CPU is returned, the CPU is kicked and will try to
	 * dispatch. While an explicit custom mechanism can be added,
	 * select_cpu() serves as the default way to wake up idle CPUs.
	 *
	 * @p may be inserted into a DSQ directly by calling
	 * scx_bpf_dsq_insert(). If so, ops.enqueue() will be skipped.
	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
	 * of the CPU returned by this operation.
	 *
	 * Note that select_cpu() is never called for tasks that can only run
	 * on a single CPU or tasks with migration disabled, as they don't have
	 * the option to select a different CPU. See select_task_rq() for
	 * details.
	 */
	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);

	/**
	 * @enqueue: Enqueue a task on the BPF scheduler
	 * @p: task being enqueued
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * @p is ready to run. Insert directly into a DSQ by calling
	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
	 * inserted, the BPF scheduler owns @p and if it fails to dispatch @p,
	 * the task will stall.
	 *
	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
	 * skipped.
	 */
	void (*enqueue)(struct task_struct *p, u64 enq_flags);

	/**
	 * @dequeue: Remove a task from the BPF scheduler
	 * @p: task being dequeued
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * Remove @p from the BPF scheduler. This is usually called to isolate
	 * the task while updating its scheduling properties (e.g. priority).
	 *
	 * The ext core keeps track of whether the BPF side owns a given task or
	 * not and can gracefully ignore spurious dispatches from BPF side,
	 * which makes it safe to not implement this method. However, depending
	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
	 * scheduling position not being updated across a priority change.
	 */
	void (*dequeue)(struct task_struct *p, u64 deq_flags);

	/**
	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
	 * @cpu: CPU to dispatch tasks for
	 * @prev: previous task being switched out
	 *
	 * Called when a CPU's local dsq is empty. The operation should dispatch
	 * one or more tasks from the BPF scheduler into the DSQs using
	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
	 * using scx_bpf_dsq_move_to_local().
	 *
	 * The maximum number of times scx_bpf_dsq_insert() can be called
	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
	 * ops.dispatch_max_batch. See the comments on top of the two functions
	 * for more details.
	 *
	 * When not %NULL, @prev is an SCX task with its slice depleted. If
	 * @prev is still runnable as indicated by %SCX_TASK_QUEUED being set
	 * in @prev->scx.flags, it is not enqueued yet and will be enqueued
	 * after ops.dispatch() returns. To keep executing @prev, return
	 * without dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
	 */
	void (*dispatch)(s32 cpu, struct task_struct *prev);
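	/*
	 * For illustration, a minimal global-FIFO scheduler could implement
	 * the two operations above as follows (BPF-side sketch modeled on the
	 * scx_simple example scheduler; MYSCHED_DSQ is a hypothetical custom
	 * DSQ created from ops.init()):
	 *
	 *	void BPF_STRUCT_OPS(mysched_enqueue, struct task_struct *p,
	 *			    u64 enq_flags)
	 *	{
	 *		scx_bpf_dsq_insert(p, MYSCHED_DSQ, SCX_SLICE_DFL,
	 *				   enq_flags);
	 *	}
	 *
	 *	void BPF_STRUCT_OPS(mysched_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(MYSCHED_DSQ);
	 *	}
	 */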
	/**
	 * @tick: Periodic tick
	 * @p: task running currently
	 *
	 * This operation is called every 1/HZ seconds on CPUs which are
	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
	 * immediate dispatch cycle on the CPU.
	 */
	void (*tick)(struct task_struct *p);

	/**
	 * @runnable: A task is becoming runnable on its associated CPU
	 * @p: task becoming runnable
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * This and the following three functions can be used to track a task's
	 * execution state transitions. A task becomes ->runnable() on a CPU,
	 * and then goes through one or more ->running() and ->stopping() pairs
	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
	 * done running on the CPU.
	 *
	 * @p is becoming runnable on the CPU because it's
	 *
	 * - waking up (%SCX_ENQ_WAKEUP)
	 * - being moved from another CPU
	 * - being restored after temporarily taken off the queue for an
	 *   attribute change.
	 *
	 * This and ->enqueue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be followed by ->enqueue()
	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
	 * task may be ->enqueue()'d without being preceded by this operation
	 * e.g. after exhausting its slice.
	 */
	void (*runnable)(struct task_struct *p, u64 enq_flags);

	/**
	 * @running: A task is starting to run on its associated CPU
	 * @p: task starting to run
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 */
	void (*running)(struct task_struct *p);

	/**
	 * @stopping: A task is stopping execution
	 * @p: task that is stopping
	 * @runnable: is task @p still runnable?
	 *
	 * See ->runnable() for explanation on the task state notifiers. If
	 * !@runnable, ->quiescent() will be invoked after this operation
	 * returns.
	 */
	void (*stopping)(struct task_struct *p, bool runnable);

	/**
	 * @quiescent: A task is becoming not runnable on its associated CPU
	 * @p: task becoming not runnable
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 *
	 * @p is becoming quiescent on the CPU because it's
	 *
	 * - sleeping (%SCX_DEQ_SLEEP)
	 * - being moved to another CPU
	 * - being temporarily taken off the queue for an attribute change
	 *   (%SCX_DEQ_SAVE)
	 *
	 * This and ->dequeue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be preceded by ->dequeue()
	 * e.g. when @p is being dispatched to a remote CPU.
	 */
	void (*quiescent)(struct task_struct *p, u64 deq_flags);
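	/*
	 * As an illustration of the state notifiers, a weighted-vtime
	 * scheduler can advance a global vtime frontier in ->running() and
	 * charge the consumed slice in ->stopping() (BPF-side sketch modeled
	 * on the scx_simple example scheduler; vtime_now is a hypothetical
	 * global variable):
	 *
	 *	void BPF_STRUCT_OPS(mysched_running, struct task_struct *p)
	 *	{
	 *		if ((s64)(vtime_now - p->scx.dsq_vtime) < 0)
	 *			vtime_now = p->scx.dsq_vtime;
	 *	}
	 *
	 *	void BPF_STRUCT_OPS(mysched_stopping, struct task_struct *p,
	 *			    bool runnable)
	 *	{
	 *		p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) *
	 *				    100 / p->scx.weight;
	 *	}
	 */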
	/**
	 * @yield: Yield CPU
	 * @from: yielding task
	 * @to: optional yield target task
	 *
	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
	 * The BPF scheduler should ensure that other available tasks are
	 * dispatched before the yielding task. Return value is ignored in this
	 * case.
	 *
	 * If @to is non-NULL, @from wants to yield the CPU to @to. If the BPF
	 * scheduler can implement the request, return %true; otherwise, %false.
	 */
	bool (*yield)(struct task_struct *from, struct task_struct *to);

	/**
	 * @core_sched_before: Task ordering for core-sched
	 * @a: task A
	 * @b: task B
	 *
	 * Used by core-sched to determine the ordering between two tasks. See
	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
	 * core-sched.
	 *
	 * Both @a and @b are runnable and may or may not currently be queued on
	 * the BPF scheduler. Should return %true if @a should run before @b.
	 * %false if there's no required ordering or @b should run before @a.
	 *
	 * If not specified, the default is ordering them according to when they
	 * became runnable.
	 */
	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);

	/**
	 * @set_weight: Set task weight
	 * @p: task to set weight for
	 * @weight: new weight [1..10000]
	 *
	 * Update @p's weight to @weight.
	 */
	void (*set_weight)(struct task_struct *p, u32 weight);

	/**
	 * @set_cpumask: Set CPU affinity
	 * @p: task to set CPU affinity for
	 * @cpumask: cpumask of cpus that @p can run on
	 *
	 * Update @p's CPU affinity to @cpumask.
	 */
	void (*set_cpumask)(struct task_struct *p,
			    const struct cpumask *cpumask);

	/**
	 * @update_idle: Update the idle state of a CPU
	 * @cpu: CPU to update the idle state for
	 * @idle: whether entering or exiting the idle state
	 *
	 * This operation is called when @cpu enters or leaves the idle
	 * state. By default, implementing this operation disables the built-in
	 * idle CPU tracking and the following helpers become unavailable:
	 *
	 * - scx_bpf_select_cpu_dfl()
	 * - scx_bpf_test_and_clear_cpu_idle()
	 * - scx_bpf_pick_idle_cpu()
	 *
	 * The user also must implement ops.select_cpu() as the default
	 * implementation relies on scx_bpf_select_cpu_dfl().
	 *
	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
	 * tracking.
	 */
	void (*update_idle)(s32 cpu, bool idle);
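	/*
	 * An illustrative ops.update_idle() which keeps the built-in tracking
	 * (%SCX_OPS_KEEP_BUILTIN_IDLE set in ops.flags) while also mirroring
	 * the idle state into a hypothetical BPF-side array map (idle_states
	 * is an assumption, not defined anywhere in this file):
	 *
	 *	void BPF_STRUCT_OPS(mysched_update_idle, s32 cpu, bool idle)
	 *	{
	 *		u32 key = cpu;
	 *		u8 *state = bpf_map_lookup_elem(&idle_states, &key);
	 *
	 *		if (state)
	 *			*state = idle;
	 *	}
	 */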
	/**
	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
	 * @cpu: The CPU being acquired by the BPF scheduler.
	 * @args: Acquire arguments, see the struct definition.
	 *
	 * A CPU that was previously released from the BPF scheduler is now once
	 * again under its control.
	 */
	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);

	/**
	 * @cpu_release: A CPU is taken away from the BPF scheduler
	 * @cpu: The CPU being released by the BPF scheduler.
	 * @args: Release arguments, see the struct definition.
	 *
	 * The specified CPU is no longer under the control of the BPF
	 * scheduler. This could be because it was preempted by a higher
	 * priority sched_class, though there may be other reasons as well. The
	 * caller should consult @args->reason to determine the cause.
	 */
	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);

	/**
	 * @init_task: Initialize a task to run in a BPF scheduler
	 * @p: task to initialize for BPF scheduling
	 * @args: init arguments, see the struct definition
	 *
	 * Either we're loading a BPF scheduler or a new task is being forked.
	 * Initialize @p for BPF scheduling. This operation may block and can
	 * be used for allocations, and is called exactly once for a task.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During a fork, it
	 * will abort that specific fork.
	 */
	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);

	/**
	 * @exit_task: Exit a previously-running task from the system
	 * @p: task to exit
	 * @args: exit arguments, see the struct definition
	 *
	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
	 * necessary cleanup for @p.
	 */
	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
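	/*
	 * As ops.init_task() may block, it is a natural place to set up
	 * per-task state. An illustrative BPF-side sketch using task local
	 * storage (struct task_ctx and the task_ctx map are hypothetical):
	 *
	 *	s32 BPF_STRUCT_OPS_SLEEPABLE(mysched_init_task,
	 *				     struct task_struct *p,
	 *				     struct scx_init_task_args *args)
	 *	{
	 *		struct task_ctx *tctx;
	 *
	 *		tctx = bpf_task_storage_get(&task_ctx, p, 0,
	 *					    BPF_LOCAL_STORAGE_GET_F_CREATE);
	 *		if (!tctx)
	 *			return -ENOMEM;
	 *		return 0;
	 *	}
	 */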
	/**
	 * @enable: Enable BPF scheduling for a task
	 * @p: task to enable BPF scheduling for
	 *
	 * Enable @p for BPF scheduling. enable() is called on @p any time it
	 * enters SCX, and is always paired with a matching disable().
	 */
	void (*enable)(struct task_struct *p);

	/**
	 * @disable: Disable BPF scheduling for a task
	 * @p: task to disable BPF scheduling for
	 *
	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
	 * Disable BPF scheduling for @p. A disable() call is always matched
	 * with a prior enable() call.
	 */
	void (*disable)(struct task_struct *p);

	/**
	 * @dump: Dump BPF scheduler state on error
	 * @ctx: debug dump context
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
	 */
	void (*dump)(struct scx_dump_ctx *ctx);

	/**
	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
	 * @ctx: debug dump context
	 * @cpu: CPU to generate debug dump for
	 * @idle: @cpu is currently idle without any runnable tasks
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @cpu. If @idle is %true and this operation doesn't produce any
	 * output, @cpu is skipped for dump.
	 */
	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);

	/**
	 * @dump_task: Dump BPF scheduler state for a runnable task on error
	 * @ctx: debug dump context
	 * @p: runnable task to generate debug dump for
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @p.
	 */
	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);

#ifdef CONFIG_EXT_GROUP_SCHED
	/**
	 * @cgroup_init: Initialize a cgroup
	 * @cgrp: cgroup being initialized
	 * @args: init arguments, see the struct definition
	 *
	 * Either the BPF scheduler is being loaded or @cgrp created, initialize
	 * @cgrp for sched_ext. This operation may block.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During cgroup
	 * creation, it will abort the specific cgroup creation.
	 */
	s32 (*cgroup_init)(struct cgroup *cgrp,
			   struct scx_cgroup_init_args *args);

	/**
	 * @cgroup_exit: Exit a cgroup
	 * @cgrp: cgroup being exited
	 *
	 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
	 * @cgrp for sched_ext. This operation may block.
	 */
	void (*cgroup_exit)(struct cgroup *cgrp);

	/**
	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Prepare @p for move from cgroup @from to @to. This operation may
	 * block and can be used for allocations.
	 *
	 * Return 0 for success, -errno for failure. An error return aborts the
	 * migration.
	 */
	s32 (*cgroup_prep_move)(struct task_struct *p,
				struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_move: Commit cgroup move
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Commit the move. @p is dequeued during this operation.
	 */
	void (*cgroup_move)(struct task_struct *p,
			    struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_cancel_move: Cancel cgroup move
	 * @p: task whose cgroup move is being canceled
	 * @from: cgroup @p was being moved from
	 * @to: cgroup @p was being moved to
	 *
	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
	 * Undo the preparation.
	 */
	void (*cgroup_cancel_move)(struct task_struct *p,
				   struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_set_weight: A cgroup's weight is being changed
	 * @cgrp: cgroup whose weight is being updated
	 * @weight: new weight [1..10000]
	 *
	 * Update @cgrp's weight to @weight.
	 */
	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
#endif	/* CONFIG_EXT_GROUP_SCHED */

	/*
	 * All online ops must come before ops.cpu_online().
	 */

	/**
	 * @cpu_online: A CPU became online
	 * @cpu: CPU which just came up
	 *
	 * @cpu just came online. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
	 */
	void (*cpu_online)(s32 cpu);

	/**
	 * @cpu_offline: A CPU is going offline
	 * @cpu: CPU which is going offline
	 *
	 * @cpu is going offline. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
	 */
	void (*cpu_offline)(s32 cpu);

	/*
	 * All CPU hotplug ops must come before ops.init().
	 */

	/**
	 * @init: Initialize the BPF scheduler
	 */
	s32 (*init)(void);

	/**
	 * @exit: Clean up after the BPF scheduler
	 * @info: Exit info
	 *
	 * ops.exit() is also called on ops.init() failure, which is a bit
	 * unusual. This is to allow rich reporting through @info on how
	 * ops.init() failed.
	 */
	void (*exit)(struct scx_exit_info *info);

	/**
	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
	 */
	u32 dispatch_max_batch;

	/**
	 * @flags: %SCX_OPS_* flags
	 */
	u64 flags;

	/**
	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
	 * runnable task should be able to wait before being scheduled. The
	 * maximum timeout may not exceed the default timeout of 30 seconds.
	 *
	 * Defaults to the maximum allowed timeout value of 30 seconds.
	 */
	u32 timeout_ms;

	/**
	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
	 * value of 32768 is used.
	 */
	u32 exit_dump_len;

	/**
	 * @hotplug_seq: A sequence number that may be set by the scheduler to
	 * detect when a hotplug event has occurred during the loading process.
	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
	 * load if the sequence number does not match @scx_hotplug_seq on the
	 * enable path.
	 */
	u64 hotplug_seq;

	/**
	 * @name: BPF scheduler's name
	 *
	 * Must be a non-zero valid BPF object name including only isalnum(),
	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
	 * BPF scheduler is enabled.
	 */
	char name[SCX_OPS_NAME_LEN];
};
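/*
 * Putting the table together, a complete minimal BPF scheduler only needs a
 * handful of the operations above. An illustrative sketch modeled on the
 * simple global-FIFO pattern (all mysched_* callbacks are hypothetical and
 * live on the BPF scheduler side, not in this file):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops mysched_ops = {
 *		.enqueue	= (void *)mysched_enqueue,
 *		.dispatch	= (void *)mysched_dispatch,
 *		.init		= (void *)mysched_init,
 *		.exit		= (void *)mysched_exit,
 *		.timeout_ms	= 5000,
 *		.name		= "mysched",
 *	};
 */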
enum scx_opi {
	SCX_OPI_BEGIN			= 0,
	SCX_OPI_NORMAL_BEGIN		= 0,
	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
	SCX_OPI_END			= SCX_OP_IDX(init),
};

enum scx_wake_flags {
	/* expose select WF_* flags as enums */
	SCX_WAKE_FORK		= WF_FORK,
	SCX_WAKE_TTWU		= WF_TTWU,
	SCX_WAKE_SYNC		= WF_SYNC,
};

enum scx_enq_flags {
	/* expose select ENQUEUE_* flags as enums */
	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,

	/* high 32bits are SCX specific */

	/*
	 * Set the following to trigger preemption when calling
	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
	 * current task is cleared to zero and the CPU is kicked into the
	 * scheduling path. Implies %SCX_ENQ_HEAD.
	 */
	SCX_ENQ_PREEMPT		= 1LLU << 32,

	/*
	 * The task being enqueued was previously enqueued on the current CPU's
	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
	 * scx_bpf_reenqueue_local() kfunc. If scx_bpf_reenqueue_local() was
	 * invoked in a ->cpu_release() callback, and the task is again
	 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
	 * task will not be scheduled on the CPU until at least the next
	 * invocation of the ->cpu_acquire() callback.
	 */
	SCX_ENQ_REENQ		= 1LLU << 40,

	/*
	 * The task being enqueued is the only task available for the CPU. By
	 * default, ext core keeps executing such tasks but when
	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
	 * %SCX_ENQ_LAST flag set.
	 *
	 * The BPF scheduler is responsible for triggering a follow-up
	 * scheduling event. Otherwise, execution may stall.
	 */
	SCX_ENQ_LAST		= 1LLU << 41,

	/* high 8 bits are internal */
	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,

	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
};
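/*
 * For example, a BPF scheduler could use %SCX_ENQ_PREEMPT to let latency
 * critical tasks preempt whatever is running on the target CPU (illustrative
 * sketch; is_latency_critical() is a hypothetical helper):
 *
 *	void BPF_STRUCT_OPS(mysched_enqueue, struct task_struct *p,
 *			    u64 enq_flags)
 *	{
 *		if (is_latency_critical(p))
 *			enq_flags |= SCX_ENQ_PREEMPT;
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */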
enum scx_deq_flags {
	/* expose select DEQUEUE_* flags as enums */
	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,

	/* high 32bits are SCX specific */

	/*
	 * The generic core-sched layer decided to execute the task even though
	 * it hasn't been dispatched yet. Dequeue from the BPF side.
	 */
	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
};

enum scx_pick_idle_cpu_flags {
	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
	SCX_PICK_IDLE_IN_NODE	= 1LLU << 1,	/* pick a CPU in the same target NUMA node */
};

enum scx_kick_flags {
	/*
	 * Kick the target CPU if idle. Guarantees that the target CPU goes
	 * through at least one full scheduling cycle before going idle. If the
	 * target CPU can be determined to be currently not idle and going to go
	 * through a scheduling cycle before going idle, noop.
	 */
	SCX_KICK_IDLE		= 1LLU << 0,

	/*
	 * Preempt the current task and execute the dispatch path. If the
	 * current task of the target CPU is an SCX task, its ->scx.slice is
	 * cleared to zero before the scheduling path is invoked so that the
	 * task expires and the dispatch path is invoked.
	 */
	SCX_KICK_PREEMPT	= 1LLU << 1,

	/*
	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
	 * return after the target CPU finishes picking the next task.
	 */
	SCX_KICK_WAIT		= 1LLU << 2,
};
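/*
 * A typical use is waking an idle CPU after inserting a task into a shared
 * DSQ so that the task doesn't have to wait for the CPU's next natural
 * scheduling event, e.g. from ops.enqueue() (illustrative sketch; MYSCHED_DSQ
 * is a hypothetical custom DSQ):
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	scx_bpf_dsq_insert(p, MYSCHED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */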
enum scx_tg_flags {
	SCX_TG_ONLINE		= 1U << 0,
	SCX_TG_INITED		= 1U << 1,
};

enum scx_ops_enable_state {
	SCX_OPS_ENABLING,
	SCX_OPS_ENABLED,
	SCX_OPS_DISABLING,
	SCX_OPS_DISABLED,
};

static const char *scx_ops_enable_state_str[] = {
	[SCX_OPS_ENABLING]	= "enabling",
	[SCX_OPS_ENABLED]	= "enabled",
	[SCX_OPS_DISABLING]	= "disabling",
	[SCX_OPS_DISABLED]	= "disabled",
};

/*
 * sched_ext_entity->ops_state
 *
 * Used to track the task ownership between the SCX core and the BPF scheduler.
 * State transitions look as follows:
 *
 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
 *   ^              |                 |
 *   |              v                 v
 *   \-------------------------------/
 *
 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
 * sites for explanations on the conditions being waited upon and why they are
 * safe. Transitions out of them into NONE or QUEUED must store_release and the
 * waiters should load_acquire.
 *
 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
 * any given task can be dispatched by the BPF scheduler at all times and thus
 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
 * to try to dispatch any task anytime regardless of its state as the SCX core
 * can safely reject invalid dispatches.
 */
enum scx_ops_state {
	SCX_OPSS_NONE,		/* owned by the SCX core */
	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */

	/*
	 * QSEQ brands each QUEUED instance so that, when dispatch races
	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
	 * on the task being dispatched.
	 *
	 * As some 32bit archs can't do 64bit store_release/load_acquire,
	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
	 * and runs with IRQ disabled. 30 bits should be sufficient.
	 */
	SCX_OPSS_QSEQ_SHIFT	= 2,
};

/* Use macros to ensure that the type is unsigned long for the masks */
#define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
#define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)

/*
 * During exit, a task may schedule after losing its PIDs. When disabling the
 * BPF scheduler, we need to be able to iterate tasks in every state to
 * guarantee system safety. Maintain a dedicated task list which contains every
 * task between its fork and eventual free.
 */
static DEFINE_SPINLOCK(scx_tasks_lock);
static LIST_HEAD(scx_tasks);

/* ops enable/disable */
static struct kthread_worker *scx_ops_helper;
static DEFINE_MUTEX(scx_ops_enable_mutex);
DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
static unsigned long scx_in_softlockup;
static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
static int scx_ops_bypass_depth;
static bool scx_ops_init_task_enabled;
static bool scx_switching_all;
DEFINE_STATIC_KEY_FALSE(__scx_switched_all);

static struct sched_ext_ops scx_ops;
static bool scx_warned_zero_slice;

DEFINE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_migration_disabled);
static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);

static struct static_key_false scx_has_op[SCX_OPI_END] =
	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };

static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
static struct scx_exit_info *scx_exit_info;

static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);

/*
 * A monotonically increasing sequence number that is incremented every time a
 * scheduler is enabled. This can be used to check if any custom sched_ext
 * scheduler has ever been used in the system.
 */
static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);

/*
 * The maximum amount of time in jiffies that a task may be runnable without
 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
 * scx_ops_error().
 */
static unsigned long scx_watchdog_timeout;

/*
 * The last time the delayed work was run. This delayed work relies on
 * ksoftirqd being able to run to service timer interrupts, so it's possible
 * that this work itself could get wedged. To account for this, we check that
 * it's not stalled in the timer tick, and trigger an error if it is.
 */
static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;

static struct delayed_work scx_watchdog_work;

/* for %SCX_KICK_WAIT */
static unsigned long __percpu *scx_kick_cpus_pnt_seqs;

/*
 * Direct dispatch marker.
 *
 * Non-NULL values are used for direct dispatch from enqueue path. A valid
 * pointer points to the task currently being enqueued. An ERR_PTR value is
 * used to indicate that direct dispatch has already happened.
 */
static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);

/*
 * Dispatch queues.
 *
 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
 * to avoid live-locking in bypass mode where all tasks are dispatched to
 * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
 * sufficient, it can be further split.
 */
static struct scx_dispatch_q **global_dsqs;

static const struct rhashtable_params dsq_hash_params = {
	.key_len		= sizeof_field(struct scx_dispatch_q, id),
	.key_offset		= offsetof(struct scx_dispatch_q, id),
	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
};

static struct rhashtable dsq_hash;
static LLIST_HEAD(dsqs_to_free);
/* dispatch buf */
struct scx_dsp_buf_ent {
	struct task_struct	*task;
	unsigned long		qseq;
	u64			dsq_id;
	u64			enq_flags;
};

static u32 scx_dsp_max_batch;

struct scx_dsp_ctx {
	struct rq		*rq;
	u32			cursor;
	u32			nr_tasks;
	struct scx_dsp_buf_ent	buf[];
};

static struct scx_dsp_ctx __percpu *scx_dsp_ctx;

/* string formatting from BPF */
struct scx_bstr_buf {
	u64			data[MAX_BPRINTF_VARARGS];
	char			line[SCX_EXIT_MSG_LEN];
};

static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
static struct scx_bstr_buf scx_exit_bstr_buf;

/* ops debug dump */
struct scx_dump_data {
	s32			cpu;
	bool			first;
	s32			cursor;
	struct seq_buf		*s;
	const char		*prefix;
	struct scx_bstr_buf	buf;
};

static struct scx_dump_data scx_dump_data = {
	.cpu			= -1,
};

/* /sys/kernel/sched_ext interface */
static struct kset *scx_kset;
static struct kobject *scx_root_kobj;

#define CREATE_TRACE_POINTS
#include <trace/events/sched_ext.h>

static void process_ddsp_deferred_locals(struct rq *rq);
static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
					     s64 exit_code,
					     const char *fmt, ...);

#define scx_ops_error_kind(err, fmt, args...)					\
	scx_ops_exit_kind((err), 0, fmt, ##args)

#define scx_ops_exit(code, fmt, args...)					\
	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)

#define scx_ops_error(fmt, args...)						\
	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)

#define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])

static long jiffies_delta_msecs(unsigned long at, unsigned long now)
{
	if (time_after(at, now))
		return jiffies_to_msecs(at - now);
	else
		return -(long)jiffies_to_msecs(now - at);
}

/* if the highest set bit is N, return a mask with bits [N+1, 31] set */
static u32 higher_bits(u32 flags)
{
	return ~((1 << fls(flags)) - 1);
}

/* return the mask with only the highest bit set */
static u32 highest_bit(u32 flags)
{
	int bit = fls(flags);
	return ((u64)1 << bit) >> 1;
}

static bool u32_before(u32 a, u32 b)
{
	return (s32)(a - b) < 0;
}
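/*
 * Worked examples for higher_bits() and highest_bit() (illustrative):
 *
 *	higher_bits(0x100)	== 0xfffffe00	(bits 9-31 set)
 *	highest_bit(0x106)	== 0x100	(only the top set bit kept)
 *	highest_bit(0)		== 0		(the (u64) cast and >> 1 keep
 *						 the fls() == 0 and 32 edge
 *						 cases well-defined)
 */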
static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
{
	return global_dsqs[cpu_to_node(task_cpu(p))];
}

static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
{
	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
}

/*
 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
 * whether it's running from an allowed context.
 *
 * @mask is constant, always inline to cull the mask calculations.
 */
static __always_inline void scx_kf_allow(u32 mask)
{
	/* nesting is allowed only in increasing scx_kf_mask order */
	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
		  current->scx.kf_mask, mask);
	current->scx.kf_mask |= mask;
	barrier();
}

static void scx_kf_disallow(u32 mask)
{
	barrier();
	current->scx.kf_mask &= ~mask;
}

/*
 * Track the rq currently locked.
 *
 * This allows kfuncs to safely operate on rq from any scx ops callback,
 * knowing which rq is already locked.
 */
static DEFINE_PER_CPU(struct rq *, locked_rq);

static inline void update_locked_rq(struct rq *rq)
{
	/*
	 * Check whether @rq is actually locked. This can help expose bugs
	 * or incorrect assumptions about the context in which a kfunc or
	 * callback is executed.
	 */
	if (rq)
		lockdep_assert_rq_held(rq);
	__this_cpu_write(locked_rq, rq);
}

/*
 * Return the rq currently locked from an scx callback, or NULL if no rq is
 * locked.
 */
static inline struct rq *scx_locked_rq(void)
{
	return __this_cpu_read(locked_rq);
}

#define SCX_CALL_OP(mask, op, rq, args...)					\
do {										\
	update_locked_rq(rq);							\
	if (mask) {								\
		scx_kf_allow(mask);						\
		scx_ops.op(args);						\
		scx_kf_disallow(mask);						\
	} else {								\
		scx_ops.op(args);						\
	}									\
	update_locked_rq(NULL);							\
} while (0)

#define SCX_CALL_OP_RET(mask, op, rq, args...)					\
({										\
	__typeof__(scx_ops.op(args)) __ret;					\
										\
	update_locked_rq(rq);							\
	if (mask) {								\
		scx_kf_allow(mask);						\
		__ret = scx_ops.op(args);					\
		scx_kf_disallow(mask);						\
	} else {								\
		__ret = scx_ops.op(args);					\
	}									\
	update_locked_rq(NULL);							\
	__ret;									\
})

/*
 * Some kfuncs are allowed only on the tasks that are subjects of the
 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
 * restrictions, the following SCX_CALL_OP_*() variants should be used when
 * invoking scx_ops operations that take task arguments. These can only be used
 * for non-nesting operations due to the way the tasks are tracked.
 *
 * kfuncs which can only operate on such tasks can in turn use
 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
 * the specific task.
 */
#define SCX_CALL_OP_TASK(mask, op, rq, task, args...)				\
do {										\
	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
	current->scx.kf_tasks[0] = task;					\
	SCX_CALL_OP(mask, op, rq, task, ##args);				\
	current->scx.kf_tasks[0] = NULL;					\
} while (0)

#define SCX_CALL_OP_TASK_RET(mask, op, rq, task, args...)			\
({										\
	__typeof__(scx_ops.op(task, ##args)) __ret;				\
	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
	current->scx.kf_tasks[0] = task;					\
	__ret = SCX_CALL_OP_RET(mask, op, rq, task, ##args);			\
	current->scx.kf_tasks[0] = NULL;					\
	__ret;									\
})

#define SCX_CALL_OP_2TASKS_RET(mask, op, rq, task0, task1, args...)		\
({										\
	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
	current->scx.kf_tasks[0] = task0;					\
	current->scx.kf_tasks[1] = task1;					\
	__ret = SCX_CALL_OP_RET(mask, op, rq, task0, task1, ##args);		\
	current->scx.kf_tasks[0] = NULL;					\
	current->scx.kf_tasks[1] = NULL;					\
	__ret;									\
})

/* @mask is constant, always inline to cull unnecessary branches */
static __always_inline bool scx_kf_allowed(u32 mask)
{
	if (unlikely(!(current->scx.kf_mask & mask))) {
		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
			      mask, current->scx.kf_mask);
		return false;
	}

	/*
	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
	 * DISPATCH must not be called if we're running DEQUEUE which is nested
	 * inside ops.dispatch(). We don't need to check boundaries for any
	 * blocking kfuncs as the verifier ensures they're only called from
	 * sleepable progs.
	 */
	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
		scx_ops_error("cpu_release kfunc called from a nested operation");
		return false;
	}

	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
		scx_ops_error("dispatch kfunc called from a nested operation");
		return false;
	}

	return true;
}
/* see SCX_CALL_OP_TASK() */
static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
							struct task_struct *p)
{
	if (!scx_kf_allowed(mask))
		return false;

	if (unlikely((p != current->scx.kf_tasks[0] &&
		      p != current->scx.kf_tasks[1]))) {
		scx_ops_error("called on a task not being operated on");
		return false;
	}

	return true;
}

static bool scx_kf_allowed_if_unlocked(void)
{
	return !current->scx.kf_mask;
}

/**
 * nldsq_next_task - Iterate to the next task in a non-local DSQ
 * @dsq: user dsq being iterated
 * @cur: current position, %NULL to start iteration
 * @rev: walk backwards
 *
 * Returns %NULL when iteration is finished.
 */
static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
					   struct task_struct *cur, bool rev)
{
	struct list_head *list_node;
	struct scx_dsq_list_node *dsq_lnode;

	lockdep_assert_held(&dsq->lock);

	if (cur)
		list_node = &cur->scx.dsq_list.node;
	else
		list_node = &dsq->list;

	/* find the next task, need to skip BPF iteration cursors */
	do {
		if (rev)
			list_node = list_node->prev;
		else
			list_node = list_node->next;

		if (list_node == &dsq->list)
			return NULL;

		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
					 node);
	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);

	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
}

#define nldsq_for_each_task(p, dsq)						\
	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
	     (p) = nldsq_next_task((dsq), (p), false))
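/*
 * For illustration, a debug helper could count the tasks on a DSQ with the
 * iterator above (a sketch, not a function defined in this file; @dsq->lock
 * must be held):
 *
 *	static u32 dsq_nr_tasks(struct scx_dispatch_q *dsq)
 *	{
 *		struct task_struct *p;
 *		u32 cnt = 0;
 *
 *		lockdep_assert_held(&dsq->lock);
 *		nldsq_for_each_task(p, dsq)
 *			cnt++;
 *		return cnt;
 *	}
 */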
/*
 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
 * dispatch order. BPF-visible iterator is opaque and larger to allow future
 * changes without breaking backward compatibility. Can be used with
 * bpf_for_each(). See bpf_iter_scx_dsq_*().
 */
enum scx_dsq_iter_flags {
	/* iterate in the reverse dispatch order */
	SCX_DSQ_ITER_REV		= 1U << 16,

	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,

	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
					  __SCX_DSQ_ITER_HAS_SLICE |
					  __SCX_DSQ_ITER_HAS_VTIME,
};

struct bpf_iter_scx_dsq_kern {
	struct scx_dsq_list_node	cursor;
	struct scx_dispatch_q		*dsq;
	u64				slice;
	u64				vtime;
} __attribute__((aligned(8)));

struct bpf_iter_scx_dsq {
	u64				__opaque[6];
} __attribute__((aligned(8)));


/*
 * SCX task iterator.
 */
struct scx_task_iter {
	struct sched_ext_entity		cursor;
	struct task_struct		*locked;
	struct rq			*rq;
	struct rq_flags			rf;
	u32				cnt;
};

/**
 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
 * @iter: iterator to init
 *
 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
 * must eventually be stopped with scx_task_iter_stop().
 *
 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
 * between this and the first next() call or between any two next() calls. If
 * the locks are released between two next() calls, the caller is responsible
 * for ensuring that the task being iterated remains accessible either through
 * RCU read lock or obtaining a reference count.
 *
 * All tasks which existed when the iteration started are guaranteed to be
 * visited as long as they still exist.
 */
static void scx_task_iter_start(struct scx_task_iter *iter)
{
	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));

	spin_lock_irq(&scx_tasks_lock);

	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
	list_add(&iter->cursor.tasks_node, &scx_tasks);
	iter->locked = NULL;
	iter->cnt = 0;
}

static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
{
	if (iter->locked) {
		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
		iter->locked = NULL;
	}
}

/**
 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
 * @iter: iterator to unlock
 *
 * If @iter is in the middle of a locked iteration, it may be locking the rq of
 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
 * This function can be safely called anytime during an iteration.
 */
static void scx_task_iter_unlock(struct scx_task_iter *iter)
{
	__scx_task_iter_rq_unlock(iter);
	spin_unlock_irq(&scx_tasks_lock);
}

/**
 * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
 * @iter: iterator to re-lock
 *
 * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
 * doesn't re-lock the rq lock. Must be called before other iterator operations.
 */
static void scx_task_iter_relock(struct scx_task_iter *iter)
{
	spin_lock_irq(&scx_tasks_lock);
}

/**
 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
 * @iter: iterator to exit
 *
 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
 * which is released on return. If the iterator holds a task's rq lock, that rq
 * lock is also released. See scx_task_iter_start() for details.
 */
static void scx_task_iter_stop(struct scx_task_iter *iter)
{
	list_del_init(&iter->cursor.tasks_node);
	scx_task_iter_unlock(iter);
}
/**
 * scx_task_iter_next - Next task
 * @iter: iterator to walk
 *
 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
 * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
 * stalls by holding scx_tasks_lock for too long.
 */
static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
{
	struct list_head *cursor = &iter->cursor.tasks_node;
	struct sched_ext_entity *pos;

	if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
		scx_task_iter_unlock(iter);
		cond_resched();
		scx_task_iter_relock(iter);
	}

	list_for_each_entry(pos, cursor, tasks_node) {
		if (&pos->tasks_node == &scx_tasks)
			return NULL;
		if (!(pos->flags & SCX_TASK_CURSOR)) {
			list_move(cursor, &pos->tasks_node);
			return container_of(pos, struct task_struct, scx);
		}
	}

	/* can't happen, should always terminate at scx_tasks above */
	BUG();
}

/**
 * scx_task_iter_next_locked - Next non-idle task with its rq locked
 * @iter: iterator to walk
 *
 * Visit the next non-idle task with its rq lock held. See
 * scx_task_iter_start() for details.
 */
static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
{
	struct task_struct *p;

	__scx_task_iter_rq_unlock(iter);

	while ((p = scx_task_iter_next(iter))) {
		/*
		 * scx_task_iter is used to prepare and move tasks into SCX
		 * while loading the BPF scheduler and vice-versa while
		 * unloading. The init_tasks ("swappers") should be excluded
		 * from the iteration because:
		 *
		 * - It's unsafe to use __setscheduler_prio() on an init_task to
		 *   determine the sched_class to use as it won't preserve its
		 *   idle_sched_class.
		 *
		 * - ops.init/exit_task() can easily be confused if called with
		 *   init_tasks as they, e.g., share PID 0.
		 *
		 * As init_tasks are never scheduled through SCX, they can be
		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
		 * doesn't work here:
		 *
		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
		 *   yet been onlined.
		 *
		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
		 *
		 * Test for idle_sched_class as only init_tasks are on it.
		 */
		if (p->sched_class != &idle_sched_class)
			break;
	}
	if (!p)
		return NULL;

	iter->rq = task_rq_lock(p, &iter->rf);
	iter->locked = p;

	return p;
}
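/*
 * Taken together, a typical iteration over all SCX-relevant tasks follows
 * this shape, with each task visited while its rq lock is held (illustrative
 * sketch; do_something() stands in for the actual per-task work):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti)))
 *		do_something(p);
 *	scx_task_iter_stop(&sti);
 */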
/*
 * Collection of event counters. Event types are placed in descending order.
 */
struct scx_event_stats {
	/*
	 * If ops.select_cpu() returns a CPU which can't be used by the task,
	 * the core scheduler code silently picks a fallback CPU.
	 */
	s64		SCX_EV_SELECT_CPU_FALLBACK;

	/*
	 * When dispatching to a local DSQ, the CPU may have gone offline in
	 * the meantime. In this case, the task is bounced to the global DSQ.
	 */
	s64		SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;

	/*
	 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
	 * continued to run because there were no other tasks on the CPU.
	 */
	s64		SCX_EV_DISPATCH_KEEP_LAST;

	/*
	 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
	 * is dispatched to a local DSQ when exiting.
	 */
	s64		SCX_EV_ENQ_SKIP_EXITING;

	/*
	 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
	 * migration disabled task skips ops.enqueue() and is dispatched to its
	 * local DSQ.
	 */
	s64		SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;

	/*
	 * The total number of tasks enqueued (or pick_task-ed) with a
	 * default time slice (SCX_SLICE_DFL).
	 */
	s64		SCX_EV_ENQ_SLICE_DFL;

	/*
	 * The total duration of bypass modes in nanoseconds.
	 */
	s64		SCX_EV_BYPASS_DURATION;

	/*
	 * The number of tasks dispatched in the bypassing mode.
	 */
	s64		SCX_EV_BYPASS_DISPATCH;

	/*
	 * The number of times the bypassing mode has been activated.
	 */
	s64		SCX_EV_BYPASS_ACTIVATE;
};

/*
 * The event counter is organized by a per-CPU variable to minimize the
 * accounting overhead without synchronization. A system-wide view on the
 * event counter is constructed when requested by scx_bpf_get_event_stat().
 */
static DEFINE_PER_CPU(struct scx_event_stats, event_stats_cpu);

/**
 * scx_add_event - Increase an event counter for 'name' by 'cnt'
 * @name: an event name defined in struct scx_event_stats
 * @cnt: the number of times the event occurred
 *
 * This can be used when preemption is not disabled.
 */
#define scx_add_event(name, cnt) do {						\
	this_cpu_add(event_stats_cpu.name, cnt);				\
	trace_sched_ext_event(#name, cnt);					\
} while(0)

/**
 * __scx_add_event - Increase an event counter for 'name' by 'cnt'
 * @name: an event name defined in struct scx_event_stats
 * @cnt: the number of times the event occurred
 *
 * This should be used only when preemption is disabled.
 */
#define __scx_add_event(name, cnt) do {						\
	__this_cpu_add(event_stats_cpu.name, cnt);				\
	trace_sched_ext_event(#name, cnt);					\
} while(0)

/**
 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
 * @dst_e: destination event stats
 * @src_e: source event stats
 * @kind: a kind of event to be aggregated
 */
#define scx_agg_event(dst_e, src_e, kind) do {					\
	(dst_e)->kind += READ_ONCE((src_e)->kind);				\
} while(0)

/**
 * scx_dump_event - Dump an event 'kind' in 'events' to 's'
 * @s: output seq_buf
 * @events: event stats
 * @kind: a kind of event to dump
 */
#define scx_dump_event(s, events, kind) do {					\
	dump_line(&(s), "%40s: %16lld", #kind, (events)->kind);		\
} while (0)


static void scx_bpf_events(struct scx_event_stats *events, size_t events__sz);
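/*
 * For example, an event site accounts one occurrence with a single statement
 * (here with preemption already disabled):
 *
 *	__scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
 *
 * and a system-wide view is later folded together per event kind
 * (illustrative; e_sys and e_cpu are hypothetical names):
 *
 *	scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SLICE_DFL);
 */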
static enum scx_ops_enable_state scx_ops_enable_state(void)
{
	return atomic_read(&scx_ops_enable_state_var);
}

static enum scx_ops_enable_state
scx_ops_set_enable_state(enum scx_ops_enable_state to)
{
	return atomic_xchg(&scx_ops_enable_state_var, to);
}

static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
					enum scx_ops_enable_state from)
{
	int from_v = from;

	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
}

static bool scx_rq_bypassing(struct rq *rq)
{
	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
}

/**
 * wait_ops_state - Busy-wait the specified ops state to end
 * @p: target task
 * @opss: state to wait the end of
 *
 * Busy-wait for @p to transition out of @opss. This can only be used when the
 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
 * has load_acquire semantics to ensure that the caller can see the updates made
 * in the enqueueing and dispatching paths.
 */
static void wait_ops_state(struct task_struct *p, unsigned long opss)
{
	do {
		cpu_relax();
	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
}

/**
 * ops_cpu_valid - Verify a cpu number
 * @cpu: cpu number which came from a BPF ops
 * @where: extra information reported on error
 *
 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
 * Verify that it is in range and one of the possible cpus. If invalid, trigger
 * an ops error.
 */
static bool ops_cpu_valid(s32 cpu, const char *where)
{
	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
		return true;
	} else {
		scx_ops_error("invalid CPU %d%s%s", cpu,
			      where ? " " : "", where ?: "");
		return false;
	}
}

/**
 * ops_sanitize_err - Sanitize a -errno value
 * @ops_name: operation to blame on failure
 * @err: -errno value to sanitize
 *
 * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
 * cause misbehaviors. For example, a large negative return from
 * ops.init_task() triggers an oops when passed up the call chain because the
 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
 * handled as a pointer.
 */
static int ops_sanitize_err(const char *ops_name, s32 err)
{
	if (err < 0 && err >= -MAX_ERRNO)
		return err;

	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
	return -EPROTO;
}
1766 * The above WAKEUP and BALANCE paths should cover most of the cases and 1767 * the time to IRQ re-enable shouldn't be long. 1768 */ 1769 irq_work_queue(&rq->scx.deferred_irq_work); 1770 } 1771 1772 /** 1773 * touch_core_sched - Update timestamp used for core-sched task ordering 1774 * @rq: rq to read clock from, must be locked 1775 * @p: task to update the timestamp for 1776 * 1777 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to 1778 * implement global or local-DSQ FIFO ordering for core-sched. Should be called 1779 * when a task becomes runnable and its turn on the CPU ends (e.g. slice 1780 * exhaustion). 1781 */ 1782 static void touch_core_sched(struct rq *rq, struct task_struct *p) 1783 { 1784 lockdep_assert_rq_held(rq); 1785 1786 #ifdef CONFIG_SCHED_CORE 1787 /* 1788 * It's okay to update the timestamp spuriously. Use 1789 * sched_core_disabled() which is cheaper than enabled(). 1790 * 1791 * As this is used to determine ordering between tasks of sibling CPUs, 1792 * it may be better to use per-core dispatch sequence instead. 1793 */ 1794 if (!sched_core_disabled()) 1795 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); 1796 #endif 1797 } 1798 1799 /** 1800 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch 1801 * @rq: rq to read clock from, must be locked 1802 * @p: task being dispatched 1803 * 1804 * If the BPF scheduler implements custom core-sched ordering via 1805 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO 1806 * ordering within each local DSQ. This function is called from dispatch paths 1807 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect. 1808 */ 1809 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) 1810 { 1811 lockdep_assert_rq_held(rq); 1812 1813 #ifdef CONFIG_SCHED_CORE 1814 if (SCX_HAS_OP(core_sched_before)) 1815 touch_core_sched(rq, p); 1816 #endif 1817 } 1818 1819 static void update_curr_scx(struct rq *rq) 1820 { 1821 struct task_struct *curr = rq->curr; 1822 s64 delta_exec; 1823 1824 delta_exec = update_curr_common(rq); 1825 if (unlikely(delta_exec <= 0)) 1826 return; 1827 1828 if (curr->scx.slice != SCX_SLICE_INF) { 1829 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec); 1830 if (!curr->scx.slice) 1831 touch_core_sched(rq, curr); 1832 } 1833 } 1834 1835 static bool scx_dsq_priq_less(struct rb_node *node_a, 1836 const struct rb_node *node_b) 1837 { 1838 const struct task_struct *a = 1839 container_of(node_a, struct task_struct, scx.dsq_priq); 1840 const struct task_struct *b = 1841 container_of(node_b, struct task_struct, scx.dsq_priq); 1842 1843 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime); 1844 } 1845 1846 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta) 1847 { 1848 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */ 1849 WRITE_ONCE(dsq->nr, dsq->nr + delta); 1850 } 1851 1852 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, 1853 u64 enq_flags) 1854 { 1855 bool is_local = dsq->id == SCX_DSQ_LOCAL; 1856 1857 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 1858 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) || 1859 !RB_EMPTY_NODE(&p->scx.dsq_priq)); 1860 1861 if (!is_local) { 1862 raw_spin_lock(&dsq->lock); 1863 if (unlikely(dsq->id == SCX_DSQ_INVALID)) { 1864 scx_ops_error("attempting to dispatch to a destroyed dsq"); 1865 /* fall back to the global dsq */ 1866 raw_spin_unlock(&dsq->lock); 1867 dsq = find_global_dsq(p); 
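			/*
			 * @dsq now points at a per-node global DSQ; see
			 * find_global_dsq(). Lock the new target before
			 * inserting.
			 */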
1868 raw_spin_lock(&dsq->lock); 1869 } 1870 } 1871 1872 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) && 1873 (enq_flags & SCX_ENQ_DSQ_PRIQ))) { 1874 /* 1875 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from 1876 * their FIFO queues. To avoid confusion and accidentally 1877 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we 1878 * disallow any internal DSQ from doing vtime ordering of 1879 * tasks. 1880 */ 1881 scx_ops_error("cannot use vtime ordering for built-in DSQs"); 1882 enq_flags &= ~SCX_ENQ_DSQ_PRIQ; 1883 } 1884 1885 if (enq_flags & SCX_ENQ_DSQ_PRIQ) { 1886 struct rb_node *rbp; 1887 1888 /* 1889 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are 1890 * linked to both the rbtree and list on PRIQs, this can only be 1891 * tested easily when adding the first task. 1892 */ 1893 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) && 1894 nldsq_next_task(dsq, NULL, false))) 1895 scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks", 1896 dsq->id); 1897 1898 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; 1899 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); 1900 1901 /* 1902 * Find the previous task and insert after it on the list so 1903 * that @dsq->list is vtime ordered. 1904 */ 1905 rbp = rb_prev(&p->scx.dsq_priq); 1906 if (rbp) { 1907 struct task_struct *prev = 1908 container_of(rbp, struct task_struct, 1909 scx.dsq_priq); 1910 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); 1911 } else { 1912 list_add(&p->scx.dsq_list.node, &dsq->list); 1913 } 1914 } else { 1915 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */ 1916 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq))) 1917 scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks", 1918 dsq->id); 1919 1920 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) 1921 list_add(&p->scx.dsq_list.node, &dsq->list); 1922 else 1923 list_add_tail(&p->scx.dsq_list.node, &dsq->list); 1924 } 1925 1926 /* seq records the order tasks are queued, used by BPF DSQ iterator */ 1927 dsq->seq++; 1928 p->scx.dsq_seq = dsq->seq; 1929 1930 dsq_mod_nr(dsq, 1); 1931 p->scx.dsq = dsq; 1932 1933 /* 1934 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the 1935 * direct dispatch path, but we clear them here because the direct 1936 * dispatch verdict may be overridden on the enqueue path during e.g. 1937 * bypass. 1938 */ 1939 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; 1940 p->scx.ddsp_enq_flags = 0; 1941 1942 /* 1943 * We're transitioning out of QUEUEING or DISPATCHING. store_release to 1944 * match waiters' load_acquire. 
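	 * The acquire counterparts are in wait_ops_state() and ops_dequeue().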
1945 */ 1946 if (enq_flags & SCX_ENQ_CLEAR_OPSS) 1947 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1948 1949 if (is_local) { 1950 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); 1951 bool preempt = false; 1952 1953 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && 1954 rq->curr->sched_class == &ext_sched_class) { 1955 rq->curr->scx.slice = 0; 1956 preempt = true; 1957 } 1958 1959 if (preempt || sched_class_above(&ext_sched_class, 1960 rq->curr->sched_class)) 1961 resched_curr(rq); 1962 } else { 1963 raw_spin_unlock(&dsq->lock); 1964 } 1965 } 1966 1967 static void task_unlink_from_dsq(struct task_struct *p, 1968 struct scx_dispatch_q *dsq) 1969 { 1970 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node)); 1971 1972 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) { 1973 rb_erase(&p->scx.dsq_priq, &dsq->priq); 1974 RB_CLEAR_NODE(&p->scx.dsq_priq); 1975 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; 1976 } 1977 1978 list_del_init(&p->scx.dsq_list.node); 1979 dsq_mod_nr(dsq, -1); 1980 } 1981 1982 static void dispatch_dequeue(struct rq *rq, struct task_struct *p) 1983 { 1984 struct scx_dispatch_q *dsq = p->scx.dsq; 1985 bool is_local = dsq == &rq->scx.local_dsq; 1986 1987 if (!dsq) { 1988 /* 1989 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals. 1990 * Unlinking is all that's needed to cancel. 1991 */ 1992 if (unlikely(!list_empty(&p->scx.dsq_list.node))) 1993 list_del_init(&p->scx.dsq_list.node); 1994 1995 /* 1996 * When dispatching directly from the BPF scheduler to a local 1997 * DSQ, the task isn't associated with any DSQ but 1998 * @p->scx.holding_cpu may be set under the protection of 1999 * %SCX_OPSS_DISPATCHING. 2000 */ 2001 if (p->scx.holding_cpu >= 0) 2002 p->scx.holding_cpu = -1; 2003 2004 return; 2005 } 2006 2007 if (!is_local) 2008 raw_spin_lock(&dsq->lock); 2009 2010 /* 2011 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't 2012 * change underneath us. 2013 */ 2014 if (p->scx.holding_cpu < 0) { 2015 /* @p must still be on @dsq, dequeue */ 2016 task_unlink_from_dsq(p, dsq); 2017 } else { 2018 /* 2019 * We're racing against dispatch_to_local_dsq() which already 2020 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the 2021 * holding_cpu which tells dispatch_to_local_dsq() that it lost 2022 * the race. 
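		 * unlink_dsq_and_lock_src_rq() documents this protocol in
		 * detail.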
2023 		 */
2024 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
2025 		p->scx.holding_cpu = -1;
2026 	}
2027 	p->scx.dsq = NULL;
2028 
2029 	if (!is_local)
2030 		raw_spin_unlock(&dsq->lock);
2031 }
2032 
2033 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
2034 						    struct task_struct *p)
2035 {
2036 	struct scx_dispatch_q *dsq;
2037 
2038 	if (dsq_id == SCX_DSQ_LOCAL)
2039 		return &rq->scx.local_dsq;
2040 
2041 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
2042 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
2043 
2044 		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
2045 			return find_global_dsq(p);
2046 
2047 		return &cpu_rq(cpu)->scx.local_dsq;
2048 	}
2049 
2050 	if (dsq_id == SCX_DSQ_GLOBAL)
2051 		dsq = find_global_dsq(p);
2052 	else
2053 		dsq = find_user_dsq(dsq_id);
2054 
2055 	if (unlikely(!dsq)) {
2056 		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
2057 			      dsq_id, p->comm, p->pid);
2058 		return find_global_dsq(p);
2059 	}
2060 
2061 	return dsq;
2062 }
2063 
2064 static void mark_direct_dispatch(struct task_struct *ddsp_task,
2065 				 struct task_struct *p, u64 dsq_id,
2066 				 u64 enq_flags)
2067 {
2068 	/*
2069 	 * Mark that dispatch already happened from ops.select_cpu() or
2070 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
2071 	 * which can never match a valid task pointer.
2072 	 */
2073 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
2074 
2075 	/* @p must match the task on the enqueue path */
2076 	if (unlikely(p != ddsp_task)) {
2077 		if (IS_ERR(ddsp_task))
2078 			scx_ops_error("%s[%d] already direct-dispatched",
2079 				      p->comm, p->pid);
2080 		else
2081 			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
2082 				      ddsp_task->comm, ddsp_task->pid,
2083 				      p->comm, p->pid);
2084 		return;
2085 	}
2086 
2087 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
2088 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
2089 
2090 	p->scx.ddsp_dsq_id = dsq_id;
2091 	p->scx.ddsp_enq_flags = enq_flags;
2092 }
2093 
2094 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
2095 {
2096 	struct rq *rq = task_rq(p);
2097 	struct scx_dispatch_q *dsq =
2098 		find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2099 
2100 	touch_core_sched_dispatch(rq, p);
2101 
2102 	p->scx.ddsp_enq_flags |= enq_flags;
2103 
2104 	/*
2105 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
2106 	 * double lock a remote rq and enqueue to its local DSQ. For
2107 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
2108 	 * the enqueue so that it's executed when @rq can be unlocked.
2109 	 */
2110 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
2111 		unsigned long opss;
2112 
2113 		opss = atomic_long_read(&p->scx.ops_state);
2114 
2115 		switch (opss & SCX_OPSS_STATE_MASK) {
2116 		case SCX_OPSS_NONE:
2117 			break;
2118 		case SCX_OPSS_QUEUEING:
2119 			/*
2120 			 * As @p was never passed to the BPF side, _release is
2121 			 * not strictly necessary. Still do it for consistency.
2122 */ 2123 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 2124 break; 2125 default: 2126 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()", 2127 p->comm, p->pid, opss); 2128 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 2129 break; 2130 } 2131 2132 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 2133 list_add_tail(&p->scx.dsq_list.node, 2134 &rq->scx.ddsp_deferred_locals); 2135 schedule_deferred(rq); 2136 return; 2137 } 2138 2139 dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); 2140 } 2141 2142 static bool scx_rq_online(struct rq *rq) 2143 { 2144 /* 2145 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates 2146 * the online state as seen from the BPF scheduler. cpu_active() test 2147 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will 2148 * stay set until the current scheduling operation is complete even if 2149 * we aren't locking @rq. 2150 */ 2151 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq))); 2152 } 2153 2154 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, 2155 int sticky_cpu) 2156 { 2157 struct task_struct **ddsp_taskp; 2158 unsigned long qseq; 2159 2160 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); 2161 2162 /* rq migration */ 2163 if (sticky_cpu == cpu_of(rq)) 2164 goto local_norefill; 2165 2166 /* 2167 * If !scx_rq_online(), we already told the BPF scheduler that the CPU 2168 * is offline and are just running the hotplug path. Don't bother the 2169 * BPF scheduler. 2170 */ 2171 if (!scx_rq_online(rq)) 2172 goto local; 2173 2174 if (scx_rq_bypassing(rq)) { 2175 __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1); 2176 goto global; 2177 } 2178 2179 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 2180 goto direct; 2181 2182 /* see %SCX_OPS_ENQ_EXITING */ 2183 if (!static_branch_unlikely(&scx_ops_enq_exiting) && 2184 unlikely(p->flags & PF_EXITING)) { 2185 __scx_add_event(SCX_EV_ENQ_SKIP_EXITING, 1); 2186 goto local; 2187 } 2188 2189 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */ 2190 if (!static_branch_unlikely(&scx_ops_enq_migration_disabled) && 2191 is_migration_disabled(p)) { 2192 __scx_add_event(SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1); 2193 goto local; 2194 } 2195 2196 if (!SCX_HAS_OP(enqueue)) 2197 goto global; 2198 2199 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */ 2200 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT; 2201 2202 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 2203 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); 2204 2205 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 2206 WARN_ON_ONCE(*ddsp_taskp); 2207 *ddsp_taskp = p; 2208 2209 SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags); 2210 2211 *ddsp_taskp = NULL; 2212 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 2213 goto direct; 2214 2215 /* 2216 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or 2217 * dequeue may be waiting. The store_release matches their load_acquire. 2218 */ 2219 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); 2220 return; 2221 2222 direct: 2223 direct_dispatch(p, enq_flags); 2224 return; 2225 2226 local: 2227 /* 2228 * For task-ordering, slice refill must be treated as implying the end 2229 * of the current slice. Otherwise, the longer @p stays on the CPU, the 2230 * higher priority it becomes from scx_prio_less()'s POV. 
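	 * (in scx_prio_less(), an older core_sched_at timestamp means higher
	 * priority)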
2231 */ 2232 touch_core_sched(rq, p); 2233 p->scx.slice = SCX_SLICE_DFL; 2234 __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1); 2235 local_norefill: 2236 dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags); 2237 return; 2238 2239 global: 2240 touch_core_sched(rq, p); /* see the comment in local: */ 2241 p->scx.slice = SCX_SLICE_DFL; 2242 __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1); 2243 dispatch_enqueue(find_global_dsq(p), p, enq_flags); 2244 } 2245 2246 static bool task_runnable(const struct task_struct *p) 2247 { 2248 return !list_empty(&p->scx.runnable_node); 2249 } 2250 2251 static void set_task_runnable(struct rq *rq, struct task_struct *p) 2252 { 2253 lockdep_assert_rq_held(rq); 2254 2255 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) { 2256 p->scx.runnable_at = jiffies; 2257 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT; 2258 } 2259 2260 /* 2261 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being 2262 * appended to the runnable_list. 2263 */ 2264 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list); 2265 } 2266 2267 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at) 2268 { 2269 list_del_init(&p->scx.runnable_node); 2270 if (reset_runnable_at) 2271 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 2272 } 2273 2274 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) 2275 { 2276 int sticky_cpu = p->scx.sticky_cpu; 2277 2278 if (enq_flags & ENQUEUE_WAKEUP) 2279 rq->scx.flags |= SCX_RQ_IN_WAKEUP; 2280 2281 enq_flags |= rq->scx.extra_enq_flags; 2282 2283 if (sticky_cpu >= 0) 2284 p->scx.sticky_cpu = -1; 2285 2286 /* 2287 * Restoring a running task will be immediately followed by 2288 * set_next_task_scx() which expects the task to not be on the BPF 2289 * scheduler as tasks can only start running through local DSQs. Force 2290 * direct-dispatch into the local DSQ by setting the sticky_cpu. 2291 */ 2292 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) 2293 sticky_cpu = cpu_of(rq); 2294 2295 if (p->scx.flags & SCX_TASK_QUEUED) { 2296 WARN_ON_ONCE(!task_runnable(p)); 2297 goto out; 2298 } 2299 2300 set_task_runnable(rq, p); 2301 p->scx.flags |= SCX_TASK_QUEUED; 2302 rq->scx.nr_running++; 2303 add_nr_running(rq, 1); 2304 2305 if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p)) 2306 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, rq, p, enq_flags); 2307 2308 if (enq_flags & SCX_ENQ_WAKEUP) 2309 touch_core_sched(rq, p); 2310 2311 do_enqueue_task(rq, p, enq_flags, sticky_cpu); 2312 out: 2313 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP; 2314 2315 if ((enq_flags & SCX_ENQ_CPU_SELECTED) && 2316 unlikely(cpu_of(rq) != p->scx.selected_cpu)) 2317 __scx_add_event(SCX_EV_SELECT_CPU_FALLBACK, 1); 2318 } 2319 2320 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags) 2321 { 2322 unsigned long opss; 2323 2324 /* dequeue is always temporary, don't reset runnable_at */ 2325 clr_task_runnable(p, false); 2326 2327 /* acquire ensures that we see the preceding updates on QUEUED */ 2328 opss = atomic_long_read_acquire(&p->scx.ops_state); 2329 2330 switch (opss & SCX_OPSS_STATE_MASK) { 2331 case SCX_OPSS_NONE: 2332 break; 2333 case SCX_OPSS_QUEUEING: 2334 /* 2335 * QUEUEING is started and finished while holding @p's rq lock. 2336 * As we're holding the rq lock now, we shouldn't see QUEUEING. 
2337 */ 2338 BUG(); 2339 case SCX_OPSS_QUEUED: 2340 if (SCX_HAS_OP(dequeue)) 2341 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, rq, p, deq_flags); 2342 2343 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 2344 SCX_OPSS_NONE)) 2345 break; 2346 fallthrough; 2347 case SCX_OPSS_DISPATCHING: 2348 /* 2349 * If @p is being dispatched from the BPF scheduler to a DSQ, 2350 * wait for the transfer to complete so that @p doesn't get 2351 * added to its DSQ after dequeueing is complete. 2352 * 2353 * As we're waiting on DISPATCHING with the rq locked, the 2354 * dispatching side shouldn't try to lock the rq while 2355 * DISPATCHING is set. See dispatch_to_local_dsq(). 2356 * 2357 * DISPATCHING shouldn't have qseq set and control can reach 2358 * here with NONE @opss from the above QUEUED case block. 2359 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss. 2360 */ 2361 wait_ops_state(p, SCX_OPSS_DISPATCHING); 2362 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 2363 break; 2364 } 2365 } 2366 2367 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) 2368 { 2369 if (!(p->scx.flags & SCX_TASK_QUEUED)) { 2370 WARN_ON_ONCE(task_runnable(p)); 2371 return true; 2372 } 2373 2374 ops_dequeue(rq, p, deq_flags); 2375 2376 /* 2377 * A currently running task which is going off @rq first gets dequeued 2378 * and then stops running. As we want running <-> stopping transitions 2379 * to be contained within runnable <-> quiescent transitions, trigger 2380 * ->stopping() early here instead of in put_prev_task_scx(). 2381 * 2382 * @p may go through multiple stopping <-> running transitions between 2383 * here and put_prev_task_scx() if task attribute changes occur while 2384 * balance_scx() leaves @rq unlocked. However, they don't contain any 2385 * information meaningful to the BPF scheduler and can be suppressed by 2386 * skipping the callbacks if the task is !QUEUED. 
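	 * set_next_task_scx() and put_prev_task_scx() apply the same rule.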
2387 	 */
2388 	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
2389 		update_curr_scx(rq);
2390 		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, rq, p, false);
2391 	}
2392 
2393 	if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
2394 		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, rq, p, deq_flags);
2395 
2396 	if (deq_flags & SCX_DEQ_SLEEP)
2397 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2398 	else
2399 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2400 
2401 	p->scx.flags &= ~SCX_TASK_QUEUED;
2402 	rq->scx.nr_running--;
2403 	sub_nr_running(rq, 1);
2404 
2405 	dispatch_dequeue(rq, p);
2406 	return true;
2407 }
2408 
2409 static void yield_task_scx(struct rq *rq)
2410 {
2411 	struct task_struct *p = rq->curr;
2412 
2413 	if (SCX_HAS_OP(yield))
2414 		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, rq, p, NULL);
2415 	else
2416 		p->scx.slice = 0;
2417 }
2418 
2419 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2420 {
2421 	struct task_struct *from = rq->curr;
2422 
2423 	if (SCX_HAS_OP(yield))
2424 		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, rq, from, to);
2425 	else
2426 		return false;
2427 }
2428 
2429 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2430 					 struct scx_dispatch_q *src_dsq,
2431 					 struct rq *dst_rq)
2432 {
2433 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2434 
2435 	/* @src_dsq is locked and @p is on @dst_rq */
2436 	lockdep_assert_held(&src_dsq->lock);
2437 	lockdep_assert_rq_held(dst_rq);
2438 
2439 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2440 
2441 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2442 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2443 	else
2444 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2445 
2446 	dsq_mod_nr(dst_dsq, 1);
2447 	p->scx.dsq = dst_dsq;
2448 }
2449 
2450 #ifdef CONFIG_SMP
2451 /**
2452  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2453  * @p: task to move
2454  * @enq_flags: %SCX_ENQ_*
2455  * @src_rq: rq to move the task from, locked on entry, released on return
2456  * @dst_rq: rq to move the task into, locked on return
2457  *
2458  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2459  */
2460 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2461 					  struct rq *src_rq, struct rq *dst_rq)
2462 {
2463 	lockdep_assert_rq_held(src_rq);
2464 
2465 	/* the following marks @p MIGRATING which excludes dequeue */
2466 	deactivate_task(src_rq, p, 0);
2467 	set_task_cpu(p, cpu_of(dst_rq));
2468 	p->scx.sticky_cpu = cpu_of(dst_rq);
2469 
2470 	raw_spin_rq_unlock(src_rq);
2471 	raw_spin_rq_lock(dst_rq);
2472 
2473 	/*
2474 	 * We want to pass scx-specific enq_flags but activate_task() will
2475 	 * truncate the upper 32 bits. As we own @dst_rq, we can pass them
2476 	 * through @dst_rq->scx.extra_enq_flags instead.
2477 	 */
2478 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2479 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2480 	dst_rq->scx.extra_enq_flags = enq_flags;
2481 	activate_task(dst_rq, p, 0);
2482 	dst_rq->scx.extra_enq_flags = 0;
2483 }
2484 
2485 /*
2486  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2487  * differences:
2488  *
2489  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2490  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2491  *   this CPU?".
2492  *
2493  * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2494  * must be allowed to finish on the CPU that it's currently on regardless of
2495  * the CPU state.
However, task_can_run_on_remote_rq() must say "no" as the
2496  * BPF scheduler shouldn't attempt to migrate a task which has migration
2497  * disabled.
2498  *
2499  * - The BPF scheduler is bypassed while the rq is offline and we can always
2500  *   say no to BPF-scheduler-initiated migrations while offline.
2501  *
2502  * The caller must ensure that @p and @rq are on different CPUs.
2503  */
2504 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
2505 				      bool enforce)
2506 {
2507 	int cpu = cpu_of(rq);
2508 
2509 	WARN_ON_ONCE(task_cpu(p) == cpu);
2510 
2511 	/*
2512 	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2513 	 * the pinned CPU in migrate_disable_switch() while @p is being switched
2514 	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2515 	 * updated and thus another CPU may see @p on a DSQ in between, leading
2516 	 * to @p passing the below task_allowed_on_cpu() check while migration is
2517 	 * disabled.
2518 	 *
2519 	 * Test the migration disabled state first as the race window is narrow
2520 	 * and the BPF scheduler failing to check the migration disabled state
2521 	 * can easily be masked if task_allowed_on_cpu() is done first.
2522 	 */
2523 	if (unlikely(is_migration_disabled(p))) {
2524 		if (enforce)
2525 			scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2526 				      p->comm, p->pid, task_cpu(p), cpu);
2527 		return false;
2528 	}
2529 
2530 	/*
2531 	 * We don't require the BPF scheduler to avoid dispatching to offline
2532 	 * CPUs mostly for convenience but also because CPUs can go offline
2533 	 * between scx_bpf_dsq_insert() calls and here. Trigger an error iff the
2534 	 * picked CPU is outside the allowed mask.
2535 	 */
2536 	if (!task_allowed_on_cpu(p, cpu)) {
2537 		if (enforce)
2538 			scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2539 				      cpu, p->comm, p->pid);
2540 		return false;
2541 	}
2542 
2543 	if (!scx_rq_online(rq)) {
2544 		if (enforce)
2545 			__scx_add_event(SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
2546 		return false;
2547 	}
2548 
2549 	return true;
2550 }
2551 
2552 /**
2553  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2554  * @p: target task
2555  * @dsq: locked DSQ @p is currently on
2556  * @src_rq: rq @p is currently on, stable with @dsq locked
2557  *
2558  * Called with @dsq locked but no rq locked. We want to move @p to a different
2559  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2560  * required when transferring into a local DSQ. Even when transferring into a
2561  * non-local DSQ, it's better to use the same mechanism to protect against
2562  * dequeues and maintain the invariant that @p->scx.dsq can only change while
2563  * @src_rq is locked, which e.g. scx_dump_task() depends on.
2564  *
2565  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2566  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2567  * this may race with dequeue, which can't drop the rq lock or fail, do a little
2568  * dancing from our side.
2569  *
2570  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2571  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2572  * would be cleared to -1. While other CPUs may have updated it to different
2573  * values afterwards, as this operation can't be preempted or recurse, the
2574  * holding_cpu can never become this CPU again before we're done.
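 *
 * An illustrative interleaving where this side loses to dequeue:
 *
 *	this CPU				dequeue path (rq lock held)
 *
 *	p->scx.holding_cpu = this CPU
 *	unlock @dsq
 *						sees holding_cpu >= 0
 *						p->scx.holding_cpu = -1
 *	lock @src_rq
 *	p->scx.holding_cpu != this CPU
 *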
Thus, we can 2575 * tell whether we lost to dequeue by testing whether the holding_cpu still 2576 * points to this CPU. See dispatch_dequeue() for the counterpart. 2577 * 2578 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is 2579 * still valid. %false if lost to dequeue. 2580 */ 2581 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p, 2582 struct scx_dispatch_q *dsq, 2583 struct rq *src_rq) 2584 { 2585 s32 cpu = raw_smp_processor_id(); 2586 2587 lockdep_assert_held(&dsq->lock); 2588 2589 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 2590 task_unlink_from_dsq(p, dsq); 2591 p->scx.holding_cpu = cpu; 2592 2593 raw_spin_unlock(&dsq->lock); 2594 raw_spin_rq_lock(src_rq); 2595 2596 /* task_rq couldn't have changed if we're still the holding cpu */ 2597 return likely(p->scx.holding_cpu == cpu) && 2598 !WARN_ON_ONCE(src_rq != task_rq(p)); 2599 } 2600 2601 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, 2602 struct scx_dispatch_q *dsq, struct rq *src_rq) 2603 { 2604 raw_spin_rq_unlock(this_rq); 2605 2606 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { 2607 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); 2608 return true; 2609 } else { 2610 raw_spin_rq_unlock(src_rq); 2611 raw_spin_rq_lock(this_rq); 2612 return false; 2613 } 2614 } 2615 #else /* CONFIG_SMP */ 2616 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); } 2617 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool enforce) { return false; } 2618 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; } 2619 #endif /* CONFIG_SMP */ 2620 2621 /** 2622 * move_task_between_dsqs() - Move a task from one DSQ to another 2623 * @p: target task 2624 * @enq_flags: %SCX_ENQ_* 2625 * @src_dsq: DSQ @p is currently on, must not be a local DSQ 2626 * @dst_dsq: DSQ @p is being moved to, can be any DSQ 2627 * 2628 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local 2629 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq 2630 * will change. As @p's task_rq is locked, this function doesn't need to use the 2631 * holding_cpu mechanism. 2632 * 2633 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the 2634 * return value, is locked. 2635 */ 2636 static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags, 2637 struct scx_dispatch_q *src_dsq, 2638 struct scx_dispatch_q *dst_dsq) 2639 { 2640 struct rq *src_rq = task_rq(p), *dst_rq; 2641 2642 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL); 2643 lockdep_assert_held(&src_dsq->lock); 2644 lockdep_assert_rq_held(src_rq); 2645 2646 if (dst_dsq->id == SCX_DSQ_LOCAL) { 2647 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 2648 if (src_rq != dst_rq && 2649 unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) { 2650 dst_dsq = find_global_dsq(p); 2651 dst_rq = src_rq; 2652 } 2653 } else { 2654 /* no need to migrate if destination is a non-local DSQ */ 2655 dst_rq = src_rq; 2656 } 2657 2658 /* 2659 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different 2660 * CPU, @p will be migrated. 
2661 */ 2662 if (dst_dsq->id == SCX_DSQ_LOCAL) { 2663 /* @p is going from a non-local DSQ to a local DSQ */ 2664 if (src_rq == dst_rq) { 2665 task_unlink_from_dsq(p, src_dsq); 2666 move_local_task_to_local_dsq(p, enq_flags, 2667 src_dsq, dst_rq); 2668 raw_spin_unlock(&src_dsq->lock); 2669 } else { 2670 raw_spin_unlock(&src_dsq->lock); 2671 move_remote_task_to_local_dsq(p, enq_flags, 2672 src_rq, dst_rq); 2673 } 2674 } else { 2675 /* 2676 * @p is going from a non-local DSQ to a non-local DSQ. As 2677 * $src_dsq is already locked, do an abbreviated dequeue. 2678 */ 2679 task_unlink_from_dsq(p, src_dsq); 2680 p->scx.dsq = NULL; 2681 raw_spin_unlock(&src_dsq->lock); 2682 2683 dispatch_enqueue(dst_dsq, p, enq_flags); 2684 } 2685 2686 return dst_rq; 2687 } 2688 2689 /* 2690 * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly 2691 * banging on the same DSQ on a large NUMA system to the point where switching 2692 * to the bypass mode can take a long time. Inject artificial delays while the 2693 * bypass mode is switching to guarantee timely completion. 2694 */ 2695 static void scx_ops_breather(struct rq *rq) 2696 { 2697 u64 until; 2698 2699 lockdep_assert_rq_held(rq); 2700 2701 if (likely(!atomic_read(&scx_ops_breather_depth))) 2702 return; 2703 2704 raw_spin_rq_unlock(rq); 2705 2706 until = ktime_get_ns() + NSEC_PER_MSEC; 2707 2708 do { 2709 int cnt = 1024; 2710 while (atomic_read(&scx_ops_breather_depth) && --cnt) 2711 cpu_relax(); 2712 } while (atomic_read(&scx_ops_breather_depth) && 2713 time_before64(ktime_get_ns(), until)); 2714 2715 raw_spin_rq_lock(rq); 2716 } 2717 2718 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq) 2719 { 2720 struct task_struct *p; 2721 retry: 2722 /* 2723 * This retry loop can repeatedly race against scx_ops_bypass() 2724 * dequeueing tasks from @dsq trying to put the system into the bypass 2725 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can 2726 * live-lock the machine into soft lockups. Give a breather. 2727 */ 2728 scx_ops_breather(rq); 2729 2730 /* 2731 * The caller can't expect to successfully consume a task if the task's 2732 * addition to @dsq isn't guaranteed to be visible somehow. Test 2733 * @dsq->list without locking and skip if it seems empty. 2734 */ 2735 if (list_empty(&dsq->list)) 2736 return false; 2737 2738 raw_spin_lock(&dsq->lock); 2739 2740 nldsq_for_each_task(p, dsq) { 2741 struct rq *task_rq = task_rq(p); 2742 2743 if (rq == task_rq) { 2744 task_unlink_from_dsq(p, dsq); 2745 move_local_task_to_local_dsq(p, 0, dsq, rq); 2746 raw_spin_unlock(&dsq->lock); 2747 return true; 2748 } 2749 2750 if (task_can_run_on_remote_rq(p, rq, false)) { 2751 if (likely(consume_remote_task(rq, p, dsq, task_rq))) 2752 return true; 2753 goto retry; 2754 } 2755 } 2756 2757 raw_spin_unlock(&dsq->lock); 2758 return false; 2759 } 2760 2761 static bool consume_global_dsq(struct rq *rq) 2762 { 2763 int node = cpu_to_node(cpu_of(rq)); 2764 2765 return consume_dispatch_q(rq, global_dsqs[node]); 2766 } 2767 2768 /** 2769 * dispatch_to_local_dsq - Dispatch a task to a local dsq 2770 * @rq: current rq which is locked 2771 * @dst_dsq: destination DSQ 2772 * @p: task to dispatch 2773 * @enq_flags: %SCX_ENQ_* 2774 * 2775 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local 2776 * DSQ. This function performs all the synchronization dancing needed because 2777 * local DSQs are protected with rq locks. 2778 * 2779 * The caller must have exclusive ownership of @p (e.g. 
through
2780  * %SCX_OPSS_DISPATCHING).
2781  */
2782 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
2783 				  struct task_struct *p, u64 enq_flags)
2784 {
2785 	struct rq *src_rq = task_rq(p);
2786 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2787 #ifdef CONFIG_SMP
2788 	struct rq *locked_rq = rq;
2789 #endif
2790 
2791 	/*
2792 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
2793 	 * be dequeued, its task_rq and cpus_allowed are stable too.
2794 	 *
2795 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
2796 	 */
2797 	if (rq == src_rq && rq == dst_rq) {
2798 		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2799 		return;
2800 	}
2801 
2802 #ifdef CONFIG_SMP
2803 	if (src_rq != dst_rq &&
2804 	    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
2805 		dispatch_enqueue(find_global_dsq(p), p,
2806 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
2807 		return;
2808 	}
2809 
2810 	/*
2811 	 * @p is on a possibly remote @src_rq which we need to lock to move the
2812 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2813 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
2814 	 * DISPATCHING.
2815 	 *
2816 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2817 	 * we're moving from a DSQ and use the same mechanism - mark the task
2818 	 * under transfer with holding_cpu, release DISPATCHING and then follow
2819 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
2820 	 */
2821 	p->scx.holding_cpu = raw_smp_processor_id();
2822 
2823 	/* store_release ensures that dequeue sees the above */
2824 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2825 
2826 	/* switch to @src_rq lock */
2827 	if (locked_rq != src_rq) {
2828 		raw_spin_rq_unlock(locked_rq);
2829 		locked_rq = src_rq;
2830 		raw_spin_rq_lock(src_rq);
2831 	}
2832 
2833 	/* task_rq couldn't have changed if we're still the holding cpu */
2834 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2835 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
2836 		/*
2837 		 * If @p is staying on the same rq, there's no need to go
2838 		 * through the full deactivate/activate cycle. Optimize by
2839 		 * abbreviating move_remote_task_to_local_dsq().
2840 		 */
2841 		if (src_rq == dst_rq) {
2842 			p->scx.holding_cpu = -1;
2843 			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
2844 		} else {
2845 			move_remote_task_to_local_dsq(p, enq_flags,
2846 						      src_rq, dst_rq);
2847 			/* task has been moved to dst_rq, which is now locked */
2848 			locked_rq = dst_rq;
2849 		}
2850 
2851 		/* if the destination CPU is idle, wake it up */
2852 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2853 			resched_curr(dst_rq);
2854 	}
2855 
2856 	/* switch back to @rq lock */
2857 	if (locked_rq != rq) {
2858 		raw_spin_rq_unlock(locked_rq);
2859 		raw_spin_rq_lock(rq);
2860 	}
2861 #else	/* CONFIG_SMP */
2862 	BUG();	/* control cannot reach here on UP */
2863 #endif	/* CONFIG_SMP */
2864 }
2865 
2866 /**
2867  * finish_dispatch - Asynchronously finish dispatching a task
2868  * @rq: current rq which is locked
2869  * @p: task to finish dispatching
2870  * @qseq_at_dispatch: qseq when @p started getting dispatched
2871  * @dsq_id: destination DSQ ID
2872  * @enq_flags: %SCX_ENQ_*
2873  *
2874  * Dispatching to local DSQs may need to wait for queueing to complete or
2875  * require rq lock dancing. As we don't want to do either while inside
2876  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2877  * two parts.
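 * (the buffered entries live in the per-CPU scx_dsp_ctx; see
 * flush_dispatch_buf() for the draining side)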
scx_bpf_dsq_insert() which is called by ops.dispatch() records the 2878 * task and its qseq. Once ops.dispatch() returns, this function is called to 2879 * finish up. 2880 * 2881 * There is no guarantee that @p is still valid for dispatching or even that it 2882 * was valid in the first place. Make sure that the task is still owned by the 2883 * BPF scheduler and claim the ownership before dispatching. 2884 */ 2885 static void finish_dispatch(struct rq *rq, struct task_struct *p, 2886 unsigned long qseq_at_dispatch, 2887 u64 dsq_id, u64 enq_flags) 2888 { 2889 struct scx_dispatch_q *dsq; 2890 unsigned long opss; 2891 2892 touch_core_sched_dispatch(rq, p); 2893 retry: 2894 /* 2895 * No need for _acquire here. @p is accessed only after a successful 2896 * try_cmpxchg to DISPATCHING. 2897 */ 2898 opss = atomic_long_read(&p->scx.ops_state); 2899 2900 switch (opss & SCX_OPSS_STATE_MASK) { 2901 case SCX_OPSS_DISPATCHING: 2902 case SCX_OPSS_NONE: 2903 /* someone else already got to it */ 2904 return; 2905 case SCX_OPSS_QUEUED: 2906 /* 2907 * If qseq doesn't match, @p has gone through at least one 2908 * dispatch/dequeue and re-enqueue cycle between 2909 * scx_bpf_dsq_insert() and here and we have no claim on it. 2910 */ 2911 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch) 2912 return; 2913 2914 /* 2915 * While we know @p is accessible, we don't yet have a claim on 2916 * it - the BPF scheduler is allowed to dispatch tasks 2917 * spuriously and there can be a racing dequeue attempt. Let's 2918 * claim @p by atomically transitioning it from QUEUED to 2919 * DISPATCHING. 2920 */ 2921 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 2922 SCX_OPSS_DISPATCHING))) 2923 break; 2924 goto retry; 2925 case SCX_OPSS_QUEUEING: 2926 /* 2927 * do_enqueue_task() is in the process of transferring the task 2928 * to the BPF scheduler while holding @p's rq lock. As we aren't 2929 * holding any kernel or BPF resource that the enqueue path may 2930 * depend upon, it's safe to wait. 2931 */ 2932 wait_ops_state(p, opss); 2933 goto retry; 2934 } 2935 2936 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); 2937 2938 dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p); 2939 2940 if (dsq->id == SCX_DSQ_LOCAL) 2941 dispatch_to_local_dsq(rq, dsq, p, enq_flags); 2942 else 2943 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); 2944 } 2945 2946 static void flush_dispatch_buf(struct rq *rq) 2947 { 2948 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2949 u32 u; 2950 2951 for (u = 0; u < dspc->cursor; u++) { 2952 struct scx_dsp_buf_ent *ent = &dspc->buf[u]; 2953 2954 finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id, 2955 ent->enq_flags); 2956 } 2957 2958 dspc->nr_tasks += dspc->cursor; 2959 dspc->cursor = 0; 2960 } 2961 2962 static int balance_one(struct rq *rq, struct task_struct *prev) 2963 { 2964 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2965 bool prev_on_scx = prev->sched_class == &ext_sched_class; 2966 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED; 2967 int nr_loops = SCX_DSP_MAX_LOOPS; 2968 2969 lockdep_assert_rq_held(rq); 2970 rq->scx.flags |= SCX_RQ_IN_BALANCE; 2971 rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP); 2972 2973 if (static_branch_unlikely(&scx_ops_cpu_preempt) && 2974 unlikely(rq->scx.cpu_released)) { 2975 /* 2976 * If the previous sched_class for the current CPU was not SCX, 2977 * notify the BPF scheduler that it again has control of the 2978 * core. This callback complements ->cpu_release(), which is 2979 * emitted in switch_class(). 
2980 */ 2981 if (SCX_HAS_OP(cpu_acquire)) 2982 SCX_CALL_OP(SCX_KF_REST, cpu_acquire, rq, cpu_of(rq), NULL); 2983 rq->scx.cpu_released = false; 2984 } 2985 2986 if (prev_on_scx) { 2987 update_curr_scx(rq); 2988 2989 /* 2990 * If @prev is runnable & has slice left, it has priority and 2991 * fetching more just increases latency for the fetched tasks. 2992 * Tell pick_task_scx() to keep running @prev. If the BPF 2993 * scheduler wants to handle this explicitly, it should 2994 * implement ->cpu_release(). 2995 * 2996 * See scx_ops_disable_workfn() for the explanation on the 2997 * bypassing test. 2998 */ 2999 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) { 3000 rq->scx.flags |= SCX_RQ_BAL_KEEP; 3001 goto has_tasks; 3002 } 3003 } 3004 3005 /* if there already are tasks to run, nothing to do */ 3006 if (rq->scx.local_dsq.nr) 3007 goto has_tasks; 3008 3009 if (consume_global_dsq(rq)) 3010 goto has_tasks; 3011 3012 if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq)) 3013 goto no_tasks; 3014 3015 dspc->rq = rq; 3016 3017 /* 3018 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock, 3019 * the local DSQ might still end up empty after a successful 3020 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch() 3021 * produced some tasks, retry. The BPF scheduler may depend on this 3022 * looping behavior to simplify its implementation. 3023 */ 3024 do { 3025 dspc->nr_tasks = 0; 3026 3027 SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, rq, cpu_of(rq), 3028 prev_on_scx ? prev : NULL); 3029 3030 flush_dispatch_buf(rq); 3031 3032 if (prev_on_rq && prev->scx.slice) { 3033 rq->scx.flags |= SCX_RQ_BAL_KEEP; 3034 goto has_tasks; 3035 } 3036 if (rq->scx.local_dsq.nr) 3037 goto has_tasks; 3038 if (consume_global_dsq(rq)) 3039 goto has_tasks; 3040 3041 /* 3042 * ops.dispatch() can trap us in this loop by repeatedly 3043 * dispatching ineligible tasks. Break out once in a while to 3044 * allow the watchdog to run. As IRQ can't be enabled in 3045 * balance(), we want to complete this scheduling cycle and then 3046 * start a new one. IOW, we want to call resched_curr() on the 3047 * next, most likely idle, task, not the current one. Use 3048 * scx_bpf_kick_cpu() for deferred kicking. 3049 */ 3050 if (unlikely(!--nr_loops)) { 3051 scx_bpf_kick_cpu(cpu_of(rq), 0); 3052 break; 3053 } 3054 } while (dspc->nr_tasks); 3055 3056 no_tasks: 3057 /* 3058 * Didn't find another task to run. Keep running @prev unless 3059 * %SCX_OPS_ENQ_LAST is in effect. 3060 */ 3061 if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) || 3062 scx_rq_bypassing(rq))) { 3063 rq->scx.flags |= SCX_RQ_BAL_KEEP; 3064 __scx_add_event(SCX_EV_DISPATCH_KEEP_LAST, 1); 3065 goto has_tasks; 3066 } 3067 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 3068 return false; 3069 3070 has_tasks: 3071 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 3072 return true; 3073 } 3074 3075 static int balance_scx(struct rq *rq, struct task_struct *prev, 3076 struct rq_flags *rf) 3077 { 3078 int ret; 3079 3080 rq_unpin_lock(rq, rf); 3081 3082 ret = balance_one(rq, prev); 3083 3084 #ifdef CONFIG_SCHED_SMT 3085 /* 3086 * When core-sched is enabled, this ops.balance() call will be followed 3087 * by pick_task_scx() on this CPU and the SMT siblings. Balance the 3088 * siblings too. 
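	 * With core-sched enabled, the SMT siblings share the rq lock, which
	 * is what the __rq_lockp() assertion below verifies.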
3089 */ 3090 if (sched_core_enabled(rq)) { 3091 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); 3092 int scpu; 3093 3094 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) { 3095 struct rq *srq = cpu_rq(scpu); 3096 struct task_struct *sprev = srq->curr; 3097 3098 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq)); 3099 update_rq_clock(srq); 3100 balance_one(srq, sprev); 3101 } 3102 } 3103 #endif 3104 rq_repin_lock(rq, rf); 3105 3106 return ret; 3107 } 3108 3109 static void process_ddsp_deferred_locals(struct rq *rq) 3110 { 3111 struct task_struct *p; 3112 3113 lockdep_assert_rq_held(rq); 3114 3115 /* 3116 * Now that @rq can be unlocked, execute the deferred enqueueing of 3117 * tasks directly dispatched to the local DSQs of other CPUs. See 3118 * direct_dispatch(). Keep popping from the head instead of using 3119 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq 3120 * temporarily. 3121 */ 3122 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals, 3123 struct task_struct, scx.dsq_list.node))) { 3124 struct scx_dispatch_q *dsq; 3125 3126 list_del_init(&p->scx.dsq_list.node); 3127 3128 dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p); 3129 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL)) 3130 dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags); 3131 } 3132 } 3133 3134 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) 3135 { 3136 if (p->scx.flags & SCX_TASK_QUEUED) { 3137 /* 3138 * Core-sched might decide to execute @p before it is 3139 * dispatched. Call ops_dequeue() to notify the BPF scheduler. 3140 */ 3141 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC); 3142 dispatch_dequeue(rq, p); 3143 } 3144 3145 p->se.exec_start = rq_clock_task(rq); 3146 3147 /* see dequeue_task_scx() on why we skip when !QUEUED */ 3148 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED)) 3149 SCX_CALL_OP_TASK(SCX_KF_REST, running, rq, p); 3150 3151 clr_task_runnable(p, true); 3152 3153 /* 3154 * @p is getting newly scheduled or got kicked after someone updated its 3155 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick(). 3156 */ 3157 if ((p->scx.slice == SCX_SLICE_INF) != 3158 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { 3159 if (p->scx.slice == SCX_SLICE_INF) 3160 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; 3161 else 3162 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; 3163 3164 sched_update_tick_dependency(rq); 3165 3166 /* 3167 * For now, let's refresh the load_avgs just when transitioning 3168 * in and out of nohz. In the future, we might want to add a 3169 * mechanism which calls the following periodically on 3170 * tick-stopped CPUs. 3171 */ 3172 update_other_load_avgs(rq); 3173 } 3174 } 3175 3176 static enum scx_cpu_preempt_reason 3177 preempt_reason_from_class(const struct sched_class *class) 3178 { 3179 #ifdef CONFIG_SMP 3180 if (class == &stop_sched_class) 3181 return SCX_CPU_PREEMPT_STOP; 3182 #endif 3183 if (class == &dl_sched_class) 3184 return SCX_CPU_PREEMPT_DL; 3185 if (class == &rt_sched_class) 3186 return SCX_CPU_PREEMPT_RT; 3187 return SCX_CPU_PREEMPT_UNKNOWN; 3188 } 3189 3190 static void switch_class(struct rq *rq, struct task_struct *next) 3191 { 3192 const struct sched_class *next_class = next->sched_class; 3193 3194 #ifdef CONFIG_SMP 3195 /* 3196 * Pairs with the smp_load_acquire() issued by a CPU in 3197 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a 3198 * resched. 
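	 * The bump below is what the waiter polls to detect that the resched
	 * has taken place.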
3199 */ 3200 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); 3201 #endif 3202 if (!static_branch_unlikely(&scx_ops_cpu_preempt)) 3203 return; 3204 3205 /* 3206 * The callback is conceptually meant to convey that the CPU is no 3207 * longer under the control of SCX. Therefore, don't invoke the callback 3208 * if the next class is below SCX (in which case the BPF scheduler has 3209 * actively decided not to schedule any tasks on the CPU). 3210 */ 3211 if (sched_class_above(&ext_sched_class, next_class)) 3212 return; 3213 3214 /* 3215 * At this point we know that SCX was preempted by a higher priority 3216 * sched_class, so invoke the ->cpu_release() callback if we have not 3217 * done so already. We only send the callback once between SCX being 3218 * preempted, and it regaining control of the CPU. 3219 * 3220 * ->cpu_release() complements ->cpu_acquire(), which is emitted the 3221 * next time that balance_scx() is invoked. 3222 */ 3223 if (!rq->scx.cpu_released) { 3224 if (SCX_HAS_OP(cpu_release)) { 3225 struct scx_cpu_release_args args = { 3226 .reason = preempt_reason_from_class(next_class), 3227 .task = next, 3228 }; 3229 3230 SCX_CALL_OP(SCX_KF_CPU_RELEASE, cpu_release, rq, cpu_of(rq), &args); 3231 } 3232 rq->scx.cpu_released = true; 3233 } 3234 } 3235 3236 static void put_prev_task_scx(struct rq *rq, struct task_struct *p, 3237 struct task_struct *next) 3238 { 3239 update_curr_scx(rq); 3240 3241 /* see dequeue_task_scx() on why we skip when !QUEUED */ 3242 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED)) 3243 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, rq, p, true); 3244 3245 if (p->scx.flags & SCX_TASK_QUEUED) { 3246 set_task_runnable(rq, p); 3247 3248 /* 3249 * If @p has slice left and is being put, @p is getting 3250 * preempted by a higher priority scheduler class or core-sched 3251 * forcing a different task. Leave it at the head of the local 3252 * DSQ. 3253 */ 3254 if (p->scx.slice && !scx_rq_bypassing(rq)) { 3255 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); 3256 goto switch_class; 3257 } 3258 3259 /* 3260 * If @p is runnable but we're about to enter a lower 3261 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell 3262 * ops.enqueue() that @p is the only one available for this cpu, 3263 * which should trigger an explicit follow-up scheduling event. 3264 */ 3265 if (sched_class_above(&ext_sched_class, next->sched_class)) { 3266 WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last)); 3267 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1); 3268 } else { 3269 do_enqueue_task(rq, p, 0, -1); 3270 } 3271 } 3272 3273 switch_class: 3274 if (next && next->sched_class != &ext_sched_class) 3275 switch_class(rq, next); 3276 } 3277 3278 static struct task_struct *first_local_task(struct rq *rq) 3279 { 3280 return list_first_entry_or_null(&rq->scx.local_dsq.list, 3281 struct task_struct, scx.dsq_list.node); 3282 } 3283 3284 static struct task_struct *pick_task_scx(struct rq *rq) 3285 { 3286 struct task_struct *prev = rq->curr; 3287 struct task_struct *p; 3288 bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; 3289 bool kick_idle = false; 3290 3291 /* 3292 * WORKAROUND: 3293 * 3294 * %SCX_RQ_BAL_KEEP should be set iff $prev is on SCX as it must just 3295 * have gone through balance_scx(). Unfortunately, there currently is a 3296 * bug where fair could say yes on balance() but no on pick_task(), 3297 * which then ends up calling pick_task_scx() without preceding 3298 * balance_scx(). 
3299 * 3300 * Keep running @prev if possible and avoid stalling from entering idle 3301 * without balancing. 3302 * 3303 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE() 3304 * if pick_task_scx() is called without preceding balance_scx(). 3305 */ 3306 if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) { 3307 if (prev->scx.flags & SCX_TASK_QUEUED) { 3308 keep_prev = true; 3309 } else { 3310 keep_prev = false; 3311 kick_idle = true; 3312 } 3313 } else if (unlikely(keep_prev && 3314 prev->sched_class != &ext_sched_class)) { 3315 /* 3316 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is 3317 * conditional on scx_enabled() and may have been skipped. 3318 */ 3319 WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED); 3320 keep_prev = false; 3321 } 3322 3323 /* 3324 * If balance_scx() is telling us to keep running @prev, replenish slice 3325 * if necessary and keep running @prev. Otherwise, pop the first one 3326 * from the local DSQ. 3327 */ 3328 if (keep_prev) { 3329 p = prev; 3330 if (!p->scx.slice) { 3331 p->scx.slice = SCX_SLICE_DFL; 3332 __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1); 3333 } 3334 } else { 3335 p = first_local_task(rq); 3336 if (!p) { 3337 if (kick_idle) 3338 scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE); 3339 return NULL; 3340 } 3341 3342 if (unlikely(!p->scx.slice)) { 3343 if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) { 3344 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n", 3345 p->comm, p->pid, __func__); 3346 scx_warned_zero_slice = true; 3347 } 3348 p->scx.slice = SCX_SLICE_DFL; 3349 __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1); 3350 } 3351 } 3352 3353 return p; 3354 } 3355 3356 #ifdef CONFIG_SCHED_CORE 3357 /** 3358 * scx_prio_less - Task ordering for core-sched 3359 * @a: task A 3360 * @b: task B 3361 * @in_fi: in forced idle state 3362 * 3363 * Core-sched is implemented as an additional scheduling layer on top of the 3364 * usual sched_class'es and needs to find out the expected task ordering. For 3365 * SCX, core-sched calls this function to interrogate the task ordering. 3366 * 3367 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used 3368 * to implement the default task ordering. The older the timestamp, the higher 3369 * priority the task - the global FIFO ordering matching the default scheduling 3370 * behavior. 3371 * 3372 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to 3373 * implement FIFO ordering within each local DSQ. See pick_task_scx(). 3374 */ 3375 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, 3376 bool in_fi) 3377 { 3378 /* 3379 * The const qualifiers are dropped from task_struct pointers when 3380 * calling ops.core_sched_before(). Accesses are controlled by the 3381 * verifier. 3382 */ 3383 if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a))) 3384 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before, NULL, 3385 (struct task_struct *)a, 3386 (struct task_struct *)b); 3387 else 3388 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at); 3389 } 3390 #endif /* CONFIG_SCHED_CORE */ 3391 3392 #ifdef CONFIG_SMP 3393 3394 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) 3395 { 3396 bool rq_bypass; 3397 3398 /* 3399 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it 3400 * can be a good migration opportunity with low cache and memory 3401 * footprint. Returning a CPU different than @prev_cpu triggers 3402 * immediate rq migration. 
However, for SCX, as the current rq 3403 * association doesn't dictate where the task is going to run, this 3404 * doesn't fit well. If necessary, we can later add a dedicated method 3405 * which can decide to preempt self to force it through the regular 3406 * scheduling path. 3407 */ 3408 if (unlikely(wake_flags & WF_EXEC)) 3409 return prev_cpu; 3410 3411 rq_bypass = scx_rq_bypassing(task_rq(p)); 3412 if (SCX_HAS_OP(select_cpu) && !rq_bypass) { 3413 s32 cpu; 3414 struct task_struct **ddsp_taskp; 3415 3416 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 3417 WARN_ON_ONCE(*ddsp_taskp); 3418 *ddsp_taskp = p; 3419 3420 cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU, 3421 select_cpu, NULL, p, prev_cpu, wake_flags); 3422 p->scx.selected_cpu = cpu; 3423 *ddsp_taskp = NULL; 3424 if (ops_cpu_valid(cpu, "from ops.select_cpu()")) 3425 return cpu; 3426 else 3427 return prev_cpu; 3428 } else { 3429 s32 cpu; 3430 3431 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0); 3432 if (cpu >= 0) { 3433 p->scx.slice = SCX_SLICE_DFL; 3434 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; 3435 __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1); 3436 } else { 3437 cpu = prev_cpu; 3438 } 3439 p->scx.selected_cpu = cpu; 3440 3441 if (rq_bypass) 3442 __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1); 3443 return cpu; 3444 } 3445 } 3446 3447 static void task_woken_scx(struct rq *rq, struct task_struct *p) 3448 { 3449 run_deferred(rq); 3450 } 3451 3452 static void set_cpus_allowed_scx(struct task_struct *p, 3453 struct affinity_context *ac) 3454 { 3455 set_cpus_allowed_common(p, ac); 3456 3457 /* 3458 * The effective cpumask is stored in @p->cpus_ptr which may temporarily 3459 * differ from the configured one in @p->cpus_mask. Always tell the bpf 3460 * scheduler the effective one. 3461 * 3462 * Fine-grained memory write control is enforced by BPF making the const 3463 * designation pointless. Cast it away when calling the operation. 3464 */ 3465 if (SCX_HAS_OP(set_cpumask)) 3466 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, NULL, 3467 p, (struct cpumask *)p->cpus_ptr); 3468 } 3469 3470 static void handle_hotplug(struct rq *rq, bool online) 3471 { 3472 int cpu = cpu_of(rq); 3473 3474 atomic_long_inc(&scx_hotplug_seq); 3475 3476 if (scx_enabled()) 3477 scx_idle_update_selcpu_topology(&scx_ops); 3478 3479 if (online && SCX_HAS_OP(cpu_online)) 3480 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, NULL, cpu); 3481 else if (!online && SCX_HAS_OP(cpu_offline)) 3482 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, NULL, cpu); 3483 else 3484 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 3485 "cpu %d going %s, exiting scheduler", cpu, 3486 online ? 
"online" : "offline"); 3487 } 3488 3489 void scx_rq_activate(struct rq *rq) 3490 { 3491 handle_hotplug(rq, true); 3492 } 3493 3494 void scx_rq_deactivate(struct rq *rq) 3495 { 3496 handle_hotplug(rq, false); 3497 } 3498 3499 static void rq_online_scx(struct rq *rq) 3500 { 3501 rq->scx.flags |= SCX_RQ_ONLINE; 3502 } 3503 3504 static void rq_offline_scx(struct rq *rq) 3505 { 3506 rq->scx.flags &= ~SCX_RQ_ONLINE; 3507 } 3508 3509 #endif /* CONFIG_SMP */ 3510 3511 static bool check_rq_for_timeouts(struct rq *rq) 3512 { 3513 struct task_struct *p; 3514 struct rq_flags rf; 3515 bool timed_out = false; 3516 3517 rq_lock_irqsave(rq, &rf); 3518 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { 3519 unsigned long last_runnable = p->scx.runnable_at; 3520 3521 if (unlikely(time_after(jiffies, 3522 last_runnable + scx_watchdog_timeout))) { 3523 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); 3524 3525 scx_ops_error_kind(SCX_EXIT_ERROR_STALL, 3526 "%s[%d] failed to run for %u.%03us", 3527 p->comm, p->pid, 3528 dur_ms / 1000, dur_ms % 1000); 3529 timed_out = true; 3530 break; 3531 } 3532 } 3533 rq_unlock_irqrestore(rq, &rf); 3534 3535 return timed_out; 3536 } 3537 3538 static void scx_watchdog_workfn(struct work_struct *work) 3539 { 3540 int cpu; 3541 3542 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 3543 3544 for_each_online_cpu(cpu) { 3545 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) 3546 break; 3547 3548 cond_resched(); 3549 } 3550 queue_delayed_work(system_unbound_wq, to_delayed_work(work), 3551 scx_watchdog_timeout / 2); 3552 } 3553 3554 void scx_tick(struct rq *rq) 3555 { 3556 unsigned long last_check; 3557 3558 if (!scx_enabled()) 3559 return; 3560 3561 last_check = READ_ONCE(scx_watchdog_timestamp); 3562 if (unlikely(time_after(jiffies, 3563 last_check + READ_ONCE(scx_watchdog_timeout)))) { 3564 u32 dur_ms = jiffies_to_msecs(jiffies - last_check); 3565 3566 scx_ops_error_kind(SCX_EXIT_ERROR_STALL, 3567 "watchdog failed to check in for %u.%03us", 3568 dur_ms / 1000, dur_ms % 1000); 3569 } 3570 3571 update_other_load_avgs(rq); 3572 } 3573 3574 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) 3575 { 3576 update_curr_scx(rq); 3577 3578 /* 3579 * While disabling, always resched and refresh core-sched timestamp as 3580 * we can't trust the slice management or ops.core_sched_before(). 3581 */ 3582 if (scx_rq_bypassing(rq)) { 3583 curr->scx.slice = 0; 3584 touch_core_sched(rq, curr); 3585 } else if (SCX_HAS_OP(tick)) { 3586 SCX_CALL_OP_TASK(SCX_KF_REST, tick, rq, curr); 3587 } 3588 3589 if (!curr->scx.slice) 3590 resched_curr(rq); 3591 } 3592 3593 #ifdef CONFIG_EXT_GROUP_SCHED 3594 static struct cgroup *tg_cgrp(struct task_group *tg) 3595 { 3596 /* 3597 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup, 3598 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the 3599 * root cgroup. 
3600 */
3601 if (tg && tg->css.cgroup)
3602 return tg->css.cgroup;
3603 else
3604 return &cgrp_dfl_root.cgrp;
3605 }
3606 
3607 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
3608 
3609 #else /* CONFIG_EXT_GROUP_SCHED */
3610 
3611 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3612 
3613 #endif /* CONFIG_EXT_GROUP_SCHED */
3614 
3615 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3616 {
3617 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3618 }
3619 
3620 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3621 {
3622 enum scx_task_state prev_state = scx_get_task_state(p);
3623 bool warn = false;
3624 
3625 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3626 
3627 switch (state) {
3628 case SCX_TASK_NONE:
3629 break;
3630 case SCX_TASK_INIT:
3631 warn = prev_state != SCX_TASK_NONE;
3632 break;
3633 case SCX_TASK_READY:
3634 warn = prev_state == SCX_TASK_NONE;
3635 break;
3636 case SCX_TASK_ENABLED:
3637 warn = prev_state != SCX_TASK_READY;
3638 break;
3639 default:
3640 /* invalid @state; warn here as the early return skips the common WARN_ONCE below */
3641 WARN_ONCE(true, "sched_ext: Invalid task state transition %d -> %d for %s[%d]", prev_state, state, p->comm, p->pid);
3642 return;
3643 }
3644 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3645 prev_state, state, p->comm, p->pid);
3646 
3647 p->scx.flags &= ~SCX_TASK_STATE_MASK;
3648 p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3649 }
3650 
3651 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3652 {
3653 int ret;
3654 
3655 p->scx.disallow = false;
3656 
3657 if (SCX_HAS_OP(init_task)) {
3658 struct scx_init_task_args args = {
3659 SCX_INIT_TASK_ARGS_CGROUP(tg)
3660 .fork = fork,
3661 };
3662 
3663 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, NULL, p, &args);
3664 if (unlikely(ret)) {
3665 ret = ops_sanitize_err("init_task", ret);
3666 return ret;
3667 }
3668 }
3669 
3670 scx_set_task_state(p, SCX_TASK_INIT);
3671 
3672 if (p->scx.disallow) {
3673 if (!fork) {
3674 struct rq *rq;
3675 struct rq_flags rf;
3676 
3677 rq = task_rq_lock(p, &rf);
3678 
3679 /*
3680 * We're in the load path and @p->policy will be applied
3681 * right after. Reverting @p->policy here and rejecting
3682 * %SCHED_EXT transitions from scx_check_setscheduler()
3683 * guarantees that if ops.init_task() sets @p->scx.disallow,
3684 * @p can never be in SCX.
3685 */
3686 if (p->policy == SCHED_EXT) {
3687 p->policy = SCHED_NORMAL;
3688 atomic_long_inc(&scx_nr_rejected);
3689 }
3690 
3691 task_rq_unlock(rq, p, &rf);
3692 } else if (p->policy == SCHED_EXT) {
3693 scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
3694 p->comm, p->pid);
3695 }
3696 }
3697 
3698 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3699 return 0;
3700 }
3701 
3702 static void scx_ops_enable_task(struct task_struct *p)
3703 {
3704 struct rq *rq = task_rq(p);
3705 u32 weight;
3706 
3707 lockdep_assert_rq_held(rq);
3708 
3709 /*
3710 * Set the weight before calling ops.enable() so that the scheduler
3711 * doesn't see a stale value if it inspects the task struct.
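 *
 * For example (illustrative values from the core scheduler's tables): a
 * nice-0 task has weight 1024 in sched_prio_to_weight[], which
 * sched_weight_to_cgroup() maps to the default cgroup weight of 100,
 * while SCHED_IDLE tasks get WEIGHT_IDLEPRIO (3) instead.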
3712 */ 3713 if (task_has_idle_policy(p)) 3714 weight = WEIGHT_IDLEPRIO; 3715 else 3716 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; 3717 3718 p->scx.weight = sched_weight_to_cgroup(weight); 3719 3720 if (SCX_HAS_OP(enable)) 3721 SCX_CALL_OP_TASK(SCX_KF_REST, enable, rq, p); 3722 scx_set_task_state(p, SCX_TASK_ENABLED); 3723 3724 if (SCX_HAS_OP(set_weight)) 3725 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, rq, p, p->scx.weight); 3726 } 3727 3728 static void scx_ops_disable_task(struct task_struct *p) 3729 { 3730 struct rq *rq = task_rq(p); 3731 3732 lockdep_assert_rq_held(rq); 3733 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); 3734 3735 if (SCX_HAS_OP(disable)) 3736 SCX_CALL_OP_TASK(SCX_KF_REST, disable, rq, p); 3737 scx_set_task_state(p, SCX_TASK_READY); 3738 } 3739 3740 static void scx_ops_exit_task(struct task_struct *p) 3741 { 3742 struct scx_exit_task_args args = { 3743 .cancelled = false, 3744 }; 3745 3746 lockdep_assert_rq_held(task_rq(p)); 3747 3748 switch (scx_get_task_state(p)) { 3749 case SCX_TASK_NONE: 3750 return; 3751 case SCX_TASK_INIT: 3752 args.cancelled = true; 3753 break; 3754 case SCX_TASK_READY: 3755 break; 3756 case SCX_TASK_ENABLED: 3757 scx_ops_disable_task(p); 3758 break; 3759 default: 3760 WARN_ON_ONCE(true); 3761 return; 3762 } 3763 3764 if (SCX_HAS_OP(exit_task)) 3765 SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, task_rq(p), p, &args); 3766 scx_set_task_state(p, SCX_TASK_NONE); 3767 } 3768 3769 void init_scx_entity(struct sched_ext_entity *scx) 3770 { 3771 memset(scx, 0, sizeof(*scx)); 3772 INIT_LIST_HEAD(&scx->dsq_list.node); 3773 RB_CLEAR_NODE(&scx->dsq_priq); 3774 scx->sticky_cpu = -1; 3775 scx->holding_cpu = -1; 3776 INIT_LIST_HEAD(&scx->runnable_node); 3777 scx->runnable_at = jiffies; 3778 scx->ddsp_dsq_id = SCX_DSQ_INVALID; 3779 scx->slice = SCX_SLICE_DFL; 3780 } 3781 3782 void scx_pre_fork(struct task_struct *p) 3783 { 3784 /* 3785 * BPF scheduler enable/disable paths want to be able to iterate and 3786 * update all tasks which can become complex when racing forks. As 3787 * enable/disable are very cold paths, let's use a percpu_rwsem to 3788 * exclude forks. 3789 */ 3790 percpu_down_read(&scx_fork_rwsem); 3791 } 3792 3793 int scx_fork(struct task_struct *p) 3794 { 3795 percpu_rwsem_assert_held(&scx_fork_rwsem); 3796 3797 if (scx_ops_init_task_enabled) 3798 return scx_ops_init_task(p, task_group(p), true); 3799 else 3800 return 0; 3801 } 3802 3803 void scx_post_fork(struct task_struct *p) 3804 { 3805 if (scx_ops_init_task_enabled) { 3806 scx_set_task_state(p, SCX_TASK_READY); 3807 3808 /* 3809 * Enable the task immediately if it's running on sched_ext. 3810 * Otherwise, it'll be enabled in switching_to_scx() if and 3811 * when it's ever configured to run with a SCHED_EXT policy. 
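 * (e.g. a child forked by a SCHED_EXT parent normally inherits the policy
 * and thus p->sched_class, and is enabled right here)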
3812 */
3813 if (p->sched_class == &ext_sched_class) {
3814 struct rq_flags rf;
3815 struct rq *rq;
3816 
3817 rq = task_rq_lock(p, &rf);
3818 scx_ops_enable_task(p);
3819 task_rq_unlock(rq, p, &rf);
3820 }
3821 }
3822 
3823 spin_lock_irq(&scx_tasks_lock);
3824 list_add_tail(&p->scx.tasks_node, &scx_tasks);
3825 spin_unlock_irq(&scx_tasks_lock);
3826 
3827 percpu_up_read(&scx_fork_rwsem);
3828 }
3829 
3830 void scx_cancel_fork(struct task_struct *p)
3831 {
3832 if (scx_enabled()) {
3833 struct rq *rq;
3834 struct rq_flags rf;
3835 
3836 rq = task_rq_lock(p, &rf);
3837 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3838 scx_ops_exit_task(p);
3839 task_rq_unlock(rq, p, &rf);
3840 }
3841 
3842 percpu_up_read(&scx_fork_rwsem);
3843 }
3844 
3845 void sched_ext_free(struct task_struct *p)
3846 {
3847 unsigned long flags;
3848 
3849 spin_lock_irqsave(&scx_tasks_lock, flags);
3850 list_del_init(&p->scx.tasks_node);
3851 spin_unlock_irqrestore(&scx_tasks_lock, flags);
3852 
3853 /*
3854 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
3855 * ENABLED transitions can't race us. Disable ops for @p.
3856 */
3857 if (scx_get_task_state(p) != SCX_TASK_NONE) {
3858 struct rq_flags rf;
3859 struct rq *rq;
3860 
3861 rq = task_rq_lock(p, &rf);
3862 scx_ops_exit_task(p);
3863 task_rq_unlock(rq, p, &rf);
3864 }
3865 }
3866 
3867 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3868 const struct load_weight *lw)
3869 {
3870 lockdep_assert_rq_held(task_rq(p));
3871 
3872 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3873 if (SCX_HAS_OP(set_weight))
3874 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, rq, p, p->scx.weight);
3875 }
3876 
3877 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
3878 {
3879 }
3880 
3881 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3882 {
3883 scx_ops_enable_task(p);
3884 
3885 /*
3886 * set_cpus_allowed_scx() is not called while @p is associated with a
3887 * different scheduler class. Keep the BPF scheduler up-to-date.
3888 */
3889 if (SCX_HAS_OP(set_cpumask))
3890 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, rq,
3891 p, (struct cpumask *)p->cpus_ptr);
3892 }
3893 
3894 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3895 {
3896 scx_ops_disable_task(p);
3897 }
3898 
3899 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
3900 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3901 
3902 int scx_check_setscheduler(struct task_struct *p, int policy)
3903 {
3904 lockdep_assert_rq_held(task_rq(p));
3905 
3906 /* if disallow, reject transitioning into SCX */
3907 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3908 p->policy != policy && policy == SCHED_EXT)
3909 return -EACCES;
3910 
3911 return 0;
3912 }
3913 
3914 #ifdef CONFIG_NO_HZ_FULL
3915 bool scx_can_stop_tick(struct rq *rq)
3916 {
3917 struct task_struct *p = rq->curr;
3918 
3919 if (scx_rq_bypassing(rq))
3920 return false;
3921 
3922 if (p->sched_class != &ext_sched_class)
3923 return true;
3924 
3925 /*
3926 * @rq can dispatch from different DSQs, so we can't tell whether it
3927 * needs the tick or not by looking at nr_running. Allow stopping ticks
3928 * iff the BPF scheduler indicated so. See set_next_task_scx().
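 * For example, rq->scx.nr_running may be 1 while a shared DSQ that this
 * CPU consumes from still holds runnable tasks.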
3929 */ 3930 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; 3931 } 3932 #endif 3933 3934 #ifdef CONFIG_EXT_GROUP_SCHED 3935 3936 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem); 3937 static bool scx_cgroup_enabled; 3938 3939 int scx_tg_online(struct task_group *tg) 3940 { 3941 int ret = 0; 3942 3943 WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED)); 3944 3945 percpu_down_read(&scx_cgroup_rwsem); 3946 3947 if (scx_cgroup_enabled) { 3948 if (SCX_HAS_OP(cgroup_init)) { 3949 struct scx_cgroup_init_args args = 3950 { .weight = tg->scx_weight }; 3951 3952 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init, NULL, 3953 tg->css.cgroup, &args); 3954 if (ret) 3955 ret = ops_sanitize_err("cgroup_init", ret); 3956 } 3957 if (ret == 0) 3958 tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED; 3959 } else { 3960 tg->scx_flags |= SCX_TG_ONLINE; 3961 } 3962 3963 percpu_up_read(&scx_cgroup_rwsem); 3964 return ret; 3965 } 3966 3967 void scx_tg_offline(struct task_group *tg) 3968 { 3969 WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE)); 3970 3971 percpu_down_read(&scx_cgroup_rwsem); 3972 3973 if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED)) 3974 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, NULL, tg->css.cgroup); 3975 tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); 3976 3977 percpu_up_read(&scx_cgroup_rwsem); 3978 } 3979 3980 int scx_cgroup_can_attach(struct cgroup_taskset *tset) 3981 { 3982 struct cgroup_subsys_state *css; 3983 struct task_struct *p; 3984 int ret; 3985 3986 /* released in scx_finish/cancel_attach() */ 3987 percpu_down_read(&scx_cgroup_rwsem); 3988 3989 if (!scx_cgroup_enabled) 3990 return 0; 3991 3992 cgroup_taskset_for_each(p, css, tset) { 3993 struct cgroup *from = tg_cgrp(task_group(p)); 3994 struct cgroup *to = tg_cgrp(css_tg(css)); 3995 3996 WARN_ON_ONCE(p->scx.cgrp_moving_from); 3997 3998 /* 3999 * sched_move_task() omits identity migrations. Let's match the 4000 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move() 4001 * always match one-to-one. 4002 */ 4003 if (from == to) 4004 continue; 4005 4006 if (SCX_HAS_OP(cgroup_prep_move)) { 4007 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move, NULL, 4008 p, from, css->cgroup); 4009 if (ret) 4010 goto err; 4011 } 4012 4013 p->scx.cgrp_moving_from = from; 4014 } 4015 4016 return 0; 4017 4018 err: 4019 cgroup_taskset_for_each(p, css, tset) { 4020 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from) 4021 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 4022 p, p->scx.cgrp_moving_from, css->cgroup); 4023 p->scx.cgrp_moving_from = NULL; 4024 } 4025 4026 percpu_up_read(&scx_cgroup_rwsem); 4027 return ops_sanitize_err("cgroup_prep_move", ret); 4028 } 4029 4030 void scx_cgroup_move_task(struct task_struct *p) 4031 { 4032 if (!scx_cgroup_enabled) 4033 return; 4034 4035 /* 4036 * @p must have ops.cgroup_prep_move() called on it and thus 4037 * cgrp_moving_from set. 
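 * The overall protocol is: ops.cgroup_prep_move() is called for every
 * moving task in scx_cgroup_can_attach(), and each prepped task then sees
 * either ops.cgroup_move() here on commit or ops.cgroup_cancel_move() on
 * abort.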
4038 */ 4039 if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) 4040 SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, NULL, 4041 p, p->scx.cgrp_moving_from, tg_cgrp(task_group(p))); 4042 p->scx.cgrp_moving_from = NULL; 4043 } 4044 4045 void scx_cgroup_finish_attach(void) 4046 { 4047 percpu_up_read(&scx_cgroup_rwsem); 4048 } 4049 4050 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) 4051 { 4052 struct cgroup_subsys_state *css; 4053 struct task_struct *p; 4054 4055 if (!scx_cgroup_enabled) 4056 goto out_unlock; 4057 4058 cgroup_taskset_for_each(p, css, tset) { 4059 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from) 4060 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 4061 p, p->scx.cgrp_moving_from, css->cgroup); 4062 p->scx.cgrp_moving_from = NULL; 4063 } 4064 out_unlock: 4065 percpu_up_read(&scx_cgroup_rwsem); 4066 } 4067 4068 void scx_group_set_weight(struct task_group *tg, unsigned long weight) 4069 { 4070 percpu_down_read(&scx_cgroup_rwsem); 4071 4072 if (scx_cgroup_enabled && tg->scx_weight != weight) { 4073 if (SCX_HAS_OP(cgroup_set_weight)) 4074 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight, NULL, 4075 tg_cgrp(tg), weight); 4076 tg->scx_weight = weight; 4077 } 4078 4079 percpu_up_read(&scx_cgroup_rwsem); 4080 } 4081 4082 void scx_group_set_idle(struct task_group *tg, bool idle) 4083 { 4084 /* TODO: Implement ops->cgroup_set_idle() */ 4085 } 4086 4087 static void scx_cgroup_lock(void) 4088 { 4089 percpu_down_write(&scx_cgroup_rwsem); 4090 } 4091 4092 static void scx_cgroup_unlock(void) 4093 { 4094 percpu_up_write(&scx_cgroup_rwsem); 4095 } 4096 4097 #else /* CONFIG_EXT_GROUP_SCHED */ 4098 4099 static inline void scx_cgroup_lock(void) {} 4100 static inline void scx_cgroup_unlock(void) {} 4101 4102 #endif /* CONFIG_EXT_GROUP_SCHED */ 4103 4104 /* 4105 * Omitted operations: 4106 * 4107 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task 4108 * isn't tied to the CPU at that point. Preemption is implemented by resetting 4109 * the victim task's slice to 0 and triggering reschedule on the target CPU. 4110 * 4111 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient. 4112 * 4113 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of 4114 * their current sched_class. Call them directly from sched core instead. 
4115 */ 4116 DEFINE_SCHED_CLASS(ext) = { 4117 .enqueue_task = enqueue_task_scx, 4118 .dequeue_task = dequeue_task_scx, 4119 .yield_task = yield_task_scx, 4120 .yield_to_task = yield_to_task_scx, 4121 4122 .wakeup_preempt = wakeup_preempt_scx, 4123 4124 .balance = balance_scx, 4125 .pick_task = pick_task_scx, 4126 4127 .put_prev_task = put_prev_task_scx, 4128 .set_next_task = set_next_task_scx, 4129 4130 #ifdef CONFIG_SMP 4131 .select_task_rq = select_task_rq_scx, 4132 .task_woken = task_woken_scx, 4133 .set_cpus_allowed = set_cpus_allowed_scx, 4134 4135 .rq_online = rq_online_scx, 4136 .rq_offline = rq_offline_scx, 4137 #endif 4138 4139 .task_tick = task_tick_scx, 4140 4141 .switching_to = switching_to_scx, 4142 .switched_from = switched_from_scx, 4143 .switched_to = switched_to_scx, 4144 .reweight_task = reweight_task_scx, 4145 .prio_changed = prio_changed_scx, 4146 4147 .update_curr = update_curr_scx, 4148 4149 #ifdef CONFIG_UCLAMP_TASK 4150 .uclamp_enabled = 1, 4151 #endif 4152 }; 4153 4154 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id) 4155 { 4156 memset(dsq, 0, sizeof(*dsq)); 4157 4158 raw_spin_lock_init(&dsq->lock); 4159 INIT_LIST_HEAD(&dsq->list); 4160 dsq->id = dsq_id; 4161 } 4162 4163 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node) 4164 { 4165 struct scx_dispatch_q *dsq; 4166 int ret; 4167 4168 if (dsq_id & SCX_DSQ_FLAG_BUILTIN) 4169 return ERR_PTR(-EINVAL); 4170 4171 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node); 4172 if (!dsq) 4173 return ERR_PTR(-ENOMEM); 4174 4175 init_dsq(dsq, dsq_id); 4176 4177 ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node, 4178 dsq_hash_params); 4179 if (ret) { 4180 kfree(dsq); 4181 return ERR_PTR(ret); 4182 } 4183 return dsq; 4184 } 4185 4186 static void free_dsq_irq_workfn(struct irq_work *irq_work) 4187 { 4188 struct llist_node *to_free = llist_del_all(&dsqs_to_free); 4189 struct scx_dispatch_q *dsq, *tmp_dsq; 4190 4191 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node) 4192 kfree_rcu(dsq, rcu); 4193 } 4194 4195 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn); 4196 4197 static void destroy_dsq(u64 dsq_id) 4198 { 4199 struct scx_dispatch_q *dsq; 4200 unsigned long flags; 4201 4202 rcu_read_lock(); 4203 4204 dsq = find_user_dsq(dsq_id); 4205 if (!dsq) 4206 goto out_unlock_rcu; 4207 4208 raw_spin_lock_irqsave(&dsq->lock, flags); 4209 4210 if (dsq->nr) { 4211 scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)", 4212 dsq->id, dsq->nr); 4213 goto out_unlock_dsq; 4214 } 4215 4216 if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params)) 4217 goto out_unlock_dsq; 4218 4219 /* 4220 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from 4221 * queueing more tasks. As this function can be called from anywhere, 4222 * freeing is bounced through an irq work to avoid nesting RCU 4223 * operations inside scheduler locks. 4224 */ 4225 dsq->id = SCX_DSQ_INVALID; 4226 llist_add(&dsq->free_node, &dsqs_to_free); 4227 irq_work_queue(&free_dsq_irq_work); 4228 4229 out_unlock_dsq: 4230 raw_spin_unlock_irqrestore(&dsq->lock, flags); 4231 out_unlock_rcu: 4232 rcu_read_unlock(); 4233 } 4234 4235 #ifdef CONFIG_EXT_GROUP_SCHED 4236 static void scx_cgroup_exit(void) 4237 { 4238 struct cgroup_subsys_state *css; 4239 4240 percpu_rwsem_assert_held(&scx_cgroup_rwsem); 4241 4242 scx_cgroup_enabled = false; 4243 4244 /* 4245 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk 4246 * cgroups and exit all the inited ones, all online cgroups are exited. 
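 * css_tryget() below pins each css so that rcu_read_lock() can be dropped
 * around the ops.cgroup_exit() call, which is allowed to sleep.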
4247 */ 4248 rcu_read_lock(); 4249 css_for_each_descendant_post(css, &root_task_group.css) { 4250 struct task_group *tg = css_tg(css); 4251 4252 if (!(tg->scx_flags & SCX_TG_INITED)) 4253 continue; 4254 tg->scx_flags &= ~SCX_TG_INITED; 4255 4256 if (!scx_ops.cgroup_exit) 4257 continue; 4258 4259 if (WARN_ON_ONCE(!css_tryget(css))) 4260 continue; 4261 rcu_read_unlock(); 4262 4263 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, NULL, css->cgroup); 4264 4265 rcu_read_lock(); 4266 css_put(css); 4267 } 4268 rcu_read_unlock(); 4269 } 4270 4271 static int scx_cgroup_init(void) 4272 { 4273 struct cgroup_subsys_state *css; 4274 int ret; 4275 4276 percpu_rwsem_assert_held(&scx_cgroup_rwsem); 4277 4278 /* 4279 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk 4280 * cgroups and init, all online cgroups are initialized. 4281 */ 4282 rcu_read_lock(); 4283 css_for_each_descendant_pre(css, &root_task_group.css) { 4284 struct task_group *tg = css_tg(css); 4285 struct scx_cgroup_init_args args = { .weight = tg->scx_weight }; 4286 4287 if ((tg->scx_flags & 4288 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE) 4289 continue; 4290 4291 if (!scx_ops.cgroup_init) { 4292 tg->scx_flags |= SCX_TG_INITED; 4293 continue; 4294 } 4295 4296 if (WARN_ON_ONCE(!css_tryget(css))) 4297 continue; 4298 rcu_read_unlock(); 4299 4300 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init, NULL, 4301 css->cgroup, &args); 4302 if (ret) { 4303 css_put(css); 4304 scx_ops_error("ops.cgroup_init() failed (%d)", ret); 4305 return ret; 4306 } 4307 tg->scx_flags |= SCX_TG_INITED; 4308 4309 rcu_read_lock(); 4310 css_put(css); 4311 } 4312 rcu_read_unlock(); 4313 4314 WARN_ON_ONCE(scx_cgroup_enabled); 4315 scx_cgroup_enabled = true; 4316 4317 return 0; 4318 } 4319 4320 #else 4321 static void scx_cgroup_exit(void) {} 4322 static int scx_cgroup_init(void) { return 0; } 4323 #endif 4324 4325 4326 /******************************************************************************** 4327 * Sysfs interface and ops enable/disable. 
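 *
 * The resulting layout looks roughly like the following (paths
 * illustrative, assuming the "sched_ext" kset is created under
 * /sys/kernel elsewhere in this file):
 *
 *	/sys/kernel/sched_ext/{state,switch_all,nr_rejected,hotplug_seq,enable_seq}
 *	/sys/kernel/sched_ext/root/{ops,events}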
4328 */ 4329 4330 #define SCX_ATTR(_name) \ 4331 static struct kobj_attribute scx_attr_##_name = { \ 4332 .attr = { .name = __stringify(_name), .mode = 0444 }, \ 4333 .show = scx_attr_##_name##_show, \ 4334 } 4335 4336 static ssize_t scx_attr_state_show(struct kobject *kobj, 4337 struct kobj_attribute *ka, char *buf) 4338 { 4339 return sysfs_emit(buf, "%s\n", 4340 scx_ops_enable_state_str[scx_ops_enable_state()]); 4341 } 4342 SCX_ATTR(state); 4343 4344 static ssize_t scx_attr_switch_all_show(struct kobject *kobj, 4345 struct kobj_attribute *ka, char *buf) 4346 { 4347 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all)); 4348 } 4349 SCX_ATTR(switch_all); 4350 4351 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj, 4352 struct kobj_attribute *ka, char *buf) 4353 { 4354 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected)); 4355 } 4356 SCX_ATTR(nr_rejected); 4357 4358 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj, 4359 struct kobj_attribute *ka, char *buf) 4360 { 4361 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq)); 4362 } 4363 SCX_ATTR(hotplug_seq); 4364 4365 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj, 4366 struct kobj_attribute *ka, char *buf) 4367 { 4368 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq)); 4369 } 4370 SCX_ATTR(enable_seq); 4371 4372 static struct attribute *scx_global_attrs[] = { 4373 &scx_attr_state.attr, 4374 &scx_attr_switch_all.attr, 4375 &scx_attr_nr_rejected.attr, 4376 &scx_attr_hotplug_seq.attr, 4377 &scx_attr_enable_seq.attr, 4378 NULL, 4379 }; 4380 4381 static const struct attribute_group scx_global_attr_group = { 4382 .attrs = scx_global_attrs, 4383 }; 4384 4385 static void scx_kobj_release(struct kobject *kobj) 4386 { 4387 kfree(kobj); 4388 } 4389 4390 static ssize_t scx_attr_ops_show(struct kobject *kobj, 4391 struct kobj_attribute *ka, char *buf) 4392 { 4393 return sysfs_emit(buf, "%s\n", scx_ops.name); 4394 } 4395 SCX_ATTR(ops); 4396 4397 #define scx_attr_event_show(buf, at, events, kind) ({ \ 4398 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \ 4399 }) 4400 4401 static ssize_t scx_attr_events_show(struct kobject *kobj, 4402 struct kobj_attribute *ka, char *buf) 4403 { 4404 struct scx_event_stats events; 4405 int at = 0; 4406 4407 scx_bpf_events(&events, sizeof(events)); 4408 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK); 4409 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 4410 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST); 4411 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING); 4412 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 4413 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SLICE_DFL); 4414 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION); 4415 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH); 4416 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE); 4417 return at; 4418 } 4419 SCX_ATTR(events); 4420 4421 static struct attribute *scx_sched_attrs[] = { 4422 &scx_attr_ops.attr, 4423 &scx_attr_events.attr, 4424 NULL, 4425 }; 4426 ATTRIBUTE_GROUPS(scx_sched); 4427 4428 static const struct kobj_type scx_ktype = { 4429 .release = scx_kobj_release, 4430 .sysfs_ops = &kobj_sysfs_ops, 4431 .default_groups = scx_sched_groups, 4432 }; 4433 4434 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 4435 { 4436 return 
add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4437 }
4438 
4439 static const struct kset_uevent_ops scx_uevent_ops = {
4440 .uevent = scx_uevent,
4441 };
4442 
4443 /*
4444 * Used by sched_fork() and __setscheduler_prio() to pick the matching
4445 * sched_class. dl/rt are already handled.
4446 */
4447 bool task_should_scx(int policy)
4448 {
4449 if (!scx_enabled() ||
4450 unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4451 return false;
4452 if (READ_ONCE(scx_switching_all))
4453 return true;
4454 return policy == SCHED_EXT;
4455 }
4456 
4457 /**
4458 * scx_softlockup - sched_ext softlockup handler
4459 * @dur_s: number of seconds of CPU stuck due to soft lockup
4460 *
4461 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
4462 * live-lock the system by making many CPUs target the same DSQ to the point
4463 * where soft-lockup detection triggers. This function is called from the
4464 * soft-lockup watchdog when the triggering point is close and tries to unjam
4465 * the system by enabling the breather and aborting the BPF scheduler.
4466 */
4467 void scx_softlockup(u32 dur_s)
4468 {
4469 switch (scx_ops_enable_state()) {
4470 case SCX_OPS_ENABLING:
4471 case SCX_OPS_ENABLED:
4472 break;
4473 default:
4474 return;
4475 }
4476 
4477 /* allow only one instance, cleared at the end of scx_ops_bypass() */
4478 if (test_and_set_bit(0, &scx_in_softlockup))
4479 return;
4480 
4481 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
4482 smp_processor_id(), dur_s, scx_ops.name);
4483 
4484 /*
4485 * Some CPUs may be trapped in the dispatch paths. Enable breather
4486 * immediately; otherwise, we might not even be able to get to
4487 * scx_ops_bypass().
4488 */
4489 atomic_inc(&scx_ops_breather_depth);
4490 
4491 scx_ops_error("soft lockup - CPU#%d stuck for %us",
4492 smp_processor_id(), dur_s);
4493 }
4494 
4495 static void scx_clear_softlockup(void)
4496 {
4497 if (test_and_clear_bit(0, &scx_in_softlockup))
4498 atomic_dec(&scx_ops_breather_depth);
4499 }
4500 
4501 /**
4502 * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4503 * @bypass: true for bypass, false for unbypass
4504 *
4505 * Bypassing guarantees that all runnable tasks make forward progress without
4506 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4507 * be held by tasks that the BPF scheduler is forgetting to run, which
4508 * unfortunately also excludes toggling the static branches.
4509 *
4510 * Let's work around by overriding a couple ops and modifying behaviors based on
4511 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4512 * to force global FIFO scheduling.
4513 *
4514 * - ops.select_cpu() is ignored and the default select_cpu() is used.
4515 *
4516 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4517 * %SCX_OPS_ENQ_LAST is also ignored.
4518 *
4519 * - ops.dispatch() is ignored.
4520 *
4521 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4522 * can't be trusted. Whenever a tick triggers, the running task is rotated to
4523 * the tail of the queue with core_sched_at touched.
4524 *
4525 * - pick_task_scx() suppresses the zero slice warning.
4526 *
4527 * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4528 * operations.
4529 *
4530 * - scx_prio_less() reverts to the default core_sched_at order.
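 *
 * Bypassing is depth-counted so that independent users may overlap; only
 * the 0->1 and 1->0 transitions actually flip the per-rq
 * %SCX_RQ_BYPASSING state.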
4531 */ 4532 static void scx_ops_bypass(bool bypass) 4533 { 4534 static DEFINE_RAW_SPINLOCK(bypass_lock); 4535 static unsigned long bypass_timestamp; 4536 4537 int cpu; 4538 unsigned long flags; 4539 4540 raw_spin_lock_irqsave(&bypass_lock, flags); 4541 if (bypass) { 4542 scx_ops_bypass_depth++; 4543 WARN_ON_ONCE(scx_ops_bypass_depth <= 0); 4544 if (scx_ops_bypass_depth != 1) 4545 goto unlock; 4546 bypass_timestamp = ktime_get_ns(); 4547 scx_add_event(SCX_EV_BYPASS_ACTIVATE, 1); 4548 } else { 4549 scx_ops_bypass_depth--; 4550 WARN_ON_ONCE(scx_ops_bypass_depth < 0); 4551 if (scx_ops_bypass_depth != 0) 4552 goto unlock; 4553 scx_add_event(SCX_EV_BYPASS_DURATION, 4554 ktime_get_ns() - bypass_timestamp); 4555 } 4556 4557 atomic_inc(&scx_ops_breather_depth); 4558 4559 /* 4560 * No task property is changing. We just need to make sure all currently 4561 * queued tasks are re-queued according to the new scx_rq_bypassing() 4562 * state. As an optimization, walk each rq's runnable_list instead of 4563 * the scx_tasks list. 4564 * 4565 * This function can't trust the scheduler and thus can't use 4566 * cpus_read_lock(). Walk all possible CPUs instead of online. 4567 */ 4568 for_each_possible_cpu(cpu) { 4569 struct rq *rq = cpu_rq(cpu); 4570 struct task_struct *p, *n; 4571 4572 raw_spin_rq_lock(rq); 4573 4574 if (bypass) { 4575 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING); 4576 rq->scx.flags |= SCX_RQ_BYPASSING; 4577 } else { 4578 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING)); 4579 rq->scx.flags &= ~SCX_RQ_BYPASSING; 4580 } 4581 4582 /* 4583 * We need to guarantee that no tasks are on the BPF scheduler 4584 * while bypassing. Either we see enabled or the enable path 4585 * sees scx_rq_bypassing() before moving tasks to SCX. 4586 */ 4587 if (!scx_enabled()) { 4588 raw_spin_rq_unlock(rq); 4589 continue; 4590 } 4591 4592 /* 4593 * The use of list_for_each_entry_safe_reverse() is required 4594 * because each task is going to be removed from and added back 4595 * to the runnable_list during iteration. Because they're added 4596 * to the tail of the list, safe reverse iteration can still 4597 * visit all nodes. 
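 * E.g. with a runnable_list of [A, B, C], iteration visits C, then B,
 * then A; each dequeue/enqueue cycle re-appends the task at the tail,
 * behind the iteration cursor, so no task is visited twice.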
4598 */ 4599 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, 4600 scx.runnable_node) { 4601 struct sched_enq_and_set_ctx ctx; 4602 4603 /* cycling deq/enq is enough, see the function comment */ 4604 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 4605 sched_enq_and_set_task(&ctx); 4606 } 4607 4608 /* resched to restore ticks and idle state */ 4609 if (cpu_online(cpu) || cpu == smp_processor_id()) 4610 resched_curr(rq); 4611 4612 raw_spin_rq_unlock(rq); 4613 } 4614 4615 atomic_dec(&scx_ops_breather_depth); 4616 unlock: 4617 raw_spin_unlock_irqrestore(&bypass_lock, flags); 4618 scx_clear_softlockup(); 4619 } 4620 4621 static void free_exit_info(struct scx_exit_info *ei) 4622 { 4623 kvfree(ei->dump); 4624 kfree(ei->msg); 4625 kfree(ei->bt); 4626 kfree(ei); 4627 } 4628 4629 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) 4630 { 4631 struct scx_exit_info *ei; 4632 4633 ei = kzalloc(sizeof(*ei), GFP_KERNEL); 4634 if (!ei) 4635 return NULL; 4636 4637 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL); 4638 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); 4639 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL); 4640 4641 if (!ei->bt || !ei->msg || !ei->dump) { 4642 free_exit_info(ei); 4643 return NULL; 4644 } 4645 4646 return ei; 4647 } 4648 4649 static const char *scx_exit_reason(enum scx_exit_kind kind) 4650 { 4651 switch (kind) { 4652 case SCX_EXIT_UNREG: 4653 return "unregistered from user space"; 4654 case SCX_EXIT_UNREG_BPF: 4655 return "unregistered from BPF"; 4656 case SCX_EXIT_UNREG_KERN: 4657 return "unregistered from the main kernel"; 4658 case SCX_EXIT_SYSRQ: 4659 return "disabled by sysrq-S"; 4660 case SCX_EXIT_ERROR: 4661 return "runtime error"; 4662 case SCX_EXIT_ERROR_BPF: 4663 return "scx_bpf_error"; 4664 case SCX_EXIT_ERROR_STALL: 4665 return "runnable task stall"; 4666 default: 4667 return "<UNKNOWN>"; 4668 } 4669 } 4670 4671 static void scx_ops_disable_workfn(struct kthread_work *work) 4672 { 4673 struct scx_exit_info *ei = scx_exit_info; 4674 struct scx_task_iter sti; 4675 struct task_struct *p; 4676 struct rhashtable_iter rht_iter; 4677 struct scx_dispatch_q *dsq; 4678 int i, kind, cpu; 4679 4680 kind = atomic_read(&scx_exit_kind); 4681 while (true) { 4682 /* 4683 * NONE indicates that a new scx_ops has been registered since 4684 * disable was scheduled - don't kill the new ops. DONE 4685 * indicates that the ops has already been disabled. 4686 */ 4687 if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE) 4688 return; 4689 if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE)) 4690 break; 4691 } 4692 ei->kind = kind; 4693 ei->reason = scx_exit_reason(ei->kind); 4694 4695 /* guarantee forward progress by bypassing scx_ops */ 4696 scx_ops_bypass(true); 4697 4698 switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) { 4699 case SCX_OPS_DISABLING: 4700 WARN_ONCE(true, "sched_ext: duplicate disabling instance?"); 4701 break; 4702 case SCX_OPS_DISABLED: 4703 pr_warn("sched_ext: ops error detected without ops (%s)\n", 4704 scx_exit_info->msg); 4705 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != 4706 SCX_OPS_DISABLING); 4707 goto done; 4708 default: 4709 break; 4710 } 4711 4712 /* 4713 * Here, every runnable task is guaranteed to make forward progress and 4714 * we can safely use blocking synchronization constructs. Actually 4715 * disable ops. 
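 * (the bypass mode entered above forces global FIFO behavior, so even a
 * misbehaving BPF scheduler can no longer starve whoever holds the locks
 * taken below)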
4716 */ 4717 mutex_lock(&scx_ops_enable_mutex); 4718 4719 static_branch_disable(&__scx_switched_all); 4720 WRITE_ONCE(scx_switching_all, false); 4721 4722 /* 4723 * Shut down cgroup support before tasks so that the cgroup attach path 4724 * doesn't race against scx_ops_exit_task(). 4725 */ 4726 scx_cgroup_lock(); 4727 scx_cgroup_exit(); 4728 scx_cgroup_unlock(); 4729 4730 /* 4731 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones 4732 * must be switched out and exited synchronously. 4733 */ 4734 percpu_down_write(&scx_fork_rwsem); 4735 4736 scx_ops_init_task_enabled = false; 4737 4738 scx_task_iter_start(&sti); 4739 while ((p = scx_task_iter_next_locked(&sti))) { 4740 const struct sched_class *old_class = p->sched_class; 4741 const struct sched_class *new_class = 4742 __setscheduler_class(p->policy, p->prio); 4743 struct sched_enq_and_set_ctx ctx; 4744 4745 if (old_class != new_class && p->se.sched_delayed) 4746 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); 4747 4748 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 4749 4750 p->sched_class = new_class; 4751 check_class_changing(task_rq(p), p, old_class); 4752 4753 sched_enq_and_set_task(&ctx); 4754 4755 check_class_changed(task_rq(p), p, old_class, p->prio); 4756 scx_ops_exit_task(p); 4757 } 4758 scx_task_iter_stop(&sti); 4759 percpu_up_write(&scx_fork_rwsem); 4760 4761 /* 4762 * Invalidate all the rq clocks to prevent getting outdated 4763 * rq clocks from a previous scx scheduler. 4764 */ 4765 for_each_possible_cpu(cpu) { 4766 struct rq *rq = cpu_rq(cpu); 4767 scx_rq_clock_invalidate(rq); 4768 } 4769 4770 /* no task is on scx, turn off all the switches and flush in-progress calls */ 4771 static_branch_disable(&__scx_ops_enabled); 4772 for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++) 4773 static_branch_disable(&scx_has_op[i]); 4774 static_branch_disable(&scx_ops_allow_queued_wakeup); 4775 static_branch_disable(&scx_ops_enq_last); 4776 static_branch_disable(&scx_ops_enq_exiting); 4777 static_branch_disable(&scx_ops_enq_migration_disabled); 4778 static_branch_disable(&scx_ops_cpu_preempt); 4779 scx_idle_disable(); 4780 synchronize_rcu(); 4781 4782 if (ei->kind >= SCX_EXIT_ERROR) { 4783 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4784 scx_ops.name, ei->reason); 4785 4786 if (ei->msg[0] != '\0') 4787 pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg); 4788 #ifdef CONFIG_STACKTRACE 4789 stack_trace_print(ei->bt, ei->bt_len, 2); 4790 #endif 4791 } else { 4792 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4793 scx_ops.name, ei->reason); 4794 } 4795 4796 if (scx_ops.exit) 4797 SCX_CALL_OP(SCX_KF_UNLOCKED, exit, NULL, ei); 4798 4799 cancel_delayed_work_sync(&scx_watchdog_work); 4800 4801 /* 4802 * Delete the kobject from the hierarchy eagerly in addition to just 4803 * dropping a reference. Otherwise, if the object is deleted 4804 * asynchronously, sysfs could observe an object of the same name still 4805 * in the hierarchy when another scheduler is loaded. 
4806 */ 4807 kobject_del(scx_root_kobj); 4808 kobject_put(scx_root_kobj); 4809 scx_root_kobj = NULL; 4810 4811 memset(&scx_ops, 0, sizeof(scx_ops)); 4812 4813 rhashtable_walk_enter(&dsq_hash, &rht_iter); 4814 do { 4815 rhashtable_walk_start(&rht_iter); 4816 4817 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq)) 4818 destroy_dsq(dsq->id); 4819 4820 rhashtable_walk_stop(&rht_iter); 4821 } while (dsq == ERR_PTR(-EAGAIN)); 4822 rhashtable_walk_exit(&rht_iter); 4823 4824 free_percpu(scx_dsp_ctx); 4825 scx_dsp_ctx = NULL; 4826 scx_dsp_max_batch = 0; 4827 4828 free_exit_info(scx_exit_info); 4829 scx_exit_info = NULL; 4830 4831 mutex_unlock(&scx_ops_enable_mutex); 4832 4833 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != 4834 SCX_OPS_DISABLING); 4835 done: 4836 scx_ops_bypass(false); 4837 } 4838 4839 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn); 4840 4841 static void schedule_scx_ops_disable_work(void) 4842 { 4843 struct kthread_worker *helper = READ_ONCE(scx_ops_helper); 4844 4845 /* 4846 * We may be called spuriously before the first bpf_sched_ext_reg(). If 4847 * scx_ops_helper isn't set up yet, there's nothing to do. 4848 */ 4849 if (helper) 4850 kthread_queue_work(helper, &scx_ops_disable_work); 4851 } 4852 4853 static void scx_ops_disable(enum scx_exit_kind kind) 4854 { 4855 int none = SCX_EXIT_NONE; 4856 4857 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)) 4858 kind = SCX_EXIT_ERROR; 4859 4860 atomic_try_cmpxchg(&scx_exit_kind, &none, kind); 4861 4862 schedule_scx_ops_disable_work(); 4863 } 4864 4865 static void dump_newline(struct seq_buf *s) 4866 { 4867 trace_sched_ext_dump(""); 4868 4869 /* @s may be zero sized and seq_buf triggers WARN if so */ 4870 if (s->size) 4871 seq_buf_putc(s, '\n'); 4872 } 4873 4874 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...) 4875 { 4876 va_list args; 4877 4878 #ifdef CONFIG_TRACEPOINTS 4879 if (trace_sched_ext_dump_enabled()) { 4880 /* protected by scx_dump_state()::dump_lock */ 4881 static char line_buf[SCX_EXIT_MSG_LEN]; 4882 4883 va_start(args, fmt); 4884 vscnprintf(line_buf, sizeof(line_buf), fmt, args); 4885 va_end(args); 4886 4887 trace_sched_ext_dump(line_buf); 4888 } 4889 #endif 4890 /* @s may be zero sized and seq_buf triggers WARN if so */ 4891 if (s->size) { 4892 va_start(args, fmt); 4893 seq_buf_vprintf(s, fmt, args); 4894 va_end(args); 4895 4896 seq_buf_putc(s, '\n'); 4897 } 4898 } 4899 4900 static void dump_stack_trace(struct seq_buf *s, const char *prefix, 4901 const unsigned long *bt, unsigned int len) 4902 { 4903 unsigned int i; 4904 4905 for (i = 0; i < len; i++) 4906 dump_line(s, "%s%pS", prefix, (void *)bt[i]); 4907 } 4908 4909 static void ops_dump_init(struct seq_buf *s, const char *prefix) 4910 { 4911 struct scx_dump_data *dd = &scx_dump_data; 4912 4913 lockdep_assert_irqs_disabled(); 4914 4915 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */ 4916 dd->first = true; 4917 dd->cursor = 0; 4918 dd->s = s; 4919 dd->prefix = prefix; 4920 } 4921 4922 static void ops_dump_flush(void) 4923 { 4924 struct scx_dump_data *dd = &scx_dump_data; 4925 char *line = dd->buf.line; 4926 4927 if (!dd->cursor) 4928 return; 4929 4930 /* 4931 * There's something to flush and this is the first line. Insert a blank 4932 * line to distinguish ops dump. 4933 */ 4934 if (dd->first) { 4935 dump_newline(dd->s); 4936 dd->first = false; 4937 } 4938 4939 /* 4940 * There may be multiple lines in $line. Scan and emit each line 4941 * separately. 
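 * E.g. a single ops-side dump call that wrote "foo\nbar" is emitted as
 * two separately prefixed lines.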
4942 */ 4943 while (true) { 4944 char *end = line; 4945 char c; 4946 4947 while (*end != '\n' && *end != '\0') 4948 end++; 4949 4950 /* 4951 * If $line overflowed, it may not have newline at the end. 4952 * Always emit with a newline. 4953 */ 4954 c = *end; 4955 *end = '\0'; 4956 dump_line(dd->s, "%s%s", dd->prefix, line); 4957 if (c == '\0') 4958 break; 4959 4960 /* move to the next line */ 4961 end++; 4962 if (*end == '\0') 4963 break; 4964 line = end; 4965 } 4966 4967 dd->cursor = 0; 4968 } 4969 4970 static void ops_dump_exit(void) 4971 { 4972 ops_dump_flush(); 4973 scx_dump_data.cpu = -1; 4974 } 4975 4976 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, 4977 struct task_struct *p, char marker) 4978 { 4979 static unsigned long bt[SCX_EXIT_BT_LEN]; 4980 char dsq_id_buf[19] = "(n/a)"; 4981 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); 4982 unsigned int bt_len = 0; 4983 4984 if (p->scx.dsq) 4985 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx", 4986 (unsigned long long)p->scx.dsq->id); 4987 4988 dump_newline(s); 4989 dump_line(s, " %c%c %s[%d] %+ldms", 4990 marker, task_state_to_char(p), p->comm, p->pid, 4991 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); 4992 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu", 4993 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, 4994 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, 4995 ops_state >> SCX_OPSS_QSEQ_SHIFT); 4996 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s", 4997 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf); 4998 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u", 4999 p->scx.dsq_vtime, p->scx.slice, p->scx.weight); 5000 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr)); 5001 5002 if (SCX_HAS_OP(dump_task)) { 5003 ops_dump_init(s, " "); 5004 SCX_CALL_OP(SCX_KF_REST, dump_task, NULL, dctx, p); 5005 ops_dump_exit(); 5006 } 5007 5008 #ifdef CONFIG_STACKTRACE 5009 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); 5010 #endif 5011 if (bt_len) { 5012 dump_newline(s); 5013 dump_stack_trace(s, " ", bt, bt_len); 5014 } 5015 } 5016 5017 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len) 5018 { 5019 static DEFINE_SPINLOCK(dump_lock); 5020 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n"; 5021 struct scx_dump_ctx dctx = { 5022 .kind = ei->kind, 5023 .exit_code = ei->exit_code, 5024 .reason = ei->reason, 5025 .at_ns = ktime_get_ns(), 5026 .at_jiffies = jiffies, 5027 }; 5028 struct seq_buf s; 5029 struct scx_event_stats events; 5030 unsigned long flags; 5031 char *buf; 5032 int cpu; 5033 5034 spin_lock_irqsave(&dump_lock, flags); 5035 5036 seq_buf_init(&s, ei->dump, dump_len); 5037 5038 if (ei->kind == SCX_EXIT_NONE) { 5039 dump_line(&s, "Debug dump triggered by %s", ei->reason); 5040 } else { 5041 dump_line(&s, "%s[%d] triggered exit kind %d:", 5042 current->comm, current->pid, ei->kind); 5043 dump_line(&s, " %s (%s)", ei->reason, ei->msg); 5044 dump_newline(&s); 5045 dump_line(&s, "Backtrace:"); 5046 dump_stack_trace(&s, " ", ei->bt, ei->bt_len); 5047 } 5048 5049 if (SCX_HAS_OP(dump)) { 5050 ops_dump_init(&s, ""); 5051 SCX_CALL_OP(SCX_KF_UNLOCKED, dump, NULL, &dctx); 5052 ops_dump_exit(); 5053 } 5054 5055 dump_newline(&s); 5056 dump_line(&s, "CPU states"); 5057 dump_line(&s, "----------"); 5058 5059 for_each_possible_cpu(cpu) { 5060 struct rq *rq = cpu_rq(cpu); 5061 struct rq_flags rf; 5062 struct task_struct *p; 5063 struct seq_buf ns; 5064 size_t avail, used; 5065 bool idle; 5066 5067 rq_lock(rq, 
&rf); 5068 5069 idle = list_empty(&rq->scx.runnable_list) && 5070 rq->curr->sched_class == &idle_sched_class; 5071 5072 if (idle && !SCX_HAS_OP(dump_cpu)) 5073 goto next; 5074 5075 /* 5076 * We don't yet know whether ops.dump_cpu() will produce output 5077 * and we may want to skip the default CPU dump if it doesn't. 5078 * Use a nested seq_buf to generate the standard dump so that we 5079 * can decide whether to commit later. 5080 */ 5081 avail = seq_buf_get_buf(&s, &buf); 5082 seq_buf_init(&ns, buf, avail); 5083 5084 dump_newline(&ns); 5085 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu", 5086 cpu, rq->scx.nr_running, rq->scx.flags, 5087 rq->scx.cpu_released, rq->scx.ops_qseq, 5088 rq->scx.pnt_seq); 5089 dump_line(&ns, " curr=%s[%d] class=%ps", 5090 rq->curr->comm, rq->curr->pid, 5091 rq->curr->sched_class); 5092 if (!cpumask_empty(rq->scx.cpus_to_kick)) 5093 dump_line(&ns, " cpus_to_kick : %*pb", 5094 cpumask_pr_args(rq->scx.cpus_to_kick)); 5095 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle)) 5096 dump_line(&ns, " idle_to_kick : %*pb", 5097 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle)); 5098 if (!cpumask_empty(rq->scx.cpus_to_preempt)) 5099 dump_line(&ns, " cpus_to_preempt: %*pb", 5100 cpumask_pr_args(rq->scx.cpus_to_preempt)); 5101 if (!cpumask_empty(rq->scx.cpus_to_wait)) 5102 dump_line(&ns, " cpus_to_wait : %*pb", 5103 cpumask_pr_args(rq->scx.cpus_to_wait)); 5104 5105 used = seq_buf_used(&ns); 5106 if (SCX_HAS_OP(dump_cpu)) { 5107 ops_dump_init(&ns, " "); 5108 SCX_CALL_OP(SCX_KF_REST, dump_cpu, NULL, &dctx, cpu, idle); 5109 ops_dump_exit(); 5110 } 5111 5112 /* 5113 * If idle && nothing generated by ops.dump_cpu(), there's 5114 * nothing interesting. Skip. 5115 */ 5116 if (idle && used == seq_buf_used(&ns)) 5117 goto next; 5118 5119 /* 5120 * $s may already have overflowed when $ns was created. If so, 5121 * calling commit on it will trigger BUG. 
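 * (seq_buf_get_buf() returned avail == 0 in that case, hence the guard
 * below)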
5122 */ 5123 if (avail) { 5124 seq_buf_commit(&s, seq_buf_used(&ns)); 5125 if (seq_buf_has_overflowed(&ns)) 5126 seq_buf_set_overflow(&s); 5127 } 5128 5129 if (rq->curr->sched_class == &ext_sched_class) 5130 scx_dump_task(&s, &dctx, rq->curr, '*'); 5131 5132 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) 5133 scx_dump_task(&s, &dctx, p, ' '); 5134 next: 5135 rq_unlock(rq, &rf); 5136 } 5137 5138 dump_newline(&s); 5139 dump_line(&s, "Event counters"); 5140 dump_line(&s, "--------------"); 5141 5142 scx_bpf_events(&events, sizeof(events)); 5143 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK); 5144 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 5145 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST); 5146 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING); 5147 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 5148 scx_dump_event(s, &events, SCX_EV_ENQ_SLICE_DFL); 5149 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION); 5150 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH); 5151 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE); 5152 5153 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker)) 5154 memcpy(ei->dump + dump_len - sizeof(trunc_marker), 5155 trunc_marker, sizeof(trunc_marker)); 5156 5157 spin_unlock_irqrestore(&dump_lock, flags); 5158 } 5159 5160 static void scx_ops_error_irq_workfn(struct irq_work *irq_work) 5161 { 5162 struct scx_exit_info *ei = scx_exit_info; 5163 5164 if (ei->kind >= SCX_EXIT_ERROR) 5165 scx_dump_state(ei, scx_ops.exit_dump_len); 5166 5167 schedule_scx_ops_disable_work(); 5168 } 5169 5170 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn); 5171 5172 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind, 5173 s64 exit_code, 5174 const char *fmt, ...) 5175 { 5176 struct scx_exit_info *ei = scx_exit_info; 5177 int none = SCX_EXIT_NONE; 5178 va_list args; 5179 5180 if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind)) 5181 return; 5182 5183 ei->exit_code = exit_code; 5184 #ifdef CONFIG_STACKTRACE 5185 if (kind >= SCX_EXIT_ERROR) 5186 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1); 5187 #endif 5188 va_start(args, fmt); 5189 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args); 5190 va_end(args); 5191 5192 /* 5193 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again 5194 * in scx_ops_disable_workfn(). 5195 */ 5196 ei->kind = kind; 5197 ei->reason = scx_exit_reason(ei->kind); 5198 5199 irq_work_queue(&scx_ops_error_irq_work); 5200 } 5201 5202 static struct kthread_worker *scx_create_rt_helper(const char *name) 5203 { 5204 struct kthread_worker *helper; 5205 5206 helper = kthread_run_worker(0, name); 5207 if (helper) 5208 sched_set_fifo(helper->task); 5209 return helper; 5210 } 5211 5212 static void check_hotplug_seq(const struct sched_ext_ops *ops) 5213 { 5214 unsigned long long global_hotplug_seq; 5215 5216 /* 5217 * If a hotplug event has occurred between when a scheduler was 5218 * initialized, and when we were able to attach, exit and notify user 5219 * space about it. 
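 *
 * A userspace loader might do the following (illustrative sketch; the
 * read_u64() helper is made up):
 *
 *	ops->hotplug_seq = read_u64("/sys/kernel/sched_ext/hotplug_seq");
 *	... load and attach the struct_ops map ...
 *
 * Leaving ops->hotplug_seq at 0 skips the check entirely.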
5220 */ 5221 if (ops->hotplug_seq) { 5222 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq); 5223 if (ops->hotplug_seq != global_hotplug_seq) { 5224 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 5225 "expected hotplug seq %llu did not match actual %llu", 5226 ops->hotplug_seq, global_hotplug_seq); 5227 } 5228 } 5229 } 5230 5231 static int validate_ops(const struct sched_ext_ops *ops) 5232 { 5233 /* 5234 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the 5235 * ops.enqueue() callback isn't implemented. 5236 */ 5237 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) { 5238 scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); 5239 return -EINVAL; 5240 } 5241 5242 /* 5243 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle 5244 * selection policy to be enabled. 5245 */ 5246 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) && 5247 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) { 5248 scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled"); 5249 return -EINVAL; 5250 } 5251 5252 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT) 5253 pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n"); 5254 5255 return 0; 5256 } 5257 5258 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link) 5259 { 5260 struct scx_task_iter sti; 5261 struct task_struct *p; 5262 unsigned long timeout; 5263 int i, cpu, node, ret; 5264 5265 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN), 5266 cpu_possible_mask)) { 5267 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n"); 5268 return -EINVAL; 5269 } 5270 5271 mutex_lock(&scx_ops_enable_mutex); 5272 5273 /* 5274 * Clear event counters so a new scx scheduler gets 5275 * fresh event counter values. 5276 */ 5277 for_each_possible_cpu(cpu) { 5278 struct scx_event_stats *e = per_cpu_ptr(&event_stats_cpu, cpu); 5279 memset(e, 0, sizeof(*e)); 5280 } 5281 5282 if (!scx_ops_helper) { 5283 WRITE_ONCE(scx_ops_helper, 5284 scx_create_rt_helper("sched_ext_ops_helper")); 5285 if (!scx_ops_helper) { 5286 ret = -ENOMEM; 5287 goto err_unlock; 5288 } 5289 } 5290 5291 if (!global_dsqs) { 5292 struct scx_dispatch_q **dsqs; 5293 5294 dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL); 5295 if (!dsqs) { 5296 ret = -ENOMEM; 5297 goto err_unlock; 5298 } 5299 5300 for_each_node_state(node, N_POSSIBLE) { 5301 struct scx_dispatch_q *dsq; 5302 5303 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node); 5304 if (!dsq) { 5305 for_each_node_state(node, N_POSSIBLE) 5306 kfree(dsqs[node]); 5307 kfree(dsqs); 5308 ret = -ENOMEM; 5309 goto err_unlock; 5310 } 5311 5312 init_dsq(dsq, SCX_DSQ_GLOBAL); 5313 dsqs[node] = dsq; 5314 } 5315 5316 global_dsqs = dsqs; 5317 } 5318 5319 if (scx_ops_enable_state() != SCX_OPS_DISABLED) { 5320 ret = -EBUSY; 5321 goto err_unlock; 5322 } 5323 5324 scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL); 5325 if (!scx_root_kobj) { 5326 ret = -ENOMEM; 5327 goto err_unlock; 5328 } 5329 5330 scx_root_kobj->kset = scx_kset; 5331 ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root"); 5332 if (ret < 0) 5333 goto err; 5334 5335 scx_exit_info = alloc_exit_info(ops->exit_dump_len); 5336 if (!scx_exit_info) { 5337 ret = -ENOMEM; 5338 goto err_del; 5339 } 5340 5341 /* 5342 * Set scx_ops, transition to ENABLING and clear exit info to arm the 5343 * disable path. Failure triggers full disabling from here on. 
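 * (from this point on, errors are reported through scx_ops_error() and
 * the regular ops.exit() machinery instead of by unwinding manually; see
 * the err_disable path below)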
5344 */ 5345 scx_ops = *ops; 5346 5347 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) != 5348 SCX_OPS_DISABLED); 5349 5350 atomic_set(&scx_exit_kind, SCX_EXIT_NONE); 5351 scx_warned_zero_slice = false; 5352 5353 atomic_long_set(&scx_nr_rejected, 0); 5354 5355 for_each_possible_cpu(cpu) 5356 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE; 5357 5358 /* 5359 * Keep CPUs stable during enable so that the BPF scheduler can track 5360 * online CPUs by watching ->on/offline_cpu() after ->init(). 5361 */ 5362 cpus_read_lock(); 5363 5364 scx_idle_enable(ops); 5365 5366 if (scx_ops.init) { 5367 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init, NULL); 5368 if (ret) { 5369 ret = ops_sanitize_err("init", ret); 5370 cpus_read_unlock(); 5371 scx_ops_error("ops.init() failed (%d)", ret); 5372 goto err_disable; 5373 } 5374 } 5375 5376 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++) 5377 if (((void (**)(void))ops)[i]) 5378 static_branch_enable_cpuslocked(&scx_has_op[i]); 5379 5380 check_hotplug_seq(ops); 5381 scx_idle_update_selcpu_topology(ops); 5382 5383 cpus_read_unlock(); 5384 5385 ret = validate_ops(ops); 5386 if (ret) 5387 goto err_disable; 5388 5389 WARN_ON_ONCE(scx_dsp_ctx); 5390 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH; 5391 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf, 5392 scx_dsp_max_batch), 5393 __alignof__(struct scx_dsp_ctx)); 5394 if (!scx_dsp_ctx) { 5395 ret = -ENOMEM; 5396 goto err_disable; 5397 } 5398 5399 if (ops->timeout_ms) 5400 timeout = msecs_to_jiffies(ops->timeout_ms); 5401 else 5402 timeout = SCX_WATCHDOG_MAX_TIMEOUT; 5403 5404 WRITE_ONCE(scx_watchdog_timeout, timeout); 5405 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 5406 queue_delayed_work(system_unbound_wq, &scx_watchdog_work, 5407 scx_watchdog_timeout / 2); 5408 5409 /* 5410 * Once __scx_ops_enabled is set, %current can be switched to SCX 5411 * anytime. This can lead to stalls as some BPF schedulers (e.g. 5412 * userspace scheduling) may not function correctly before all tasks are 5413 * switched. Init in bypass mode to guarantee forward progress. 5414 */ 5415 scx_ops_bypass(true); 5416 5417 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++) 5418 if (((void (**)(void))ops)[i]) 5419 static_branch_enable(&scx_has_op[i]); 5420 5421 if (ops->flags & SCX_OPS_ALLOW_QUEUED_WAKEUP) 5422 static_branch_enable(&scx_ops_allow_queued_wakeup); 5423 if (ops->flags & SCX_OPS_ENQ_LAST) 5424 static_branch_enable(&scx_ops_enq_last); 5425 if (ops->flags & SCX_OPS_ENQ_EXITING) 5426 static_branch_enable(&scx_ops_enq_exiting); 5427 if (ops->flags & SCX_OPS_ENQ_MIGRATION_DISABLED) 5428 static_branch_enable(&scx_ops_enq_migration_disabled); 5429 if (scx_ops.cpu_acquire || scx_ops.cpu_release) 5430 static_branch_enable(&scx_ops_cpu_preempt); 5431 5432 /* 5433 * Lock out forks, cgroup on/offlining and moves before opening the 5434 * floodgate so that they don't wander into the operations prematurely. 5435 */ 5436 percpu_down_write(&scx_fork_rwsem); 5437 5438 WARN_ON_ONCE(scx_ops_init_task_enabled); 5439 scx_ops_init_task_enabled = true; 5440 5441 /* 5442 * Enable ops for every task. Fork is excluded by scx_fork_rwsem 5443 * preventing new tasks from being added. No need to exclude tasks 5444 * leaving as sched_ext_free() can handle both prepped and enabled 5445 * tasks. Prep all tasks first and then enable them with preemption 5446 * disabled. 
5447 * 5448 * All cgroups should be initialized before scx_ops_init_task() so that 5449 * the BPF scheduler can reliably track each task's cgroup membership 5450 * from scx_ops_init_task(). Lock out cgroup on/offlining and task 5451 * migrations while tasks are being initialized so that 5452 * scx_cgroup_can_attach() never sees uninitialized tasks. 5453 */ 5454 scx_cgroup_lock(); 5455 ret = scx_cgroup_init(); 5456 if (ret) 5457 goto err_disable_unlock_all; 5458 5459 scx_task_iter_start(&sti); 5460 while ((p = scx_task_iter_next_locked(&sti))) { 5461 /* 5462 * @p may already be dead, have lost all its usages counts and 5463 * be waiting for RCU grace period before being freed. @p can't 5464 * be initialized for SCX in such cases and should be ignored. 5465 */ 5466 if (!tryget_task_struct(p)) 5467 continue; 5468 5469 scx_task_iter_unlock(&sti); 5470 5471 ret = scx_ops_init_task(p, task_group(p), false); 5472 if (ret) { 5473 put_task_struct(p); 5474 scx_task_iter_relock(&sti); 5475 scx_task_iter_stop(&sti); 5476 scx_ops_error("ops.init_task() failed (%d) for %s[%d]", 5477 ret, p->comm, p->pid); 5478 goto err_disable_unlock_all; 5479 } 5480 5481 scx_set_task_state(p, SCX_TASK_READY); 5482 5483 put_task_struct(p); 5484 scx_task_iter_relock(&sti); 5485 } 5486 scx_task_iter_stop(&sti); 5487 scx_cgroup_unlock(); 5488 percpu_up_write(&scx_fork_rwsem); 5489 5490 /* 5491 * All tasks are READY. It's safe to turn on scx_enabled() and switch 5492 * all eligible tasks. 5493 */ 5494 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL)); 5495 static_branch_enable(&__scx_ops_enabled); 5496 5497 /* 5498 * We're fully committed and can't fail. The task READY -> ENABLED 5499 * transitions here are synchronized against sched_ext_free() through 5500 * scx_tasks_lock. 5501 */ 5502 percpu_down_write(&scx_fork_rwsem); 5503 scx_task_iter_start(&sti); 5504 while ((p = scx_task_iter_next_locked(&sti))) { 5505 const struct sched_class *old_class = p->sched_class; 5506 const struct sched_class *new_class = 5507 __setscheduler_class(p->policy, p->prio); 5508 struct sched_enq_and_set_ctx ctx; 5509 5510 if (old_class != new_class && p->se.sched_delayed) 5511 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); 5512 5513 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 5514 5515 p->scx.slice = SCX_SLICE_DFL; 5516 p->sched_class = new_class; 5517 check_class_changing(task_rq(p), p, old_class); 5518 5519 sched_enq_and_set_task(&ctx); 5520 5521 check_class_changed(task_rq(p), p, old_class, p->prio); 5522 } 5523 scx_task_iter_stop(&sti); 5524 percpu_up_write(&scx_fork_rwsem); 5525 5526 scx_ops_bypass(false); 5527 5528 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) { 5529 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE); 5530 goto err_disable; 5531 } 5532 5533 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL)) 5534 static_branch_enable(&__scx_switched_all); 5535 5536 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n", 5537 scx_ops.name, scx_switched_all() ? 
"" : " (partial)"); 5538 kobject_uevent(scx_root_kobj, KOBJ_ADD); 5539 mutex_unlock(&scx_ops_enable_mutex); 5540 5541 atomic_long_inc(&scx_enable_seq); 5542 5543 return 0; 5544 5545 err_del: 5546 kobject_del(scx_root_kobj); 5547 err: 5548 kobject_put(scx_root_kobj); 5549 scx_root_kobj = NULL; 5550 if (scx_exit_info) { 5551 free_exit_info(scx_exit_info); 5552 scx_exit_info = NULL; 5553 } 5554 err_unlock: 5555 mutex_unlock(&scx_ops_enable_mutex); 5556 return ret; 5557 5558 err_disable_unlock_all: 5559 scx_cgroup_unlock(); 5560 percpu_up_write(&scx_fork_rwsem); 5561 scx_ops_bypass(false); 5562 err_disable: 5563 mutex_unlock(&scx_ops_enable_mutex); 5564 /* 5565 * Returning an error code here would not pass all the error information 5566 * to userspace. Record errno using scx_ops_error() for cases 5567 * scx_ops_error() wasn't already invoked and exit indicating success so 5568 * that the error is notified through ops.exit() with all the details. 5569 * 5570 * Flush scx_ops_disable_work to ensure that error is reported before 5571 * init completion. 5572 */ 5573 scx_ops_error("scx_ops_enable() failed (%d)", ret); 5574 kthread_flush_work(&scx_ops_disable_work); 5575 return 0; 5576 } 5577 5578 5579 /******************************************************************************** 5580 * bpf_struct_ops plumbing. 5581 */ 5582 #include <linux/bpf_verifier.h> 5583 #include <linux/bpf.h> 5584 #include <linux/btf.h> 5585 5586 static const struct btf_type *task_struct_type; 5587 5588 static bool bpf_scx_is_valid_access(int off, int size, 5589 enum bpf_access_type type, 5590 const struct bpf_prog *prog, 5591 struct bpf_insn_access_aux *info) 5592 { 5593 if (type != BPF_READ) 5594 return false; 5595 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 5596 return false; 5597 if (off % size != 0) 5598 return false; 5599 5600 return btf_ctx_access(off, size, type, prog, info); 5601 } 5602 5603 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, 5604 const struct bpf_reg_state *reg, int off, 5605 int size) 5606 { 5607 const struct btf_type *t; 5608 5609 t = btf_type_by_id(reg->btf, reg->btf_id); 5610 if (t == task_struct_type) { 5611 if (off >= offsetof(struct task_struct, scx.slice) && 5612 off + size <= offsetofend(struct task_struct, scx.slice)) 5613 return SCALAR_VALUE; 5614 if (off >= offsetof(struct task_struct, scx.dsq_vtime) && 5615 off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) 5616 return SCALAR_VALUE; 5617 if (off >= offsetof(struct task_struct, scx.disallow) && 5618 off + size <= offsetofend(struct task_struct, scx.disallow)) 5619 return SCALAR_VALUE; 5620 } 5621 5622 return -EACCES; 5623 } 5624 5625 static const struct bpf_func_proto * 5626 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5627 { 5628 switch (func_id) { 5629 case BPF_FUNC_task_storage_get: 5630 return &bpf_task_storage_get_proto; 5631 case BPF_FUNC_task_storage_delete: 5632 return &bpf_task_storage_delete_proto; 5633 default: 5634 return bpf_base_func_proto(func_id, prog); 5635 } 5636 } 5637 5638 static const struct bpf_verifier_ops bpf_scx_verifier_ops = { 5639 .get_func_proto = bpf_scx_get_func_proto, 5640 .is_valid_access = bpf_scx_is_valid_access, 5641 .btf_struct_access = bpf_scx_btf_struct_access, 5642 }; 5643 5644 static int bpf_scx_init_member(const struct btf_type *t, 5645 const struct btf_member *member, 5646 void *kdata, const void *udata) 5647 { 5648 const struct sched_ext_ops *uops = udata; 5649 struct sched_ext_ops *ops = kdata; 5650 u32 moff = 
__btf_member_bit_offset(t, member) / 8; 5651 int ret; 5652 5653 switch (moff) { 5654 case offsetof(struct sched_ext_ops, dispatch_max_batch): 5655 if (*(u32 *)(udata + moff) > INT_MAX) 5656 return -E2BIG; 5657 ops->dispatch_max_batch = *(u32 *)(udata + moff); 5658 return 1; 5659 case offsetof(struct sched_ext_ops, flags): 5660 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS) 5661 return -EINVAL; 5662 ops->flags = *(u64 *)(udata + moff); 5663 return 1; 5664 case offsetof(struct sched_ext_ops, name): 5665 ret = bpf_obj_name_cpy(ops->name, uops->name, 5666 sizeof(ops->name)); 5667 if (ret < 0) 5668 return ret; 5669 if (ret == 0) 5670 return -EINVAL; 5671 return 1; 5672 case offsetof(struct sched_ext_ops, timeout_ms): 5673 if (msecs_to_jiffies(*(u32 *)(udata + moff)) > 5674 SCX_WATCHDOG_MAX_TIMEOUT) 5675 return -E2BIG; 5676 ops->timeout_ms = *(u32 *)(udata + moff); 5677 return 1; 5678 case offsetof(struct sched_ext_ops, exit_dump_len): 5679 ops->exit_dump_len = 5680 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN; 5681 return 1; 5682 case offsetof(struct sched_ext_ops, hotplug_seq): 5683 ops->hotplug_seq = *(u64 *)(udata + moff); 5684 return 1; 5685 } 5686 5687 return 0; 5688 } 5689 5690 static int bpf_scx_check_member(const struct btf_type *t, 5691 const struct btf_member *member, 5692 const struct bpf_prog *prog) 5693 { 5694 u32 moff = __btf_member_bit_offset(t, member) / 8; 5695 5696 switch (moff) { 5697 case offsetof(struct sched_ext_ops, init_task): 5698 #ifdef CONFIG_EXT_GROUP_SCHED 5699 case offsetof(struct sched_ext_ops, cgroup_init): 5700 case offsetof(struct sched_ext_ops, cgroup_exit): 5701 case offsetof(struct sched_ext_ops, cgroup_prep_move): 5702 #endif 5703 case offsetof(struct sched_ext_ops, cpu_online): 5704 case offsetof(struct sched_ext_ops, cpu_offline): 5705 case offsetof(struct sched_ext_ops, init): 5706 case offsetof(struct sched_ext_ops, exit): 5707 break; 5708 default: 5709 if (prog->sleepable) 5710 return -EINVAL; 5711 } 5712 5713 return 0; 5714 } 5715 5716 static int bpf_scx_reg(void *kdata, struct bpf_link *link) 5717 { 5718 return scx_ops_enable(kdata, link); 5719 } 5720 5721 static void bpf_scx_unreg(void *kdata, struct bpf_link *link) 5722 { 5723 scx_ops_disable(SCX_EXIT_UNREG); 5724 kthread_flush_work(&scx_ops_disable_work); 5725 } 5726 5727 static int bpf_scx_init(struct btf *btf) 5728 { 5729 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]); 5730 5731 return 0; 5732 } 5733 5734 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link) 5735 { 5736 /* 5737 * sched_ext does not support updating the actively-loaded BPF 5738 * scheduler, as registering a BPF scheduler can always fail if the 5739 * scheduler returns an error code for e.g. ops.init(), ops.init_task(), 5740 * etc. Similarly, we can always race with unregistration happening 5741 * elsewhere, such as with sysrq. 
5742 */ 5743 return -EOPNOTSUPP; 5744 } 5745 5746 static int bpf_scx_validate(void *kdata) 5747 { 5748 return 0; 5749 } 5750 5751 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; } 5752 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {} 5753 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {} 5754 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {} 5755 static void sched_ext_ops__tick(struct task_struct *p) {} 5756 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {} 5757 static void sched_ext_ops__running(struct task_struct *p) {} 5758 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {} 5759 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {} 5760 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; } 5761 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; } 5762 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {} 5763 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {} 5764 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {} 5765 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {} 5766 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {} 5767 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; } 5768 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {} 5769 static void sched_ext_ops__enable(struct task_struct *p) {} 5770 static void sched_ext_ops__disable(struct task_struct *p) {} 5771 #ifdef CONFIG_EXT_GROUP_SCHED 5772 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; } 5773 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {} 5774 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; } 5775 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} 5776 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} 5777 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {} 5778 #endif 5779 static void sched_ext_ops__cpu_online(s32 cpu) {} 5780 static void sched_ext_ops__cpu_offline(s32 cpu) {} 5781 static s32 sched_ext_ops__init(void) { return -EINVAL; } 5782 static void sched_ext_ops__exit(struct scx_exit_info *info) {} 5783 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {} 5784 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {} 5785 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {} 5786 5787 static struct sched_ext_ops __bpf_ops_sched_ext_ops = { 5788 .select_cpu = sched_ext_ops__select_cpu, 5789 .enqueue = sched_ext_ops__enqueue, 5790 .dequeue = sched_ext_ops__dequeue, 5791 .dispatch = sched_ext_ops__dispatch, 5792 .tick = sched_ext_ops__tick, 5793 .runnable = sched_ext_ops__runnable, 5794 .running = sched_ext_ops__running, 5795 .stopping = sched_ext_ops__stopping, 5796 .quiescent = sched_ext_ops__quiescent, 5797 .yield = sched_ext_ops__yield, 5798 .core_sched_before = 
sched_ext_ops__core_sched_before, 5799 .set_weight = sched_ext_ops__set_weight, 5800 .set_cpumask = sched_ext_ops__set_cpumask, 5801 .update_idle = sched_ext_ops__update_idle, 5802 .cpu_acquire = sched_ext_ops__cpu_acquire, 5803 .cpu_release = sched_ext_ops__cpu_release, 5804 .init_task = sched_ext_ops__init_task, 5805 .exit_task = sched_ext_ops__exit_task, 5806 .enable = sched_ext_ops__enable, 5807 .disable = sched_ext_ops__disable, 5808 #ifdef CONFIG_EXT_GROUP_SCHED 5809 .cgroup_init = sched_ext_ops__cgroup_init, 5810 .cgroup_exit = sched_ext_ops__cgroup_exit, 5811 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move, 5812 .cgroup_move = sched_ext_ops__cgroup_move, 5813 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move, 5814 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight, 5815 #endif 5816 .cpu_online = sched_ext_ops__cpu_online, 5817 .cpu_offline = sched_ext_ops__cpu_offline, 5818 .init = sched_ext_ops__init, 5819 .exit = sched_ext_ops__exit, 5820 .dump = sched_ext_ops__dump, 5821 .dump_cpu = sched_ext_ops__dump_cpu, 5822 .dump_task = sched_ext_ops__dump_task, 5823 }; 5824 5825 static struct bpf_struct_ops bpf_sched_ext_ops = { 5826 .verifier_ops = &bpf_scx_verifier_ops, 5827 .reg = bpf_scx_reg, 5828 .unreg = bpf_scx_unreg, 5829 .check_member = bpf_scx_check_member, 5830 .init_member = bpf_scx_init_member, 5831 .init = bpf_scx_init, 5832 .update = bpf_scx_update, 5833 .validate = bpf_scx_validate, 5834 .name = "sched_ext_ops", 5835 .owner = THIS_MODULE, 5836 .cfi_stubs = &__bpf_ops_sched_ext_ops 5837 }; 5838 5839 5840 /******************************************************************************** 5841 * System integration and init. 5842 */ 5843 5844 static void sysrq_handle_sched_ext_reset(u8 key) 5845 { 5846 if (scx_ops_helper) 5847 scx_ops_disable(SCX_EXIT_SYSRQ); 5848 else 5849 pr_info("sched_ext: BPF scheduler not yet used\n"); 5850 } 5851 5852 static const struct sysrq_key_op sysrq_sched_ext_reset_op = { 5853 .handler = sysrq_handle_sched_ext_reset, 5854 .help_msg = "reset-sched-ext(S)", 5855 .action_msg = "Disable sched_ext and revert all tasks to CFS", 5856 .enable_mask = SYSRQ_ENABLE_RTNICE, 5857 }; 5858 5859 static void sysrq_handle_sched_ext_dump(u8 key) 5860 { 5861 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" }; 5862 5863 if (scx_enabled()) 5864 scx_dump_state(&ei, 0); 5865 } 5866 5867 static const struct sysrq_key_op sysrq_sched_ext_dump_op = { 5868 .handler = sysrq_handle_sched_ext_dump, 5869 .help_msg = "dump-sched-ext(D)", 5870 .action_msg = "Trigger sched_ext debug dump", 5871 .enable_mask = SYSRQ_ENABLE_RTNICE, 5872 }; 5873 5874 static bool can_skip_idle_kick(struct rq *rq) 5875 { 5876 lockdep_assert_rq_held(rq); 5877 5878 /* 5879 * We can skip idle kicking if @rq is going to go through at least one 5880 * full SCX scheduling cycle before going idle. Just checking whether 5881 * curr is not idle is insufficient because we could be racing 5882 * balance_one() trying to pull the next task from a remote rq, which 5883 * may fail, and @rq may become idle afterwards. 5884 * 5885 * The race window is small and we don't and can't guarantee that @rq is 5886 * only kicked while idle anyway. Skip only when sure. 
5887 */ 5888 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE); 5889 } 5890 5891 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) 5892 { 5893 struct rq *rq = cpu_rq(cpu); 5894 struct scx_rq *this_scx = &this_rq->scx; 5895 bool should_wait = false; 5896 unsigned long flags; 5897 5898 raw_spin_rq_lock_irqsave(rq, flags); 5899 5900 /* 5901 * During CPU hotplug, a CPU may depend on kicking itself to make 5902 * forward progress. Allow kicking self regardless of online state. 5903 */ 5904 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) { 5905 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) { 5906 if (rq->curr->sched_class == &ext_sched_class) 5907 rq->curr->scx.slice = 0; 5908 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5909 } 5910 5911 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { 5912 pseqs[cpu] = rq->scx.pnt_seq; 5913 should_wait = true; 5914 } 5915 5916 resched_curr(rq); 5917 } else { 5918 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5919 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5920 } 5921 5922 raw_spin_rq_unlock_irqrestore(rq, flags); 5923 5924 return should_wait; 5925 } 5926 5927 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq) 5928 { 5929 struct rq *rq = cpu_rq(cpu); 5930 unsigned long flags; 5931 5932 raw_spin_rq_lock_irqsave(rq, flags); 5933 5934 if (!can_skip_idle_kick(rq) && 5935 (cpu_online(cpu) || cpu == cpu_of(this_rq))) 5936 resched_curr(rq); 5937 5938 raw_spin_rq_unlock_irqrestore(rq, flags); 5939 } 5940 5941 static void kick_cpus_irq_workfn(struct irq_work *irq_work) 5942 { 5943 struct rq *this_rq = this_rq(); 5944 struct scx_rq *this_scx = &this_rq->scx; 5945 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); 5946 bool should_wait = false; 5947 s32 cpu; 5948 5949 for_each_cpu(cpu, this_scx->cpus_to_kick) { 5950 should_wait |= kick_one_cpu(cpu, this_rq, pseqs); 5951 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); 5952 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5953 } 5954 5955 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) { 5956 kick_one_cpu_if_idle(cpu, this_rq); 5957 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5958 } 5959 5960 if (!should_wait) 5961 return; 5962 5963 for_each_cpu(cpu, this_scx->cpus_to_wait) { 5964 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq; 5965 5966 if (cpu != cpu_of(this_rq)) { 5967 /* 5968 * Pairs with smp_store_release() issued by this CPU in 5969 * switch_class() on the resched path. 5970 * 5971 * We busy-wait here to guarantee that no other task can 5972 * be scheduled on our core before the target CPU has 5973 * entered the resched path. 5974 */ 5975 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu]) 5976 cpu_relax(); 5977 } 5978 5979 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5980 } 5981 } 5982 5983 /** 5984 * print_scx_info - print out sched_ext scheduler state 5985 * @log_lvl: the log level to use when printing 5986 * @p: target task 5987 * 5988 * If a sched_ext scheduler is enabled, print the name and state of the 5989 * scheduler. If @p is on sched_ext, print further information about the task. 5990 * 5991 * This function can be safely called on any task as long as the task_struct 5992 * itself is accessible. While safe, this function isn't synchronized and may 5993 * print out mixed-up or garbage output of limited length.
5994 */ 5995 void print_scx_info(const char *log_lvl, struct task_struct *p) 5996 { 5997 enum scx_ops_enable_state state = scx_ops_enable_state(); 5998 const char *all = READ_ONCE(scx_switching_all) ? "+all" : ""; 5999 char runnable_at_buf[22] = "?"; 6000 struct sched_class *class; 6001 unsigned long runnable_at; 6002 6003 if (state == SCX_OPS_DISABLED) 6004 return; 6005 6006 /* 6007 * Carefully check if the task was running on sched_ext, and then 6008 * carefully copy the time it's been runnable, and its state. 6009 */ 6010 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) || 6011 class != &ext_sched_class) { 6012 printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name, 6013 scx_ops_enable_state_str[state], all); 6014 return; 6015 } 6016 6017 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at, 6018 sizeof(runnable_at))) 6019 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms", 6020 jiffies_delta_msecs(runnable_at, jiffies)); 6021 6022 /* print everything onto one line to conserve console space */ 6023 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s", 6024 log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all, 6025 runnable_at_buf); 6026 } 6027 6028 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr) 6029 { 6030 /* 6031 * SCX schedulers often have userspace components which are sometimes 6032 * involved in critical scheduling paths. PM operations involve freezing 6033 * userspace which can lead to scheduling misbehaviors including stalls. 6034 * Let's bypass while PM operations are in progress. 6035 */ 6036 switch (event) { 6037 case PM_HIBERNATION_PREPARE: 6038 case PM_SUSPEND_PREPARE: 6039 case PM_RESTORE_PREPARE: 6040 scx_ops_bypass(true); 6041 break; 6042 case PM_POST_HIBERNATION: 6043 case PM_POST_SUSPEND: 6044 case PM_POST_RESTORE: 6045 scx_ops_bypass(false); 6046 break; 6047 } 6048 6049 return NOTIFY_OK; 6050 } 6051 6052 static struct notifier_block scx_pm_notifier = { 6053 .notifier_call = scx_pm_handler, 6054 }; 6055 6056 void __init init_sched_ext_class(void) 6057 { 6058 s32 cpu, v; 6059 6060 /* 6061 * The following is to prevent the compiler from optimizing out the enum 6062 * definitions so that BPF scheduler implementations can use them 6063 * through the generated vmlinux.h.
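 *
 * This way, a BPF scheduler built against the generated vmlinux.h can test
 * such values directly, e.g. (a sketch; handle_wakeup() is illustrative):
 *
 *	if (enq_flags & SCX_ENQ_WAKEUP)
 *		handle_wakeup(p);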
6064 */ 6065 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT | 6066 SCX_TG_ONLINE); 6067 6068 BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params)); 6069 scx_idle_init_masks(); 6070 6071 scx_kick_cpus_pnt_seqs = 6072 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids, 6073 __alignof__(scx_kick_cpus_pnt_seqs[0])); 6074 BUG_ON(!scx_kick_cpus_pnt_seqs); 6075 6076 for_each_possible_cpu(cpu) { 6077 struct rq *rq = cpu_rq(cpu); 6078 int n = cpu_to_node(cpu); 6079 6080 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); 6081 INIT_LIST_HEAD(&rq->scx.runnable_list); 6082 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals); 6083 6084 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n)); 6085 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n)); 6086 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n)); 6087 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n)); 6088 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn); 6089 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn); 6090 6091 if (cpu_online(cpu)) 6092 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE; 6093 } 6094 6095 register_sysrq_key('S', &sysrq_sched_ext_reset_op); 6096 register_sysrq_key('D', &sysrq_sched_ext_dump_op); 6097 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); 6098 } 6099 6100 6101 /******************************************************************************** 6102 * Helpers that can be called from the BPF scheduler. 6103 */ 6104 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags) 6105 { 6106 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) 6107 return false; 6108 6109 lockdep_assert_irqs_disabled(); 6110 6111 if (unlikely(!p)) { 6112 scx_ops_error("called with NULL task"); 6113 return false; 6114 } 6115 6116 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) { 6117 scx_ops_error("invalid enq_flags 0x%llx", enq_flags); 6118 return false; 6119 } 6120 6121 return true; 6122 } 6123 6124 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id, 6125 u64 enq_flags) 6126 { 6127 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6128 struct task_struct *ddsp_task; 6129 6130 ddsp_task = __this_cpu_read(direct_dispatch_task); 6131 if (ddsp_task) { 6132 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags); 6133 return; 6134 } 6135 6136 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) { 6137 scx_ops_error("dispatch buffer overflow"); 6138 return; 6139 } 6140 6141 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){ 6142 .task = p, 6143 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, 6144 .dsq_id = dsq_id, 6145 .enq_flags = enq_flags, 6146 }; 6147 } 6148 6149 __bpf_kfunc_start_defs(); 6150 6151 /** 6152 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ 6153 * @p: task_struct to insert 6154 * @dsq_id: DSQ to insert into 6155 * @slice: duration @p can run for in nsecs, 0 to keep the current value 6156 * @enq_flags: SCX_ENQ_* 6157 * 6158 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to 6159 * call this function spuriously. Can be called from ops.enqueue(), 6160 * ops.select_cpu(), and ops.dispatch(). 6161 * 6162 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch 6163 * and @p must match the task being enqueued. 
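 *
 * A minimal sketch of dispatching to the global DSQ from ops.enqueue(),
 * assuming the BPF_STRUCT_OPS() convenience macro from the tools/sched_ext
 * common headers (my_enqueue is illustrative):
 *
 *	void BPF_STRUCT_OPS(my_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}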
6164 * 6165 * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p 6166 * will be directly inserted into the corresponding dispatch queue after 6167 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be 6168 * inserted into the local DSQ of the CPU returned by ops.select_cpu(). 6169 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the 6170 * task is inserted. 6171 * 6172 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id 6173 * and this function can be called upto ops.dispatch_max_batch times to insert 6174 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the 6175 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter. 6176 * 6177 * This function doesn't have any locking restrictions and may be called under 6178 * BPF locks (in the future when BPF introduces more flexible locking). 6179 * 6180 * @p is allowed to run for @slice. The scheduling path is triggered on slice 6181 * exhaustion. If zero, the current residual slice is maintained. If 6182 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with 6183 * scx_bpf_kick_cpu() to trigger scheduling. 6184 */ 6185 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, 6186 u64 enq_flags) 6187 { 6188 if (!scx_dsq_insert_preamble(p, enq_flags)) 6189 return; 6190 6191 if (slice) 6192 p->scx.slice = slice; 6193 else 6194 p->scx.slice = p->scx.slice ?: 1; 6195 6196 scx_dsq_insert_commit(p, dsq_id, enq_flags); 6197 } 6198 6199 /* for backward compatibility, will be removed in v6.15 */ 6200 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, 6201 u64 enq_flags) 6202 { 6203 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()"); 6204 scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags); 6205 } 6206 6207 /** 6208 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ 6209 * @p: task_struct to insert 6210 * @dsq_id: DSQ to insert into 6211 * @slice: duration @p can run for in nsecs, 0 to keep the current value 6212 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ 6213 * @enq_flags: SCX_ENQ_* 6214 * 6215 * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id. 6216 * Tasks queued into the priority queue are ordered by @vtime. All other aspects 6217 * are identical to scx_bpf_dsq_insert(). 6218 * 6219 * @vtime ordering is according to time_before64() which considers wrapping. A 6220 * numerically larger vtime may indicate an earlier position in the ordering and 6221 * vice-versa. 6222 * 6223 * A DSQ can only be used as a FIFO or priority queue at any given time and this 6224 * function must not be called on a DSQ which already has one or more FIFO tasks 6225 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and 6226 * SCX_DSQ_GLOBAL) cannot be used as priority queues. 
6227 */ 6228 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, 6229 u64 slice, u64 vtime, u64 enq_flags) 6230 { 6231 if (!scx_dsq_insert_preamble(p, enq_flags)) 6232 return; 6233 6234 if (slice) 6235 p->scx.slice = slice; 6236 else 6237 p->scx.slice = p->scx.slice ?: 1; 6238 6239 p->scx.dsq_vtime = vtime; 6240 6241 scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 6242 } 6243 6244 /* for backward compatibility, will be removed in v6.15 */ 6245 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, 6246 u64 slice, u64 vtime, u64 enq_flags) 6247 { 6248 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()"); 6249 scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags); 6250 } 6251 6252 __bpf_kfunc_end_defs(); 6253 6254 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) 6255 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU) 6256 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU) 6257 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU) 6258 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU) 6259 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) 6260 6261 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = { 6262 .owner = THIS_MODULE, 6263 .set = &scx_kfunc_ids_enqueue_dispatch, 6264 }; 6265 6266 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit, 6267 struct task_struct *p, u64 dsq_id, u64 enq_flags) 6268 { 6269 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq; 6270 struct rq *this_rq, *src_rq, *locked_rq; 6271 bool dispatched = false; 6272 bool in_balance; 6273 unsigned long flags; 6274 6275 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH)) 6276 return false; 6277 6278 /* 6279 * Can be called from either ops.dispatch() locking this_rq() or any 6280 * context where no rq lock is held. In the latter case, lock @p's task_rq which 6281 * we'll likely need anyway. 6282 */ 6283 src_rq = task_rq(p); 6284 6285 local_irq_save(flags); 6286 this_rq = this_rq(); 6287 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE; 6288 6289 if (in_balance) { 6290 if (this_rq != src_rq) { 6291 raw_spin_rq_unlock(this_rq); 6292 raw_spin_rq_lock(src_rq); 6293 } 6294 } else { 6295 raw_spin_rq_lock(src_rq); 6296 } 6297 6298 /* 6299 * If the BPF scheduler keeps calling this function repeatedly, it can 6300 * cause similar live-lock conditions as consume_dispatch_q(). Insert a 6301 * breather if necessary. 6302 */ 6303 scx_ops_breather(src_rq); 6304 6305 locked_rq = src_rq; 6306 raw_spin_lock(&src_dsq->lock); 6307 6308 /* 6309 * Did someone else get to it? @p could have already left $src_dsq, got 6310 * re-enqueued, or be in the process of being consumed by someone else. 6311 */ 6312 if (unlikely(p->scx.dsq != src_dsq || 6313 u32_before(kit->cursor.priv, p->scx.dsq_seq) || 6314 p->scx.holding_cpu >= 0) || 6315 WARN_ON_ONCE(src_rq != task_rq(p))) { 6316 raw_spin_unlock(&src_dsq->lock); 6317 goto out; 6318 } 6319 6320 /* @p is still on $src_dsq and stable, determine the destination */ 6321 dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p); 6322 6323 /* 6324 * Apply vtime and slice updates before moving so that the new time is 6325 * visible before inserting into $dst_dsq. @p is still on $src_dsq but 6326 * this is safe as we're locking it.
6327 */ 6328 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME) 6329 p->scx.dsq_vtime = kit->vtime; 6330 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE) 6331 p->scx.slice = kit->slice; 6332 6333 /* execute move */ 6334 locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq); 6335 dispatched = true; 6336 out: 6337 if (in_balance) { 6338 if (this_rq != locked_rq) { 6339 raw_spin_rq_unlock(locked_rq); 6340 raw_spin_rq_lock(this_rq); 6341 } 6342 } else { 6343 raw_spin_rq_unlock_irqrestore(locked_rq, flags); 6344 } 6345 6346 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE | 6347 __SCX_DSQ_ITER_HAS_VTIME); 6348 return dispatched; 6349 } 6350 6351 __bpf_kfunc_start_defs(); 6352 6353 /** 6354 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots 6355 * 6356 * Can only be called from ops.dispatch(). 6357 */ 6358 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void) 6359 { 6360 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 6361 return 0; 6362 6363 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor); 6364 } 6365 6366 /** 6367 * scx_bpf_dispatch_cancel - Cancel the latest dispatch 6368 * 6369 * Cancel the latest dispatch. Can be called multiple times to cancel further 6370 * dispatches. Can only be called from ops.dispatch(). 6371 */ 6372 __bpf_kfunc void scx_bpf_dispatch_cancel(void) 6373 { 6374 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6375 6376 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 6377 return; 6378 6379 if (dspc->cursor > 0) 6380 dspc->cursor--; 6381 else 6382 scx_ops_error("dispatch buffer underflow"); 6383 } 6384 6385 /** 6386 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ 6387 * @dsq_id: DSQ to move task from 6388 * 6389 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's 6390 * local DSQ for execution. Can only be called from ops.dispatch(). 6391 * 6392 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert() 6393 * before trying to move from the specified DSQ. It may also grab rq locks and 6394 * thus can't be called under any BPF locks. 6395 * 6396 * Returns %true if a task has been moved, %false if there isn't any task to 6397 * move. 6398 */ 6399 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id) 6400 { 6401 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6402 struct scx_dispatch_q *dsq; 6403 6404 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 6405 return false; 6406 6407 flush_dispatch_buf(dspc->rq); 6408 6409 dsq = find_user_dsq(dsq_id); 6410 if (unlikely(!dsq)) { 6411 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id); 6412 return false; 6413 } 6414 6415 if (consume_dispatch_q(dspc->rq, dsq)) { 6416 /* 6417 * A successfully consumed task can be dequeued before it starts 6418 * running while the CPU is trying to migrate other dispatched 6419 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty 6420 * local DSQ. 
6421 */ 6422 dspc->nr_tasks++; 6423 return true; 6424 } else { 6425 return false; 6426 } 6427 } 6428 6429 /* for backward compatibility, will be removed in v6.15 */ 6430 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id) 6431 { 6432 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); 6433 return scx_bpf_dsq_move_to_local(dsq_id); 6434 } 6435 6436 /** 6437 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs 6438 * @it__iter: DSQ iterator in progress 6439 * @slice: duration the moved task can run for in nsecs 6440 * 6441 * Override the slice of the next task that will be moved from @it__iter using 6442 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous 6443 * slice duration is kept. 6444 */ 6445 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, 6446 u64 slice) 6447 { 6448 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6449 6450 kit->slice = slice; 6451 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE; 6452 } 6453 6454 /* for backward compatibility, will be removed in v6.15 */ 6455 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice( 6456 struct bpf_iter_scx_dsq *it__iter, u64 slice) 6457 { 6458 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()"); 6459 scx_bpf_dsq_move_set_slice(it__iter, slice); 6460 } 6461 6462 /** 6463 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs 6464 * @it__iter: DSQ iterator in progress 6465 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ 6466 * 6467 * Override the vtime of the next task that will be moved from @it__iter using 6468 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous 6469 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the 6470 * override is ignored and cleared. 6471 */ 6472 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, 6473 u64 vtime) 6474 { 6475 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6476 6477 kit->vtime = vtime; 6478 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME; 6479 } 6480 6481 /* for backward compatibility, will be removed in v6.15 */ 6482 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime( 6483 struct bpf_iter_scx_dsq *it__iter, u64 vtime) 6484 { 6485 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()"); 6486 scx_bpf_dsq_move_set_vtime(it__iter, vtime); 6487 } 6488 6489 /** 6490 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ 6491 * @it__iter: DSQ iterator in progress 6492 * @p: task to transfer 6493 * @dsq_id: DSQ to move @p to 6494 * @enq_flags: SCX_ENQ_* 6495 * 6496 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ 6497 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can 6498 * be the destination. 6499 * 6500 * For the transfer to be successful, @p must still be on the DSQ and have been 6501 * queued before the DSQ iteration started. This function doesn't care whether 6502 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have 6503 * been queued before the iteration started. 6504 * 6505 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update. 6506 * 6507 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq 6508 * lock (e.g. BPF timers or SYSCALL programs).
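 *
 * A sketch moving the first task of a user DSQ to the local DSQ, assuming
 * MY_DSQ exists and that bpf_for_each() and BPF_FOR_EACH_ITER come from the
 * tools/sched_ext common headers:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0))
 *			break;
 *	}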
6509 * 6510 * Returns %true if @p has been consumed, %false if @p had already been consumed 6511 * or dequeued. 6512 */ 6513 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, 6514 struct task_struct *p, u64 dsq_id, 6515 u64 enq_flags) 6516 { 6517 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6518 p, dsq_id, enq_flags); 6519 } 6520 6521 /* for backward compatibility, will be removed in v6.15 */ 6522 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter, 6523 struct task_struct *p, u64 dsq_id, 6524 u64 enq_flags) 6525 { 6526 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); 6527 return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags); 6528 } 6529 6530 /** 6531 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ 6532 * @it__iter: DSQ iterator in progress 6533 * @p: task to transfer 6534 * @dsq_id: DSQ to move @p to 6535 * @enq_flags: SCX_ENQ_* 6536 * 6537 * Transfer @p which is on the DSQ currently iterated by @it__iter to the 6538 * priority queue of the DSQ specified by @dsq_id. The destination must be a 6539 * user DSQ as only user DSQs support priority queue. 6540 * 6541 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice() 6542 * and scx_bpf_dsq_move_set_vtime() to update. 6543 * 6544 * All other aspects are identical to scx_bpf_dsq_move(). See 6545 * scx_bpf_dsq_insert_vtime() for more information on @vtime. 6546 */ 6547 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, 6548 struct task_struct *p, u64 dsq_id, 6549 u64 enq_flags) 6550 { 6551 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6552 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 6553 } 6554 6555 /* for backward compatibility, will be removed in v6.15 */ 6556 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter, 6557 struct task_struct *p, u64 dsq_id, 6558 u64 enq_flags) 6559 { 6560 printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_vtime() renamed to scx_bpf_dsq_move_vtime()"); 6561 return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags); 6562 } 6563 6564 __bpf_kfunc_end_defs(); 6565 6566 BTF_KFUNCS_START(scx_kfunc_ids_dispatch) 6567 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) 6568 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) 6569 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local) 6570 BTF_ID_FLAGS(func, scx_bpf_consume) 6571 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) 6572 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) 6573 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6574 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6575 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice) 6576 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime) 6577 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU) 6578 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU) 6579 BTF_KFUNCS_END(scx_kfunc_ids_dispatch) 6580 6581 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { 6582 .owner = THIS_MODULE, 6583 .set = &scx_kfunc_ids_dispatch, 6584 }; 6585 6586 __bpf_kfunc_start_defs(); 6587 6588 /** 6589 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6590 * 6591 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6592 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of 6593 * processed tasks. Can only be called from ops.cpu_release(). 
6594 */ 6595 __bpf_kfunc u32 scx_bpf_reenqueue_local(void) 6596 { 6597 LIST_HEAD(tasks); 6598 u32 nr_enqueued = 0; 6599 struct rq *rq; 6600 struct task_struct *p, *n; 6601 6602 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE)) 6603 return 0; 6604 6605 rq = cpu_rq(smp_processor_id()); 6606 lockdep_assert_rq_held(rq); 6607 6608 /* 6609 * The BPF scheduler may choose to dispatch tasks back to 6610 * @rq->scx.local_dsq. Move all candidate tasks off to a private list 6611 * first to avoid processing the same tasks repeatedly. 6612 */ 6613 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, 6614 scx.dsq_list.node) { 6615 /* 6616 * If @p is being migrated, @p's current CPU may not agree with 6617 * its allowed CPUs and the migration_cpu_stop is about to 6618 * deactivate and re-activate @p anyway. Skip re-enqueueing. 6619 * 6620 * While racing sched property changes may also dequeue and 6621 * re-enqueue a migrating task while its current CPU and allowed 6622 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to 6623 * the current local DSQ for running tasks and thus are not 6624 * visible to the BPF scheduler. 6625 * 6626 * Also skip re-enqueueing tasks that can only run on this 6627 * CPU, as they would just be re-added to the same local 6628 * DSQ without any benefit. 6629 */ 6630 if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1) 6631 continue; 6632 6633 dispatch_dequeue(rq, p); 6634 list_add_tail(&p->scx.dsq_list.node, &tasks); 6635 } 6636 6637 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { 6638 list_del_init(&p->scx.dsq_list.node); 6639 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); 6640 nr_enqueued++; 6641 } 6642 6643 return nr_enqueued; 6644 } 6645 6646 __bpf_kfunc_end_defs(); 6647 6648 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release) 6649 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) 6650 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release) 6651 6652 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { 6653 .owner = THIS_MODULE, 6654 .set = &scx_kfunc_ids_cpu_release, 6655 }; 6656 6657 __bpf_kfunc_start_defs(); 6658 6659 /** 6660 * scx_bpf_create_dsq - Create a custom DSQ 6661 * @dsq_id: DSQ to create 6662 * @node: NUMA node to allocate from 6663 * 6664 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable 6665 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog. 
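 *
 * E.g., a DSQ is commonly created from ops.init(), here using the
 * BPF_STRUCT_OPS_SLEEPABLE() macro from the tools/sched_ext common headers
 * (MY_DSQ is illustrative):
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(my_init)
 *	{
 *		return scx_bpf_create_dsq(MY_DSQ, NUMA_NO_NODE);
 *	}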
6666 */ 6667 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) 6668 { 6669 if (unlikely(node >= (int)nr_node_ids || 6670 (node < 0 && node != NUMA_NO_NODE))) 6671 return -EINVAL; 6672 return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node)); 6673 } 6674 6675 __bpf_kfunc_end_defs(); 6676 6677 BTF_KFUNCS_START(scx_kfunc_ids_unlocked) 6678 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) 6679 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) 6680 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) 6681 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6682 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6683 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice) 6684 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime) 6685 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU) 6686 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU) 6687 BTF_KFUNCS_END(scx_kfunc_ids_unlocked) 6688 6689 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = { 6690 .owner = THIS_MODULE, 6691 .set = &scx_kfunc_ids_unlocked, 6692 }; 6693 6694 __bpf_kfunc_start_defs(); 6695 6696 /** 6697 * scx_bpf_kick_cpu - Trigger reschedule on a CPU 6698 * @cpu: cpu to kick 6699 * @flags: %SCX_KICK_* flags 6700 * 6701 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or 6702 * trigger rescheduling on a busy CPU. This can be called from any online 6703 * scx_ops operation and the actual kicking is performed asynchronously through 6704 * an irq work. 6705 */ 6706 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags) 6707 { 6708 struct rq *this_rq; 6709 unsigned long irq_flags; 6710 6711 if (!ops_cpu_valid(cpu, NULL)) 6712 return; 6713 6714 local_irq_save(irq_flags); 6715 6716 this_rq = this_rq(); 6717 6718 /* 6719 * While bypassing for PM ops, IRQ handling may not be online which can 6720 * lead to irq_work_queue() malfunction such as infinite busy wait for 6721 * IRQ status update. Suppress kicking. 6722 */ 6723 if (scx_rq_bypassing(this_rq)) 6724 goto out; 6725 6726 /* 6727 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting 6728 * rq locks. We can probably be smarter and avoid bouncing if called 6729 * from ops which don't hold a rq lock. 6730 */ 6731 if (flags & SCX_KICK_IDLE) { 6732 struct rq *target_rq = cpu_rq(cpu); 6733 6734 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT))) 6735 scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); 6736 6737 if (raw_spin_rq_trylock(target_rq)) { 6738 if (can_skip_idle_kick(target_rq)) { 6739 raw_spin_rq_unlock(target_rq); 6740 goto out; 6741 } 6742 raw_spin_rq_unlock(target_rq); 6743 } 6744 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle); 6745 } else { 6746 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick); 6747 6748 if (flags & SCX_KICK_PREEMPT) 6749 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt); 6750 if (flags & SCX_KICK_WAIT) 6751 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait); 6752 } 6753 6754 irq_work_queue(&this_rq->scx.kick_cpus_irq_work); 6755 out: 6756 local_irq_restore(irq_flags); 6757 } 6758 6759 /** 6760 * scx_bpf_dsq_nr_queued - Return the number of queued tasks 6761 * @dsq_id: id of the DSQ 6762 * 6763 * Return the number of tasks in the DSQ matching @dsq_id. If not found, 6764 * -%ENOENT is returned. 
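 *
 * E.g., from ops.dispatch(), a scheduler may consume a user DSQ only when
 * it is non-empty (a sketch; MY_DSQ is illustrative):
 *
 *	if (scx_bpf_dsq_nr_queued(MY_DSQ) > 0)
 *		scx_bpf_dsq_move_to_local(MY_DSQ);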
6765 */ 6766 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) 6767 { 6768 struct scx_dispatch_q *dsq; 6769 s32 ret; 6770 6771 preempt_disable(); 6772 6773 if (dsq_id == SCX_DSQ_LOCAL) { 6774 ret = READ_ONCE(this_rq()->scx.local_dsq.nr); 6775 goto out; 6776 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 6777 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 6778 6779 if (ops_cpu_valid(cpu, NULL)) { 6780 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr); 6781 goto out; 6782 } 6783 } else { 6784 dsq = find_user_dsq(dsq_id); 6785 if (dsq) { 6786 ret = READ_ONCE(dsq->nr); 6787 goto out; 6788 } 6789 } 6790 ret = -ENOENT; 6791 out: 6792 preempt_enable(); 6793 return ret; 6794 } 6795 6796 /** 6797 * scx_bpf_destroy_dsq - Destroy a custom DSQ 6798 * @dsq_id: DSQ to destroy 6799 * 6800 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with 6801 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is 6802 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ 6803 * which doesn't exist. Can be called from any online scx_ops operation. 6804 */ 6805 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id) 6806 { 6807 destroy_dsq(dsq_id); 6808 } 6809 6810 /** 6811 * bpf_iter_scx_dsq_new - Create a DSQ iterator 6812 * @it: iterator to initialize 6813 * @dsq_id: DSQ to iterate 6814 * @flags: %SCX_DSQ_ITER_* 6815 * 6816 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk 6817 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes 6818 * tasks which are already queued when this function is invoked. 6819 */ 6820 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, 6821 u64 flags) 6822 { 6823 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6824 6825 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) > 6826 sizeof(struct bpf_iter_scx_dsq)); 6827 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) != 6828 __alignof__(struct bpf_iter_scx_dsq)); 6829 6830 /* 6831 * next() and destroy() will be called regardless of the return value. 6832 * Always clear $kit->dsq. 6833 */ 6834 kit->dsq = NULL; 6835 6836 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS) 6837 return -EINVAL; 6838 6839 kit->dsq = find_user_dsq(dsq_id); 6840 if (!kit->dsq) 6841 return -ENOENT; 6842 6843 INIT_LIST_HEAD(&kit->cursor.node); 6844 kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags; 6845 kit->cursor.priv = READ_ONCE(kit->dsq->seq); 6846 6847 return 0; 6848 } 6849 6850 /** 6851 * bpf_iter_scx_dsq_next - Progress a DSQ iterator 6852 * @it: iterator to progress 6853 * 6854 * Return the next task. See bpf_iter_scx_dsq_new(). 6855 */ 6856 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) 6857 { 6858 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6859 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV; 6860 struct task_struct *p; 6861 unsigned long flags; 6862 6863 if (!kit->dsq) 6864 return NULL; 6865 6866 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 6867 6868 if (list_empty(&kit->cursor.node)) 6869 p = NULL; 6870 else 6871 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list); 6872 6873 /* 6874 * Only tasks which were queued before the iteration started are 6875 * visible. This bounds BPF iterations and guarantees that vtime never 6876 * jumps in the other direction while iterating.
6877 */ 6878 do { 6879 p = nldsq_next_task(kit->dsq, p, rev); 6880 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq))); 6881 6882 if (p) { 6883 if (rev) 6884 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node); 6885 else 6886 list_move(&kit->cursor.node, &p->scx.dsq_list.node); 6887 } else { 6888 list_del_init(&kit->cursor.node); 6889 } 6890 6891 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 6892 6893 return p; 6894 } 6895 6896 /** 6897 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator 6898 * @it: iterator to destroy 6899 * 6900 * Undo bpf_iter_scx_dsq_new(). 6901 */ 6902 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) 6903 { 6904 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6905 6906 if (!kit->dsq) 6907 return; 6908 6909 if (!list_empty(&kit->cursor.node)) { 6910 unsigned long flags; 6911 6912 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 6913 list_del_init(&kit->cursor.node); 6914 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 6915 } 6916 kit->dsq = NULL; 6917 } 6918 6919 __bpf_kfunc_end_defs(); 6920 6921 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size, 6922 char *fmt, unsigned long long *data, u32 data__sz) 6923 { 6924 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true }; 6925 s32 ret; 6926 6927 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 || 6928 (data__sz && !data)) { 6929 scx_ops_error("invalid data=%p and data__sz=%u", 6930 (void *)data, data__sz); 6931 return -EINVAL; 6932 } 6933 6934 ret = copy_from_kernel_nofault(data_buf, data, data__sz); 6935 if (ret < 0) { 6936 scx_ops_error("failed to read data fields (%d)", ret); 6937 return ret; 6938 } 6939 6940 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8, 6941 &bprintf_data); 6942 if (ret < 0) { 6943 scx_ops_error("format preparation failed (%d)", ret); 6944 return ret; 6945 } 6946 6947 ret = bstr_printf(line_buf, line_size, fmt, 6948 bprintf_data.bin_args); 6949 bpf_bprintf_cleanup(&bprintf_data); 6950 if (ret < 0) { 6951 scx_ops_error("(\"%s\", %p, %u) failed to format", 6952 fmt, data, data__sz); 6953 return ret; 6954 } 6955 6956 return ret; 6957 } 6958 6959 static s32 bstr_format(struct scx_bstr_buf *buf, 6960 char *fmt, unsigned long long *data, u32 data__sz) 6961 { 6962 return __bstr_format(buf->data, buf->line, sizeof(buf->line), 6963 fmt, data, data__sz); 6964 } 6965 6966 __bpf_kfunc_start_defs(); 6967 6968 /** 6969 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler. 6970 * @exit_code: Exit value to pass to user space via struct scx_exit_info. 6971 * @fmt: error message format string 6972 * @data: format string parameters packaged using ___bpf_fill() macro 6973 * @data__sz: @data len, must end in '__sz' for the verifier 6974 * 6975 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops 6976 * disabling.
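 *
 * BPF schedulers usually reach this kfunc through the scx_bpf_exit() wrapper
 * macro in the tools/sched_ext common headers, which packages the format
 * arguments (a sketch; the exit code is illustrative):
 *
 *	scx_bpf_exit(MY_ECODE, "userspace component exited (%d)", status);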
6977 */ 6978 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt, 6979 unsigned long long *data, u32 data__sz) 6980 { 6981 unsigned long flags; 6982 6983 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6984 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6985 scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s", 6986 scx_exit_bstr_buf.line); 6987 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6988 } 6989 6990 /** 6991 * scx_bpf_error_bstr - Indicate fatal error 6992 * @fmt: error message format string 6993 * @data: format string parameters packaged using ___bpf_fill() macro 6994 * @data__sz: @data len, must end in '__sz' for the verifier 6995 * 6996 * Indicate that the BPF scheduler encountered a fatal error and initiate ops 6997 * disabling. 6998 */ 6999 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data, 7000 u32 data__sz) 7001 { 7002 unsigned long flags; 7003 7004 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 7005 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 7006 scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s", 7007 scx_exit_bstr_buf.line); 7008 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 7009 } 7010 7011 /** 7012 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler 7013 * @fmt: format string 7014 * @data: format string parameters packaged using ___bpf_fill() macro 7015 * @data__sz: @data len, must end in '__sz' for the verifier 7016 * 7017 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and 7018 * dump_task() to generate extra debug dump specific to the BPF scheduler. 7019 * 7020 * The extra dump may be multiple lines. A single line may be split over 7021 * multiple calls. The last line is automatically terminated. 7022 */ 7023 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, 7024 u32 data__sz) 7025 { 7026 struct scx_dump_data *dd = &scx_dump_data; 7027 struct scx_bstr_buf *buf = &dd->buf; 7028 s32 ret; 7029 7030 if (raw_smp_processor_id() != dd->cpu) { 7031 scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends"); 7032 return; 7033 } 7034 7035 /* append the formatted string to the line buf */ 7036 ret = __bstr_format(buf->data, buf->line + dd->cursor, 7037 sizeof(buf->line) - dd->cursor, fmt, data, data__sz); 7038 if (ret < 0) { 7039 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)", 7040 dd->prefix, fmt, data, data__sz, ret); 7041 return; 7042 } 7043 7044 dd->cursor += ret; 7045 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line)); 7046 7047 if (!dd->cursor) 7048 return; 7049 7050 /* 7051 * If the line buf overflowed or ends in a newline, flush it into the 7052 * dump. This is to allow the caller to generate a single line over 7053 * multiple calls. As ops_dump_flush() can also handle multiple lines in 7054 * the line buf, the only case which can lead to an unexpected 7055 * truncation is when the caller keeps generating newlines in the middle 7056 * instead of the end consecutively. Don't do that. 7057 */ 7058 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n') 7059 ops_dump_flush(); 7060 } 7061 7062 /** 7063 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU 7064 * @cpu: CPU of interest 7065 * 7066 * Return the maximum relative capacity of @cpu in relation to the most 7067 * performant CPU in the system. The return value is in the range [1, 7068 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur(). 
7069 */ 7070 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu) 7071 { 7072 if (ops_cpu_valid(cpu, NULL)) 7073 return arch_scale_cpu_capacity(cpu); 7074 else 7075 return SCX_CPUPERF_ONE; 7076 } 7077 7078 /** 7079 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU 7080 * @cpu: CPU of interest 7081 * 7082 * Return the current relative performance of @cpu in relation to its maximum. 7083 * The return value is in the range [1, %SCX_CPUPERF_ONE]. 7084 * 7085 * The current performance level of a CPU in relation to the maximum performance 7086 * available in the system can be calculated as follows: 7087 * 7088 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE 7089 * 7090 * The result is in the range [1, %SCX_CPUPERF_ONE]. 7091 */ 7092 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu) 7093 { 7094 if (ops_cpu_valid(cpu, NULL)) 7095 return arch_scale_freq_capacity(cpu); 7096 else 7097 return SCX_CPUPERF_ONE; 7098 } 7099 7100 /** 7101 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU 7102 * @cpu: CPU of interest 7103 * @perf: target performance level [0, %SCX_CPUPERF_ONE] 7104 * 7105 * Set the target performance level of @cpu to @perf. @perf is in linear 7106 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the 7107 * schedutil cpufreq governor chooses the target frequency. 7108 * 7109 * The actual performance level chosen, CPU grouping, and the overhead and 7110 * latency of the operations are dependent on the hardware and cpufreq driver in 7111 * use. Consult hardware and cpufreq documentation for more information. The 7112 * current performance level can be monitored using scx_bpf_cpuperf_cur(). 7113 */ 7114 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf) 7115 { 7116 if (unlikely(perf > SCX_CPUPERF_ONE)) { 7117 scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu); 7118 return; 7119 } 7120 7121 if (ops_cpu_valid(cpu, NULL)) { 7122 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq(); 7123 struct rq_flags rf; 7124 7125 /* 7126 * When called with an rq lock held, restrict the operation 7127 * to the corresponding CPU to prevent ABBA deadlocks. 7128 */ 7129 if (locked_rq && rq != locked_rq) { 7130 scx_ops_error("Invalid target CPU %d", cpu); 7131 return; 7132 } 7133 7134 /* 7135 * If no rq lock is held, allow to operate on any CPU by 7136 * acquiring the corresponding rq lock. 7137 */ 7138 if (!locked_rq) { 7139 rq_lock_irqsave(rq, &rf); 7140 update_rq_clock(rq); 7141 } 7142 7143 rq->scx.cpuperf_target = perf; 7144 cpufreq_update_util(rq, 0); 7145 7146 if (!locked_rq) 7147 rq_unlock_irqrestore(rq, &rf); 7148 } 7149 } 7150 7151 /** 7152 * scx_bpf_nr_node_ids - Return the number of possible node IDs 7153 * 7154 * All valid node IDs in the system are smaller than the returned value. 7155 */ 7156 __bpf_kfunc u32 scx_bpf_nr_node_ids(void) 7157 { 7158 return nr_node_ids; 7159 } 7160 7161 /** 7162 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs 7163 * 7164 * All valid CPU IDs in the system are smaller than the returned value. 
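 *
 * E.g., to walk every possible CPU ID, assuming the bpf_for() open-coded
 * iterator macro from the common BPF headers (do_per_cpu_work() is
 * illustrative):
 *
 *	s32 cpu;
 *
 *	bpf_for(cpu, 0, scx_bpf_nr_cpu_ids())
 *		do_per_cpu_work(cpu);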
 */
__bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
{
	return nr_cpu_ids;
}

/**
 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
{
	return cpu_possible_mask;
}

/**
 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
{
	return cpu_online_mask;
}

/**
 * scx_bpf_put_cpumask - Release a possible/online cpumask
 * @cpumask: cpumask to release
 */
__bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
{
	/*
	 * Empty function body because we aren't actually acquiring or
	 * releasing a reference to a global cpumask, which is read-only in
	 * the caller and is never released. The acquire / release semantics
	 * here are just used to make the cpumask a trusted pointer in the
	 * caller.
	 */
}

/**
 * scx_bpf_task_running - Is task currently running?
 * @p: task of interest
 */
__bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
{
	return task_rq(p)->curr == p;
}

/**
 * scx_bpf_task_cpu - CPU a task is currently associated with
 * @p: task of interest
 */
__bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
{
	return task_cpu(p);
}

/**
 * scx_bpf_cpu_rq - Fetch the rq of a CPU
 * @cpu: CPU of the rq
 */
__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
{
	if (!ops_cpu_valid(cpu, NULL))
		return NULL;

	return cpu_rq(cpu);
}

/**
 * scx_bpf_task_cgroup - Return the sched cgroup of a task
 * @p: task of interest
 *
 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated
 * with from the scheduler's POV. SCX operations should use this function to
 * determine @p's current cgroup as, unlike following @p->cgroups,
 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t.
 * all rq-locked operations. Can be called on the parameter tasks of rq-locked
 * operations. The restriction guarantees that @p's rq is locked by the
 * caller.
 */
#ifdef CONFIG_CGROUP_SCHED
__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
{
	struct task_group *tg = p->sched_task_group;
	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;

	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
		goto out;

	cgrp = tg_cgrp(tg);

out:
	cgroup_get(cgrp);
	return cgrp;
}
#endif

/**
 * scx_bpf_now - Returns a high-performance monotonically non-decreasing
 * clock for the current CPU. The clock returned is in nanoseconds.
 *
 * It provides the following properties:
 *
 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
 * to account for execution time and track tasks' runtime properties.
 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
 * eventually reads a hardware timestamp counter -- is neither performant nor
 * scalable. scx_bpf_now() aims to provide a high-performance clock by
 * using the rq clock in the scheduler core whenever possible.
 *
 * 2) High enough resolution for the BPF scheduler use cases: In most BPF
 * scheduler use cases, the required clock resolution is lower than the most
 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
 * uses the rq clock in the scheduler core whenever it is valid. It considers
 * that the rq clock is valid from the time the rq clock is updated
 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
 *
 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
 * guarantees the clock never goes backward when comparing values read on the
 * same CPU. On the other hand, when comparing clocks read on different CPUs,
 * there is no such guarantee -- the clock can go backward. The clock is
 * monotonically *non-decreasing*, not strictly increasing, so two
 * scx_bpf_now() calls on the same CPU return the same value for as long as
 * the rq clock remains valid.
 */
__bpf_kfunc u64 scx_bpf_now(void)
{
	struct rq *rq;
	u64 clock;

	preempt_disable();

	rq = this_rq();
	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
		/*
		 * If the rq clock is valid, use the cached rq clock.
		 *
		 * Note that scx_bpf_now() is re-entrant between a process
		 * context and an interrupt context (e.g., timer interrupt).
		 * However, we don't need to consider the race between them
		 * because such a race is not observable from a caller.
		 */
		clock = READ_ONCE(rq->scx.clock);
	} else {
		/*
		 * Otherwise, return a fresh rq clock.
		 *
		 * The rq clock is updated outside of the rq lock.
		 * In this case, keep the updated rq clock invalid so the next
		 * kfunc call outside the rq lock gets a fresh rq clock.
		 */
		clock = sched_clock_cpu(cpu_of(rq));
	}

	preempt_enable();

	return clock;
}

/**
 * scx_bpf_events - Copy the system-wide event counters to @events
 * @events: output buffer from a BPF program
 * @events__sz: @events len, must end in '__sz' for the verifier
 */
__bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
				size_t events__sz)
{
	struct scx_event_stats e_sys, *e_cpu;
	int cpu;

	/* Aggregate per-CPU event counters into the system-wide counters. */
	memset(&e_sys, 0, sizeof(e_sys));
	for_each_possible_cpu(cpu) {
		e_cpu = per_cpu_ptr(&event_stats_cpu, cpu);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SLICE_DFL);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DURATION);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DISPATCH);
		scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_ACTIVATE);
	}

	/*
	 * We cannot entirely trust a BPF-provided size since a BPF program
	 * might be compiled against a different vmlinux.h, in which
	 * scx_event_stats may be larger (a newer vmlinux.h) or smaller
	 * (an older vmlinux.h) than the kernel's. Hence, use the smaller
	 * of the two sizes to avoid memory corruption.
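	 *
	 * A hedged BPF-side sketch (illustrative; the field name is assumed
	 * to mirror the SCX_EV_* counters aggregated above). The size the
	 * caller passes comes from its own vmlinux.h, which is exactly why
	 * the clamp below is needed:
	 *
	 *	struct scx_event_stats events;
	 *
	 *	scx_bpf_events(&events, sizeof(events));
	 *	bpf_printk("keep_last: %llu",
	 *		   events.SCX_EV_DISPATCH_KEEP_LAST);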
	 */
	events__sz = min(events__sz, sizeof(*events));
	memcpy(events, &e_sys, events__sz);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_any)
BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
#ifdef CONFIG_CGROUP_SCHED
BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
#endif
BTF_ID_FLAGS(func, scx_bpf_now)
BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(scx_kfunc_ids_any)

static const struct btf_kfunc_id_set scx_kfunc_set_any = {
	.owner			= THIS_MODULE,
	.set			= &scx_kfunc_ids_any,
};

static int __init scx_init(void)
{
	int ret;

	/*
	 * kfunc registration can't be done from init_sched_ext_class() as
	 * register_btf_kfunc_id_set() needs most of the system to be up.
	 *
	 * Some kfuncs are context-sensitive and can only be called from
	 * specific SCX ops. They are grouped into BTF sets accordingly.
	 * Unfortunately, BPF currently doesn't have a way of enforcing such
	 * restrictions. Eventually, the verifier should be able to enforce
	 * them. For now, register them all the same way and make each kfunc
	 * explicitly check using scx_kf_allowed().
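	 *
	 * A hedged sketch of that per-kfunc guard pattern (scx_bpf_some_op()
	 * and the mask choice are illustrative only):
	 *
	 *	__bpf_kfunc void scx_bpf_some_op(void)
	 *	{
	 *		if (!scx_kf_allowed(SCX_KF_DISPATCH))
	 *			return;
	 *		...
	 *	}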
	 */
	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_enqueue_dispatch)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_dispatch)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_cpu_release)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_unlocked)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					     &scx_kfunc_set_unlocked)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_any)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					     &scx_kfunc_set_any)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					     &scx_kfunc_set_any))) {
		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
		return ret;
	}

	ret = scx_idle_init();
	if (ret) {
		pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
		return ret;
	}

	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
	if (ret) {
		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
		return ret;
	}

	ret = register_pm_notifier(&scx_pm_notifier);
	if (ret) {
		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
		return ret;
	}

	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
	if (!scx_kset) {
		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
		return -ENOMEM;
	}

	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
	if (ret < 0) {
		pr_err("sched_ext: Failed to add global attributes\n");
		return ret;
	}

	return 0;
}
__initcall(scx_init);