1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst 4 * 5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org> 7 * Copyright (c) 2022 David Vernet <dvernet@meta.com> 8 */ 9 #define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void))) 10 11 enum scx_consts { 12 SCX_DSP_DFL_MAX_BATCH = 32, 13 SCX_DSP_MAX_LOOPS = 32, 14 SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ, 15 16 SCX_EXIT_BT_LEN = 64, 17 SCX_EXIT_MSG_LEN = 1024, 18 SCX_EXIT_DUMP_DFL_LEN = 32768, 19 20 SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE, 21 22 /* 23 * Iterating all tasks may take a while. Periodically drop 24 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls. 25 */ 26 SCX_OPS_TASK_ITER_BATCH = 32, 27 }; 28 29 enum scx_exit_kind { 30 SCX_EXIT_NONE, 31 SCX_EXIT_DONE, 32 33 SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */ 34 SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */ 35 SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */ 36 SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */ 37 38 SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */ 39 SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */ 40 SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */ 41 }; 42 43 /* 44 * An exit code can be specified when exiting with scx_bpf_exit() or 45 * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN 46 * respectively. The codes are 64bit of the format: 47 * 48 * Bits: [63 .. 48 47 .. 32 31 .. 0] 49 * [ SYS ACT ] [ SYS RSN ] [ USR ] 50 * 51 * SYS ACT: System-defined exit actions 52 * SYS RSN: System-defined exit reasons 53 * USR : User-defined exit codes and reasons 54 * 55 * Using the above, users may communicate intention and context by ORing system 56 * actions and/or system reasons with a user-defined exit code. 57 */ 58 enum scx_exit_code { 59 /* Reasons */ 60 SCX_ECODE_RSN_HOTPLUG = 1LLU << 32, 61 62 /* Actions */ 63 SCX_ECODE_ACT_RESTART = 1LLU << 48, 64 }; 65 66 /* 67 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is 68 * being disabled. 69 */ 70 struct scx_exit_info { 71 /* %SCX_EXIT_* - broad category of the exit reason */ 72 enum scx_exit_kind kind; 73 74 /* exit code if gracefully exiting */ 75 s64 exit_code; 76 77 /* textual representation of the above */ 78 const char *reason; 79 80 /* backtrace if exiting due to an error */ 81 unsigned long *bt; 82 u32 bt_len; 83 84 /* informational message */ 85 char *msg; 86 87 /* debug dump */ 88 char *dump; 89 }; 90 91 /* sched_ext_ops.flags */ 92 enum scx_ops_flags { 93 /* 94 * Keep built-in idle tracking even if ops.update_idle() is implemented. 95 */ 96 SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0, 97 98 /* 99 * By default, if there are no other task to run on the CPU, ext core 100 * keeps running the current task even after its slice expires. If this 101 * flag is specified, such tasks are passed to ops.enqueue() with 102 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info. 103 */ 104 SCX_OPS_ENQ_LAST = 1LLU << 1, 105 106 /* 107 * An exiting task may schedule after PF_EXITING is set. In such cases, 108 * bpf_task_from_pid() may not be able to find the task and if the BPF 109 * scheduler depends on pid lookup for dispatching, the task will be 110 * lost leading to various issues including RCU grace period stalls. 
111 * 112 * To mask this problem, by default, unhashed tasks are automatically 113 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't 114 * depend on pid lookups and wants to handle these tasks directly, the 115 * following flag can be used. 116 */ 117 SCX_OPS_ENQ_EXITING = 1LLU << 2, 118 119 /* 120 * If set, only tasks with policy set to SCHED_EXT are attached to 121 * sched_ext. If clear, SCHED_NORMAL tasks are also included. 122 */ 123 SCX_OPS_SWITCH_PARTIAL = 1LLU << 3, 124 125 /* 126 * CPU cgroup support flags 127 */ 128 SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */ 129 130 SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE | 131 SCX_OPS_ENQ_LAST | 132 SCX_OPS_ENQ_EXITING | 133 SCX_OPS_SWITCH_PARTIAL | 134 SCX_OPS_HAS_CGROUP_WEIGHT, 135 }; 136 137 /* argument container for ops.init_task() */ 138 struct scx_init_task_args { 139 /* 140 * Set if ops.init_task() is being invoked on the fork path, as opposed 141 * to the scheduler transition path. 142 */ 143 bool fork; 144 #ifdef CONFIG_EXT_GROUP_SCHED 145 /* the cgroup the task is joining */ 146 struct cgroup *cgroup; 147 #endif 148 }; 149 150 /* argument container for ops.exit_task() */ 151 struct scx_exit_task_args { 152 /* Whether the task exited before running on sched_ext. */ 153 bool cancelled; 154 }; 155 156 /* argument container for ops->cgroup_init() */ 157 struct scx_cgroup_init_args { 158 /* the weight of the cgroup [1..10000] */ 159 u32 weight; 160 }; 161 162 enum scx_cpu_preempt_reason { 163 /* next task is being scheduled by &sched_class_rt */ 164 SCX_CPU_PREEMPT_RT, 165 /* next task is being scheduled by &sched_class_dl */ 166 SCX_CPU_PREEMPT_DL, 167 /* next task is being scheduled by &sched_class_stop */ 168 SCX_CPU_PREEMPT_STOP, 169 /* unknown reason for SCX being preempted */ 170 SCX_CPU_PREEMPT_UNKNOWN, 171 }; 172 173 /* 174 * Argument container for ops->cpu_acquire(). Currently empty, but may be 175 * expanded in the future. 176 */ 177 struct scx_cpu_acquire_args {}; 178 179 /* argument container for ops->cpu_release() */ 180 struct scx_cpu_release_args { 181 /* the reason the CPU was preempted */ 182 enum scx_cpu_preempt_reason reason; 183 184 /* the task that's going to be scheduled on the CPU */ 185 struct task_struct *task; 186 }; 187 188 /* 189 * Informational context provided to dump operations. 190 */ 191 struct scx_dump_ctx { 192 enum scx_exit_kind kind; 193 s64 exit_code; 194 const char *reason; 195 u64 at_ns; 196 u64 at_jiffies; 197 }; 198 199 /** 200 * struct sched_ext_ops - Operation table for BPF scheduler implementation 201 * 202 * Userland can implement an arbitrary scheduling policy by implementing and 203 * loading operations in this table. 204 */ 205 struct sched_ext_ops { 206 /** 207 * select_cpu - Pick the target CPU for a task which is being woken up 208 * @p: task being woken up 209 * @prev_cpu: the cpu @p was on before sleeping 210 * @wake_flags: SCX_WAKE_* 211 * 212 * Decision made here isn't final. @p may be moved to any CPU while it 213 * is getting dispatched for execution later. However, as @p is not on 214 * the rq at this point, getting the eventual execution CPU right here 215 * saves a small bit of overhead down the line. 216 * 217 * If an idle CPU is returned, the CPU is kicked and will try to 218 * dispatch. While an explicit custom mechanism can be added, 219 * select_cpu() serves as the default way to wake up idle CPUs. 220 * 221 * @p may be dispatched directly by calling scx_bpf_dispatch(). 
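 *
 * For example, a minimal implementation might combine the built-in idle CPU
 * selection with a direct dispatch (a sketch only; the BPF_STRUCT_OPS macro
 * and SCX_SLICE_DFL follow the conventions of the example schedulers under
 * tools/sched_ext and are not defined in this file):
 *
 *	s32 BPF_STRUCT_OPS(myops_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		// pick an idle CPU using the default policy
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 *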
If @p 222 * is dispatched, the ops.enqueue() callback will be skipped. Finally, 223 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the 224 * local DSQ of whatever CPU is returned by this callback. 225 */ 226 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags); 227 228 /** 229 * enqueue - Enqueue a task on the BPF scheduler 230 * @p: task being enqueued 231 * @enq_flags: %SCX_ENQ_* 232 * 233 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch() 234 * or enqueue on the BPF scheduler. If not directly dispatched, the bpf 235 * scheduler owns @p and if it fails to dispatch @p, the task will 236 * stall. 237 * 238 * If @p was dispatched from ops.select_cpu(), this callback is 239 * skipped. 240 */ 241 void (*enqueue)(struct task_struct *p, u64 enq_flags); 242 243 /** 244 * dequeue - Remove a task from the BPF scheduler 245 * @p: task being dequeued 246 * @deq_flags: %SCX_DEQ_* 247 * 248 * Remove @p from the BPF scheduler. This is usually called to isolate 249 * the task while updating its scheduling properties (e.g. priority). 250 * 251 * The ext core keeps track of whether the BPF side owns a given task or 252 * not and can gracefully ignore spurious dispatches from BPF side, 253 * which makes it safe to not implement this method. However, depending 254 * on the scheduling logic, this can lead to confusing behaviors - e.g. 255 * scheduling position not being updated across a priority change. 256 */ 257 void (*dequeue)(struct task_struct *p, u64 deq_flags); 258 259 /** 260 * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs 261 * @cpu: CPU to dispatch tasks for 262 * @prev: previous task being switched out 263 * 264 * Called when a CPU's local dsq is empty. The operation should dispatch 265 * one or more tasks from the BPF scheduler into the DSQs using 266 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using 267 * scx_bpf_consume(). 268 * 269 * The maximum number of times scx_bpf_dispatch() can be called without 270 * an intervening scx_bpf_consume() is specified by 271 * ops.dispatch_max_batch. See the comments on top of the two functions 272 * for more details. 273 * 274 * When not %NULL, @prev is an SCX task with its slice depleted. If 275 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in 276 * @prev->scx.flags, it is not enqueued yet and will be enqueued after 277 * ops.dispatch() returns. To keep executing @prev, return without 278 * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST. 279 */ 280 void (*dispatch)(s32 cpu, struct task_struct *prev); 281 282 /** 283 * tick - Periodic tick 284 * @p: task running currently 285 * 286 * This operation is called every 1/HZ seconds on CPUs which are 287 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an 288 * immediate dispatch cycle on the CPU. 289 */ 290 void (*tick)(struct task_struct *p); 291 292 /** 293 * runnable - A task is becoming runnable on its associated CPU 294 * @p: task becoming runnable 295 * @enq_flags: %SCX_ENQ_* 296 * 297 * This and the following three functions can be used to track a task's 298 * execution state transitions. A task becomes ->runnable() on a CPU, 299 * and then goes through one or more ->running() and ->stopping() pairs 300 * as it runs on the CPU, and eventually becomes ->quiescent() when it's 301 * done running on the CPU. 
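 *
 * Put differently, a single stint on a CPU typically looks like the
 * following sequence (illustrative only):
 *
 *	->runnable() -> ->running() -> ->stopping() -> ... -> ->quiescent()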
302 * 303 * @p is becoming runnable on the CPU because it's 304 * 305 * - waking up (%SCX_ENQ_WAKEUP) 306 * - being moved from another CPU 307 * - being restored after temporarily taken off the queue for an 308 * attribute change. 309 * 310 * This and ->enqueue() are related but not coupled. This operation 311 * notifies @p's state transition and may not be followed by ->enqueue() 312 * e.g. when @p is being dispatched to a remote CPU, or when @p is 313 * being enqueued on a CPU experiencing a hotplug event. Likewise, a 314 * task may be ->enqueue()'d without being preceded by this operation 315 * e.g. after exhausting its slice. 316 */ 317 void (*runnable)(struct task_struct *p, u64 enq_flags); 318 319 /** 320 * running - A task is starting to run on its associated CPU 321 * @p: task starting to run 322 * 323 * See ->runnable() for explanation on the task state notifiers. 324 */ 325 void (*running)(struct task_struct *p); 326 327 /** 328 * stopping - A task is stopping execution 329 * @p: task stopping to run 330 * @runnable: is task @p still runnable? 331 * 332 * See ->runnable() for explanation on the task state notifiers. If 333 * !@runnable, ->quiescent() will be invoked after this operation 334 * returns. 335 */ 336 void (*stopping)(struct task_struct *p, bool runnable); 337 338 /** 339 * quiescent - A task is becoming not runnable on its associated CPU 340 * @p: task becoming not runnable 341 * @deq_flags: %SCX_DEQ_* 342 * 343 * See ->runnable() for explanation on the task state notifiers. 344 * 345 * @p is becoming quiescent on the CPU because it's 346 * 347 * - sleeping (%SCX_DEQ_SLEEP) 348 * - being moved to another CPU 349 * - being temporarily taken off the queue for an attribute change 350 * (%SCX_DEQ_SAVE) 351 * 352 * This and ->dequeue() are related but not coupled. This operation 353 * notifies @p's state transition and may not be preceded by ->dequeue() 354 * e.g. when @p is being dispatched to a remote CPU. 355 */ 356 void (*quiescent)(struct task_struct *p, u64 deq_flags); 357 358 /** 359 * yield - Yield CPU 360 * @from: yielding task 361 * @to: optional yield target task 362 * 363 * If @to is NULL, @from is yielding the CPU to other runnable tasks. 364 * The BPF scheduler should ensure that other available tasks are 365 * dispatched before the yielding task. Return value is ignored in this 366 * case. 367 * 368 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf 369 * scheduler can implement the request, return %true; otherwise, %false. 370 */ 371 bool (*yield)(struct task_struct *from, struct task_struct *to); 372 373 /** 374 * core_sched_before - Task ordering for core-sched 375 * @a: task A 376 * @b: task B 377 * 378 * Used by core-sched to determine the ordering between two tasks. See 379 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on 380 * core-sched. 381 * 382 * Both @a and @b are runnable and may or may not currently be queued on 383 * the BPF scheduler. Should return %true if @a should run before @b. 384 * %false if there's no required ordering or @b should run before @a. 385 * 386 * If not specified, the default is ordering them according to when they 387 * became runnable. 388 */ 389 bool (*core_sched_before)(struct task_struct *a, struct task_struct *b); 390 391 /** 392 * set_weight - Set task weight 393 * @p: task to set weight for 394 * @weight: new weight [1..10000] 395 * 396 * Update @p's weight to @weight. 
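 *
 * A weight-aware scheduler typically just records the new value for use in
 * its own accounting, e.g. (a sketch; task_ctx and lookup_task_ctx() are
 * hypothetical BPF-side helpers, not defined here):
 *
 *	void BPF_STRUCT_OPS(myops_set_weight, struct task_struct *p, u32 weight)
 *	{
 *		struct task_ctx *tctx = lookup_task_ctx(p);
 *
 *		if (tctx)
 *			tctx->weight = weight;
 *	}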
397 */ 398 void (*set_weight)(struct task_struct *p, u32 weight); 399 400 /** 401 * set_cpumask - Set CPU affinity 402 * @p: task to set CPU affinity for 403 * @cpumask: cpumask of cpus that @p can run on 404 * 405 * Update @p's CPU affinity to @cpumask. 406 */ 407 void (*set_cpumask)(struct task_struct *p, 408 const struct cpumask *cpumask); 409 410 /** 411 * update_idle - Update the idle state of a CPU 412 * @cpu: CPU to update the idle state for 413 * @idle: whether entering or exiting the idle state 414 * 415 * This operation is called when @cpu enters or leaves the idle 416 * state. By default, implementing this operation disables the built-in 417 * idle CPU tracking and the following helpers become unavailable: 418 * 419 * - scx_bpf_select_cpu_dfl() 420 * - scx_bpf_test_and_clear_cpu_idle() 421 * - scx_bpf_pick_idle_cpu() 422 * 423 * The user also must implement ops.select_cpu() as the default 424 * implementation relies on scx_bpf_select_cpu_dfl(). 425 * 426 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle 427 * tracking. 428 */ 429 void (*update_idle)(s32 cpu, bool idle); 430 431 /** 432 * cpu_acquire - A CPU is becoming available to the BPF scheduler 433 * @cpu: The CPU being acquired by the BPF scheduler. 434 * @args: Acquire arguments, see the struct definition. 435 * 436 * A CPU that was previously released from the BPF scheduler is now once 437 * again under its control. 438 */ 439 void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args); 440 441 /** 442 * cpu_release - A CPU is taken away from the BPF scheduler 443 * @cpu: The CPU being released by the BPF scheduler. 444 * @args: Release arguments, see the struct definition. 445 * 446 * The specified CPU is no longer under the control of the BPF 447 * scheduler. This could be because it was preempted by a higher 448 * priority sched_class, though there may be other reasons as well. The 449 * caller should consult @args->reason to determine the cause. 450 */ 451 void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args); 452 453 /** 454 * init_task - Initialize a task to run in a BPF scheduler 455 * @p: task to initialize for BPF scheduling 456 * @args: init arguments, see the struct definition 457 * 458 * Either we're loading a BPF scheduler or a new task is being forked. 459 * Initialize @p for BPF scheduling. This operation may block and can 460 * be used for allocations, and is called exactly once for a task. 461 * 462 * Return 0 for success, -errno for failure. An error return while 463 * loading will abort loading of the BPF scheduler. During a fork, it 464 * will abort that specific fork. 465 */ 466 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args); 467 468 /** 469 * exit_task - Exit a previously-running task from the system 470 * @p: task to exit 471 * 472 * @p is exiting or the BPF scheduler is being unloaded. Perform any 473 * necessary cleanup for @p. 474 */ 475 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args); 476 477 /** 478 * enable - Enable BPF scheduling for a task 479 * @p: task to enable BPF scheduling for 480 * 481 * Enable @p for BPF scheduling. enable() is called on @p any time it 482 * enters SCX, and is always paired with a matching disable(). 483 */ 484 void (*enable)(struct task_struct *p); 485 486 /** 487 * disable - Disable BPF scheduling for a task 488 * @p: task to disable BPF scheduling for 489 * 490 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded. 491 * Disable BPF scheduling for @p.
A disable() call is always matched 492 * with a prior enable() call. 493 */ 494 void (*disable)(struct task_struct *p); 495 496 /** 497 * dump - Dump BPF scheduler state on error 498 * @ctx: debug dump context 499 * 500 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump. 501 */ 502 void (*dump)(struct scx_dump_ctx *ctx); 503 504 /** 505 * dump_cpu - Dump BPF scheduler state for a CPU on error 506 * @ctx: debug dump context 507 * @cpu: CPU to generate debug dump for 508 * @idle: @cpu is currently idle without any runnable tasks 509 * 510 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for 511 * @cpu. If @idle is %true and this operation doesn't produce any 512 * output, @cpu is skipped for dump. 513 */ 514 void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle); 515 516 /** 517 * dump_task - Dump BPF scheduler state for a runnable task on error 518 * @ctx: debug dump context 519 * @p: runnable task to generate debug dump for 520 * 521 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for 522 * @p. 523 */ 524 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p); 525 526 #ifdef CONFIG_EXT_GROUP_SCHED 527 /** 528 * cgroup_init - Initialize a cgroup 529 * @cgrp: cgroup being initialized 530 * @args: init arguments, see the struct definition 531 * 532 * Either the BPF scheduler is being loaded or @cgrp created, initialize 533 * @cgrp for sched_ext. This operation may block. 534 * 535 * Return 0 for success, -errno for failure. An error return while 536 * loading will abort loading of the BPF scheduler. During cgroup 537 * creation, it will abort the specific cgroup creation. 538 */ 539 s32 (*cgroup_init)(struct cgroup *cgrp, 540 struct scx_cgroup_init_args *args); 541 542 /** 543 * cgroup_exit - Exit a cgroup 544 * @cgrp: cgroup being exited 545 * 546 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit 547 * @cgrp for sched_ext. This operation may block. 548 */ 549 void (*cgroup_exit)(struct cgroup *cgrp); 550 551 /** 552 * cgroup_prep_move - Prepare a task to be moved to a different cgroup 553 * @p: task being moved 554 * @from: cgroup @p is being moved from 555 * @to: cgroup @p is being moved to 556 * 557 * Prepare @p for move from cgroup @from to @to. This operation may 558 * block and can be used for allocations. 559 * 560 * Return 0 for success, -errno for failure. An error return aborts the 561 * migration. 562 */ 563 s32 (*cgroup_prep_move)(struct task_struct *p, 564 struct cgroup *from, struct cgroup *to); 565 566 /** 567 * cgroup_move - Commit cgroup move 568 * @p: task being moved 569 * @from: cgroup @p is being moved from 570 * @to: cgroup @p is being moved to 571 * 572 * Commit the move. @p is dequeued during this operation. 573 */ 574 void (*cgroup_move)(struct task_struct *p, 575 struct cgroup *from, struct cgroup *to); 576 577 /** 578 * cgroup_cancel_move - Cancel cgroup move 579 * @p: task whose cgroup move is being canceled 580 * @from: cgroup @p was being moved from 581 * @to: cgroup @p was being moved to 582 * 583 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move(). 584 * Undo the preparation. 585 */ 586 void (*cgroup_cancel_move)(struct task_struct *p, 587 struct cgroup *from, struct cgroup *to); 588 589 /** 590 * cgroup_set_weight - A cgroup's weight is being changed 591 * @cgrp: cgroup whose weight is being updated 592 * @weight: new weight [1..10000] 593 * 594 * Update @cgrp's weight to @weight.
595 */ 596 void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight); 597 #endif /* CONFIG_EXT_GROUP_SCHED */ 598 599 /* 600 * All online ops must come before ops.cpu_online(). 601 */ 602 603 /** 604 * cpu_online - A CPU became online 605 * @cpu: CPU which just came up 606 * 607 * @cpu just came online. @cpu will not call ops.enqueue() or 608 * ops.dispatch(), nor run tasks associated with other CPUs beforehand. 609 */ 610 void (*cpu_online)(s32 cpu); 611 612 /** 613 * cpu_offline - A CPU is going offline 614 * @cpu: CPU which is going offline 615 * 616 * @cpu is going offline. @cpu will not call ops.enqueue() or 617 * ops.dispatch(), nor run tasks associated with other CPUs afterwards. 618 */ 619 void (*cpu_offline)(s32 cpu); 620 621 /* 622 * All CPU hotplug ops must come before ops.init(). 623 */ 624 625 /** 626 * init - Initialize the BPF scheduler 627 */ 628 s32 (*init)(void); 629 630 /** 631 * exit - Clean up after the BPF scheduler 632 * @info: Exit info 633 * 634 * ops.exit() is also called on ops.init() failure, which is a bit 635 * unusual. This is to allow rich reporting through @info on how 636 * ops.init() failed. 637 */ 638 void (*exit)(struct scx_exit_info *info); 639 640 /** 641 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch 642 */ 643 u32 dispatch_max_batch; 644 645 /** 646 * flags - %SCX_OPS_* flags 647 */ 648 u64 flags; 649 650 /** 651 * timeout_ms - The maximum amount of time, in milliseconds, that a 652 * runnable task should be able to wait before being scheduled. The 653 * maximum timeout may not exceed the default timeout of 30 seconds. 654 * 655 * Defaults to the maximum allowed timeout value of 30 seconds. 656 */ 657 u32 timeout_ms; 658 659 /** 660 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default 661 * value of 32768 is used. 662 */ 663 u32 exit_dump_len; 664 665 /** 666 * hotplug_seq - A sequence number that may be set by the scheduler to 667 * detect when a hotplug event has occurred during the loading process. 668 * If 0, no detection occurs. Otherwise, the scheduler will fail to 669 * load if the sequence number does not match @scx_hotplug_seq on the 670 * enable path. 671 */ 672 u64 hotplug_seq; 673 674 /** 675 * name - BPF scheduler's name 676 * 677 * Must be a non-zero valid BPF object name including only isalnum(), 678 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the 679 * BPF scheduler is enabled. 680 */ 681 char name[SCX_OPS_NAME_LEN]; 682 }; 683 684 enum scx_opi { 685 SCX_OPI_BEGIN = 0, 686 SCX_OPI_NORMAL_BEGIN = 0, 687 SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online), 688 SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online), 689 SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init), 690 SCX_OPI_END = SCX_OP_IDX(init), 691 }; 692 693 enum scx_wake_flags { 694 /* expose select WF_* flags as enums */ 695 SCX_WAKE_FORK = WF_FORK, 696 SCX_WAKE_TTWU = WF_TTWU, 697 SCX_WAKE_SYNC = WF_SYNC, 698 }; 699 700 enum scx_enq_flags { 701 /* expose select ENQUEUE_* flags as enums */ 702 SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP, 703 SCX_ENQ_HEAD = ENQUEUE_HEAD, 704 SCX_ENQ_CPU_SELECTED = ENQUEUE_RQ_SELECTED, 705 706 /* high 32bits are SCX specific */ 707 708 /* 709 * Set the following to trigger preemption when calling 710 * scx_bpf_dispatch() with a local dsq as the target. The slice of the 711 * current task is cleared to zero and the CPU is kicked into the 712 * scheduling path. Implies %SCX_ENQ_HEAD.
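 *
 * For example, an ops.enqueue() implementation that wants @p to preempt
 * whatever is running on its CPU could pass the flag when dispatching to
 * the local DSQ (a sketch; SCX_SLICE_DFL is the default slice constant):
 *
 *	scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
 *			 enq_flags | SCX_ENQ_PREEMPT);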
713 */ 714 SCX_ENQ_PREEMPT = 1LLU << 32, 715 716 /* 717 * The task being enqueued was previously enqueued on the current CPU's 718 * %SCX_DSQ_LOCAL, but was removed from it in a call to the 719 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was 720 * invoked in a ->cpu_release() callback, and the task is again 721 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the 722 * task will not be scheduled on the CPU until at least the next invocation 723 * of the ->cpu_acquire() callback. 724 */ 725 SCX_ENQ_REENQ = 1LLU << 40, 726 727 /* 728 * The task being enqueued is the only task available for the cpu. By 729 * default, ext core keeps executing such tasks but when 730 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the 731 * %SCX_ENQ_LAST flag set. 732 * 733 * The BPF scheduler is responsible for triggering a follow-up 734 * scheduling event. Otherwise, execution may stall. 735 */ 736 SCX_ENQ_LAST = 1LLU << 41, 737 738 /* high 8 bits are internal */ 739 __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56, 740 741 SCX_ENQ_CLEAR_OPSS = 1LLU << 56, 742 SCX_ENQ_DSQ_PRIQ = 1LLU << 57, 743 }; 744 745 enum scx_deq_flags { 746 /* expose select DEQUEUE_* flags as enums */ 747 SCX_DEQ_SLEEP = DEQUEUE_SLEEP, 748 749 /* high 32bits are SCX specific */ 750 751 /* 752 * The generic core-sched layer decided to execute the task even though 753 * it hasn't been dispatched yet. Dequeue from the BPF side. 754 */ 755 SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32, 756 }; 757 758 enum scx_pick_idle_cpu_flags { 759 SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */ 760 }; 761 762 enum scx_kick_flags { 763 /* 764 * Kick the target CPU if idle. Guarantees that the target CPU goes 765 * through at least one full scheduling cycle before going idle. If the 766 * target CPU can be determined to be currently not idle and going to go 767 * through a scheduling cycle before going idle, noop. 768 */ 769 SCX_KICK_IDLE = 1LLU << 0, 770 771 /* 772 * Preempt the current task and execute the dispatch path. If the 773 * current task of the target CPU is an SCX task, its ->scx.slice is 774 * cleared to zero before the scheduling path is invoked so that the 775 * task expires and the dispatch path is invoked. 776 */ 777 SCX_KICK_PREEMPT = 1LLU << 1, 778 779 /* 780 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will 781 * return after the target CPU finishes picking the next task. 782 */ 783 SCX_KICK_WAIT = 1LLU << 2, 784 }; 785 786 enum scx_tg_flags { 787 SCX_TG_ONLINE = 1U << 0, 788 SCX_TG_INITED = 1U << 1, 789 }; 790 791 enum scx_ops_enable_state { 792 SCX_OPS_ENABLING, 793 SCX_OPS_ENABLED, 794 SCX_OPS_DISABLING, 795 SCX_OPS_DISABLED, 796 }; 797 798 static const char *scx_ops_enable_state_str[] = { 799 [SCX_OPS_ENABLING] = "enabling", 800 [SCX_OPS_ENABLED] = "enabled", 801 [SCX_OPS_DISABLING] = "disabling", 802 [SCX_OPS_DISABLED] = "disabled", 803 }; 804 805 /* 806 * sched_ext_entity->ops_state 807 * 808 * Used to track the task ownership between the SCX core and the BPF scheduler. 809 * State transitions look as follows: 810 * 811 * NONE -> QUEUEING -> QUEUED -> DISPATCHING 812 * ^ | | 813 * | v v 814 * \-------------------------------/ 815 * 816 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call 817 * sites for explanations on the conditions being waited upon and why they are 818 * safe. Transitions out of them into NONE or QUEUED must store_release and the 819 * waiters should load_acquire.
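 *
 * For example, dispatch_enqueue() below leaves QUEUEING/DISPATCHING with
 * atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE) while
 * wait_ops_state() spins with atomic_long_read_acquire().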
820 * 821 * Tracking scx_ops_state enables sched_ext core to reliably determine whether 822 * any given task can be dispatched by the BPF scheduler at all times and thus 823 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler 824 * to try to dispatch any task anytime regardless of its state as the SCX core 825 * can safely reject invalid dispatches. 826 */ 827 enum scx_ops_state { 828 SCX_OPSS_NONE, /* owned by the SCX core */ 829 SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */ 830 SCX_OPSS_QUEUED, /* owned by the BPF scheduler */ 831 SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */ 832 833 /* 834 * QSEQ brands each QUEUED instance so that, when dispatch races 835 * dequeue/requeue, the dispatcher can tell whether it still has a claim 836 * on the task being dispatched. 837 * 838 * As some 32bit archs can't do 64bit store_release/load_acquire, 839 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on 840 * 32bit machines. The dispatch race window QSEQ protects is very narrow 841 * and runs with IRQ disabled. 30 bits should be sufficient. 842 */ 843 SCX_OPSS_QSEQ_SHIFT = 2, 844 }; 845 846 /* Use macros to ensure that the type is unsigned long for the masks */ 847 #define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1) 848 #define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK) 849 850 /* 851 * During exit, a task may schedule after losing its PIDs. When disabling the 852 * BPF scheduler, we need to be able to iterate tasks in every state to 853 * guarantee system safety. Maintain a dedicated task list which contains every 854 * task between its fork and eventual free. 855 */ 856 static DEFINE_SPINLOCK(scx_tasks_lock); 857 static LIST_HEAD(scx_tasks); 858 859 /* ops enable/disable */ 860 static struct kthread_worker *scx_ops_helper; 861 static DEFINE_MUTEX(scx_ops_enable_mutex); 862 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled); 863 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); 864 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED); 865 static int scx_ops_bypass_depth; 866 static DEFINE_RAW_SPINLOCK(__scx_ops_bypass_lock); 867 static bool scx_ops_init_task_enabled; 868 static bool scx_switching_all; 869 DEFINE_STATIC_KEY_FALSE(__scx_switched_all); 870 871 static struct sched_ext_ops scx_ops; 872 static bool scx_warned_zero_slice; 873 874 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last); 875 static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting); 876 static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt); 877 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled); 878 879 static struct static_key_false scx_has_op[SCX_OPI_END] = 880 { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT }; 881 882 static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE); 883 static struct scx_exit_info *scx_exit_info; 884 885 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0); 886 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0); 887 888 /* 889 * A monotonically increasing sequence number that is incremented every time a 890 * scheduler is enabled. This can be used to check if any custom sched_ext 891 * scheduler has ever been used in the system. 892 */ 893 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0); 894 895 /* 896 * The maximum amount of time in jiffies that a task may be runnable without 897 * being scheduled on a CPU. If this timeout is exceeded, it will trigger 898 * scx_ops_error(). 899 */ 900 static unsigned long scx_watchdog_timeout; 901 902 /* 903 * The last time the delayed work was run.
This delayed work relies on 904 * ksoftirqd being able to run to service timer interrupts, so it's possible 905 * that this work itself could get wedged. To account for this, we check that 906 * it's not stalled in the timer tick, and trigger an error if it is. 907 */ 908 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES; 909 910 static struct delayed_work scx_watchdog_work; 911 912 /* idle tracking */ 913 #ifdef CONFIG_SMP 914 #ifdef CONFIG_CPUMASK_OFFSTACK 915 #define CL_ALIGNED_IF_ONSTACK 916 #else 917 #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp 918 #endif 919 920 static struct { 921 cpumask_var_t cpu; 922 cpumask_var_t smt; 923 } idle_masks CL_ALIGNED_IF_ONSTACK; 924 925 #endif /* CONFIG_SMP */ 926 927 /* for %SCX_KICK_WAIT */ 928 static unsigned long __percpu *scx_kick_cpus_pnt_seqs; 929 930 /* 931 * Direct dispatch marker. 932 * 933 * Non-NULL values are used for direct dispatch from enqueue path. A valid 934 * pointer points to the task currently being enqueued. An ERR_PTR value is used 935 * to indicate that direct dispatch has already happened. 936 */ 937 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task); 938 939 /* 940 * Dispatch queues. 941 * 942 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is 943 * to avoid live-locking in bypass mode where all tasks are dispatched to 944 * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't 945 * sufficient, it can be further split. 946 */ 947 static struct scx_dispatch_q **global_dsqs; 948 949 static const struct rhashtable_params dsq_hash_params = { 950 .key_len = 8, 951 .key_offset = offsetof(struct scx_dispatch_q, id), 952 .head_offset = offsetof(struct scx_dispatch_q, hash_node), 953 }; 954 955 static struct rhashtable dsq_hash; 956 static LLIST_HEAD(dsqs_to_free); 957 958 /* dispatch buf */ 959 struct scx_dsp_buf_ent { 960 struct task_struct *task; 961 unsigned long qseq; 962 u64 dsq_id; 963 u64 enq_flags; 964 }; 965 966 static u32 scx_dsp_max_batch; 967 968 struct scx_dsp_ctx { 969 struct rq *rq; 970 u32 cursor; 971 u32 nr_tasks; 972 struct scx_dsp_buf_ent buf[]; 973 }; 974 975 static struct scx_dsp_ctx __percpu *scx_dsp_ctx; 976 977 /* string formatting from BPF */ 978 struct scx_bstr_buf { 979 u64 data[MAX_BPRINTF_VARARGS]; 980 char line[SCX_EXIT_MSG_LEN]; 981 }; 982 983 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock); 984 static struct scx_bstr_buf scx_exit_bstr_buf; 985 986 /* ops debug dump */ 987 struct scx_dump_data { 988 s32 cpu; 989 bool first; 990 s32 cursor; 991 struct seq_buf *s; 992 const char *prefix; 993 struct scx_bstr_buf buf; 994 }; 995 996 static struct scx_dump_data scx_dump_data = { 997 .cpu = -1, 998 }; 999 1000 /* /sys/kernel/sched_ext interface */ 1001 static struct kset *scx_kset; 1002 static struct kobject *scx_root_kobj; 1003 1004 #define CREATE_TRACE_POINTS 1005 #include <trace/events/sched_ext.h> 1006 1007 static void process_ddsp_deferred_locals(struct rq *rq); 1008 static void scx_bpf_kick_cpu(s32 cpu, u64 flags); 1009 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind, 1010 s64 exit_code, 1011 const char *fmt, ...); 1012 1013 #define scx_ops_error_kind(err, fmt, args...) \ 1014 scx_ops_exit_kind((err), 0, fmt, ##args) 1015 1016 #define scx_ops_exit(code, fmt, args...) \ 1017 scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args) 1018 1019 #define scx_ops_error(fmt, args...) 
\ 1020 scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args) 1021 1022 #define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)]) 1023 1024 static long jiffies_delta_msecs(unsigned long at, unsigned long now) 1025 { 1026 if (time_after(at, now)) 1027 return jiffies_to_msecs(at - now); 1028 else 1029 return -(long)jiffies_to_msecs(now - at); 1030 } 1031 1032 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */ 1033 static u32 higher_bits(u32 flags) 1034 { 1035 return ~((1 << fls(flags)) - 1); 1036 } 1037 1038 /* return the mask with only the highest bit set */ 1039 static u32 highest_bit(u32 flags) 1040 { 1041 int bit = fls(flags); 1042 return ((u64)1 << bit) >> 1; 1043 } 1044 1045 static bool u32_before(u32 a, u32 b) 1046 { 1047 return (s32)(a - b) < 0; 1048 } 1049 1050 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p) 1051 { 1052 return global_dsqs[cpu_to_node(task_cpu(p))]; 1053 } 1054 1055 static struct scx_dispatch_q *find_user_dsq(u64 dsq_id) 1056 { 1057 return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params); 1058 } 1059 1060 /* 1061 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX 1062 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate 1063 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check 1064 * whether it's running from an allowed context. 1065 * 1066 * @mask is constant, always inline to cull the mask calculations. 1067 */ 1068 static __always_inline void scx_kf_allow(u32 mask) 1069 { 1070 /* nesting is allowed only in increasing scx_kf_mask order */ 1071 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask, 1072 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n", 1073 current->scx.kf_mask, mask); 1074 current->scx.kf_mask |= mask; 1075 barrier(); 1076 } 1077 1078 static void scx_kf_disallow(u32 mask) 1079 { 1080 barrier(); 1081 current->scx.kf_mask &= ~mask; 1082 } 1083 1084 #define SCX_CALL_OP(mask, op, args...) \ 1085 do { \ 1086 if (mask) { \ 1087 scx_kf_allow(mask); \ 1088 scx_ops.op(args); \ 1089 scx_kf_disallow(mask); \ 1090 } else { \ 1091 scx_ops.op(args); \ 1092 } \ 1093 } while (0) 1094 1095 #define SCX_CALL_OP_RET(mask, op, args...) \ 1096 ({ \ 1097 __typeof__(scx_ops.op(args)) __ret; \ 1098 if (mask) { \ 1099 scx_kf_allow(mask); \ 1100 __ret = scx_ops.op(args); \ 1101 scx_kf_disallow(mask); \ 1102 } else { \ 1103 __ret = scx_ops.op(args); \ 1104 } \ 1105 __ret; \ 1106 }) 1107 1108 /* 1109 * Some kfuncs are allowed only on the tasks that are subjects of the 1110 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such 1111 * restrictions, the following SCX_CALL_OP_*() variants should be used when 1112 * invoking scx_ops operations that take task arguments. These can only be used 1113 * for non-nesting operations due to the way the tasks are tracked. 1114 * 1115 * kfuncs which can only operate on such tasks can in turn use 1116 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on 1117 * the specific task. 1118 */ 1119 #define SCX_CALL_OP_TASK(mask, op, task, args...) \ 1120 do { \ 1121 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 1122 current->scx.kf_tasks[0] = task; \ 1123 SCX_CALL_OP(mask, op, task, ##args); \ 1124 current->scx.kf_tasks[0] = NULL; \ 1125 } while (0) 1126 1127 #define SCX_CALL_OP_TASK_RET(mask, op, task, args...) 
\ 1128 ({ \ 1129 __typeof__(scx_ops.op(task, ##args)) __ret; \ 1130 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 1131 current->scx.kf_tasks[0] = task; \ 1132 __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \ 1133 current->scx.kf_tasks[0] = NULL; \ 1134 __ret; \ 1135 }) 1136 1137 #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) \ 1138 ({ \ 1139 __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \ 1140 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 1141 current->scx.kf_tasks[0] = task0; \ 1142 current->scx.kf_tasks[1] = task1; \ 1143 __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \ 1144 current->scx.kf_tasks[0] = NULL; \ 1145 current->scx.kf_tasks[1] = NULL; \ 1146 __ret; \ 1147 }) 1148 1149 /* @mask is constant, always inline to cull unnecessary branches */ 1150 static __always_inline bool scx_kf_allowed(u32 mask) 1151 { 1152 if (unlikely(!(current->scx.kf_mask & mask))) { 1153 scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x", 1154 mask, current->scx.kf_mask); 1155 return false; 1156 } 1157 1158 /* 1159 * Enforce nesting boundaries. e.g. A kfunc which can be called from 1160 * DISPATCH must not be called if we're running DEQUEUE which is nested 1161 * inside ops.dispatch(). We don't need to check boundaries for any 1162 * blocking kfuncs as the verifier ensures they're only called from 1163 * sleepable progs. 1164 */ 1165 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE && 1166 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) { 1167 scx_ops_error("cpu_release kfunc called from a nested operation"); 1168 return false; 1169 } 1170 1171 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH && 1172 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) { 1173 scx_ops_error("dispatch kfunc called from a nested operation"); 1174 return false; 1175 } 1176 1177 return true; 1178 } 1179 1180 /* see SCX_CALL_OP_TASK() */ 1181 static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask, 1182 struct task_struct *p) 1183 { 1184 if (!scx_kf_allowed(mask)) 1185 return false; 1186 1187 if (unlikely((p != current->scx.kf_tasks[0] && 1188 p != current->scx.kf_tasks[1]))) { 1189 scx_ops_error("called on a task not being operated on"); 1190 return false; 1191 } 1192 1193 return true; 1194 } 1195 1196 static bool scx_kf_allowed_if_unlocked(void) 1197 { 1198 return !current->scx.kf_mask; 1199 } 1200 1201 /** 1202 * nldsq_next_task - Iterate to the next task in a non-local DSQ 1203 * @dsq: user dsq being iterated 1204 * @cur: current position, %NULL to start iteration 1205 * @rev: walk backwards 1206 * 1207 * Returns %NULL when iteration is finished.
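 *
 * Typically used through the nldsq_for_each_task() wrapper defined below,
 * e.g. (illustrative only):
 *
 *	nldsq_for_each_task(p, dsq)
 *		nr_queued++;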
1208 */ 1209 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq, 1210 struct task_struct *cur, bool rev) 1211 { 1212 struct list_head *list_node; 1213 struct scx_dsq_list_node *dsq_lnode; 1214 1215 lockdep_assert_held(&dsq->lock); 1216 1217 if (cur) 1218 list_node = &cur->scx.dsq_list.node; 1219 else 1220 list_node = &dsq->list; 1221 1222 /* find the next task, need to skip BPF iteration cursors */ 1223 do { 1224 if (rev) 1225 list_node = list_node->prev; 1226 else 1227 list_node = list_node->next; 1228 1229 if (list_node == &dsq->list) 1230 return NULL; 1231 1232 dsq_lnode = container_of(list_node, struct scx_dsq_list_node, 1233 node); 1234 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR); 1235 1236 return container_of(dsq_lnode, struct task_struct, scx.dsq_list); 1237 } 1238 1239 #define nldsq_for_each_task(p, dsq) \ 1240 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \ 1241 (p) = nldsq_next_task((dsq), (p), false)) 1242 1243 1244 /* 1245 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse] 1246 * dispatch order. BPF-visible iterator is opaque and larger to allow future 1247 * changes without breaking backward compatibility. Can be used with 1248 * bpf_for_each(). See bpf_iter_scx_dsq_*(). 1249 */ 1250 enum scx_dsq_iter_flags { 1251 /* iterate in the reverse dispatch order */ 1252 SCX_DSQ_ITER_REV = 1U << 16, 1253 1254 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30, 1255 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31, 1256 1257 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV, 1258 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS | 1259 __SCX_DSQ_ITER_HAS_SLICE | 1260 __SCX_DSQ_ITER_HAS_VTIME, 1261 }; 1262 1263 struct bpf_iter_scx_dsq_kern { 1264 struct scx_dsq_list_node cursor; 1265 struct scx_dispatch_q *dsq; 1266 u64 slice; 1267 u64 vtime; 1268 } __attribute__((aligned(8))); 1269 1270 struct bpf_iter_scx_dsq { 1271 u64 __opaque[6]; 1272 } __attribute__((aligned(8))); 1273 1274 1275 /* 1276 * SCX task iterator. 1277 */ 1278 struct scx_task_iter { 1279 struct sched_ext_entity cursor; 1280 struct task_struct *locked; 1281 struct rq *rq; 1282 struct rq_flags rf; 1283 u32 cnt; 1284 }; 1285 1286 /** 1287 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration 1288 * @iter: iterator to init 1289 * 1290 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter 1291 * must eventually be stopped with scx_task_iter_stop(). 1292 * 1293 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock() 1294 * between this and the first next() call or between any two next() calls. If 1295 * the locks are released between two next() calls, the caller is responsible 1296 * for ensuring that the task being iterated remains accessible either through 1297 * RCU read lock or obtaining a reference count. 1298 * 1299 * All tasks which existed when the iteration started are guaranteed to be 1300 * visited as long as they still exist. 
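 *
 * A typical walk over all tasks looks like the following sketch, where
 * update_one_task() stands in for whatever per-task work the caller does:
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti)))
 *		update_one_task(p);	// p's rq is locked here
 *	scx_task_iter_stop(&sti);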
1301 */ 1302 static void scx_task_iter_start(struct scx_task_iter *iter) 1303 { 1304 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS & 1305 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1)); 1306 1307 spin_lock_irq(&scx_tasks_lock); 1308 1309 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR }; 1310 list_add(&iter->cursor.tasks_node, &scx_tasks); 1311 iter->locked = NULL; 1312 iter->cnt = 0; 1313 } 1314 1315 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter) 1316 { 1317 if (iter->locked) { 1318 task_rq_unlock(iter->rq, iter->locked, &iter->rf); 1319 iter->locked = NULL; 1320 } 1321 } 1322 1323 /** 1324 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator 1325 * @iter: iterator to unlock 1326 * 1327 * If @iter is in the middle of a locked iteration, it may be locking the rq of 1328 * the task currently being visited in addition to scx_tasks_lock. Unlock both. 1329 * This function can be safely called anytime during an iteration. 1330 */ 1331 static void scx_task_iter_unlock(struct scx_task_iter *iter) 1332 { 1333 __scx_task_iter_rq_unlock(iter); 1334 spin_unlock_irq(&scx_tasks_lock); 1335 } 1336 1337 /** 1338 * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock() 1339 * @iter: iterator to re-lock 1340 * 1341 * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it 1342 * doesn't re-lock the rq lock. Must be called before other iterator operations. 1343 */ 1344 static void scx_task_iter_relock(struct scx_task_iter *iter) 1345 { 1346 spin_lock_irq(&scx_tasks_lock); 1347 } 1348 1349 /** 1350 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock 1351 * @iter: iterator to exit 1352 * 1353 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held 1354 * which is released on return. If the iterator holds a task's rq lock, that rq 1355 * lock is also released. See scx_task_iter_start() for details. 1356 */ 1357 static void scx_task_iter_stop(struct scx_task_iter *iter) 1358 { 1359 list_del_init(&iter->cursor.tasks_node); 1360 scx_task_iter_unlock(iter); 1361 } 1362 1363 /** 1364 * scx_task_iter_next - Next task 1365 * @iter: iterator to walk 1366 * 1367 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped 1368 * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing 1369 * stalls by holding scx_tasks_lock for too long. 1370 */ 1371 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter) 1372 { 1373 struct list_head *cursor = &iter->cursor.tasks_node; 1374 struct sched_ext_entity *pos; 1375 1376 if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) { 1377 scx_task_iter_unlock(iter); 1378 cond_resched(); 1379 scx_task_iter_relock(iter); 1380 } 1381 1382 list_for_each_entry(pos, cursor, tasks_node) { 1383 if (&pos->tasks_node == &scx_tasks) 1384 return NULL; 1385 if (!(pos->flags & SCX_TASK_CURSOR)) { 1386 list_move(cursor, &pos->tasks_node); 1387 return container_of(pos, struct task_struct, scx); 1388 } 1389 } 1390 1391 /* can't happen, should always terminate at scx_tasks above */ 1392 BUG(); 1393 } 1394 1395 /** 1396 * scx_task_iter_next_locked - Next non-idle task with its rq locked 1397 * @iter: iterator to walk 1398 * 1399 * Visit the next non-idle task with its rq lock held. Idle tasks, which 1400 * are never scheduled through SCX, are skipped over during iteration. 1401 * See scx_task_iter_start() 1402 * for details.
1403 */ 1404 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter) 1405 { 1406 struct task_struct *p; 1407 1408 __scx_task_iter_rq_unlock(iter); 1409 1410 while ((p = scx_task_iter_next(iter))) { 1411 /* 1412 * scx_task_iter is used to prepare and move tasks into SCX 1413 * while loading the BPF scheduler and vice-versa while 1414 * unloading. The init_tasks ("swappers") should be excluded 1415 * from the iteration because: 1416 * 1417 * - It's unsafe to use __setscheduler_prio() on an init_task to 1418 * determine the sched_class to use as it won't preserve its 1419 * idle_sched_class. 1420 * 1421 * - ops.init/exit_task() can easily be confused if called with 1422 * init_tasks as they, e.g., share PID 0. 1423 * 1424 * As init_tasks are never scheduled through SCX, they can be 1425 * skipped safely. Note that is_idle_task() which tests %PF_IDLE 1426 * doesn't work here: 1427 * 1428 * - %PF_IDLE may not be set for an init_task whose CPU hasn't 1429 * yet been onlined. 1430 * 1431 * - %PF_IDLE can be set on tasks that are not init_tasks. See 1432 * play_idle_precise() used by CONFIG_IDLE_INJECT. 1433 * 1434 * Test for idle_sched_class as only init_tasks are on it. 1435 */ 1436 if (p->sched_class != &idle_sched_class) 1437 break; 1438 } 1439 if (!p) 1440 return NULL; 1441 1442 iter->rq = task_rq_lock(p, &iter->rf); 1443 iter->locked = p; 1444 1445 return p; 1446 } 1447 1448 static enum scx_ops_enable_state scx_ops_enable_state(void) 1449 { 1450 return atomic_read(&scx_ops_enable_state_var); 1451 } 1452 1453 static enum scx_ops_enable_state 1454 scx_ops_set_enable_state(enum scx_ops_enable_state to) 1455 { 1456 return atomic_xchg(&scx_ops_enable_state_var, to); 1457 } 1458 1459 static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to, 1460 enum scx_ops_enable_state from) 1461 { 1462 int from_v = from; 1463 1464 return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to); 1465 } 1466 1467 static bool scx_rq_bypassing(struct rq *rq) 1468 { 1469 return unlikely(rq->scx.flags & SCX_RQ_BYPASSING); 1470 } 1471 1472 /** 1473 * wait_ops_state - Busy-wait the specified ops state to end 1474 * @p: target task 1475 * @opss: state to wait the end of 1476 * 1477 * Busy-wait for @p to transition out of @opss. This can only be used when the 1478 * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This function also 1479 * has load_acquire semantics to ensure that the caller can see the updates made 1480 * in the enqueueing and dispatching paths. 1481 */ 1482 static void wait_ops_state(struct task_struct *p, unsigned long opss) 1483 { 1484 do { 1485 cpu_relax(); 1486 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss); 1487 } 1488 1489 /** 1490 * ops_cpu_valid - Verify a cpu number 1491 * @cpu: cpu number which came from a BPF ops 1492 * @where: extra information reported on error 1493 * 1494 * @cpu is a cpu number which came from the BPF scheduler and can be any value. 1495 * Verify that it is in range and one of the possible cpus. If invalid, trigger 1496 * an ops error. 1497 */ 1498 static bool ops_cpu_valid(s32 cpu, const char *where) 1499 { 1500 if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) { 1501 return true; 1502 } else { 1503 scx_ops_error("invalid CPU %d%s%s", cpu, 1504 where ?
" " : "", where ?: ""); 1505 return false; 1506 } 1507 } 1508 1509 /** 1510 * ops_sanitize_err - Sanitize a -errno value 1511 * @ops_name: operation to blame on failure 1512 * @err: -errno value to sanitize 1513 * 1514 * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return 1515 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can 1516 * cause misbehaviors. For an example, a large negative return from 1517 * ops.init_task() triggers an oops when passed up the call chain because the 1518 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is 1519 * handled as a pointer. 1520 */ 1521 static int ops_sanitize_err(const char *ops_name, s32 err) 1522 { 1523 if (err < 0 && err >= -MAX_ERRNO) 1524 return err; 1525 1526 scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err); 1527 return -EPROTO; 1528 } 1529 1530 static void run_deferred(struct rq *rq) 1531 { 1532 process_ddsp_deferred_locals(rq); 1533 } 1534 1535 #ifdef CONFIG_SMP 1536 static void deferred_bal_cb_workfn(struct rq *rq) 1537 { 1538 run_deferred(rq); 1539 } 1540 #endif 1541 1542 static void deferred_irq_workfn(struct irq_work *irq_work) 1543 { 1544 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work); 1545 1546 raw_spin_rq_lock(rq); 1547 run_deferred(rq); 1548 raw_spin_rq_unlock(rq); 1549 } 1550 1551 /** 1552 * schedule_deferred - Schedule execution of deferred actions on an rq 1553 * @rq: target rq 1554 * 1555 * Schedule execution of deferred actions on @rq. Must be called with @rq 1556 * locked. Deferred actions are executed with @rq locked but unpinned, and thus 1557 * can unlock @rq to e.g. migrate tasks to other rqs. 1558 */ 1559 static void schedule_deferred(struct rq *rq) 1560 { 1561 lockdep_assert_rq_held(rq); 1562 1563 #ifdef CONFIG_SMP 1564 /* 1565 * If in the middle of waking up a task, task_woken_scx() will be called 1566 * afterwards which will then run the deferred actions, no need to 1567 * schedule anything. 1568 */ 1569 if (rq->scx.flags & SCX_RQ_IN_WAKEUP) 1570 return; 1571 1572 /* 1573 * If in balance, the balance callbacks will be called before rq lock is 1574 * released. Schedule one. 1575 */ 1576 if (rq->scx.flags & SCX_RQ_IN_BALANCE) { 1577 queue_balance_callback(rq, &rq->scx.deferred_bal_cb, 1578 deferred_bal_cb_workfn); 1579 return; 1580 } 1581 #endif 1582 /* 1583 * No scheduler hooks available. Queue an irq work. They are executed on 1584 * IRQ re-enable which may take a bit longer than the scheduler hooks. 1585 * The above WAKEUP and BALANCE paths should cover most of the cases and 1586 * the time to IRQ re-enable shouldn't be long. 1587 */ 1588 irq_work_queue(&rq->scx.deferred_irq_work); 1589 } 1590 1591 /** 1592 * touch_core_sched - Update timestamp used for core-sched task ordering 1593 * @rq: rq to read clock from, must be locked 1594 * @p: task to update the timestamp for 1595 * 1596 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to 1597 * implement global or local-DSQ FIFO ordering for core-sched. Should be called 1598 * when a task becomes runnable and its turn on the CPU ends (e.g. slice 1599 * exhaustion). 1600 */ 1601 static void touch_core_sched(struct rq *rq, struct task_struct *p) 1602 { 1603 lockdep_assert_rq_held(rq); 1604 1605 #ifdef CONFIG_SCHED_CORE 1606 /* 1607 * It's okay to update the timestamp spuriously. Use 1608 * sched_core_disabled() which is cheaper than enabled(). 
1609 * 1610 * As this is used to determine ordering between tasks of sibling CPUs, 1611 * it may be better to use per-core dispatch sequence instead. 1612 */ 1613 if (!sched_core_disabled()) 1614 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); 1615 #endif 1616 } 1617 1618 /** 1619 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch 1620 * @rq: rq to read clock from, must be locked 1621 * @p: task being dispatched 1622 * 1623 * If the BPF scheduler implements custom core-sched ordering via 1624 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO 1625 * ordering within each local DSQ. This function is called from dispatch paths 1626 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect. 1627 */ 1628 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) 1629 { 1630 lockdep_assert_rq_held(rq); 1631 1632 #ifdef CONFIG_SCHED_CORE 1633 if (SCX_HAS_OP(core_sched_before)) 1634 touch_core_sched(rq, p); 1635 #endif 1636 } 1637 1638 static void update_curr_scx(struct rq *rq) 1639 { 1640 struct task_struct *curr = rq->curr; 1641 s64 delta_exec; 1642 1643 delta_exec = update_curr_common(rq); 1644 if (unlikely(delta_exec <= 0)) 1645 return; 1646 1647 if (curr->scx.slice != SCX_SLICE_INF) { 1648 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec); 1649 if (!curr->scx.slice) 1650 touch_core_sched(rq, curr); 1651 } 1652 } 1653 1654 static bool scx_dsq_priq_less(struct rb_node *node_a, 1655 const struct rb_node *node_b) 1656 { 1657 const struct task_struct *a = 1658 container_of(node_a, struct task_struct, scx.dsq_priq); 1659 const struct task_struct *b = 1660 container_of(node_b, struct task_struct, scx.dsq_priq); 1661 1662 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime); 1663 } 1664 1665 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta) 1666 { 1667 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */ 1668 WRITE_ONCE(dsq->nr, dsq->nr + delta); 1669 } 1670 1671 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, 1672 u64 enq_flags) 1673 { 1674 bool is_local = dsq->id == SCX_DSQ_LOCAL; 1675 1676 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 1677 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) || 1678 !RB_EMPTY_NODE(&p->scx.dsq_priq)); 1679 1680 if (!is_local) { 1681 raw_spin_lock(&dsq->lock); 1682 if (unlikely(dsq->id == SCX_DSQ_INVALID)) { 1683 scx_ops_error("attempting to dispatch to a destroyed dsq"); 1684 /* fall back to the global dsq */ 1685 raw_spin_unlock(&dsq->lock); 1686 dsq = find_global_dsq(p); 1687 raw_spin_lock(&dsq->lock); 1688 } 1689 } 1690 1691 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) && 1692 (enq_flags & SCX_ENQ_DSQ_PRIQ))) { 1693 /* 1694 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from 1695 * their FIFO queues. To avoid confusion and accidentally 1696 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we 1697 * disallow any internal DSQ from doing vtime ordering of 1698 * tasks. 1699 */ 1700 scx_ops_error("cannot use vtime ordering for built-in DSQs"); 1701 enq_flags &= ~SCX_ENQ_DSQ_PRIQ; 1702 } 1703 1704 if (enq_flags & SCX_ENQ_DSQ_PRIQ) { 1705 struct rb_node *rbp; 1706 1707 /* 1708 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are 1709 * linked to both the rbtree and list on PRIQs, this can only be 1710 * tested easily when adding the first task. 
1711 */ 1712 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) && 1713 nldsq_next_task(dsq, NULL, false))) 1714 scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks", 1715 dsq->id); 1716 1717 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; 1718 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); 1719 1720 /* 1721 * Find the previous task and insert after it on the list so 1722 * that @dsq->list is vtime ordered. 1723 */ 1724 rbp = rb_prev(&p->scx.dsq_priq); 1725 if (rbp) { 1726 struct task_struct *prev = 1727 container_of(rbp, struct task_struct, 1728 scx.dsq_priq); 1729 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); 1730 } else { 1731 list_add(&p->scx.dsq_list.node, &dsq->list); 1732 } 1733 } else { 1734 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */ 1735 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq))) 1736 scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks", 1737 dsq->id); 1738 1739 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) 1740 list_add(&p->scx.dsq_list.node, &dsq->list); 1741 else 1742 list_add_tail(&p->scx.dsq_list.node, &dsq->list); 1743 } 1744 1745 /* seq records the order tasks are queued, used by BPF DSQ iterator */ 1746 dsq->seq++; 1747 p->scx.dsq_seq = dsq->seq; 1748 1749 dsq_mod_nr(dsq, 1); 1750 p->scx.dsq = dsq; 1751 1752 /* 1753 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the 1754 * direct dispatch path, but we clear them here because the direct 1755 * dispatch verdict may be overridden on the enqueue path during e.g. 1756 * bypass. 1757 */ 1758 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; 1759 p->scx.ddsp_enq_flags = 0; 1760 1761 /* 1762 * We're transitioning out of QUEUEING or DISPATCHING. store_release to 1763 * match waiters' load_acquire. 1764 */ 1765 if (enq_flags & SCX_ENQ_CLEAR_OPSS) 1766 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1767 1768 if (is_local) { 1769 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); 1770 bool preempt = false; 1771 1772 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && 1773 rq->curr->sched_class == &ext_sched_class) { 1774 rq->curr->scx.slice = 0; 1775 preempt = true; 1776 } 1777 1778 if (preempt || sched_class_above(&ext_sched_class, 1779 rq->curr->sched_class)) 1780 resched_curr(rq); 1781 } else { 1782 raw_spin_unlock(&dsq->lock); 1783 } 1784 } 1785 1786 static void task_unlink_from_dsq(struct task_struct *p, 1787 struct scx_dispatch_q *dsq) 1788 { 1789 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node)); 1790 1791 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) { 1792 rb_erase(&p->scx.dsq_priq, &dsq->priq); 1793 RB_CLEAR_NODE(&p->scx.dsq_priq); 1794 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; 1795 } 1796 1797 list_del_init(&p->scx.dsq_list.node); 1798 dsq_mod_nr(dsq, -1); 1799 } 1800 1801 static void dispatch_dequeue(struct rq *rq, struct task_struct *p) 1802 { 1803 struct scx_dispatch_q *dsq = p->scx.dsq; 1804 bool is_local = dsq == &rq->scx.local_dsq; 1805 1806 if (!dsq) { 1807 /* 1808 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals. 1809 * Unlinking is all that's needed to cancel. 1810 */ 1811 if (unlikely(!list_empty(&p->scx.dsq_list.node))) 1812 list_del_init(&p->scx.dsq_list.node); 1813 1814 /* 1815 * When dispatching directly from the BPF scheduler to a local 1816 * DSQ, the task isn't associated with any DSQ but 1817 * @p->scx.holding_cpu may be set under the protection of 1818 * %SCX_OPSS_DISPATCHING. 
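 * If it is set, clearing it below tells dispatch_to_local_dsq() that it
 * lost the race, using the same holding_cpu convention as the DSQ-owned
 * case handled further down.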
1819 */ 1820 if (p->scx.holding_cpu >= 0) 1821 p->scx.holding_cpu = -1; 1822 1823 return; 1824 } 1825 1826 if (!is_local) 1827 raw_spin_lock(&dsq->lock); 1828 1829 /* 1830 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't 1831 * change underneath us. 1832 */ 1833 if (p->scx.holding_cpu < 0) { 1834 /* @p must still be on @dsq, dequeue */ 1835 task_unlink_from_dsq(p, dsq); 1836 } else { 1837 /* 1838 * We're racing against dispatch_to_local_dsq() which already 1839 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the 1840 * holding_cpu which tells dispatch_to_local_dsq() that it lost 1841 * the race. 1842 */ 1843 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node)); 1844 p->scx.holding_cpu = -1; 1845 } 1846 p->scx.dsq = NULL; 1847 1848 if (!is_local) 1849 raw_spin_unlock(&dsq->lock); 1850 } 1851 1852 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id, 1853 struct task_struct *p) 1854 { 1855 struct scx_dispatch_q *dsq; 1856 1857 if (dsq_id == SCX_DSQ_LOCAL) 1858 return &rq->scx.local_dsq; 1859 1860 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 1861 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 1862 1863 if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict")) 1864 return find_global_dsq(p); 1865 1866 return &cpu_rq(cpu)->scx.local_dsq; 1867 } 1868 1869 if (dsq_id == SCX_DSQ_GLOBAL) 1870 dsq = find_global_dsq(p); 1871 else 1872 dsq = find_user_dsq(dsq_id); 1873 1874 if (unlikely(!dsq)) { 1875 scx_ops_error("non-existent DSQ 0x%llx for %s[%d]", 1876 dsq_id, p->comm, p->pid); 1877 return find_global_dsq(p); 1878 } 1879 1880 return dsq; 1881 } 1882 1883 static void mark_direct_dispatch(struct task_struct *ddsp_task, 1884 struct task_struct *p, u64 dsq_id, 1885 u64 enq_flags) 1886 { 1887 /* 1888 * Mark that dispatch already happened from ops.select_cpu() or 1889 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value 1890 * which can never match a valid task pointer. 1891 */ 1892 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH)); 1893 1894 /* @p must match the task on the enqueue path */ 1895 if (unlikely(p != ddsp_task)) { 1896 if (IS_ERR(ddsp_task)) 1897 scx_ops_error("%s[%d] already direct-dispatched", 1898 p->comm, p->pid); 1899 else 1900 scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]", 1901 ddsp_task->comm, ddsp_task->pid, 1902 p->comm, p->pid); 1903 return; 1904 } 1905 1906 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID); 1907 WARN_ON_ONCE(p->scx.ddsp_enq_flags); 1908 1909 p->scx.ddsp_dsq_id = dsq_id; 1910 p->scx.ddsp_enq_flags = enq_flags; 1911 } 1912 1913 static void direct_dispatch(struct task_struct *p, u64 enq_flags) 1914 { 1915 struct rq *rq = task_rq(p); 1916 struct scx_dispatch_q *dsq = 1917 find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p); 1918 1919 touch_core_sched_dispatch(rq, p); 1920 1921 p->scx.ddsp_enq_flags |= enq_flags; 1922 1923 /* 1924 * We are in the enqueue path with @rq locked and pinned, and thus can't 1925 * double lock a remote rq and enqueue to its local DSQ. For 1926 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer 1927 * the enqueue so that it's executed when @rq can be unlocked. 
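 * Roughly, the deferred path is:
 *
 *	direct_dispatch()
 *	  list_add_tail(&p->scx.dsq_list.node, &rq->scx.ddsp_deferred_locals);
 *	  schedule_deferred(rq);
 *	    ... balance callback, irq_work or task_woken_scx() ...
 *	      process_ddsp_deferred_locals(rq)
 *	        dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);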
1928 */ 1929 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) { 1930 unsigned long opss; 1931 1932 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK; 1933 1934 switch (opss & SCX_OPSS_STATE_MASK) { 1935 case SCX_OPSS_NONE: 1936 break; 1937 case SCX_OPSS_QUEUEING: 1938 /* 1939 * As @p was never passed to the BPF side, _release is 1940 * not strictly necessary. Still do it for consistency. 1941 */ 1942 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1943 break; 1944 default: 1945 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()", 1946 p->comm, p->pid, opss); 1947 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1948 break; 1949 } 1950 1951 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 1952 list_add_tail(&p->scx.dsq_list.node, 1953 &rq->scx.ddsp_deferred_locals); 1954 schedule_deferred(rq); 1955 return; 1956 } 1957 1958 dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); 1959 } 1960 1961 static bool scx_rq_online(struct rq *rq) 1962 { 1963 /* 1964 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates 1965 * the online state as seen from the BPF scheduler. cpu_active() test 1966 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will 1967 * stay set until the current scheduling operation is complete even if 1968 * we aren't locking @rq. 1969 */ 1970 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq))); 1971 } 1972 1973 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, 1974 int sticky_cpu) 1975 { 1976 struct task_struct **ddsp_taskp; 1977 unsigned long qseq; 1978 1979 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); 1980 1981 /* rq migration */ 1982 if (sticky_cpu == cpu_of(rq)) 1983 goto local_norefill; 1984 1985 /* 1986 * If !scx_rq_online(), we already told the BPF scheduler that the CPU 1987 * is offline and are just running the hotplug path. Don't bother the 1988 * BPF scheduler. 1989 */ 1990 if (!scx_rq_online(rq)) 1991 goto local; 1992 1993 if (scx_rq_bypassing(rq)) 1994 goto global; 1995 1996 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 1997 goto direct; 1998 1999 /* see %SCX_OPS_ENQ_EXITING */ 2000 if (!static_branch_unlikely(&scx_ops_enq_exiting) && 2001 unlikely(p->flags & PF_EXITING)) 2002 goto local; 2003 2004 if (!SCX_HAS_OP(enqueue)) 2005 goto global; 2006 2007 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */ 2008 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT; 2009 2010 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 2011 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); 2012 2013 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 2014 WARN_ON_ONCE(*ddsp_taskp); 2015 *ddsp_taskp = p; 2016 2017 SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags); 2018 2019 *ddsp_taskp = NULL; 2020 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 2021 goto direct; 2022 2023 /* 2024 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or 2025 * dequeue may be waiting. The store_release matches their load_acquire. 2026 */ 2027 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); 2028 return; 2029 2030 direct: 2031 direct_dispatch(p, enq_flags); 2032 return; 2033 2034 local: 2035 /* 2036 * For task-ordering, slice refill must be treated as implying the end 2037 * of the current slice. Otherwise, the longer @p stays on the CPU, the 2038 * higher priority it becomes from scx_prio_less()'s POV. 
2039 */
2040 touch_core_sched(rq, p);
2041 p->scx.slice = SCX_SLICE_DFL;
2042 local_norefill:
2043 dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
2044 return;
2045
2046 global:
2047 touch_core_sched(rq, p); /* see the comment in local: */
2048 p->scx.slice = SCX_SLICE_DFL;
2049 dispatch_enqueue(find_global_dsq(p), p, enq_flags);
2050 }
2051
2052 static bool task_runnable(const struct task_struct *p)
2053 {
2054 return !list_empty(&p->scx.runnable_node);
2055 }
2056
2057 static void set_task_runnable(struct rq *rq, struct task_struct *p)
2058 {
2059 lockdep_assert_rq_held(rq);
2060
2061 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
2062 p->scx.runnable_at = jiffies;
2063 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
2064 }
2065
2066 /*
2067 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
2068 * appended to the runnable_list.
2069 */
2070 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
2071 }
2072
2073 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
2074 {
2075 list_del_init(&p->scx.runnable_node);
2076 if (reset_runnable_at)
2077 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2078 }
2079
2080 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
2081 {
2082 int sticky_cpu = p->scx.sticky_cpu;
2083
2084 if (enq_flags & ENQUEUE_WAKEUP)
2085 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
2086
2087 enq_flags |= rq->scx.extra_enq_flags;
2088
2089 if (sticky_cpu >= 0)
2090 p->scx.sticky_cpu = -1;
2091
2092 /*
2093 * Restoring a running task will be immediately followed by
2094 * set_next_task_scx() which expects the task to not be on the BPF
2095 * scheduler as tasks can only start running through local DSQs. Force
2096 * direct-dispatch into the local DSQ by setting the sticky_cpu.
2097 */
2098 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
2099 sticky_cpu = cpu_of(rq);
2100
2101 if (p->scx.flags & SCX_TASK_QUEUED) {
2102 WARN_ON_ONCE(!task_runnable(p));
2103 goto out;
2104 }
2105
2106 set_task_runnable(rq, p);
2107 p->scx.flags |= SCX_TASK_QUEUED;
2108 rq->scx.nr_running++;
2109 add_nr_running(rq, 1);
2110
2111 if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
2112 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
2113
2114 if (enq_flags & SCX_ENQ_WAKEUP)
2115 touch_core_sched(rq, p);
2116
2117 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2118 out:
2119 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2120 }
2121
2122 static void ops_dequeue(struct task_struct *p, u64 deq_flags)
2123 {
2124 unsigned long opss;
2125
2126 /* dequeue is always temporary, don't reset runnable_at */
2127 clr_task_runnable(p, false);
2128
2129 /* acquire ensures that we see the preceding updates on QUEUED */
2130 opss = atomic_long_read_acquire(&p->scx.ops_state);
2131
2132 switch (opss & SCX_OPSS_STATE_MASK) {
2133 case SCX_OPSS_NONE:
2134 break;
2135 case SCX_OPSS_QUEUEING:
2136 /*
2137 * QUEUEING is started and finished while holding @p's rq lock.
2138 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2139 */
2140 BUG();
2141 case SCX_OPSS_QUEUED:
2142 if (SCX_HAS_OP(dequeue))
2143 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
2144
2145 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2146 SCX_OPSS_NONE))
2147 break;
2148 fallthrough;
2149 case SCX_OPSS_DISPATCHING:
2150 /*
2151 * If @p is being dispatched from the BPF scheduler to a DSQ,
2152 * wait for the transfer to complete so that @p doesn't get
2153 * added to its DSQ after dequeueing is complete.
2154 * 2155 * As we're waiting on DISPATCHING with the rq locked, the 2156 * dispatching side shouldn't try to lock the rq while 2157 * DISPATCHING is set. See dispatch_to_local_dsq(). 2158 * 2159 * DISPATCHING shouldn't have qseq set and control can reach 2160 * here with NONE @opss from the above QUEUED case block. 2161 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss. 2162 */ 2163 wait_ops_state(p, SCX_OPSS_DISPATCHING); 2164 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 2165 break; 2166 } 2167 } 2168 2169 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) 2170 { 2171 if (!(p->scx.flags & SCX_TASK_QUEUED)) { 2172 WARN_ON_ONCE(task_runnable(p)); 2173 return true; 2174 } 2175 2176 ops_dequeue(p, deq_flags); 2177 2178 /* 2179 * A currently running task which is going off @rq first gets dequeued 2180 * and then stops running. As we want running <-> stopping transitions 2181 * to be contained within runnable <-> quiescent transitions, trigger 2182 * ->stopping() early here instead of in put_prev_task_scx(). 2183 * 2184 * @p may go through multiple stopping <-> running transitions between 2185 * here and put_prev_task_scx() if task attribute changes occur while 2186 * balance_scx() leaves @rq unlocked. However, they don't contain any 2187 * information meaningful to the BPF scheduler and can be suppressed by 2188 * skipping the callbacks if the task is !QUEUED. 2189 */ 2190 if (SCX_HAS_OP(stopping) && task_current(rq, p)) { 2191 update_curr_scx(rq); 2192 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false); 2193 } 2194 2195 if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p)) 2196 SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags); 2197 2198 if (deq_flags & SCX_DEQ_SLEEP) 2199 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; 2200 else 2201 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; 2202 2203 p->scx.flags &= ~SCX_TASK_QUEUED; 2204 rq->scx.nr_running--; 2205 sub_nr_running(rq, 1); 2206 2207 dispatch_dequeue(rq, p); 2208 return true; 2209 } 2210 2211 static void yield_task_scx(struct rq *rq) 2212 { 2213 struct task_struct *p = rq->curr; 2214 2215 if (SCX_HAS_OP(yield)) 2216 SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL); 2217 else 2218 p->scx.slice = 0; 2219 } 2220 2221 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to) 2222 { 2223 struct task_struct *from = rq->curr; 2224 2225 if (SCX_HAS_OP(yield)) 2226 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to); 2227 else 2228 return false; 2229 } 2230 2231 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags, 2232 struct scx_dispatch_q *src_dsq, 2233 struct rq *dst_rq) 2234 { 2235 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq; 2236 2237 /* @dsq is locked and @p is on @dst_rq */ 2238 lockdep_assert_held(&src_dsq->lock); 2239 lockdep_assert_rq_held(dst_rq); 2240 2241 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 2242 2243 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) 2244 list_add(&p->scx.dsq_list.node, &dst_dsq->list); 2245 else 2246 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list); 2247 2248 dsq_mod_nr(dst_dsq, 1); 2249 p->scx.dsq = dst_dsq; 2250 } 2251 2252 #ifdef CONFIG_SMP 2253 /** 2254 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ 2255 * @p: task to move 2256 * @enq_flags: %SCX_ENQ_* 2257 * @src_rq: rq to move the task from, locked on entry, released on return 2258 * @dst_rq: rq to move the task into, locked on return 2259 * 2260 * Move @p which is currently on @src_rq to @dst_rq's local DSQ. 
2261 */ 2262 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, 2263 struct rq *src_rq, struct rq *dst_rq) 2264 { 2265 lockdep_assert_rq_held(src_rq); 2266 2267 /* the following marks @p MIGRATING which excludes dequeue */ 2268 deactivate_task(src_rq, p, 0); 2269 set_task_cpu(p, cpu_of(dst_rq)); 2270 p->scx.sticky_cpu = cpu_of(dst_rq); 2271 2272 raw_spin_rq_unlock(src_rq); 2273 raw_spin_rq_lock(dst_rq); 2274 2275 /* 2276 * We want to pass scx-specific enq_flags but activate_task() will 2277 * truncate the upper 32 bit. As we own @rq, we can pass them through 2278 * @rq->scx.extra_enq_flags instead. 2279 */ 2280 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)); 2281 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags); 2282 dst_rq->scx.extra_enq_flags = enq_flags; 2283 activate_task(dst_rq, p, 0); 2284 dst_rq->scx.extra_enq_flags = 0; 2285 } 2286 2287 /* 2288 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two 2289 * differences: 2290 * 2291 * - is_cpu_allowed() asks "Can this task run on this CPU?" while 2292 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to 2293 * this CPU?". 2294 * 2295 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task 2296 * must be allowed to finish on the CPU that it's currently on regardless of 2297 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the 2298 * BPF scheduler shouldn't attempt to migrate a task which has migration 2299 * disabled. 2300 * 2301 * - The BPF scheduler is bypassed while the rq is offline and we can always say 2302 * no to the BPF scheduler initiated migrations while offline. 2303 */ 2304 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, 2305 bool trigger_error) 2306 { 2307 int cpu = cpu_of(rq); 2308 2309 /* 2310 * We don't require the BPF scheduler to avoid dispatching to offline 2311 * CPUs mostly for convenience but also because CPUs can go offline 2312 * between scx_bpf_dispatch() calls and here. Trigger error iff the 2313 * picked CPU is outside the allowed mask. 2314 */ 2315 if (!task_allowed_on_cpu(p, cpu)) { 2316 if (trigger_error) 2317 scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]", 2318 cpu_of(rq), p->comm, p->pid); 2319 return false; 2320 } 2321 2322 if (unlikely(is_migration_disabled(p))) 2323 return false; 2324 2325 if (!scx_rq_online(rq)) 2326 return false; 2327 2328 return true; 2329 } 2330 2331 /** 2332 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq 2333 * @p: target task 2334 * @dsq: locked DSQ @p is currently on 2335 * @src_rq: rq @p is currently on, stable with @dsq locked 2336 * 2337 * Called with @dsq locked but no rq's locked. We want to move @p to a different 2338 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is 2339 * required when transferring into a local DSQ. Even when transferring into a 2340 * non-local DSQ, it's better to use the same mechanism to protect against 2341 * dequeues and maintain the invariant that @p->scx.dsq can only change while 2342 * @src_rq is locked, which e.g. scx_dump_task() depends on. 2343 * 2344 * We want to grab @src_rq but that can deadlock if we try while locking @dsq, 2345 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As 2346 * this may race with dequeue, which can't drop the rq lock or fail, do a little 2347 * dancing from our side. 2348 * 2349 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. 
If @p gets 2350 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu 2351 * would be cleared to -1. While other cpus may have updated it to different 2352 * values afterwards, as this operation can't be preempted or recurse, the 2353 * holding_cpu can never become this CPU again before we're done. Thus, we can 2354 * tell whether we lost to dequeue by testing whether the holding_cpu still 2355 * points to this CPU. See dispatch_dequeue() for the counterpart. 2356 * 2357 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is 2358 * still valid. %false if lost to dequeue. 2359 */ 2360 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p, 2361 struct scx_dispatch_q *dsq, 2362 struct rq *src_rq) 2363 { 2364 s32 cpu = raw_smp_processor_id(); 2365 2366 lockdep_assert_held(&dsq->lock); 2367 2368 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 2369 task_unlink_from_dsq(p, dsq); 2370 p->scx.holding_cpu = cpu; 2371 2372 raw_spin_unlock(&dsq->lock); 2373 raw_spin_rq_lock(src_rq); 2374 2375 /* task_rq couldn't have changed if we're still the holding cpu */ 2376 return likely(p->scx.holding_cpu == cpu) && 2377 !WARN_ON_ONCE(src_rq != task_rq(p)); 2378 } 2379 2380 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, 2381 struct scx_dispatch_q *dsq, struct rq *src_rq) 2382 { 2383 raw_spin_rq_unlock(this_rq); 2384 2385 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { 2386 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); 2387 return true; 2388 } else { 2389 raw_spin_rq_unlock(src_rq); 2390 raw_spin_rq_lock(this_rq); 2391 return false; 2392 } 2393 } 2394 #else /* CONFIG_SMP */ 2395 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); } 2396 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; } 2397 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; } 2398 #endif /* CONFIG_SMP */ 2399 2400 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq) 2401 { 2402 struct task_struct *p; 2403 retry: 2404 /* 2405 * The caller can't expect to successfully consume a task if the task's 2406 * addition to @dsq isn't guaranteed to be visible somehow. Test 2407 * @dsq->list without locking and skip if it seems empty. 2408 */ 2409 if (list_empty(&dsq->list)) 2410 return false; 2411 2412 raw_spin_lock(&dsq->lock); 2413 2414 nldsq_for_each_task(p, dsq) { 2415 struct rq *task_rq = task_rq(p); 2416 2417 if (rq == task_rq) { 2418 task_unlink_from_dsq(p, dsq); 2419 move_local_task_to_local_dsq(p, 0, dsq, rq); 2420 raw_spin_unlock(&dsq->lock); 2421 return true; 2422 } 2423 2424 if (task_can_run_on_remote_rq(p, rq, false)) { 2425 if (likely(consume_remote_task(rq, p, dsq, task_rq))) 2426 return true; 2427 goto retry; 2428 } 2429 } 2430 2431 raw_spin_unlock(&dsq->lock); 2432 return false; 2433 } 2434 2435 static bool consume_global_dsq(struct rq *rq) 2436 { 2437 int node = cpu_to_node(cpu_of(rq)); 2438 2439 return consume_dispatch_q(rq, global_dsqs[node]); 2440 } 2441 2442 /** 2443 * dispatch_to_local_dsq - Dispatch a task to a local dsq 2444 * @rq: current rq which is locked 2445 * @dst_dsq: destination DSQ 2446 * @p: task to dispatch 2447 * @enq_flags: %SCX_ENQ_* 2448 * 2449 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local 2450 * DSQ. 
This function performs all the synchronization dancing needed because 2451 * local DSQs are protected with rq locks. 2452 * 2453 * The caller must have exclusive ownership of @p (e.g. through 2454 * %SCX_OPSS_DISPATCHING). 2455 */ 2456 static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq, 2457 struct task_struct *p, u64 enq_flags) 2458 { 2459 struct rq *src_rq = task_rq(p); 2460 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 2461 2462 /* 2463 * We're synchronized against dequeue through DISPATCHING. As @p can't 2464 * be dequeued, its task_rq and cpus_allowed are stable too. 2465 * 2466 * If dispatching to @rq that @p is already on, no lock dancing needed. 2467 */ 2468 if (rq == src_rq && rq == dst_rq) { 2469 dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); 2470 return; 2471 } 2472 2473 #ifdef CONFIG_SMP 2474 if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) { 2475 dispatch_enqueue(find_global_dsq(p), p, 2476 enq_flags | SCX_ENQ_CLEAR_OPSS); 2477 return; 2478 } 2479 2480 /* 2481 * @p is on a possibly remote @src_rq which we need to lock to move the 2482 * task. If dequeue is in progress, it'd be locking @src_rq and waiting 2483 * on DISPATCHING, so we can't grab @src_rq lock while holding 2484 * DISPATCHING. 2485 * 2486 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that 2487 * we're moving from a DSQ and use the same mechanism - mark the task 2488 * under transfer with holding_cpu, release DISPATCHING and then follow 2489 * the same protocol. See unlink_dsq_and_lock_src_rq(). 2490 */ 2491 p->scx.holding_cpu = raw_smp_processor_id(); 2492 2493 /* store_release ensures that dequeue sees the above */ 2494 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 2495 2496 /* switch to @src_rq lock */ 2497 if (rq != src_rq) { 2498 raw_spin_rq_unlock(rq); 2499 raw_spin_rq_lock(src_rq); 2500 } 2501 2502 /* task_rq couldn't have changed if we're still the holding cpu */ 2503 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) && 2504 !WARN_ON_ONCE(src_rq != task_rq(p))) { 2505 /* 2506 * If @p is staying on the same rq, there's no need to go 2507 * through the full deactivate/activate cycle. Optimize by 2508 * abbreviating move_remote_task_to_local_dsq(). 2509 */ 2510 if (src_rq == dst_rq) { 2511 p->scx.holding_cpu = -1; 2512 dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags); 2513 } else { 2514 move_remote_task_to_local_dsq(p, enq_flags, 2515 src_rq, dst_rq); 2516 } 2517 2518 /* if the destination CPU is idle, wake it up */ 2519 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class)) 2520 resched_curr(dst_rq); 2521 } 2522 2523 /* switch back to @rq lock */ 2524 if (rq != dst_rq) { 2525 raw_spin_rq_unlock(dst_rq); 2526 raw_spin_rq_lock(rq); 2527 } 2528 #else /* CONFIG_SMP */ 2529 BUG(); /* control can not reach here on UP */ 2530 #endif /* CONFIG_SMP */ 2531 } 2532 2533 /** 2534 * finish_dispatch - Asynchronously finish dispatching a task 2535 * @rq: current rq which is locked 2536 * @p: task to finish dispatching 2537 * @qseq_at_dispatch: qseq when @p started getting dispatched 2538 * @dsq_id: destination DSQ ID 2539 * @enq_flags: %SCX_ENQ_* 2540 * 2541 * Dispatching to local DSQs may need to wait for queueing to complete or 2542 * require rq lock dancing. As we don't wanna do either while inside 2543 * ops.dispatch() to avoid locking order inversion, we split dispatching into 2544 * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the 2545 * task and its qseq. 
Once ops.dispatch() returns, this function is called to 2546 * finish up. 2547 * 2548 * There is no guarantee that @p is still valid for dispatching or even that it 2549 * was valid in the first place. Make sure that the task is still owned by the 2550 * BPF scheduler and claim the ownership before dispatching. 2551 */ 2552 static void finish_dispatch(struct rq *rq, struct task_struct *p, 2553 unsigned long qseq_at_dispatch, 2554 u64 dsq_id, u64 enq_flags) 2555 { 2556 struct scx_dispatch_q *dsq; 2557 unsigned long opss; 2558 2559 touch_core_sched_dispatch(rq, p); 2560 retry: 2561 /* 2562 * No need for _acquire here. @p is accessed only after a successful 2563 * try_cmpxchg to DISPATCHING. 2564 */ 2565 opss = atomic_long_read(&p->scx.ops_state); 2566 2567 switch (opss & SCX_OPSS_STATE_MASK) { 2568 case SCX_OPSS_DISPATCHING: 2569 case SCX_OPSS_NONE: 2570 /* someone else already got to it */ 2571 return; 2572 case SCX_OPSS_QUEUED: 2573 /* 2574 * If qseq doesn't match, @p has gone through at least one 2575 * dispatch/dequeue and re-enqueue cycle between 2576 * scx_bpf_dispatch() and here and we have no claim on it. 2577 */ 2578 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch) 2579 return; 2580 2581 /* 2582 * While we know @p is accessible, we don't yet have a claim on 2583 * it - the BPF scheduler is allowed to dispatch tasks 2584 * spuriously and there can be a racing dequeue attempt. Let's 2585 * claim @p by atomically transitioning it from QUEUED to 2586 * DISPATCHING. 2587 */ 2588 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 2589 SCX_OPSS_DISPATCHING))) 2590 break; 2591 goto retry; 2592 case SCX_OPSS_QUEUEING: 2593 /* 2594 * do_enqueue_task() is in the process of transferring the task 2595 * to the BPF scheduler while holding @p's rq lock. As we aren't 2596 * holding any kernel or BPF resource that the enqueue path may 2597 * depend upon, it's safe to wait. 2598 */ 2599 wait_ops_state(p, opss); 2600 goto retry; 2601 } 2602 2603 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); 2604 2605 dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p); 2606 2607 if (dsq->id == SCX_DSQ_LOCAL) 2608 dispatch_to_local_dsq(rq, dsq, p, enq_flags); 2609 else 2610 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); 2611 } 2612 2613 static void flush_dispatch_buf(struct rq *rq) 2614 { 2615 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2616 u32 u; 2617 2618 for (u = 0; u < dspc->cursor; u++) { 2619 struct scx_dsp_buf_ent *ent = &dspc->buf[u]; 2620 2621 finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id, 2622 ent->enq_flags); 2623 } 2624 2625 dspc->nr_tasks += dspc->cursor; 2626 dspc->cursor = 0; 2627 } 2628 2629 static int balance_one(struct rq *rq, struct task_struct *prev) 2630 { 2631 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2632 bool prev_on_scx = prev->sched_class == &ext_sched_class; 2633 int nr_loops = SCX_DSP_MAX_LOOPS; 2634 2635 lockdep_assert_rq_held(rq); 2636 rq->scx.flags |= SCX_RQ_IN_BALANCE; 2637 rq->scx.flags &= ~SCX_RQ_BAL_KEEP; 2638 2639 if (static_branch_unlikely(&scx_ops_cpu_preempt) && 2640 unlikely(rq->scx.cpu_released)) { 2641 /* 2642 * If the previous sched_class for the current CPU was not SCX, 2643 * notify the BPF scheduler that it again has control of the 2644 * core. This callback complements ->cpu_release(), which is 2645 * emitted in scx_next_task_picked(). 
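 * Note that switch_class() sets rq->scx.cpu_released right after invoking
 * ops.cpu_release() and the block below clears it right after invoking
 * ops.cpu_acquire(), so each callback is emitted at most once per
 * hand-over.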
2646 */ 2647 if (SCX_HAS_OP(cpu_acquire)) 2648 SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL); 2649 rq->scx.cpu_released = false; 2650 } 2651 2652 if (prev_on_scx) { 2653 update_curr_scx(rq); 2654 2655 /* 2656 * If @prev is runnable & has slice left, it has priority and 2657 * fetching more just increases latency for the fetched tasks. 2658 * Tell pick_task_scx() to keep running @prev. If the BPF 2659 * scheduler wants to handle this explicitly, it should 2660 * implement ->cpu_release(). 2661 * 2662 * See scx_ops_disable_workfn() for the explanation on the 2663 * bypassing test. 2664 */ 2665 if ((prev->scx.flags & SCX_TASK_QUEUED) && 2666 prev->scx.slice && !scx_rq_bypassing(rq)) { 2667 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2668 goto has_tasks; 2669 } 2670 } 2671 2672 /* if there already are tasks to run, nothing to do */ 2673 if (rq->scx.local_dsq.nr) 2674 goto has_tasks; 2675 2676 if (consume_global_dsq(rq)) 2677 goto has_tasks; 2678 2679 if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq)) 2680 goto no_tasks; 2681 2682 dspc->rq = rq; 2683 2684 /* 2685 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock, 2686 * the local DSQ might still end up empty after a successful 2687 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch() 2688 * produced some tasks, retry. The BPF scheduler may depend on this 2689 * looping behavior to simplify its implementation. 2690 */ 2691 do { 2692 dspc->nr_tasks = 0; 2693 2694 SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq), 2695 prev_on_scx ? prev : NULL); 2696 2697 flush_dispatch_buf(rq); 2698 2699 if (rq->scx.local_dsq.nr) 2700 goto has_tasks; 2701 if (consume_global_dsq(rq)) 2702 goto has_tasks; 2703 2704 /* 2705 * ops.dispatch() can trap us in this loop by repeatedly 2706 * dispatching ineligible tasks. Break out once in a while to 2707 * allow the watchdog to run. As IRQ can't be enabled in 2708 * balance(), we want to complete this scheduling cycle and then 2709 * start a new one. IOW, we want to call resched_curr() on the 2710 * next, most likely idle, task, not the current one. Use 2711 * scx_bpf_kick_cpu() for deferred kicking. 2712 */ 2713 if (unlikely(!--nr_loops)) { 2714 scx_bpf_kick_cpu(cpu_of(rq), 0); 2715 break; 2716 } 2717 } while (dspc->nr_tasks); 2718 2719 no_tasks: 2720 /* 2721 * Didn't find another task to run. Keep running @prev unless 2722 * %SCX_OPS_ENQ_LAST is in effect. 2723 */ 2724 if ((prev->scx.flags & SCX_TASK_QUEUED) && 2725 (!static_branch_unlikely(&scx_ops_enq_last) || 2726 scx_rq_bypassing(rq))) { 2727 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2728 goto has_tasks; 2729 } 2730 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2731 return false; 2732 2733 has_tasks: 2734 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2735 return true; 2736 } 2737 2738 static int balance_scx(struct rq *rq, struct task_struct *prev, 2739 struct rq_flags *rf) 2740 { 2741 int ret; 2742 2743 rq_unpin_lock(rq, rf); 2744 2745 ret = balance_one(rq, prev); 2746 2747 #ifdef CONFIG_SCHED_SMT 2748 /* 2749 * When core-sched is enabled, this ops.balance() call will be followed 2750 * by pick_task_scx() on this CPU and the SMT siblings. Balance the 2751 * siblings too. 
2752 */
2753 if (sched_core_enabled(rq)) {
2754 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2755 int scpu;
2756
2757 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2758 struct rq *srq = cpu_rq(scpu);
2759 struct task_struct *sprev = srq->curr;
2760
2761 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2762 update_rq_clock(srq);
2763 balance_one(srq, sprev);
2764 }
2765 }
2766 #endif
2767 rq_repin_lock(rq, rf);
2768
2769 return ret;
2770 }
2771
2772 static void process_ddsp_deferred_locals(struct rq *rq)
2773 {
2774 struct task_struct *p;
2775
2776 lockdep_assert_rq_held(rq);
2777
2778 /*
2779 * Now that @rq can be unlocked, execute the deferred enqueueing of
2780 * tasks directly dispatched to the local DSQs of other CPUs. See
2781 * direct_dispatch(). Keep popping from the head instead of using
2782 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
2783 * temporarily.
2784 */
2785 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2786 struct task_struct, scx.dsq_list.node))) {
2787 struct scx_dispatch_q *dsq;
2788
2789 list_del_init(&p->scx.dsq_list.node);
2790
2791 dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
2792 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2793 dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
2794 }
2795 }
2796
2797 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2798 {
2799 if (p->scx.flags & SCX_TASK_QUEUED) {
2800 /*
2801 * Core-sched might decide to execute @p before it is
2802 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2803 */
2804 ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
2805 dispatch_dequeue(rq, p);
2806 }
2807
2808 p->se.exec_start = rq_clock_task(rq);
2809
2810 /* see dequeue_task_scx() on why we skip when !QUEUED */
2811 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
2812 SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
2813
2814 clr_task_runnable(p, true);
2815
2816 /*
2817 * @p is getting newly scheduled or got kicked after someone updated its
2818 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2819 */
2820 if ((p->scx.slice == SCX_SLICE_INF) !=
2821 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2822 if (p->scx.slice == SCX_SLICE_INF)
2823 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2824 else
2825 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2826
2827 sched_update_tick_dependency(rq);
2828
2829 /*
2830 * For now, let's refresh the load_avgs just when transitioning
2831 * in and out of nohz. In the future, we might want to add a
2832 * mechanism which calls the following periodically on
2833 * tick-stopped CPUs.
2834 */
2835 update_other_load_avgs(rq);
2836 }
2837 }
2838
2839 static enum scx_cpu_preempt_reason
2840 preempt_reason_from_class(const struct sched_class *class)
2841 {
2842 #ifdef CONFIG_SMP
2843 if (class == &stop_sched_class)
2844 return SCX_CPU_PREEMPT_STOP;
2845 #endif
2846 if (class == &dl_sched_class)
2847 return SCX_CPU_PREEMPT_DL;
2848 if (class == &rt_sched_class)
2849 return SCX_CPU_PREEMPT_RT;
2850 return SCX_CPU_PREEMPT_UNKNOWN;
2851 }
2852
2853 static void switch_class(struct rq *rq, struct task_struct *next)
2854 {
2855 const struct sched_class *next_class = next->sched_class;
2856
2857 #ifdef CONFIG_SMP
2858 /*
2859 * Pairs with the smp_load_acquire() issued by a CPU in
2860 * kick_cpus_irq_workfn() that is waiting for this CPU to perform a
2861 * resched.
2862 */ 2863 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); 2864 #endif 2865 if (!static_branch_unlikely(&scx_ops_cpu_preempt)) 2866 return; 2867 2868 /* 2869 * The callback is conceptually meant to convey that the CPU is no 2870 * longer under the control of SCX. Therefore, don't invoke the callback 2871 * if the next class is below SCX (in which case the BPF scheduler has 2872 * actively decided not to schedule any tasks on the CPU). 2873 */ 2874 if (sched_class_above(&ext_sched_class, next_class)) 2875 return; 2876 2877 /* 2878 * At this point we know that SCX was preempted by a higher priority 2879 * sched_class, so invoke the ->cpu_release() callback if we have not 2880 * done so already. We only send the callback once between SCX being 2881 * preempted, and it regaining control of the CPU. 2882 * 2883 * ->cpu_release() complements ->cpu_acquire(), which is emitted the 2884 * next time that balance_scx() is invoked. 2885 */ 2886 if (!rq->scx.cpu_released) { 2887 if (SCX_HAS_OP(cpu_release)) { 2888 struct scx_cpu_release_args args = { 2889 .reason = preempt_reason_from_class(next_class), 2890 .task = next, 2891 }; 2892 2893 SCX_CALL_OP(SCX_KF_CPU_RELEASE, 2894 cpu_release, cpu_of(rq), &args); 2895 } 2896 rq->scx.cpu_released = true; 2897 } 2898 } 2899 2900 static void put_prev_task_scx(struct rq *rq, struct task_struct *p, 2901 struct task_struct *next) 2902 { 2903 update_curr_scx(rq); 2904 2905 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2906 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED)) 2907 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true); 2908 2909 if (p->scx.flags & SCX_TASK_QUEUED) { 2910 set_task_runnable(rq, p); 2911 2912 /* 2913 * If @p has slice left and is being put, @p is getting 2914 * preempted by a higher priority scheduler class or core-sched 2915 * forcing a different task. Leave it at the head of the local 2916 * DSQ. 2917 */ 2918 if (p->scx.slice && !scx_rq_bypassing(rq)) { 2919 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); 2920 return; 2921 } 2922 2923 /* 2924 * If @p is runnable but we're about to enter a lower 2925 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell 2926 * ops.enqueue() that @p is the only one available for this cpu, 2927 * which should trigger an explicit follow-up scheduling event. 2928 */ 2929 if (sched_class_above(&ext_sched_class, next->sched_class)) { 2930 WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last)); 2931 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1); 2932 } else { 2933 do_enqueue_task(rq, p, 0, -1); 2934 } 2935 } 2936 2937 if (next && next->sched_class != &ext_sched_class) 2938 switch_class(rq, next); 2939 } 2940 2941 static struct task_struct *first_local_task(struct rq *rq) 2942 { 2943 return list_first_entry_or_null(&rq->scx.local_dsq.list, 2944 struct task_struct, scx.dsq_list.node); 2945 } 2946 2947 static struct task_struct *pick_task_scx(struct rq *rq) 2948 { 2949 struct task_struct *prev = rq->curr; 2950 struct task_struct *p; 2951 2952 /* 2953 * If balance_scx() is telling us to keep running @prev, replenish slice 2954 * if necessary and keep running @prev. Otherwise, pop the first one 2955 * from the local DSQ. 2956 * 2957 * WORKAROUND: 2958 * 2959 * %SCX_RQ_BAL_KEEP should be set iff $prev is on SCX as it must just 2960 * have gone through balance_scx(). Unfortunately, there currently is a 2961 * bug where fair could say yes on balance() but no on pick_task(), 2962 * which then ends up calling pick_task_scx() without preceding 2963 * balance_scx(). 
2964 *
2965 * For now, ignore cases where $prev is not on SCX. This isn't great and
2966 * can theoretically lead to stalls. However, for switch_all cases, this
2967 * happens only while a BPF scheduler is being loaded or unloaded, and,
2968 * for partial cases, fair will likely keep triggering this CPU.
2969 *
2970 * Once fair is fixed, restore WARN_ON_ONCE().
2971 */
2972 if ((rq->scx.flags & SCX_RQ_BAL_KEEP) &&
2973 prev->sched_class == &ext_sched_class) {
2974 p = prev;
2975 if (!p->scx.slice)
2976 p->scx.slice = SCX_SLICE_DFL;
2977 } else {
2978 p = first_local_task(rq);
2979 if (!p)
2980 return NULL;
2981
2982 if (unlikely(!p->scx.slice)) {
2983 if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
2984 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
2985 p->comm, p->pid, __func__);
2986 scx_warned_zero_slice = true;
2987 }
2988 p->scx.slice = SCX_SLICE_DFL;
2989 }
2990 }
2991
2992 return p;
2993 }
2994
2995 #ifdef CONFIG_SCHED_CORE
2996 /**
2997 * scx_prio_less - Task ordering for core-sched
2998 * @a: task A
2999 * @b: task B
3000 *
3001 * Core-sched is implemented as an additional scheduling layer on top of the
3002 * usual sched_class'es and needs to find out the expected task ordering. For
3003 * SCX, core-sched calls this function to interrogate the task ordering.
3004 *
3005 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3006 * to implement the default task ordering. The older the timestamp, the higher
3007 * priority the task - the global FIFO ordering matching the default scheduling
3008 * behavior.
3009 *
3010 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3011 * implement FIFO ordering within each local DSQ. See pick_task_scx().
3012 */
3013 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3014 bool in_fi)
3015 {
3016 /*
3017 * The const qualifiers are dropped from task_struct pointers when
3018 * calling ops.core_sched_before(). Accesses are controlled by the
3019 * verifier.
3020 */
3021 if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3022 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3023 (struct task_struct *)a,
3024 (struct task_struct *)b);
3025 else
3026 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3027 }
3028 #endif /* CONFIG_SCHED_CORE */
3029
3030 #ifdef CONFIG_SMP
3031
3032 static bool test_and_clear_cpu_idle(int cpu)
3033 {
3034 #ifdef CONFIG_SCHED_SMT
3035 /*
3036 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3037 * cluster is not wholly idle either way. This also prevents
3038 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3039 */
3040 if (sched_smt_active()) {
3041 const struct cpumask *smt = cpu_smt_mask(cpu);
3042
3043 /*
3044 * If offline, @cpu is not its own sibling and
3045 * scx_pick_idle_cpu() can get caught in an infinite loop as
3046 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3047 * is eventually cleared.
3048 */ 3049 if (cpumask_intersects(smt, idle_masks.smt)) 3050 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt); 3051 else if (cpumask_test_cpu(cpu, idle_masks.smt)) 3052 __cpumask_clear_cpu(cpu, idle_masks.smt); 3053 } 3054 #endif 3055 return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu); 3056 } 3057 3058 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) 3059 { 3060 int cpu; 3061 3062 retry: 3063 if (sched_smt_active()) { 3064 cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed); 3065 if (cpu < nr_cpu_ids) 3066 goto found; 3067 3068 if (flags & SCX_PICK_IDLE_CORE) 3069 return -EBUSY; 3070 } 3071 3072 cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed); 3073 if (cpu >= nr_cpu_ids) 3074 return -EBUSY; 3075 3076 found: 3077 if (test_and_clear_cpu_idle(cpu)) 3078 return cpu; 3079 else 3080 goto retry; 3081 } 3082 3083 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, 3084 u64 wake_flags, bool *found) 3085 { 3086 s32 cpu; 3087 3088 *found = false; 3089 3090 /* 3091 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is 3092 * under utilized, wake up @p to the local DSQ of the waker. Checking 3093 * only for an empty local DSQ is insufficient as it could give the 3094 * wakee an unfair advantage when the system is oversaturated. 3095 * Checking only for the presence of idle CPUs is also insufficient as 3096 * the local DSQ of the waker could have tasks piled up on it even if 3097 * there is an idle core elsewhere on the system. 3098 */ 3099 cpu = smp_processor_id(); 3100 if ((wake_flags & SCX_WAKE_SYNC) && 3101 !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) && 3102 cpu_rq(cpu)->scx.local_dsq.nr == 0) { 3103 if (cpumask_test_cpu(cpu, p->cpus_ptr)) 3104 goto cpu_found; 3105 } 3106 3107 /* 3108 * If CPU has SMT, any wholly idle CPU is likely a better pick than 3109 * partially idle @prev_cpu. 3110 */ 3111 if (sched_smt_active()) { 3112 if (cpumask_test_cpu(prev_cpu, idle_masks.smt) && 3113 test_and_clear_cpu_idle(prev_cpu)) { 3114 cpu = prev_cpu; 3115 goto cpu_found; 3116 } 3117 3118 cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE); 3119 if (cpu >= 0) 3120 goto cpu_found; 3121 } 3122 3123 if (test_and_clear_cpu_idle(prev_cpu)) { 3124 cpu = prev_cpu; 3125 goto cpu_found; 3126 } 3127 3128 cpu = scx_pick_idle_cpu(p->cpus_ptr, 0); 3129 if (cpu >= 0) 3130 goto cpu_found; 3131 3132 return prev_cpu; 3133 3134 cpu_found: 3135 *found = true; 3136 return cpu; 3137 } 3138 3139 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) 3140 { 3141 /* 3142 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it 3143 * can be a good migration opportunity with low cache and memory 3144 * footprint. Returning a CPU different than @prev_cpu triggers 3145 * immediate rq migration. However, for SCX, as the current rq 3146 * association doesn't dictate where the task is going to run, this 3147 * doesn't fit well. If necessary, we can later add a dedicated method 3148 * which can decide to preempt self to force it through the regular 3149 * scheduling path. 
3150 */ 3151 if (unlikely(wake_flags & WF_EXEC)) 3152 return prev_cpu; 3153 3154 if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) { 3155 s32 cpu; 3156 struct task_struct **ddsp_taskp; 3157 3158 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 3159 WARN_ON_ONCE(*ddsp_taskp); 3160 *ddsp_taskp = p; 3161 3162 cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU, 3163 select_cpu, p, prev_cpu, wake_flags); 3164 *ddsp_taskp = NULL; 3165 if (ops_cpu_valid(cpu, "from ops.select_cpu()")) 3166 return cpu; 3167 else 3168 return prev_cpu; 3169 } else { 3170 bool found; 3171 s32 cpu; 3172 3173 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found); 3174 if (found) { 3175 p->scx.slice = SCX_SLICE_DFL; 3176 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; 3177 } 3178 return cpu; 3179 } 3180 } 3181 3182 static void task_woken_scx(struct rq *rq, struct task_struct *p) 3183 { 3184 run_deferred(rq); 3185 } 3186 3187 static void set_cpus_allowed_scx(struct task_struct *p, 3188 struct affinity_context *ac) 3189 { 3190 set_cpus_allowed_common(p, ac); 3191 3192 /* 3193 * The effective cpumask is stored in @p->cpus_ptr which may temporarily 3194 * differ from the configured one in @p->cpus_mask. Always tell the bpf 3195 * scheduler the effective one. 3196 * 3197 * Fine-grained memory write control is enforced by BPF making the const 3198 * designation pointless. Cast it away when calling the operation. 3199 */ 3200 if (SCX_HAS_OP(set_cpumask)) 3201 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, 3202 (struct cpumask *)p->cpus_ptr); 3203 } 3204 3205 static void reset_idle_masks(void) 3206 { 3207 /* 3208 * Consider all online cpus idle. Should converge to the actual state 3209 * quickly. 3210 */ 3211 cpumask_copy(idle_masks.cpu, cpu_online_mask); 3212 cpumask_copy(idle_masks.smt, cpu_online_mask); 3213 } 3214 3215 void __scx_update_idle(struct rq *rq, bool idle) 3216 { 3217 int cpu = cpu_of(rq); 3218 3219 if (SCX_HAS_OP(update_idle) && !scx_rq_bypassing(rq)) { 3220 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle); 3221 if (!static_branch_unlikely(&scx_builtin_idle_enabled)) 3222 return; 3223 } 3224 3225 if (idle) 3226 cpumask_set_cpu(cpu, idle_masks.cpu); 3227 else 3228 cpumask_clear_cpu(cpu, idle_masks.cpu); 3229 3230 #ifdef CONFIG_SCHED_SMT 3231 if (sched_smt_active()) { 3232 const struct cpumask *smt = cpu_smt_mask(cpu); 3233 3234 if (idle) { 3235 /* 3236 * idle_masks.smt handling is racy but that's fine as 3237 * it's only for optimization and self-correcting. 3238 */ 3239 for_each_cpu(cpu, smt) { 3240 if (!cpumask_test_cpu(cpu, idle_masks.cpu)) 3241 return; 3242 } 3243 cpumask_or(idle_masks.smt, idle_masks.smt, smt); 3244 } else { 3245 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt); 3246 } 3247 } 3248 #endif 3249 } 3250 3251 static void handle_hotplug(struct rq *rq, bool online) 3252 { 3253 int cpu = cpu_of(rq); 3254 3255 atomic_long_inc(&scx_hotplug_seq); 3256 3257 if (online && SCX_HAS_OP(cpu_online)) 3258 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu); 3259 else if (!online && SCX_HAS_OP(cpu_offline)) 3260 SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu); 3261 else 3262 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 3263 "cpu %d going %s, exiting scheduler", cpu, 3264 online ? 
"online" : "offline"); 3265 } 3266 3267 void scx_rq_activate(struct rq *rq) 3268 { 3269 handle_hotplug(rq, true); 3270 } 3271 3272 void scx_rq_deactivate(struct rq *rq) 3273 { 3274 handle_hotplug(rq, false); 3275 } 3276 3277 static void rq_online_scx(struct rq *rq) 3278 { 3279 rq->scx.flags |= SCX_RQ_ONLINE; 3280 } 3281 3282 static void rq_offline_scx(struct rq *rq) 3283 { 3284 rq->scx.flags &= ~SCX_RQ_ONLINE; 3285 } 3286 3287 #else /* CONFIG_SMP */ 3288 3289 static bool test_and_clear_cpu_idle(int cpu) { return false; } 3290 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; } 3291 static void reset_idle_masks(void) {} 3292 3293 #endif /* CONFIG_SMP */ 3294 3295 static bool check_rq_for_timeouts(struct rq *rq) 3296 { 3297 struct task_struct *p; 3298 struct rq_flags rf; 3299 bool timed_out = false; 3300 3301 rq_lock_irqsave(rq, &rf); 3302 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { 3303 unsigned long last_runnable = p->scx.runnable_at; 3304 3305 if (unlikely(time_after(jiffies, 3306 last_runnable + scx_watchdog_timeout))) { 3307 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); 3308 3309 scx_ops_error_kind(SCX_EXIT_ERROR_STALL, 3310 "%s[%d] failed to run for %u.%03us", 3311 p->comm, p->pid, 3312 dur_ms / 1000, dur_ms % 1000); 3313 timed_out = true; 3314 break; 3315 } 3316 } 3317 rq_unlock_irqrestore(rq, &rf); 3318 3319 return timed_out; 3320 } 3321 3322 static void scx_watchdog_workfn(struct work_struct *work) 3323 { 3324 int cpu; 3325 3326 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 3327 3328 for_each_online_cpu(cpu) { 3329 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) 3330 break; 3331 3332 cond_resched(); 3333 } 3334 queue_delayed_work(system_unbound_wq, to_delayed_work(work), 3335 scx_watchdog_timeout / 2); 3336 } 3337 3338 void scx_tick(struct rq *rq) 3339 { 3340 unsigned long last_check; 3341 3342 if (!scx_enabled()) 3343 return; 3344 3345 last_check = READ_ONCE(scx_watchdog_timestamp); 3346 if (unlikely(time_after(jiffies, 3347 last_check + READ_ONCE(scx_watchdog_timeout)))) { 3348 u32 dur_ms = jiffies_to_msecs(jiffies - last_check); 3349 3350 scx_ops_error_kind(SCX_EXIT_ERROR_STALL, 3351 "watchdog failed to check in for %u.%03us", 3352 dur_ms / 1000, dur_ms % 1000); 3353 } 3354 3355 update_other_load_avgs(rq); 3356 } 3357 3358 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) 3359 { 3360 update_curr_scx(rq); 3361 3362 /* 3363 * While disabling, always resched and refresh core-sched timestamp as 3364 * we can't trust the slice management or ops.core_sched_before(). 3365 */ 3366 if (scx_rq_bypassing(rq)) { 3367 curr->scx.slice = 0; 3368 touch_core_sched(rq, curr); 3369 } else if (SCX_HAS_OP(tick)) { 3370 SCX_CALL_OP(SCX_KF_REST, tick, curr); 3371 } 3372 3373 if (!curr->scx.slice) 3374 resched_curr(rq); 3375 } 3376 3377 #ifdef CONFIG_EXT_GROUP_SCHED 3378 static struct cgroup *tg_cgrp(struct task_group *tg) 3379 { 3380 /* 3381 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup, 3382 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the 3383 * root cgroup. 
3384 */ 3385 if (tg && tg->css.cgroup) 3386 return tg->css.cgroup; 3387 else 3388 return &cgrp_dfl_root.cgrp; 3389 } 3390 3391 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg), 3392 3393 #else /* CONFIG_EXT_GROUP_SCHED */ 3394 3395 #define SCX_INIT_TASK_ARGS_CGROUP(tg) 3396 3397 #endif /* CONFIG_EXT_GROUP_SCHED */ 3398 3399 static enum scx_task_state scx_get_task_state(const struct task_struct *p) 3400 { 3401 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT; 3402 } 3403 3404 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state) 3405 { 3406 enum scx_task_state prev_state = scx_get_task_state(p); 3407 bool warn = false; 3408 3409 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS)); 3410 3411 switch (state) { 3412 case SCX_TASK_NONE: 3413 break; 3414 case SCX_TASK_INIT: 3415 warn = prev_state != SCX_TASK_NONE; 3416 break; 3417 case SCX_TASK_READY: 3418 warn = prev_state == SCX_TASK_NONE; 3419 break; 3420 case SCX_TASK_ENABLED: 3421 warn = prev_state != SCX_TASK_READY; 3422 break; 3423 default: 3424 warn = true; 3425 return; 3426 } 3427 3428 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]", 3429 prev_state, state, p->comm, p->pid); 3430 3431 p->scx.flags &= ~SCX_TASK_STATE_MASK; 3432 p->scx.flags |= state << SCX_TASK_STATE_SHIFT; 3433 } 3434 3435 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork) 3436 { 3437 int ret; 3438 3439 p->scx.disallow = false; 3440 3441 if (SCX_HAS_OP(init_task)) { 3442 struct scx_init_task_args args = { 3443 SCX_INIT_TASK_ARGS_CGROUP(tg) 3444 .fork = fork, 3445 }; 3446 3447 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args); 3448 if (unlikely(ret)) { 3449 ret = ops_sanitize_err("init_task", ret); 3450 return ret; 3451 } 3452 } 3453 3454 scx_set_task_state(p, SCX_TASK_INIT); 3455 3456 if (p->scx.disallow) { 3457 if (!fork) { 3458 struct rq *rq; 3459 struct rq_flags rf; 3460 3461 rq = task_rq_lock(p, &rf); 3462 3463 /* 3464 * We're in the load path and @p->policy will be applied 3465 * right after. Reverting @p->policy here and rejecting 3466 * %SCHED_EXT transitions from scx_check_setscheduler() 3467 * guarantees that if ops.init_task() sets @p->disallow, 3468 * @p can never be in SCX. 3469 */ 3470 if (p->policy == SCHED_EXT) { 3471 p->policy = SCHED_NORMAL; 3472 atomic_long_inc(&scx_nr_rejected); 3473 } 3474 3475 task_rq_unlock(rq, p, &rf); 3476 } else if (p->policy == SCHED_EXT) { 3477 scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork", 3478 p->comm, p->pid); 3479 } 3480 } 3481 3482 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 3483 return 0; 3484 } 3485 3486 static void scx_ops_enable_task(struct task_struct *p) 3487 { 3488 u32 weight; 3489 3490 lockdep_assert_rq_held(task_rq(p)); 3491 3492 /* 3493 * Set the weight before calling ops.enable() so that the scheduler 3494 * doesn't see a stale value if they inspect the task struct. 
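 * The stored value is on the same cgroup weight scale that
 * ops.cgroup_init() sees; e.g. a nice-0 task with scheduler weight 1024
 * presumably ends up with p->scx.weight == 100.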
3495 */ 3496 if (task_has_idle_policy(p)) 3497 weight = WEIGHT_IDLEPRIO; 3498 else 3499 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; 3500 3501 p->scx.weight = sched_weight_to_cgroup(weight); 3502 3503 if (SCX_HAS_OP(enable)) 3504 SCX_CALL_OP_TASK(SCX_KF_REST, enable, p); 3505 scx_set_task_state(p, SCX_TASK_ENABLED); 3506 3507 if (SCX_HAS_OP(set_weight)) 3508 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight); 3509 } 3510 3511 static void scx_ops_disable_task(struct task_struct *p) 3512 { 3513 lockdep_assert_rq_held(task_rq(p)); 3514 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); 3515 3516 if (SCX_HAS_OP(disable)) 3517 SCX_CALL_OP(SCX_KF_REST, disable, p); 3518 scx_set_task_state(p, SCX_TASK_READY); 3519 } 3520 3521 static void scx_ops_exit_task(struct task_struct *p) 3522 { 3523 struct scx_exit_task_args args = { 3524 .cancelled = false, 3525 }; 3526 3527 lockdep_assert_rq_held(task_rq(p)); 3528 3529 switch (scx_get_task_state(p)) { 3530 case SCX_TASK_NONE: 3531 return; 3532 case SCX_TASK_INIT: 3533 args.cancelled = true; 3534 break; 3535 case SCX_TASK_READY: 3536 break; 3537 case SCX_TASK_ENABLED: 3538 scx_ops_disable_task(p); 3539 break; 3540 default: 3541 WARN_ON_ONCE(true); 3542 return; 3543 } 3544 3545 if (SCX_HAS_OP(exit_task)) 3546 SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args); 3547 scx_set_task_state(p, SCX_TASK_NONE); 3548 } 3549 3550 void init_scx_entity(struct sched_ext_entity *scx) 3551 { 3552 /* 3553 * init_idle() calls this function again after fork sequence is 3554 * complete. Don't touch ->tasks_node as it's already linked. 3555 */ 3556 memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node)); 3557 3558 INIT_LIST_HEAD(&scx->dsq_list.node); 3559 RB_CLEAR_NODE(&scx->dsq_priq); 3560 scx->sticky_cpu = -1; 3561 scx->holding_cpu = -1; 3562 INIT_LIST_HEAD(&scx->runnable_node); 3563 scx->runnable_at = jiffies; 3564 scx->ddsp_dsq_id = SCX_DSQ_INVALID; 3565 scx->slice = SCX_SLICE_DFL; 3566 } 3567 3568 void scx_pre_fork(struct task_struct *p) 3569 { 3570 /* 3571 * BPF scheduler enable/disable paths want to be able to iterate and 3572 * update all tasks which can become complex when racing forks. As 3573 * enable/disable are very cold paths, let's use a percpu_rwsem to 3574 * exclude forks. 3575 */ 3576 percpu_down_read(&scx_fork_rwsem); 3577 } 3578 3579 int scx_fork(struct task_struct *p) 3580 { 3581 percpu_rwsem_assert_held(&scx_fork_rwsem); 3582 3583 if (scx_ops_init_task_enabled) 3584 return scx_ops_init_task(p, task_group(p), true); 3585 else 3586 return 0; 3587 } 3588 3589 void scx_post_fork(struct task_struct *p) 3590 { 3591 if (scx_ops_init_task_enabled) { 3592 scx_set_task_state(p, SCX_TASK_READY); 3593 3594 /* 3595 * Enable the task immediately if it's running on sched_ext. 3596 * Otherwise, it'll be enabled in switching_to_scx() if and 3597 * when it's ever configured to run with a SCHED_EXT policy. 
3598 */ 3599 if (p->sched_class == &ext_sched_class) { 3600 struct rq_flags rf; 3601 struct rq *rq; 3602 3603 rq = task_rq_lock(p, &rf); 3604 scx_ops_enable_task(p); 3605 task_rq_unlock(rq, p, &rf); 3606 } 3607 } 3608 3609 spin_lock_irq(&scx_tasks_lock); 3610 list_add_tail(&p->scx.tasks_node, &scx_tasks); 3611 spin_unlock_irq(&scx_tasks_lock); 3612 3613 percpu_up_read(&scx_fork_rwsem); 3614 } 3615 3616 void scx_cancel_fork(struct task_struct *p) 3617 { 3618 if (scx_enabled()) { 3619 struct rq *rq; 3620 struct rq_flags rf; 3621 3622 rq = task_rq_lock(p, &rf); 3623 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY); 3624 scx_ops_exit_task(p); 3625 task_rq_unlock(rq, p, &rf); 3626 } 3627 3628 percpu_up_read(&scx_fork_rwsem); 3629 } 3630 3631 void sched_ext_free(struct task_struct *p) 3632 { 3633 unsigned long flags; 3634 3635 spin_lock_irqsave(&scx_tasks_lock, flags); 3636 list_del_init(&p->scx.tasks_node); 3637 spin_unlock_irqrestore(&scx_tasks_lock, flags); 3638 3639 /* 3640 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY -> 3641 * ENABLED transitions can't race us. Disable ops for @p. 3642 */ 3643 if (scx_get_task_state(p) != SCX_TASK_NONE) { 3644 struct rq_flags rf; 3645 struct rq *rq; 3646 3647 rq = task_rq_lock(p, &rf); 3648 scx_ops_exit_task(p); 3649 task_rq_unlock(rq, p, &rf); 3650 } 3651 } 3652 3653 static void reweight_task_scx(struct rq *rq, struct task_struct *p, 3654 const struct load_weight *lw) 3655 { 3656 lockdep_assert_rq_held(task_rq(p)); 3657 3658 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); 3659 if (SCX_HAS_OP(set_weight)) 3660 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight); 3661 } 3662 3663 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio) 3664 { 3665 } 3666 3667 static void switching_to_scx(struct rq *rq, struct task_struct *p) 3668 { 3669 scx_ops_enable_task(p); 3670 3671 /* 3672 * set_cpus_allowed_scx() is not called while @p is associated with a 3673 * different scheduler class. Keep the BPF scheduler up-to-date. 3674 */ 3675 if (SCX_HAS_OP(set_cpumask)) 3676 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, 3677 (struct cpumask *)p->cpus_ptr); 3678 } 3679 3680 static void switched_from_scx(struct rq *rq, struct task_struct *p) 3681 { 3682 scx_ops_disable_task(p); 3683 } 3684 3685 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} 3686 static void switched_to_scx(struct rq *rq, struct task_struct *p) {} 3687 3688 int scx_check_setscheduler(struct task_struct *p, int policy) 3689 { 3690 lockdep_assert_rq_held(task_rq(p)); 3691 3692 /* if disallow, reject transitioning into SCX */ 3693 if (scx_enabled() && READ_ONCE(p->scx.disallow) && 3694 p->policy != policy && policy == SCHED_EXT) 3695 return -EACCES; 3696 3697 return 0; 3698 } 3699 3700 #ifdef CONFIG_NO_HZ_FULL 3701 bool scx_can_stop_tick(struct rq *rq) 3702 { 3703 struct task_struct *p = rq->curr; 3704 3705 if (scx_rq_bypassing(rq)) 3706 return false; 3707 3708 if (p->sched_class != &ext_sched_class) 3709 return true; 3710 3711 /* 3712 * @rq can dispatch from different DSQs, so we can't tell whether it 3713 * needs the tick or not by looking at nr_running. Allow stopping ticks 3714 * iff the BPF scheduler indicated so. See set_next_task_scx(). 
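 *
 * (The BPF scheduler indicates this by running the task with an
 * infinite slice, i.e. p->scx.slice == SCX_SLICE_INF, which
 * set_next_task_scx() translates into %SCX_RQ_CAN_STOP_TICK.)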
3715 */ 3716 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; 3717 } 3718 #endif 3719 3720 #ifdef CONFIG_EXT_GROUP_SCHED 3721 3722 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem); 3723 static bool scx_cgroup_enabled; 3724 static bool cgroup_warned_missing_weight; 3725 static bool cgroup_warned_missing_idle; 3726 3727 static void scx_cgroup_warn_missing_weight(struct task_group *tg) 3728 { 3729 if (scx_ops_enable_state() == SCX_OPS_DISABLED || 3730 cgroup_warned_missing_weight) 3731 return; 3732 3733 if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent) 3734 return; 3735 3736 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n", 3737 scx_ops.name); 3738 cgroup_warned_missing_weight = true; 3739 } 3740 3741 static void scx_cgroup_warn_missing_idle(struct task_group *tg) 3742 { 3743 if (!scx_cgroup_enabled || cgroup_warned_missing_idle) 3744 return; 3745 3746 if (!tg->idle) 3747 return; 3748 3749 pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n", 3750 scx_ops.name); 3751 cgroup_warned_missing_idle = true; 3752 } 3753 3754 int scx_tg_online(struct task_group *tg) 3755 { 3756 int ret = 0; 3757 3758 WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED)); 3759 3760 percpu_down_read(&scx_cgroup_rwsem); 3761 3762 scx_cgroup_warn_missing_weight(tg); 3763 3764 if (scx_cgroup_enabled) { 3765 if (SCX_HAS_OP(cgroup_init)) { 3766 struct scx_cgroup_init_args args = 3767 { .weight = tg->scx_weight }; 3768 3769 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init, 3770 tg->css.cgroup, &args); 3771 if (ret) 3772 ret = ops_sanitize_err("cgroup_init", ret); 3773 } 3774 if (ret == 0) 3775 tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED; 3776 } else { 3777 tg->scx_flags |= SCX_TG_ONLINE; 3778 } 3779 3780 percpu_up_read(&scx_cgroup_rwsem); 3781 return ret; 3782 } 3783 3784 void scx_tg_offline(struct task_group *tg) 3785 { 3786 WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE)); 3787 3788 percpu_down_read(&scx_cgroup_rwsem); 3789 3790 if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED)) 3791 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup); 3792 tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); 3793 3794 percpu_up_read(&scx_cgroup_rwsem); 3795 } 3796 3797 int scx_cgroup_can_attach(struct cgroup_taskset *tset) 3798 { 3799 struct cgroup_subsys_state *css; 3800 struct task_struct *p; 3801 int ret; 3802 3803 /* released in scx_finish/cancel_attach() */ 3804 percpu_down_read(&scx_cgroup_rwsem); 3805 3806 if (!scx_cgroup_enabled) 3807 return 0; 3808 3809 cgroup_taskset_for_each(p, css, tset) { 3810 struct cgroup *from = tg_cgrp(task_group(p)); 3811 struct cgroup *to = tg_cgrp(css_tg(css)); 3812 3813 WARN_ON_ONCE(p->scx.cgrp_moving_from); 3814 3815 /* 3816 * sched_move_task() omits identity migrations. Let's match the 3817 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move() 3818 * always match one-to-one. 
3819 */ 3820 if (from == to) 3821 continue; 3822 3823 if (SCX_HAS_OP(cgroup_prep_move)) { 3824 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move, 3825 p, from, css->cgroup); 3826 if (ret) 3827 goto err; 3828 } 3829 3830 p->scx.cgrp_moving_from = from; 3831 } 3832 3833 return 0; 3834 3835 err: 3836 cgroup_taskset_for_each(p, css, tset) { 3837 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from) 3838 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p, 3839 p->scx.cgrp_moving_from, css->cgroup); 3840 p->scx.cgrp_moving_from = NULL; 3841 } 3842 3843 percpu_up_read(&scx_cgroup_rwsem); 3844 return ops_sanitize_err("cgroup_prep_move", ret); 3845 } 3846 3847 void scx_move_task(struct task_struct *p) 3848 { 3849 if (!scx_cgroup_enabled) 3850 return; 3851 3852 /* 3853 * We're called from sched_move_task() which handles both cgroup and 3854 * autogroup moves. Ignore the latter. 3855 * 3856 * Also ignore exiting tasks, because in the exit path tasks transition 3857 * from the autogroup to the root group, so task_group_is_autogroup() 3858 * alone isn't able to catch exiting autogroup tasks. This is safe for 3859 * cgroup_move(), because cgroup migrations never happen for PF_EXITING 3860 * tasks. 3861 */ 3862 if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING)) 3863 return; 3864 3865 /* 3866 * @p must have ops.cgroup_prep_move() called on it and thus 3867 * cgrp_moving_from set. 3868 */ 3869 if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) 3870 SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p, 3871 p->scx.cgrp_moving_from, tg_cgrp(task_group(p))); 3872 p->scx.cgrp_moving_from = NULL; 3873 } 3874 3875 void scx_cgroup_finish_attach(void) 3876 { 3877 percpu_up_read(&scx_cgroup_rwsem); 3878 } 3879 3880 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) 3881 { 3882 struct cgroup_subsys_state *css; 3883 struct task_struct *p; 3884 3885 if (!scx_cgroup_enabled) 3886 goto out_unlock; 3887 3888 cgroup_taskset_for_each(p, css, tset) { 3889 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from) 3890 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p, 3891 p->scx.cgrp_moving_from, css->cgroup); 3892 p->scx.cgrp_moving_from = NULL; 3893 } 3894 out_unlock: 3895 percpu_up_read(&scx_cgroup_rwsem); 3896 } 3897 3898 void scx_group_set_weight(struct task_group *tg, unsigned long weight) 3899 { 3900 percpu_down_read(&scx_cgroup_rwsem); 3901 3902 if (scx_cgroup_enabled && tg->scx_weight != weight) { 3903 if (SCX_HAS_OP(cgroup_set_weight)) 3904 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight, 3905 tg_cgrp(tg), weight); 3906 tg->scx_weight = weight; 3907 } 3908 3909 percpu_up_read(&scx_cgroup_rwsem); 3910 } 3911 3912 void scx_group_set_idle(struct task_group *tg, bool idle) 3913 { 3914 percpu_down_read(&scx_cgroup_rwsem); 3915 scx_cgroup_warn_missing_idle(tg); 3916 percpu_up_read(&scx_cgroup_rwsem); 3917 } 3918 3919 static void scx_cgroup_lock(void) 3920 { 3921 percpu_down_write(&scx_cgroup_rwsem); 3922 } 3923 3924 static void scx_cgroup_unlock(void) 3925 { 3926 percpu_up_write(&scx_cgroup_rwsem); 3927 } 3928 3929 #else /* CONFIG_EXT_GROUP_SCHED */ 3930 3931 static inline void scx_cgroup_lock(void) {} 3932 static inline void scx_cgroup_unlock(void) {} 3933 3934 #endif /* CONFIG_EXT_GROUP_SCHED */ 3935 3936 /* 3937 * Omitted operations: 3938 * 3939 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task 3940 * isn't tied to the CPU at that point. 
Preemption is implemented by resetting 3941 * the victim task's slice to 0 and triggering reschedule on the target CPU. 3942 * 3943 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient. 3944 * 3945 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of 3946 * their current sched_class. Call them directly from sched core instead. 3947 */ 3948 DEFINE_SCHED_CLASS(ext) = { 3949 .enqueue_task = enqueue_task_scx, 3950 .dequeue_task = dequeue_task_scx, 3951 .yield_task = yield_task_scx, 3952 .yield_to_task = yield_to_task_scx, 3953 3954 .wakeup_preempt = wakeup_preempt_scx, 3955 3956 .balance = balance_scx, 3957 .pick_task = pick_task_scx, 3958 3959 .put_prev_task = put_prev_task_scx, 3960 .set_next_task = set_next_task_scx, 3961 3962 #ifdef CONFIG_SMP 3963 .select_task_rq = select_task_rq_scx, 3964 .task_woken = task_woken_scx, 3965 .set_cpus_allowed = set_cpus_allowed_scx, 3966 3967 .rq_online = rq_online_scx, 3968 .rq_offline = rq_offline_scx, 3969 #endif 3970 3971 .task_tick = task_tick_scx, 3972 3973 .switching_to = switching_to_scx, 3974 .switched_from = switched_from_scx, 3975 .switched_to = switched_to_scx, 3976 .reweight_task = reweight_task_scx, 3977 .prio_changed = prio_changed_scx, 3978 3979 .update_curr = update_curr_scx, 3980 3981 #ifdef CONFIG_UCLAMP_TASK 3982 .uclamp_enabled = 1, 3983 #endif 3984 }; 3985 3986 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id) 3987 { 3988 memset(dsq, 0, sizeof(*dsq)); 3989 3990 raw_spin_lock_init(&dsq->lock); 3991 INIT_LIST_HEAD(&dsq->list); 3992 dsq->id = dsq_id; 3993 } 3994 3995 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node) 3996 { 3997 struct scx_dispatch_q *dsq; 3998 int ret; 3999 4000 if (dsq_id & SCX_DSQ_FLAG_BUILTIN) 4001 return ERR_PTR(-EINVAL); 4002 4003 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node); 4004 if (!dsq) 4005 return ERR_PTR(-ENOMEM); 4006 4007 init_dsq(dsq, dsq_id); 4008 4009 ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node, 4010 dsq_hash_params); 4011 if (ret) { 4012 kfree(dsq); 4013 return ERR_PTR(ret); 4014 } 4015 return dsq; 4016 } 4017 4018 static void free_dsq_irq_workfn(struct irq_work *irq_work) 4019 { 4020 struct llist_node *to_free = llist_del_all(&dsqs_to_free); 4021 struct scx_dispatch_q *dsq, *tmp_dsq; 4022 4023 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node) 4024 kfree_rcu(dsq, rcu); 4025 } 4026 4027 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn); 4028 4029 static void destroy_dsq(u64 dsq_id) 4030 { 4031 struct scx_dispatch_q *dsq; 4032 unsigned long flags; 4033 4034 rcu_read_lock(); 4035 4036 dsq = find_user_dsq(dsq_id); 4037 if (!dsq) 4038 goto out_unlock_rcu; 4039 4040 raw_spin_lock_irqsave(&dsq->lock, flags); 4041 4042 if (dsq->nr) { 4043 scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)", 4044 dsq->id, dsq->nr); 4045 goto out_unlock_dsq; 4046 } 4047 4048 if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params)) 4049 goto out_unlock_dsq; 4050 4051 /* 4052 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from 4053 * queueing more tasks. As this function can be called from anywhere, 4054 * freeing is bounced through an irq work to avoid nesting RCU 4055 * operations inside scheduler locks. 
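 * The irq work then defers the actual free with kfree_rcu() so that
 * concurrent RCU-protected lookups (e.g. find_user_dsq()) can't see a
 * freed DSQ.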
4056 */ 4057 dsq->id = SCX_DSQ_INVALID; 4058 llist_add(&dsq->free_node, &dsqs_to_free); 4059 irq_work_queue(&free_dsq_irq_work); 4060 4061 out_unlock_dsq: 4062 raw_spin_unlock_irqrestore(&dsq->lock, flags); 4063 out_unlock_rcu: 4064 rcu_read_unlock(); 4065 } 4066 4067 #ifdef CONFIG_EXT_GROUP_SCHED 4068 static void scx_cgroup_exit(void) 4069 { 4070 struct cgroup_subsys_state *css; 4071 4072 percpu_rwsem_assert_held(&scx_cgroup_rwsem); 4073 4074 scx_cgroup_enabled = false; 4075 4076 /* 4077 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk 4078 * cgroups and exit all the inited ones, all online cgroups are exited. 4079 */ 4080 rcu_read_lock(); 4081 css_for_each_descendant_post(css, &root_task_group.css) { 4082 struct task_group *tg = css_tg(css); 4083 4084 if (!(tg->scx_flags & SCX_TG_INITED)) 4085 continue; 4086 tg->scx_flags &= ~SCX_TG_INITED; 4087 4088 if (!scx_ops.cgroup_exit) 4089 continue; 4090 4091 if (WARN_ON_ONCE(!css_tryget(css))) 4092 continue; 4093 rcu_read_unlock(); 4094 4095 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup); 4096 4097 rcu_read_lock(); 4098 css_put(css); 4099 } 4100 rcu_read_unlock(); 4101 } 4102 4103 static int scx_cgroup_init(void) 4104 { 4105 struct cgroup_subsys_state *css; 4106 int ret; 4107 4108 percpu_rwsem_assert_held(&scx_cgroup_rwsem); 4109 4110 cgroup_warned_missing_weight = false; 4111 cgroup_warned_missing_idle = false; 4112 4113 /* 4114 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk 4115 * cgroups and init, all online cgroups are initialized. 4116 */ 4117 rcu_read_lock(); 4118 css_for_each_descendant_pre(css, &root_task_group.css) { 4119 struct task_group *tg = css_tg(css); 4120 struct scx_cgroup_init_args args = { .weight = tg->scx_weight }; 4121 4122 scx_cgroup_warn_missing_weight(tg); 4123 scx_cgroup_warn_missing_idle(tg); 4124 4125 if ((tg->scx_flags & 4126 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE) 4127 continue; 4128 4129 if (!scx_ops.cgroup_init) { 4130 tg->scx_flags |= SCX_TG_INITED; 4131 continue; 4132 } 4133 4134 if (WARN_ON_ONCE(!css_tryget(css))) 4135 continue; 4136 rcu_read_unlock(); 4137 4138 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init, 4139 css->cgroup, &args); 4140 if (ret) { 4141 css_put(css); 4142 scx_ops_error("ops.cgroup_init() failed (%d)", ret); 4143 return ret; 4144 } 4145 tg->scx_flags |= SCX_TG_INITED; 4146 4147 rcu_read_lock(); 4148 css_put(css); 4149 } 4150 rcu_read_unlock(); 4151 4152 WARN_ON_ONCE(scx_cgroup_enabled); 4153 scx_cgroup_enabled = true; 4154 4155 return 0; 4156 } 4157 4158 #else 4159 static void scx_cgroup_exit(void) {} 4160 static int scx_cgroup_init(void) { return 0; } 4161 #endif 4162 4163 4164 /******************************************************************************** 4165 * Sysfs interface and ops enable/disable.
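 *
 * The global attributes below (state, switch_all, nr_rejected,
 * hotplug_seq, enable_seq) are expected to show up under
 * /sys/kernel/sched_ext/, with the loaded scheduler's name exposed via
 * the per-scheduler "root" kobject's "ops" attribute.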
4166 */ 4167 4168 #define SCX_ATTR(_name) \ 4169 static struct kobj_attribute scx_attr_##_name = { \ 4170 .attr = { .name = __stringify(_name), .mode = 0444 }, \ 4171 .show = scx_attr_##_name##_show, \ 4172 } 4173 4174 static ssize_t scx_attr_state_show(struct kobject *kobj, 4175 struct kobj_attribute *ka, char *buf) 4176 { 4177 return sysfs_emit(buf, "%s\n", 4178 scx_ops_enable_state_str[scx_ops_enable_state()]); 4179 } 4180 SCX_ATTR(state); 4181 4182 static ssize_t scx_attr_switch_all_show(struct kobject *kobj, 4183 struct kobj_attribute *ka, char *buf) 4184 { 4185 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all)); 4186 } 4187 SCX_ATTR(switch_all); 4188 4189 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj, 4190 struct kobj_attribute *ka, char *buf) 4191 { 4192 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected)); 4193 } 4194 SCX_ATTR(nr_rejected); 4195 4196 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj, 4197 struct kobj_attribute *ka, char *buf) 4198 { 4199 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq)); 4200 } 4201 SCX_ATTR(hotplug_seq); 4202 4203 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj, 4204 struct kobj_attribute *ka, char *buf) 4205 { 4206 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq)); 4207 } 4208 SCX_ATTR(enable_seq); 4209 4210 static struct attribute *scx_global_attrs[] = { 4211 &scx_attr_state.attr, 4212 &scx_attr_switch_all.attr, 4213 &scx_attr_nr_rejected.attr, 4214 &scx_attr_hotplug_seq.attr, 4215 &scx_attr_enable_seq.attr, 4216 NULL, 4217 }; 4218 4219 static const struct attribute_group scx_global_attr_group = { 4220 .attrs = scx_global_attrs, 4221 }; 4222 4223 static void scx_kobj_release(struct kobject *kobj) 4224 { 4225 kfree(kobj); 4226 } 4227 4228 static ssize_t scx_attr_ops_show(struct kobject *kobj, 4229 struct kobj_attribute *ka, char *buf) 4230 { 4231 return sysfs_emit(buf, "%s\n", scx_ops.name); 4232 } 4233 SCX_ATTR(ops); 4234 4235 static struct attribute *scx_sched_attrs[] = { 4236 &scx_attr_ops.attr, 4237 NULL, 4238 }; 4239 ATTRIBUTE_GROUPS(scx_sched); 4240 4241 static const struct kobj_type scx_ktype = { 4242 .release = scx_kobj_release, 4243 .sysfs_ops = &kobj_sysfs_ops, 4244 .default_groups = scx_sched_groups, 4245 }; 4246 4247 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 4248 { 4249 return add_uevent_var(env, "SCXOPS=%s", scx_ops.name); 4250 } 4251 4252 static const struct kset_uevent_ops scx_uevent_ops = { 4253 .uevent = scx_uevent, 4254 }; 4255 4256 /* 4257 * Used by sched_fork() and __setscheduler_prio() to pick the matching 4258 * sched_class. dl/rt are already handled. 4259 */ 4260 bool task_should_scx(int policy) 4261 { 4262 if (!scx_enabled() || 4263 unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING)) 4264 return false; 4265 if (READ_ONCE(scx_switching_all)) 4266 return true; 4267 return policy == SCHED_EXT; 4268 } 4269 4270 /** 4271 * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress 4272 * 4273 * Bypassing guarantees that all runnable tasks make forward progress without 4274 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might 4275 * be held by tasks that the BPF scheduler is forgetting to run, which 4276 * unfortunately also excludes toggling the static branches. 
4277 * 4278 * Let's work around by overriding a couple ops and modifying behaviors based on 4279 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue 4280 * to force global FIFO scheduling. 4281 * 4282 * - ops.select_cpu() is ignored and the default select_cpu() is used. 4283 * 4284 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order. 4285 * %SCX_OPS_ENQ_LAST is also ignored. 4286 * 4287 * - ops.dispatch() is ignored. 4288 * 4289 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice 4290 * can't be trusted. Whenever a tick triggers, the running task is rotated to 4291 * the tail of the queue with core_sched_at touched. 4292 * 4293 * - pick_next_task() suppresses zero slice warning. 4294 * 4295 * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM 4296 * operations. 4297 * 4298 * - scx_prio_less() reverts to the default core_sched_at order. 4299 */ 4300 static void scx_ops_bypass(bool bypass) 4301 { 4302 int cpu; 4303 unsigned long flags; 4304 4305 raw_spin_lock_irqsave(&__scx_ops_bypass_lock, flags); 4306 if (bypass) { 4307 scx_ops_bypass_depth++; 4308 WARN_ON_ONCE(scx_ops_bypass_depth <= 0); 4309 if (scx_ops_bypass_depth != 1) 4310 goto unlock; 4311 } else { 4312 scx_ops_bypass_depth--; 4313 WARN_ON_ONCE(scx_ops_bypass_depth < 0); 4314 if (scx_ops_bypass_depth != 0) 4315 goto unlock; 4316 } 4317 4318 /* 4319 * No task property is changing. We just need to make sure all currently 4320 * queued tasks are re-queued according to the new scx_rq_bypassing() 4321 * state. As an optimization, walk each rq's runnable_list instead of 4322 * the scx_tasks list. 4323 * 4324 * This function can't trust the scheduler and thus can't use 4325 * cpus_read_lock(). Walk all possible CPUs instead of online. 4326 */ 4327 for_each_possible_cpu(cpu) { 4328 struct rq *rq = cpu_rq(cpu); 4329 struct rq_flags rf; 4330 struct task_struct *p, *n; 4331 4332 rq_lock(rq, &rf); 4333 4334 if (bypass) { 4335 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING); 4336 rq->scx.flags |= SCX_RQ_BYPASSING; 4337 } else { 4338 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING)); 4339 rq->scx.flags &= ~SCX_RQ_BYPASSING; 4340 } 4341 4342 /* 4343 * We need to guarantee that no tasks are on the BPF scheduler 4344 * while bypassing. Either we see enabled or the enable path 4345 * sees scx_rq_bypassing() before moving tasks to SCX. 4346 */ 4347 if (!scx_enabled()) { 4348 rq_unlock_irqrestore(rq, &rf); 4349 continue; 4350 } 4351 4352 /* 4353 * The use of list_for_each_entry_safe_reverse() is required 4354 * because each task is going to be removed from and added back 4355 * to the runnable_list during iteration. Because they're added 4356 * to the tail of the list, safe reverse iteration can still 4357 * visit all nodes. 
4358 */ 4359 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, 4360 scx.runnable_node) { 4361 struct sched_enq_and_set_ctx ctx; 4362 4363 /* cycling deq/enq is enough, see the function comment */ 4364 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 4365 sched_enq_and_set_task(&ctx); 4366 } 4367 4368 rq_unlock(rq, &rf); 4369 4370 /* resched to restore ticks and idle state */ 4371 resched_cpu(cpu); 4372 } 4373 unlock: 4374 raw_spin_unlock_irqrestore(&__scx_ops_bypass_lock, flags); 4375 } 4376 4377 static void free_exit_info(struct scx_exit_info *ei) 4378 { 4379 kfree(ei->dump); 4380 kfree(ei->msg); 4381 kfree(ei->bt); 4382 kfree(ei); 4383 } 4384 4385 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) 4386 { 4387 struct scx_exit_info *ei; 4388 4389 ei = kzalloc(sizeof(*ei), GFP_KERNEL); 4390 if (!ei) 4391 return NULL; 4392 4393 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL); 4394 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); 4395 ei->dump = kzalloc(exit_dump_len, GFP_KERNEL); 4396 4397 if (!ei->bt || !ei->msg || !ei->dump) { 4398 free_exit_info(ei); 4399 return NULL; 4400 } 4401 4402 return ei; 4403 } 4404 4405 static const char *scx_exit_reason(enum scx_exit_kind kind) 4406 { 4407 switch (kind) { 4408 case SCX_EXIT_UNREG: 4409 return "unregistered from user space"; 4410 case SCX_EXIT_UNREG_BPF: 4411 return "unregistered from BPF"; 4412 case SCX_EXIT_UNREG_KERN: 4413 return "unregistered from the main kernel"; 4414 case SCX_EXIT_SYSRQ: 4415 return "disabled by sysrq-S"; 4416 case SCX_EXIT_ERROR: 4417 return "runtime error"; 4418 case SCX_EXIT_ERROR_BPF: 4419 return "scx_bpf_error"; 4420 case SCX_EXIT_ERROR_STALL: 4421 return "runnable task stall"; 4422 default: 4423 return "<UNKNOWN>"; 4424 } 4425 } 4426 4427 static void scx_ops_disable_workfn(struct kthread_work *work) 4428 { 4429 struct scx_exit_info *ei = scx_exit_info; 4430 struct scx_task_iter sti; 4431 struct task_struct *p; 4432 struct rhashtable_iter rht_iter; 4433 struct scx_dispatch_q *dsq; 4434 int i, kind; 4435 4436 kind = atomic_read(&scx_exit_kind); 4437 while (true) { 4438 /* 4439 * NONE indicates that a new scx_ops has been registered since 4440 * disable was scheduled - don't kill the new ops. DONE 4441 * indicates that the ops has already been disabled. 4442 */ 4443 if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE) 4444 return; 4445 if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE)) 4446 break; 4447 } 4448 ei->kind = kind; 4449 ei->reason = scx_exit_reason(ei->kind); 4450 4451 /* guarantee forward progress by bypassing scx_ops */ 4452 scx_ops_bypass(true); 4453 4454 switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) { 4455 case SCX_OPS_DISABLING: 4456 WARN_ONCE(true, "sched_ext: duplicate disabling instance?"); 4457 break; 4458 case SCX_OPS_DISABLED: 4459 pr_warn("sched_ext: ops error detected without ops (%s)\n", 4460 scx_exit_info->msg); 4461 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != 4462 SCX_OPS_DISABLING); 4463 goto done; 4464 default: 4465 break; 4466 } 4467 4468 /* 4469 * Here, every runnable task is guaranteed to make forward progress and 4470 * we can safely use blocking synchronization constructs. Actually 4471 * disable ops. 4472 */ 4473 mutex_lock(&scx_ops_enable_mutex); 4474 4475 static_branch_disable(&__scx_switched_all); 4476 WRITE_ONCE(scx_switching_all, false); 4477 4478 /* 4479 * Shut down cgroup support before tasks so that the cgroup attach path 4480 * doesn't race against scx_ops_exit_task(). 
4481 */ 4482 scx_cgroup_lock(); 4483 scx_cgroup_exit(); 4484 scx_cgroup_unlock(); 4485 4486 /* 4487 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones 4488 * must be switched out and exited synchronously. 4489 */ 4490 percpu_down_write(&scx_fork_rwsem); 4491 4492 scx_ops_init_task_enabled = false; 4493 4494 scx_task_iter_start(&sti); 4495 while ((p = scx_task_iter_next_locked(&sti))) { 4496 const struct sched_class *old_class = p->sched_class; 4497 const struct sched_class *new_class = 4498 __setscheduler_class(p->policy, p->prio); 4499 struct sched_enq_and_set_ctx ctx; 4500 4501 if (old_class != new_class && p->se.sched_delayed) 4502 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); 4503 4504 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 4505 4506 p->sched_class = new_class; 4507 check_class_changing(task_rq(p), p, old_class); 4508 4509 sched_enq_and_set_task(&ctx); 4510 4511 check_class_changed(task_rq(p), p, old_class, p->prio); 4512 scx_ops_exit_task(p); 4513 } 4514 scx_task_iter_stop(&sti); 4515 percpu_up_write(&scx_fork_rwsem); 4516 4517 /* no task is on scx, turn off all the switches and flush in-progress calls */ 4518 static_branch_disable(&__scx_ops_enabled); 4519 for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++) 4520 static_branch_disable(&scx_has_op[i]); 4521 static_branch_disable(&scx_ops_enq_last); 4522 static_branch_disable(&scx_ops_enq_exiting); 4523 static_branch_disable(&scx_ops_cpu_preempt); 4524 static_branch_disable(&scx_builtin_idle_enabled); 4525 synchronize_rcu(); 4526 4527 if (ei->kind >= SCX_EXIT_ERROR) { 4528 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4529 scx_ops.name, ei->reason); 4530 4531 if (ei->msg[0] != '\0') 4532 pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg); 4533 #ifdef CONFIG_STACKTRACE 4534 stack_trace_print(ei->bt, ei->bt_len, 2); 4535 #endif 4536 } else { 4537 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4538 scx_ops.name, ei->reason); 4539 } 4540 4541 if (scx_ops.exit) 4542 SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei); 4543 4544 cancel_delayed_work_sync(&scx_watchdog_work); 4545 4546 /* 4547 * Delete the kobject from the hierarchy eagerly in addition to just 4548 * dropping a reference. Otherwise, if the object is deleted 4549 * asynchronously, sysfs could observe an object of the same name still 4550 * in the hierarchy when another scheduler is loaded. 4551 */ 4552 kobject_del(scx_root_kobj); 4553 kobject_put(scx_root_kobj); 4554 scx_root_kobj = NULL; 4555 4556 memset(&scx_ops, 0, sizeof(scx_ops)); 4557 4558 rhashtable_walk_enter(&dsq_hash, &rht_iter); 4559 do { 4560 rhashtable_walk_start(&rht_iter); 4561 4562 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq)) 4563 destroy_dsq(dsq->id); 4564 4565 rhashtable_walk_stop(&rht_iter); 4566 } while (dsq == ERR_PTR(-EAGAIN)); 4567 rhashtable_walk_exit(&rht_iter); 4568 4569 free_percpu(scx_dsp_ctx); 4570 scx_dsp_ctx = NULL; 4571 scx_dsp_max_batch = 0; 4572 4573 free_exit_info(scx_exit_info); 4574 scx_exit_info = NULL; 4575 4576 mutex_unlock(&scx_ops_enable_mutex); 4577 4578 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != 4579 SCX_OPS_DISABLING); 4580 done: 4581 scx_ops_bypass(false); 4582 } 4583 4584 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn); 4585 4586 static void schedule_scx_ops_disable_work(void) 4587 { 4588 struct kthread_worker *helper = READ_ONCE(scx_ops_helper); 4589 4590 /* 4591 * We may be called spuriously before the first bpf_sched_ext_reg(). 
If 4592 * scx_ops_helper isn't set up yet, there's nothing to do. 4593 */ 4594 if (helper) 4595 kthread_queue_work(helper, &scx_ops_disable_work); 4596 } 4597 4598 static void scx_ops_disable(enum scx_exit_kind kind) 4599 { 4600 int none = SCX_EXIT_NONE; 4601 4602 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)) 4603 kind = SCX_EXIT_ERROR; 4604 4605 atomic_try_cmpxchg(&scx_exit_kind, &none, kind); 4606 4607 schedule_scx_ops_disable_work(); 4608 } 4609 4610 static void dump_newline(struct seq_buf *s) 4611 { 4612 trace_sched_ext_dump(""); 4613 4614 /* @s may be zero sized and seq_buf triggers WARN if so */ 4615 if (s->size) 4616 seq_buf_putc(s, '\n'); 4617 } 4618 4619 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...) 4620 { 4621 va_list args; 4622 4623 #ifdef CONFIG_TRACEPOINTS 4624 if (trace_sched_ext_dump_enabled()) { 4625 /* protected by scx_dump_state()::dump_lock */ 4626 static char line_buf[SCX_EXIT_MSG_LEN]; 4627 4628 va_start(args, fmt); 4629 vscnprintf(line_buf, sizeof(line_buf), fmt, args); 4630 va_end(args); 4631 4632 trace_sched_ext_dump(line_buf); 4633 } 4634 #endif 4635 /* @s may be zero sized and seq_buf triggers WARN if so */ 4636 if (s->size) { 4637 va_start(args, fmt); 4638 seq_buf_vprintf(s, fmt, args); 4639 va_end(args); 4640 4641 seq_buf_putc(s, '\n'); 4642 } 4643 } 4644 4645 static void dump_stack_trace(struct seq_buf *s, const char *prefix, 4646 const unsigned long *bt, unsigned int len) 4647 { 4648 unsigned int i; 4649 4650 for (i = 0; i < len; i++) 4651 dump_line(s, "%s%pS", prefix, (void *)bt[i]); 4652 } 4653 4654 static void ops_dump_init(struct seq_buf *s, const char *prefix) 4655 { 4656 struct scx_dump_data *dd = &scx_dump_data; 4657 4658 lockdep_assert_irqs_disabled(); 4659 4660 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */ 4661 dd->first = true; 4662 dd->cursor = 0; 4663 dd->s = s; 4664 dd->prefix = prefix; 4665 } 4666 4667 static void ops_dump_flush(void) 4668 { 4669 struct scx_dump_data *dd = &scx_dump_data; 4670 char *line = dd->buf.line; 4671 4672 if (!dd->cursor) 4673 return; 4674 4675 /* 4676 * There's something to flush and this is the first line. Insert a blank 4677 * line to distinguish ops dump. 4678 */ 4679 if (dd->first) { 4680 dump_newline(dd->s); 4681 dd->first = false; 4682 } 4683 4684 /* 4685 * There may be multiple lines in $line. Scan and emit each line 4686 * separately. 4687 */ 4688 while (true) { 4689 char *end = line; 4690 char c; 4691 4692 while (*end != '\n' && *end != '\0') 4693 end++; 4694 4695 /* 4696 * If $line overflowed, it may not have newline at the end. 4697 * Always emit with a newline. 
4698 */ 4699 c = *end; 4700 *end = '\0'; 4701 dump_line(dd->s, "%s%s", dd->prefix, line); 4702 if (c == '\0') 4703 break; 4704 4705 /* move to the next line */ 4706 end++; 4707 if (*end == '\0') 4708 break; 4709 line = end; 4710 } 4711 4712 dd->cursor = 0; 4713 } 4714 4715 static void ops_dump_exit(void) 4716 { 4717 ops_dump_flush(); 4718 scx_dump_data.cpu = -1; 4719 } 4720 4721 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, 4722 struct task_struct *p, char marker) 4723 { 4724 static unsigned long bt[SCX_EXIT_BT_LEN]; 4725 char dsq_id_buf[19] = "(n/a)"; 4726 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); 4727 unsigned int bt_len = 0; 4728 4729 if (p->scx.dsq) 4730 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx", 4731 (unsigned long long)p->scx.dsq->id); 4732 4733 dump_newline(s); 4734 dump_line(s, " %c%c %s[%d] %+ldms", 4735 marker, task_state_to_char(p), p->comm, p->pid, 4736 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); 4737 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu", 4738 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, 4739 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, 4740 ops_state >> SCX_OPSS_QSEQ_SHIFT); 4741 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu", 4742 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf, 4743 p->scx.dsq_vtime); 4744 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr)); 4745 4746 if (SCX_HAS_OP(dump_task)) { 4747 ops_dump_init(s, " "); 4748 SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p); 4749 ops_dump_exit(); 4750 } 4751 4752 #ifdef CONFIG_STACKTRACE 4753 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); 4754 #endif 4755 if (bt_len) { 4756 dump_newline(s); 4757 dump_stack_trace(s, " ", bt, bt_len); 4758 } 4759 } 4760 4761 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len) 4762 { 4763 static DEFINE_SPINLOCK(dump_lock); 4764 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n"; 4765 struct scx_dump_ctx dctx = { 4766 .kind = ei->kind, 4767 .exit_code = ei->exit_code, 4768 .reason = ei->reason, 4769 .at_ns = ktime_get_ns(), 4770 .at_jiffies = jiffies, 4771 }; 4772 struct seq_buf s; 4773 unsigned long flags; 4774 char *buf; 4775 int cpu; 4776 4777 spin_lock_irqsave(&dump_lock, flags); 4778 4779 seq_buf_init(&s, ei->dump, dump_len); 4780 4781 if (ei->kind == SCX_EXIT_NONE) { 4782 dump_line(&s, "Debug dump triggered by %s", ei->reason); 4783 } else { 4784 dump_line(&s, "%s[%d] triggered exit kind %d:", 4785 current->comm, current->pid, ei->kind); 4786 dump_line(&s, " %s (%s)", ei->reason, ei->msg); 4787 dump_newline(&s); 4788 dump_line(&s, "Backtrace:"); 4789 dump_stack_trace(&s, " ", ei->bt, ei->bt_len); 4790 } 4791 4792 if (SCX_HAS_OP(dump)) { 4793 ops_dump_init(&s, ""); 4794 SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx); 4795 ops_dump_exit(); 4796 } 4797 4798 dump_newline(&s); 4799 dump_line(&s, "CPU states"); 4800 dump_line(&s, "----------"); 4801 4802 for_each_possible_cpu(cpu) { 4803 struct rq *rq = cpu_rq(cpu); 4804 struct rq_flags rf; 4805 struct task_struct *p; 4806 struct seq_buf ns; 4807 size_t avail, used; 4808 bool idle; 4809 4810 rq_lock(rq, &rf); 4811 4812 idle = list_empty(&rq->scx.runnable_list) && 4813 rq->curr->sched_class == &idle_sched_class; 4814 4815 if (idle && !SCX_HAS_OP(dump_cpu)) 4816 goto next; 4817 4818 /* 4819 * We don't yet know whether ops.dump_cpu() will produce output 4820 * and we may want to skip the default CPU dump if it doesn't. 
4821 * Use a nested seq_buf to generate the standard dump so that we 4822 * can decide whether to commit later. 4823 */ 4824 avail = seq_buf_get_buf(&s, &buf); 4825 seq_buf_init(&ns, buf, avail); 4826 4827 dump_newline(&ns); 4828 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu", 4829 cpu, rq->scx.nr_running, rq->scx.flags, 4830 rq->scx.cpu_released, rq->scx.ops_qseq, 4831 rq->scx.pnt_seq); 4832 dump_line(&ns, " curr=%s[%d] class=%ps", 4833 rq->curr->comm, rq->curr->pid, 4834 rq->curr->sched_class); 4835 if (!cpumask_empty(rq->scx.cpus_to_kick)) 4836 dump_line(&ns, " cpus_to_kick : %*pb", 4837 cpumask_pr_args(rq->scx.cpus_to_kick)); 4838 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle)) 4839 dump_line(&ns, " idle_to_kick : %*pb", 4840 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle)); 4841 if (!cpumask_empty(rq->scx.cpus_to_preempt)) 4842 dump_line(&ns, " cpus_to_preempt: %*pb", 4843 cpumask_pr_args(rq->scx.cpus_to_preempt)); 4844 if (!cpumask_empty(rq->scx.cpus_to_wait)) 4845 dump_line(&ns, " cpus_to_wait : %*pb", 4846 cpumask_pr_args(rq->scx.cpus_to_wait)); 4847 4848 used = seq_buf_used(&ns); 4849 if (SCX_HAS_OP(dump_cpu)) { 4850 ops_dump_init(&ns, " "); 4851 SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle); 4852 ops_dump_exit(); 4853 } 4854 4855 /* 4856 * If idle && nothing generated by ops.dump_cpu(), there's 4857 * nothing interesting. Skip. 4858 */ 4859 if (idle && used == seq_buf_used(&ns)) 4860 goto next; 4861 4862 /* 4863 * $s may already have overflowed when $ns was created. If so, 4864 * calling commit on it will trigger BUG. 4865 */ 4866 if (avail) { 4867 seq_buf_commit(&s, seq_buf_used(&ns)); 4868 if (seq_buf_has_overflowed(&ns)) 4869 seq_buf_set_overflow(&s); 4870 } 4871 4872 if (rq->curr->sched_class == &ext_sched_class) 4873 scx_dump_task(&s, &dctx, rq->curr, '*'); 4874 4875 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) 4876 scx_dump_task(&s, &dctx, p, ' '); 4877 next: 4878 rq_unlock(rq, &rf); 4879 } 4880 4881 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker)) 4882 memcpy(ei->dump + dump_len - sizeof(trunc_marker), 4883 trunc_marker, sizeof(trunc_marker)); 4884 4885 spin_unlock_irqrestore(&dump_lock, flags); 4886 } 4887 4888 static void scx_ops_error_irq_workfn(struct irq_work *irq_work) 4889 { 4890 struct scx_exit_info *ei = scx_exit_info; 4891 4892 if (ei->kind >= SCX_EXIT_ERROR) 4893 scx_dump_state(ei, scx_ops.exit_dump_len); 4894 4895 schedule_scx_ops_disable_work(); 4896 } 4897 4898 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn); 4899 4900 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind, 4901 s64 exit_code, 4902 const char *fmt, ...) 4903 { 4904 struct scx_exit_info *ei = scx_exit_info; 4905 int none = SCX_EXIT_NONE; 4906 va_list args; 4907 4908 if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind)) 4909 return; 4910 4911 ei->exit_code = exit_code; 4912 #ifdef CONFIG_STACKTRACE 4913 if (kind >= SCX_EXIT_ERROR) 4914 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1); 4915 #endif 4916 va_start(args, fmt); 4917 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args); 4918 va_end(args); 4919 4920 /* 4921 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again 4922 * in scx_ops_disable_workfn(). 
4923 */ 4924 ei->kind = kind; 4925 ei->reason = scx_exit_reason(ei->kind); 4926 4927 irq_work_queue(&scx_ops_error_irq_work); 4928 } 4929 4930 static struct kthread_worker *scx_create_rt_helper(const char *name) 4931 { 4932 struct kthread_worker *helper; 4933 4934 helper = kthread_create_worker(0, name); 4935 if (helper) 4936 sched_set_fifo(helper->task); 4937 return helper; 4938 } 4939 4940 static void check_hotplug_seq(const struct sched_ext_ops *ops) 4941 { 4942 unsigned long long global_hotplug_seq; 4943 4944 /* 4945 * If a hotplug event has occurred between when a scheduler was 4946 * initialized, and when we were able to attach, exit and notify user 4947 * space about it. 4948 */ 4949 if (ops->hotplug_seq) { 4950 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq); 4951 if (ops->hotplug_seq != global_hotplug_seq) { 4952 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 4953 "expected hotplug seq %llu did not match actual %llu", 4954 ops->hotplug_seq, global_hotplug_seq); 4955 } 4956 } 4957 } 4958 4959 static int validate_ops(const struct sched_ext_ops *ops) 4960 { 4961 /* 4962 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the 4963 * ops.enqueue() callback isn't implemented. 4964 */ 4965 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) { 4966 scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); 4967 return -EINVAL; 4968 } 4969 4970 return 0; 4971 } 4972 4973 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link) 4974 { 4975 struct scx_task_iter sti; 4976 struct task_struct *p; 4977 unsigned long timeout; 4978 int i, cpu, node, ret; 4979 4980 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN), 4981 cpu_possible_mask)) { 4982 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation"); 4983 return -EINVAL; 4984 } 4985 4986 mutex_lock(&scx_ops_enable_mutex); 4987 4988 if (!scx_ops_helper) { 4989 WRITE_ONCE(scx_ops_helper, 4990 scx_create_rt_helper("sched_ext_ops_helper")); 4991 if (!scx_ops_helper) { 4992 ret = -ENOMEM; 4993 goto err_unlock; 4994 } 4995 } 4996 4997 if (!global_dsqs) { 4998 struct scx_dispatch_q **dsqs; 4999 5000 dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL); 5001 if (!dsqs) { 5002 ret = -ENOMEM; 5003 goto err_unlock; 5004 } 5005 5006 for_each_node_state(node, N_POSSIBLE) { 5007 struct scx_dispatch_q *dsq; 5008 5009 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node); 5010 if (!dsq) { 5011 for_each_node_state(node, N_POSSIBLE) 5012 kfree(dsqs[node]); 5013 kfree(dsqs); 5014 ret = -ENOMEM; 5015 goto err_unlock; 5016 } 5017 5018 init_dsq(dsq, SCX_DSQ_GLOBAL); 5019 dsqs[node] = dsq; 5020 } 5021 5022 global_dsqs = dsqs; 5023 } 5024 5025 if (scx_ops_enable_state() != SCX_OPS_DISABLED) { 5026 ret = -EBUSY; 5027 goto err_unlock; 5028 } 5029 5030 scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL); 5031 if (!scx_root_kobj) { 5032 ret = -ENOMEM; 5033 goto err_unlock; 5034 } 5035 5036 scx_root_kobj->kset = scx_kset; 5037 ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root"); 5038 if (ret < 0) 5039 goto err; 5040 5041 scx_exit_info = alloc_exit_info(ops->exit_dump_len); 5042 if (!scx_exit_info) { 5043 ret = -ENOMEM; 5044 goto err_del; 5045 } 5046 5047 /* 5048 * Set scx_ops, transition to ENABLING and clear exit info to arm the 5049 * disable path. Failure triggers full disabling from here on. 
5050 */ 5051 scx_ops = *ops; 5052 5053 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) != 5054 SCX_OPS_DISABLED); 5055 5056 atomic_set(&scx_exit_kind, SCX_EXIT_NONE); 5057 scx_warned_zero_slice = false; 5058 5059 atomic_long_set(&scx_nr_rejected, 0); 5060 5061 for_each_possible_cpu(cpu) 5062 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE; 5063 5064 /* 5065 * Keep CPUs stable during enable so that the BPF scheduler can track 5066 * online CPUs by watching ->on/offline_cpu() after ->init(). 5067 */ 5068 cpus_read_lock(); 5069 5070 if (scx_ops.init) { 5071 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init); 5072 if (ret) { 5073 ret = ops_sanitize_err("init", ret); 5074 cpus_read_unlock(); 5075 scx_ops_error("ops.init() failed (%d)", ret); 5076 goto err_disable; 5077 } 5078 } 5079 5080 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++) 5081 if (((void (**)(void))ops)[i]) 5082 static_branch_enable_cpuslocked(&scx_has_op[i]); 5083 5084 check_hotplug_seq(ops); 5085 cpus_read_unlock(); 5086 5087 ret = validate_ops(ops); 5088 if (ret) 5089 goto err_disable; 5090 5091 WARN_ON_ONCE(scx_dsp_ctx); 5092 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH; 5093 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf, 5094 scx_dsp_max_batch), 5095 __alignof__(struct scx_dsp_ctx)); 5096 if (!scx_dsp_ctx) { 5097 ret = -ENOMEM; 5098 goto err_disable; 5099 } 5100 5101 if (ops->timeout_ms) 5102 timeout = msecs_to_jiffies(ops->timeout_ms); 5103 else 5104 timeout = SCX_WATCHDOG_MAX_TIMEOUT; 5105 5106 WRITE_ONCE(scx_watchdog_timeout, timeout); 5107 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 5108 queue_delayed_work(system_unbound_wq, &scx_watchdog_work, 5109 scx_watchdog_timeout / 2); 5110 5111 /* 5112 * Once __scx_ops_enabled is set, %current can be switched to SCX 5113 * anytime. This can lead to stalls as some BPF schedulers (e.g. 5114 * userspace scheduling) may not function correctly before all tasks are 5115 * switched. Init in bypass mode to guarantee forward progress. 5116 */ 5117 scx_ops_bypass(true); 5118 5119 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++) 5120 if (((void (**)(void))ops)[i]) 5121 static_branch_enable(&scx_has_op[i]); 5122 5123 if (ops->flags & SCX_OPS_ENQ_LAST) 5124 static_branch_enable(&scx_ops_enq_last); 5125 5126 if (ops->flags & SCX_OPS_ENQ_EXITING) 5127 static_branch_enable(&scx_ops_enq_exiting); 5128 if (scx_ops.cpu_acquire || scx_ops.cpu_release) 5129 static_branch_enable(&scx_ops_cpu_preempt); 5130 5131 if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) { 5132 reset_idle_masks(); 5133 static_branch_enable(&scx_builtin_idle_enabled); 5134 } else { 5135 static_branch_disable(&scx_builtin_idle_enabled); 5136 } 5137 5138 /* 5139 * Lock out forks, cgroup on/offlining and moves before opening the 5140 * floodgate so that they don't wander into the operations prematurely. 5141 */ 5142 percpu_down_write(&scx_fork_rwsem); 5143 5144 WARN_ON_ONCE(scx_ops_init_task_enabled); 5145 scx_ops_init_task_enabled = true; 5146 5147 /* 5148 * Enable ops for every task. Fork is excluded by scx_fork_rwsem 5149 * preventing new tasks from being added. No need to exclude tasks 5150 * leaving as sched_ext_free() can handle both prepped and enabled 5151 * tasks. Prep all tasks first and then enable them with preemption 5152 * disabled. 5153 * 5154 * All cgroups should be initialized before scx_ops_init_task() so that 5155 * the BPF scheduler can reliably track each task's cgroup membership 5156 * from scx_ops_init_task(). 
Lock out cgroup on/offlining and task 5157 * migrations while tasks are being initialized so that 5158 * scx_cgroup_can_attach() never sees uninitialized tasks. 5159 */ 5160 scx_cgroup_lock(); 5161 ret = scx_cgroup_init(); 5162 if (ret) 5163 goto err_disable_unlock_all; 5164 5165 scx_task_iter_start(&sti); 5166 while ((p = scx_task_iter_next_locked(&sti))) { 5167 /* 5168 * @p may already be dead, have lost all its usage counts and 5169 * be waiting for RCU grace period before being freed. @p can't 5170 * be initialized for SCX in such cases and should be ignored. 5171 */ 5172 if (!tryget_task_struct(p)) 5173 continue; 5174 5175 scx_task_iter_unlock(&sti); 5176 5177 ret = scx_ops_init_task(p, task_group(p), false); 5178 if (ret) { 5179 put_task_struct(p); 5180 scx_task_iter_relock(&sti); 5181 scx_task_iter_stop(&sti); 5182 scx_ops_error("ops.init_task() failed (%d) for %s[%d]", 5183 ret, p->comm, p->pid); 5184 goto err_disable_unlock_all; 5185 } 5186 5187 scx_set_task_state(p, SCX_TASK_READY); 5188 5189 put_task_struct(p); 5190 scx_task_iter_relock(&sti); 5191 } 5192 scx_task_iter_stop(&sti); 5193 scx_cgroup_unlock(); 5194 percpu_up_write(&scx_fork_rwsem); 5195 5196 /* 5197 * All tasks are READY. It's safe to turn on scx_enabled() and switch 5198 * all eligible tasks. 5199 */ 5200 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL)); 5201 static_branch_enable(&__scx_ops_enabled); 5202 5203 /* 5204 * We're fully committed and can't fail. The task READY -> ENABLED 5205 * transitions here are synchronized against sched_ext_free() through 5206 * scx_tasks_lock. 5207 */ 5208 percpu_down_write(&scx_fork_rwsem); 5209 scx_task_iter_start(&sti); 5210 while ((p = scx_task_iter_next_locked(&sti))) { 5211 const struct sched_class *old_class = p->sched_class; 5212 const struct sched_class *new_class = 5213 __setscheduler_class(p->policy, p->prio); 5214 struct sched_enq_and_set_ctx ctx; 5215 5216 if (old_class != new_class && p->se.sched_delayed) 5217 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); 5218 5219 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 5220 5221 p->scx.slice = SCX_SLICE_DFL; 5222 p->sched_class = new_class; 5223 check_class_changing(task_rq(p), p, old_class); 5224 5225 sched_enq_and_set_task(&ctx); 5226 5227 check_class_changed(task_rq(p), p, old_class, p->prio); 5228 } 5229 scx_task_iter_stop(&sti); 5230 percpu_up_write(&scx_fork_rwsem); 5231 5232 scx_ops_bypass(false); 5233 5234 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) { 5235 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE); 5236 goto err_disable; 5237 } 5238 5239 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL)) 5240 static_branch_enable(&__scx_switched_all); 5241 5242 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n", 5243 scx_ops.name, scx_switched_all() ?
"" : " (partial)"); 5244 kobject_uevent(scx_root_kobj, KOBJ_ADD); 5245 mutex_unlock(&scx_ops_enable_mutex); 5246 5247 atomic_long_inc(&scx_enable_seq); 5248 5249 return 0; 5250 5251 err_del: 5252 kobject_del(scx_root_kobj); 5253 err: 5254 kobject_put(scx_root_kobj); 5255 scx_root_kobj = NULL; 5256 if (scx_exit_info) { 5257 free_exit_info(scx_exit_info); 5258 scx_exit_info = NULL; 5259 } 5260 err_unlock: 5261 mutex_unlock(&scx_ops_enable_mutex); 5262 return ret; 5263 5264 err_disable_unlock_all: 5265 scx_cgroup_unlock(); 5266 percpu_up_write(&scx_fork_rwsem); 5267 scx_ops_bypass(false); 5268 err_disable: 5269 mutex_unlock(&scx_ops_enable_mutex); 5270 /* 5271 * Returning an error code here would not pass all the error information 5272 * to userspace. Record errno using scx_ops_error() for cases 5273 * scx_ops_error() wasn't already invoked and exit indicating success so 5274 * that the error is notified through ops.exit() with all the details. 5275 * 5276 * Flush scx_ops_disable_work to ensure that error is reported before 5277 * init completion. 5278 */ 5279 scx_ops_error("scx_ops_enable() failed (%d)", ret); 5280 kthread_flush_work(&scx_ops_disable_work); 5281 return 0; 5282 } 5283 5284 5285 /******************************************************************************** 5286 * bpf_struct_ops plumbing. 5287 */ 5288 #include <linux/bpf_verifier.h> 5289 #include <linux/bpf.h> 5290 #include <linux/btf.h> 5291 5292 extern struct btf *btf_vmlinux; 5293 static const struct btf_type *task_struct_type; 5294 static u32 task_struct_type_id; 5295 5296 static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size, 5297 enum bpf_access_type type, 5298 const struct bpf_prog *prog, 5299 struct bpf_insn_access_aux *info) 5300 { 5301 struct btf *btf = bpf_get_btf_vmlinux(); 5302 const struct bpf_struct_ops_desc *st_ops_desc; 5303 const struct btf_member *member; 5304 const struct btf_type *t; 5305 u32 btf_id, member_idx; 5306 const char *mname; 5307 5308 /* struct_ops op args are all sequential, 64-bit numbers */ 5309 if (off != arg_n * sizeof(__u64)) 5310 return false; 5311 5312 /* btf_id should be the type id of struct sched_ext_ops */ 5313 btf_id = prog->aux->attach_btf_id; 5314 st_ops_desc = bpf_struct_ops_find(btf, btf_id); 5315 if (!st_ops_desc) 5316 return false; 5317 5318 /* BTF type of struct sched_ext_ops */ 5319 t = st_ops_desc->type; 5320 5321 member_idx = prog->expected_attach_type; 5322 if (member_idx >= btf_type_vlen(t)) 5323 return false; 5324 5325 /* 5326 * Get the member name of this struct_ops program, which corresponds to 5327 * a field in struct sched_ext_ops. For example, the member name of the 5328 * dispatch struct_ops program (callback) is "dispatch". 5329 */ 5330 member = &btf_type_member(t)[member_idx]; 5331 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 5332 5333 if (!strcmp(mname, op)) { 5334 /* 5335 * The value is a pointer to a type (struct task_struct) given 5336 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED), 5337 * however, can be a NULL (PTR_MAYBE_NULL). The BPF program 5338 * should check the pointer to make sure it is not NULL before 5339 * using it, or the verifier will reject the program. 5340 * 5341 * Longer term, this is something that should be addressed by 5342 * BTF, and be fully contained within the verifier. 
5343 */ 5344 info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED; 5345 info->btf = btf_vmlinux; 5346 info->btf_id = task_struct_type_id; 5347 5348 return true; 5349 } 5350 5351 return false; 5352 } 5353 5354 static bool bpf_scx_is_valid_access(int off, int size, 5355 enum bpf_access_type type, 5356 const struct bpf_prog *prog, 5357 struct bpf_insn_access_aux *info) 5358 { 5359 if (type != BPF_READ) 5360 return false; 5361 if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) || 5362 set_arg_maybe_null("yield", 1, off, size, type, prog, info)) 5363 return true; 5364 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 5365 return false; 5366 if (off % size != 0) 5367 return false; 5368 5369 return btf_ctx_access(off, size, type, prog, info); 5370 } 5371 5372 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, 5373 const struct bpf_reg_state *reg, int off, 5374 int size) 5375 { 5376 const struct btf_type *t; 5377 5378 t = btf_type_by_id(reg->btf, reg->btf_id); 5379 if (t == task_struct_type) { 5380 if (off >= offsetof(struct task_struct, scx.slice) && 5381 off + size <= offsetofend(struct task_struct, scx.slice)) 5382 return SCALAR_VALUE; 5383 if (off >= offsetof(struct task_struct, scx.dsq_vtime) && 5384 off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) 5385 return SCALAR_VALUE; 5386 if (off >= offsetof(struct task_struct, scx.disallow) && 5387 off + size <= offsetofend(struct task_struct, scx.disallow)) 5388 return SCALAR_VALUE; 5389 } 5390 5391 return -EACCES; 5392 } 5393 5394 static const struct bpf_func_proto * 5395 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5396 { 5397 switch (func_id) { 5398 case BPF_FUNC_task_storage_get: 5399 return &bpf_task_storage_get_proto; 5400 case BPF_FUNC_task_storage_delete: 5401 return &bpf_task_storage_delete_proto; 5402 default: 5403 return bpf_base_func_proto(func_id, prog); 5404 } 5405 } 5406 5407 static const struct bpf_verifier_ops bpf_scx_verifier_ops = { 5408 .get_func_proto = bpf_scx_get_func_proto, 5409 .is_valid_access = bpf_scx_is_valid_access, 5410 .btf_struct_access = bpf_scx_btf_struct_access, 5411 }; 5412 5413 static int bpf_scx_init_member(const struct btf_type *t, 5414 const struct btf_member *member, 5415 void *kdata, const void *udata) 5416 { 5417 const struct sched_ext_ops *uops = udata; 5418 struct sched_ext_ops *ops = kdata; 5419 u32 moff = __btf_member_bit_offset(t, member) / 8; 5420 int ret; 5421 5422 switch (moff) { 5423 case offsetof(struct sched_ext_ops, dispatch_max_batch): 5424 if (*(u32 *)(udata + moff) > INT_MAX) 5425 return -E2BIG; 5426 ops->dispatch_max_batch = *(u32 *)(udata + moff); 5427 return 1; 5428 case offsetof(struct sched_ext_ops, flags): 5429 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS) 5430 return -EINVAL; 5431 ops->flags = *(u64 *)(udata + moff); 5432 return 1; 5433 case offsetof(struct sched_ext_ops, name): 5434 ret = bpf_obj_name_cpy(ops->name, uops->name, 5435 sizeof(ops->name)); 5436 if (ret < 0) 5437 return ret; 5438 if (ret == 0) 5439 return -EINVAL; 5440 return 1; 5441 case offsetof(struct sched_ext_ops, timeout_ms): 5442 if (msecs_to_jiffies(*(u32 *)(udata + moff)) > 5443 SCX_WATCHDOG_MAX_TIMEOUT) 5444 return -E2BIG; 5445 ops->timeout_ms = *(u32 *)(udata + moff); 5446 return 1; 5447 case offsetof(struct sched_ext_ops, exit_dump_len): 5448 ops->exit_dump_len = 5449 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN; 5450 return 1; 5451 case offsetof(struct sched_ext_ops, hotplug_seq): 5452 ops->hotplug_seq = *(u64 
*)(udata + moff); 5453 return 1; 5454 } 5455 5456 return 0; 5457 } 5458 5459 static int bpf_scx_check_member(const struct btf_type *t, 5460 const struct btf_member *member, 5461 const struct bpf_prog *prog) 5462 { 5463 u32 moff = __btf_member_bit_offset(t, member) / 8; 5464 5465 switch (moff) { 5466 case offsetof(struct sched_ext_ops, init_task): 5467 #ifdef CONFIG_EXT_GROUP_SCHED 5468 case offsetof(struct sched_ext_ops, cgroup_init): 5469 case offsetof(struct sched_ext_ops, cgroup_exit): 5470 case offsetof(struct sched_ext_ops, cgroup_prep_move): 5471 #endif 5472 case offsetof(struct sched_ext_ops, cpu_online): 5473 case offsetof(struct sched_ext_ops, cpu_offline): 5474 case offsetof(struct sched_ext_ops, init): 5475 case offsetof(struct sched_ext_ops, exit): 5476 break; 5477 default: 5478 if (prog->sleepable) 5479 return -EINVAL; 5480 } 5481 5482 return 0; 5483 } 5484 5485 static int bpf_scx_reg(void *kdata, struct bpf_link *link) 5486 { 5487 return scx_ops_enable(kdata, link); 5488 } 5489 5490 static void bpf_scx_unreg(void *kdata, struct bpf_link *link) 5491 { 5492 scx_ops_disable(SCX_EXIT_UNREG); 5493 kthread_flush_work(&scx_ops_disable_work); 5494 } 5495 5496 static int bpf_scx_init(struct btf *btf) 5497 { 5498 s32 type_id; 5499 5500 type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT); 5501 if (type_id < 0) 5502 return -EINVAL; 5503 task_struct_type = btf_type_by_id(btf, type_id); 5504 task_struct_type_id = type_id; 5505 5506 return 0; 5507 } 5508 5509 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link) 5510 { 5511 /* 5512 * sched_ext does not support updating the actively-loaded BPF 5513 * scheduler, as registering a BPF scheduler can always fail if the 5514 * scheduler returns an error code for e.g. ops.init(), ops.init_task(), 5515 * etc. Similarly, we can always race with unregistration happening 5516 * elsewhere, such as with sysrq. 
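 * Swapping schedulers therefore requires user space to unregister the
 * current one, reverting tasks to the regular sched classes in the
 * interim, and then register the new one.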
5517 */ 5518 return -EOPNOTSUPP; 5519 } 5520 5521 static int bpf_scx_validate(void *kdata) 5522 { 5523 return 0; 5524 } 5525 5526 static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; } 5527 static void enqueue_stub(struct task_struct *p, u64 enq_flags) {} 5528 static void dequeue_stub(struct task_struct *p, u64 enq_flags) {} 5529 static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {} 5530 static void tick_stub(struct task_struct *p) {} 5531 static void runnable_stub(struct task_struct *p, u64 enq_flags) {} 5532 static void running_stub(struct task_struct *p) {} 5533 static void stopping_stub(struct task_struct *p, bool runnable) {} 5534 static void quiescent_stub(struct task_struct *p, u64 deq_flags) {} 5535 static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; } 5536 static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; } 5537 static void set_weight_stub(struct task_struct *p, u32 weight) {} 5538 static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {} 5539 static void update_idle_stub(s32 cpu, bool idle) {} 5540 static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {} 5541 static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {} 5542 static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; } 5543 static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {} 5544 static void enable_stub(struct task_struct *p) {} 5545 static void disable_stub(struct task_struct *p) {} 5546 #ifdef CONFIG_EXT_GROUP_SCHED 5547 static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; } 5548 static void cgroup_exit_stub(struct cgroup *cgrp) {} 5549 static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; } 5550 static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} 5551 static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} 5552 static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {} 5553 #endif 5554 static void cpu_online_stub(s32 cpu) {} 5555 static void cpu_offline_stub(s32 cpu) {} 5556 static s32 init_stub(void) { return -EINVAL; } 5557 static void exit_stub(struct scx_exit_info *info) {} 5558 static void dump_stub(struct scx_dump_ctx *ctx) {} 5559 static void dump_cpu_stub(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {} 5560 static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {} 5561 5562 static struct sched_ext_ops __bpf_ops_sched_ext_ops = { 5563 .select_cpu = select_cpu_stub, 5564 .enqueue = enqueue_stub, 5565 .dequeue = dequeue_stub, 5566 .dispatch = dispatch_stub, 5567 .tick = tick_stub, 5568 .runnable = runnable_stub, 5569 .running = running_stub, 5570 .stopping = stopping_stub, 5571 .quiescent = quiescent_stub, 5572 .yield = yield_stub, 5573 .core_sched_before = core_sched_before_stub, 5574 .set_weight = set_weight_stub, 5575 .set_cpumask = set_cpumask_stub, 5576 .update_idle = update_idle_stub, 5577 .cpu_acquire = cpu_acquire_stub, 5578 .cpu_release = cpu_release_stub, 5579 .init_task = init_task_stub, 5580 .exit_task = exit_task_stub, 5581 .enable = enable_stub, 5582 .disable = disable_stub, 5583 #ifdef CONFIG_EXT_GROUP_SCHED 5584 .cgroup_init = cgroup_init_stub, 5585 .cgroup_exit = cgroup_exit_stub, 5586 
.cgroup_prep_move = cgroup_prep_move_stub, 5587 .cgroup_move = cgroup_move_stub, 5588 .cgroup_cancel_move = cgroup_cancel_move_stub, 5589 .cgroup_set_weight = cgroup_set_weight_stub, 5590 #endif 5591 .cpu_online = cpu_online_stub, 5592 .cpu_offline = cpu_offline_stub, 5593 .init = init_stub, 5594 .exit = exit_stub, 5595 .dump = dump_stub, 5596 .dump_cpu = dump_cpu_stub, 5597 .dump_task = dump_task_stub, 5598 }; 5599 5600 static struct bpf_struct_ops bpf_sched_ext_ops = { 5601 .verifier_ops = &bpf_scx_verifier_ops, 5602 .reg = bpf_scx_reg, 5603 .unreg = bpf_scx_unreg, 5604 .check_member = bpf_scx_check_member, 5605 .init_member = bpf_scx_init_member, 5606 .init = bpf_scx_init, 5607 .update = bpf_scx_update, 5608 .validate = bpf_scx_validate, 5609 .name = "sched_ext_ops", 5610 .owner = THIS_MODULE, 5611 .cfi_stubs = &__bpf_ops_sched_ext_ops 5612 }; 5613 5614 5615 /******************************************************************************** 5616 * System integration and init. 5617 */ 5618 5619 static void sysrq_handle_sched_ext_reset(u8 key) 5620 { 5621 if (scx_ops_helper) 5622 scx_ops_disable(SCX_EXIT_SYSRQ); 5623 else 5624 pr_info("sched_ext: BPF scheduler not yet used\n"); 5625 } 5626 5627 static const struct sysrq_key_op sysrq_sched_ext_reset_op = { 5628 .handler = sysrq_handle_sched_ext_reset, 5629 .help_msg = "reset-sched-ext(S)", 5630 .action_msg = "Disable sched_ext and revert all tasks to CFS", 5631 .enable_mask = SYSRQ_ENABLE_RTNICE, 5632 }; 5633 5634 static void sysrq_handle_sched_ext_dump(u8 key) 5635 { 5636 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" }; 5637 5638 if (scx_enabled()) 5639 scx_dump_state(&ei, 0); 5640 } 5641 5642 static const struct sysrq_key_op sysrq_sched_ext_dump_op = { 5643 .handler = sysrq_handle_sched_ext_dump, 5644 .help_msg = "dump-sched-ext(D)", 5645 .action_msg = "Trigger sched_ext debug dump", 5646 .enable_mask = SYSRQ_ENABLE_RTNICE, 5647 }; 5648 5649 static bool can_skip_idle_kick(struct rq *rq) 5650 { 5651 lockdep_assert_rq_held(rq); 5652 5653 /* 5654 * We can skip idle kicking if @rq is going to go through at least one 5655 * full SCX scheduling cycle before going idle. Just checking whether 5656 * curr is not idle is insufficient because we could be racing 5657 * balance_one() trying to pull the next task from a remote rq, which 5658 * may fail, and @rq may become idle afterwards. 5659 * 5660 * The race window is small and we don't and can't guarantee that @rq is 5661 * only kicked while idle anyway. Skip only when sure. 5662 */ 5663 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE); 5664 } 5665 5666 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) 5667 { 5668 struct rq *rq = cpu_rq(cpu); 5669 struct scx_rq *this_scx = &this_rq->scx; 5670 bool should_wait = false; 5671 unsigned long flags; 5672 5673 raw_spin_rq_lock_irqsave(rq, flags); 5674 5675 /* 5676 * During CPU hotplug, a CPU may depend on kicking itself to make 5677 * forward progress. Allow kicking self regardless of online state. 
5678 */ 5679 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) { 5680 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) { 5681 if (rq->curr->sched_class == &ext_sched_class) 5682 rq->curr->scx.slice = 0; 5683 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5684 } 5685 5686 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { 5687 pseqs[cpu] = rq->scx.pnt_seq; 5688 should_wait = true; 5689 } 5690 5691 resched_curr(rq); 5692 } else { 5693 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5694 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5695 } 5696 5697 raw_spin_rq_unlock_irqrestore(rq, flags); 5698 5699 return should_wait; 5700 } 5701 5702 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq) 5703 { 5704 struct rq *rq = cpu_rq(cpu); 5705 unsigned long flags; 5706 5707 raw_spin_rq_lock_irqsave(rq, flags); 5708 5709 if (!can_skip_idle_kick(rq) && 5710 (cpu_online(cpu) || cpu == cpu_of(this_rq))) 5711 resched_curr(rq); 5712 5713 raw_spin_rq_unlock_irqrestore(rq, flags); 5714 } 5715 5716 static void kick_cpus_irq_workfn(struct irq_work *irq_work) 5717 { 5718 struct rq *this_rq = this_rq(); 5719 struct scx_rq *this_scx = &this_rq->scx; 5720 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); 5721 bool should_wait = false; 5722 s32 cpu; 5723 5724 for_each_cpu(cpu, this_scx->cpus_to_kick) { 5725 should_wait |= kick_one_cpu(cpu, this_rq, pseqs); 5726 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); 5727 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5728 } 5729 5730 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) { 5731 kick_one_cpu_if_idle(cpu, this_rq); 5732 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5733 } 5734 5735 if (!should_wait) 5736 return; 5737 5738 for_each_cpu(cpu, this_scx->cpus_to_wait) { 5739 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq; 5740 5741 if (cpu != cpu_of(this_rq)) { 5742 /* 5743 * Pairs with smp_store_release() issued by this CPU in 5744 * scx_next_task_picked() on the resched path. 5745 * 5746 * We busy-wait here to guarantee that no other task can 5747 * be scheduled on our core before the target CPU has 5748 * entered the resched path. 5749 */ 5750 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu]) 5751 cpu_relax(); 5752 } 5753 5754 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5755 } 5756 } 5757 5758 /** 5759 * print_scx_info - print out sched_ext scheduler state 5760 * @log_lvl: the log level to use when printing 5761 * @p: target task 5762 * 5763 * If a sched_ext scheduler is enabled, print the name and state of the 5764 * scheduler. If @p is on sched_ext, print further information about the task. 5765 * 5766 * This function can be safely called on any task as long as the task_struct 5767 * itself is accessible. While safe, this function isn't synchronized and may 5768 * print out mixups or garbages of limited length. 5769 */ 5770 void print_scx_info(const char *log_lvl, struct task_struct *p) 5771 { 5772 enum scx_ops_enable_state state = scx_ops_enable_state(); 5773 const char *all = READ_ONCE(scx_switching_all) ? "+all" : ""; 5774 char runnable_at_buf[22] = "?"; 5775 struct sched_class *class; 5776 unsigned long runnable_at; 5777 5778 if (state == SCX_OPS_DISABLED) 5779 return; 5780 5781 /* 5782 * Carefully check if the task was running on sched_ext, and then 5783 * carefully copy the time it's been runnable, and its state. 
5784 */ 5785 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) || 5786 class != &ext_sched_class) { 5787 printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name, 5788 scx_ops_enable_state_str[state], all); 5789 return; 5790 } 5791 5792 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at, 5793 sizeof(runnable_at))) 5794 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms", 5795 jiffies_delta_msecs(runnable_at, jiffies)); 5796 5797 /* print everything onto one line to conserve console space */ 5798 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s", 5799 log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all, 5800 runnable_at_buf); 5801 } 5802 5803 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr) 5804 { 5805 /* 5806 * SCX schedulers often have userspace components which are sometimes 5807 * involved in critial scheduling paths. PM operations involve freezing 5808 * userspace which can lead to scheduling misbehaviors including stalls. 5809 * Let's bypass while PM operations are in progress. 5810 */ 5811 switch (event) { 5812 case PM_HIBERNATION_PREPARE: 5813 case PM_SUSPEND_PREPARE: 5814 case PM_RESTORE_PREPARE: 5815 scx_ops_bypass(true); 5816 break; 5817 case PM_POST_HIBERNATION: 5818 case PM_POST_SUSPEND: 5819 case PM_POST_RESTORE: 5820 scx_ops_bypass(false); 5821 break; 5822 } 5823 5824 return NOTIFY_OK; 5825 } 5826 5827 static struct notifier_block scx_pm_notifier = { 5828 .notifier_call = scx_pm_handler, 5829 }; 5830 5831 void __init init_sched_ext_class(void) 5832 { 5833 s32 cpu, v; 5834 5835 /* 5836 * The following is to prevent the compiler from optimizing out the enum 5837 * definitions so that BPF scheduler implementations can use them 5838 * through the generated vmlinux.h. 5839 */ 5840 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT | 5841 SCX_TG_ONLINE); 5842 5843 BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params)); 5844 #ifdef CONFIG_SMP 5845 BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL)); 5846 BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL)); 5847 #endif 5848 scx_kick_cpus_pnt_seqs = 5849 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids, 5850 __alignof__(scx_kick_cpus_pnt_seqs[0])); 5851 BUG_ON(!scx_kick_cpus_pnt_seqs); 5852 5853 for_each_possible_cpu(cpu) { 5854 struct rq *rq = cpu_rq(cpu); 5855 5856 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); 5857 INIT_LIST_HEAD(&rq->scx.runnable_list); 5858 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals); 5859 5860 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL)); 5861 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL)); 5862 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL)); 5863 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL)); 5864 init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn); 5865 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn); 5866 5867 if (cpu_online(cpu)) 5868 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE; 5869 } 5870 5871 register_sysrq_key('S', &sysrq_sched_ext_reset_op); 5872 register_sysrq_key('D', &sysrq_sched_ext_dump_op); 5873 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); 5874 } 5875 5876 5877 /******************************************************************************** 5878 * Helpers that can be called from the BPF scheduler. 
5879 */ 5880 #include <linux/btf_ids.h> 5881 5882 __bpf_kfunc_start_defs(); 5883 5884 /** 5885 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu() 5886 * @p: task_struct to select a CPU for 5887 * @prev_cpu: CPU @p was on previously 5888 * @wake_flags: %SCX_WAKE_* flags 5889 * @is_idle: out parameter indicating whether the returned CPU is idle 5890 * 5891 * Can only be called from ops.select_cpu() if the built-in CPU selection is 5892 * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set. 5893 * @p, @prev_cpu and @wake_flags match ops.select_cpu(). 5894 * 5895 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is 5896 * currently idle and thus a good candidate for direct dispatching. 5897 */ 5898 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, 5899 u64 wake_flags, bool *is_idle) 5900 { 5901 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 5902 scx_ops_error("built-in idle tracking is disabled"); 5903 goto prev_cpu; 5904 } 5905 5906 if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) 5907 goto prev_cpu; 5908 5909 #ifdef CONFIG_SMP 5910 return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle); 5911 #endif 5912 5913 prev_cpu: 5914 *is_idle = false; 5915 return prev_cpu; 5916 } 5917 5918 __bpf_kfunc_end_defs(); 5919 5920 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu) 5921 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU) 5922 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu) 5923 5924 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = { 5925 .owner = THIS_MODULE, 5926 .set = &scx_kfunc_ids_select_cpu, 5927 }; 5928 5929 static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags) 5930 { 5931 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) 5932 return false; 5933 5934 lockdep_assert_irqs_disabled(); 5935 5936 if (unlikely(!p)) { 5937 scx_ops_error("called with NULL task"); 5938 return false; 5939 } 5940 5941 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) { 5942 scx_ops_error("invalid enq_flags 0x%llx", enq_flags); 5943 return false; 5944 } 5945 5946 return true; 5947 } 5948 5949 static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags) 5950 { 5951 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5952 struct task_struct *ddsp_task; 5953 5954 ddsp_task = __this_cpu_read(direct_dispatch_task); 5955 if (ddsp_task) { 5956 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags); 5957 return; 5958 } 5959 5960 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) { 5961 scx_ops_error("dispatch buffer overflow"); 5962 return; 5963 } 5964 5965 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){ 5966 .task = p, 5967 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, 5968 .dsq_id = dsq_id, 5969 .enq_flags = enq_flags, 5970 }; 5971 } 5972 5973 __bpf_kfunc_start_defs(); 5974 5975 /** 5976 * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ 5977 * @p: task_struct to dispatch 5978 * @dsq_id: DSQ to dispatch to 5979 * @slice: duration @p can run for in nsecs, 0 to keep the current value 5980 * @enq_flags: SCX_ENQ_* 5981 * 5982 * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe 5983 * to call this function spuriously. Can be called from ops.enqueue(), 5984 * ops.select_cpu(), and ops.dispatch(). 5985 * 5986 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch 5987 * and @p must match the task being enqueued. 
Also, %SCX_DSQ_LOCAL_ON can't be 5988 * used to target the local DSQ of a CPU other than the enqueueing one. Use 5989 * ops.select_cpu() to be on the target CPU in the first place. 5990 * 5991 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p 5992 * will be directly dispatched to the corresponding dispatch queue after 5993 * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be 5994 * dispatched to the local DSQ of the CPU returned by ops.select_cpu(). 5995 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the 5996 * task is dispatched. 5997 * 5998 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id 5999 * and this function can be called up to ops.dispatch_max_batch times to dispatch 6000 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of 6001 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter. 6002 * 6003 * This function doesn't have any locking restrictions and may be called under 6004 * BPF locks (in the future when BPF introduces more flexible locking). 6005 * 6006 * @p is allowed to run for @slice. The scheduling path is triggered on slice 6007 * exhaustion. If zero, the current residual slice is maintained. If 6008 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with 6009 * scx_bpf_kick_cpu() to trigger scheduling. 6010 */ 6011 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, 6012 u64 enq_flags) 6013 { 6014 if (!scx_dispatch_preamble(p, enq_flags)) 6015 return; 6016 6017 if (slice) 6018 p->scx.slice = slice; 6019 else 6020 p->scx.slice = p->scx.slice ?: 1; 6021 6022 scx_dispatch_commit(p, dsq_id, enq_flags); 6023 } 6024 6025 /** 6026 * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ 6027 * @p: task_struct to dispatch 6028 * @dsq_id: DSQ to dispatch to 6029 * @slice: duration @p can run for in nsecs, 0 to keep the current value 6030 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ 6031 * @enq_flags: SCX_ENQ_* 6032 * 6033 * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id. 6034 * Tasks queued into the priority queue are ordered by @vtime and always 6035 * consumed after the tasks in the FIFO queue. All other aspects are identical 6036 * to scx_bpf_dispatch(). 6037 * 6038 * @vtime ordering is according to time_before64() which considers wrapping. A 6039 * numerically larger vtime may indicate an earlier position in the ordering and 6040 * vice-versa.
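 *
 * As an illustration only (not part of the kernel source), a minimal BPF
 * scheduler might queue every runnable task on one shared vtime DSQ from
 * ops.enqueue(), reusing the vtime previously stored in p->scx.dsq_vtime.
 * SHARED_DSQ is a hypothetical DSQ id created by the scheduler and
 * BPF_STRUCT_OPS() is assumed to come from the SCX BPF-side headers:
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *				       p->scx.dsq_vtime, enq_flags);
 *	}
 *
 * ops.dispatch() would then call scx_bpf_consume(SHARED_DSQ) to move a queued
 * task onto the dispatching CPU's local DSQ.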
6041 */ 6042 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, 6043 u64 slice, u64 vtime, u64 enq_flags) 6044 { 6045 if (!scx_dispatch_preamble(p, enq_flags)) 6046 return; 6047 6048 if (slice) 6049 p->scx.slice = slice; 6050 else 6051 p->scx.slice = p->scx.slice ?: 1; 6052 6053 p->scx.dsq_vtime = vtime; 6054 6055 scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 6056 } 6057 6058 __bpf_kfunc_end_defs(); 6059 6060 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) 6061 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU) 6062 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU) 6063 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) 6064 6065 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = { 6066 .owner = THIS_MODULE, 6067 .set = &scx_kfunc_ids_enqueue_dispatch, 6068 }; 6069 6070 static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit, 6071 struct task_struct *p, u64 dsq_id, 6072 u64 enq_flags) 6073 { 6074 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq; 6075 struct rq *this_rq, *src_rq, *dst_rq, *locked_rq; 6076 bool dispatched = false; 6077 bool in_balance; 6078 unsigned long flags; 6079 6080 if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH)) 6081 return false; 6082 6083 /* 6084 * Can be called from either ops.dispatch() locking this_rq() or any 6085 * context where no rq lock is held. If latter, lock @p's task_rq which 6086 * we'll likely need anyway. 6087 */ 6088 src_rq = task_rq(p); 6089 6090 local_irq_save(flags); 6091 this_rq = this_rq(); 6092 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE; 6093 6094 if (in_balance) { 6095 if (this_rq != src_rq) { 6096 raw_spin_rq_unlock(this_rq); 6097 raw_spin_rq_lock(src_rq); 6098 } 6099 } else { 6100 raw_spin_rq_lock(src_rq); 6101 } 6102 6103 locked_rq = src_rq; 6104 raw_spin_lock(&src_dsq->lock); 6105 6106 /* 6107 * Did someone else get to it? @p could have already left $src_dsq, got 6108 * re-enqueud, or be in the process of being consumed by someone else. 6109 */ 6110 if (unlikely(p->scx.dsq != src_dsq || 6111 u32_before(kit->cursor.priv, p->scx.dsq_seq) || 6112 p->scx.holding_cpu >= 0) || 6113 WARN_ON_ONCE(src_rq != task_rq(p))) { 6114 raw_spin_unlock(&src_dsq->lock); 6115 goto out; 6116 } 6117 6118 /* @p is still on $src_dsq and stable, determine the destination */ 6119 dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p); 6120 6121 if (dst_dsq->id == SCX_DSQ_LOCAL) { 6122 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 6123 if (!task_can_run_on_remote_rq(p, dst_rq, true)) { 6124 dst_dsq = find_global_dsq(p); 6125 dst_rq = src_rq; 6126 } 6127 } else { 6128 /* no need to migrate if destination is a non-local DSQ */ 6129 dst_rq = src_rq; 6130 } 6131 6132 /* 6133 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different 6134 * CPU, @p will be migrated. 6135 */ 6136 if (dst_dsq->id == SCX_DSQ_LOCAL) { 6137 /* @p is going from a non-local DSQ to a local DSQ */ 6138 if (src_rq == dst_rq) { 6139 task_unlink_from_dsq(p, src_dsq); 6140 move_local_task_to_local_dsq(p, enq_flags, 6141 src_dsq, dst_rq); 6142 raw_spin_unlock(&src_dsq->lock); 6143 } else { 6144 raw_spin_unlock(&src_dsq->lock); 6145 move_remote_task_to_local_dsq(p, enq_flags, 6146 src_rq, dst_rq); 6147 locked_rq = dst_rq; 6148 } 6149 } else { 6150 /* 6151 * @p is going from a non-local DSQ to a non-local DSQ. As 6152 * $src_dsq is already locked, do an abbreviated dequeue. 
6153 */ 6154 task_unlink_from_dsq(p, src_dsq); 6155 p->scx.dsq = NULL; 6156 raw_spin_unlock(&src_dsq->lock); 6157 6158 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME) 6159 p->scx.dsq_vtime = kit->vtime; 6160 dispatch_enqueue(dst_dsq, p, enq_flags); 6161 } 6162 6163 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE) 6164 p->scx.slice = kit->slice; 6165 6166 dispatched = true; 6167 out: 6168 if (in_balance) { 6169 if (this_rq != locked_rq) { 6170 raw_spin_rq_unlock(locked_rq); 6171 raw_spin_rq_lock(this_rq); 6172 } 6173 } else { 6174 raw_spin_rq_unlock_irqrestore(locked_rq, flags); 6175 } 6176 6177 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE | 6178 __SCX_DSQ_ITER_HAS_VTIME); 6179 return dispatched; 6180 } 6181 6182 __bpf_kfunc_start_defs(); 6183 6184 /** 6185 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots 6186 * 6187 * Can only be called from ops.dispatch(). 6188 */ 6189 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void) 6190 { 6191 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 6192 return 0; 6193 6194 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor); 6195 } 6196 6197 /** 6198 * scx_bpf_dispatch_cancel - Cancel the latest dispatch 6199 * 6200 * Cancel the latest dispatch. Can be called multiple times to cancel further 6201 * dispatches. Can only be called from ops.dispatch(). 6202 */ 6203 __bpf_kfunc void scx_bpf_dispatch_cancel(void) 6204 { 6205 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6206 6207 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 6208 return; 6209 6210 if (dspc->cursor > 0) 6211 dspc->cursor--; 6212 else 6213 scx_ops_error("dispatch buffer underflow"); 6214 } 6215 6216 /** 6217 * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ 6218 * @dsq_id: DSQ to consume 6219 * 6220 * Consume a task from the non-local DSQ identified by @dsq_id and transfer it 6221 * to the current CPU's local DSQ for execution. Can only be called from 6222 * ops.dispatch(). 6223 * 6224 * This function flushes the in-flight dispatches from scx_bpf_dispatch() before 6225 * trying to consume the specified DSQ. It may also grab rq locks and thus can't 6226 * be called under any BPF locks. 6227 * 6228 * Returns %true if a task has been consumed, %false if there isn't any task to 6229 * consume. 6230 */ 6231 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id) 6232 { 6233 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6234 struct scx_dispatch_q *dsq; 6235 6236 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 6237 return false; 6238 6239 flush_dispatch_buf(dspc->rq); 6240 6241 dsq = find_user_dsq(dsq_id); 6242 if (unlikely(!dsq)) { 6243 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id); 6244 return false; 6245 } 6246 6247 if (consume_dispatch_q(dspc->rq, dsq)) { 6248 /* 6249 * A successfully consumed task can be dequeued before it starts 6250 * running while the CPU is trying to migrate other dispatched 6251 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty 6252 * local DSQ. 6253 */ 6254 dspc->nr_tasks++; 6255 return true; 6256 } else { 6257 return false; 6258 } 6259 } 6260 6261 /** 6262 * scx_bpf_dispatch_from_dsq_set_slice - Override slice when dispatching from DSQ 6263 * @it__iter: DSQ iterator in progress 6264 * @slice: duration the dispatched task can run for in nsecs 6265 * 6266 * Override the slice of the next task that will be dispatched from @it__iter 6267 * using scx_bpf_dispatch_from_dsq[_vtime](). If this function is not called, 6268 * the previous slice duration is kept. 
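 *
 * A rough sketch (not in the kernel source) of using this together with the
 * DSQ iterator and scx_bpf_dispatch_from_dsq() from ops.dispatch(). SHARED_DSQ
 * and target_pid are hypothetical, and bpf_for_each()/BPF_FOR_EACH_ITER are
 * assumed to come from the SCX BPF-side headers:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
 *		if (p->pid != target_pid)
 *			continue;
 *		scx_bpf_dispatch_from_dsq_set_slice(BPF_FOR_EACH_ITER, 5000000);
 *		if (scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
 *					      SCX_DSQ_LOCAL, 0))
 *			break;
 *	}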
6269 */ 6270 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice( 6271 struct bpf_iter_scx_dsq *it__iter, u64 slice) 6272 { 6273 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6274 6275 kit->slice = slice; 6276 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE; 6277 } 6278 6279 /** 6280 * scx_bpf_dispatch_from_dsq_set_vtime - Override vtime when dispatching from DSQ 6281 * @it__iter: DSQ iterator in progress 6282 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ 6283 * 6284 * Override the vtime of the next task that will be dispatched from @it__iter 6285 * using scx_bpf_dispatch_from_dsq_vtime(). If this function is not called, the 6286 * previous slice vtime is kept. If scx_bpf_dispatch_from_dsq() is used to 6287 * dispatch the next task, the override is ignored and cleared. 6288 */ 6289 __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime( 6290 struct bpf_iter_scx_dsq *it__iter, u64 vtime) 6291 { 6292 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6293 6294 kit->vtime = vtime; 6295 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME; 6296 } 6297 6298 /** 6299 * scx_bpf_dispatch_from_dsq - Move a task from DSQ iteration to a DSQ 6300 * @it__iter: DSQ iterator in progress 6301 * @p: task to transfer 6302 * @dsq_id: DSQ to move @p to 6303 * @enq_flags: SCX_ENQ_* 6304 * 6305 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ 6306 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can 6307 * be the destination. 6308 * 6309 * For the transfer to be successful, @p must still be on the DSQ and have been 6310 * queued before the DSQ iteration started. This function doesn't care whether 6311 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have 6312 * been queued before the iteration started. 6313 * 6314 * @p's slice is kept by default. Use scx_bpf_dispatch_from_dsq_set_slice() to 6315 * update. 6316 * 6317 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq 6318 * lock (e.g. BPF timers or SYSCALL programs). 6319 * 6320 * Returns %true if @p has been consumed, %false if @p had already been consumed 6321 * or dequeued. 6322 */ 6323 __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter, 6324 struct task_struct *p, u64 dsq_id, 6325 u64 enq_flags) 6326 { 6327 return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter, 6328 p, dsq_id, enq_flags); 6329 } 6330 6331 /** 6332 * scx_bpf_dispatch_vtime_from_dsq - Move a task from DSQ iteration to a PRIQ DSQ 6333 * @it__iter: DSQ iterator in progress 6334 * @p: task to transfer 6335 * @dsq_id: DSQ to move @p to 6336 * @enq_flags: SCX_ENQ_* 6337 * 6338 * Transfer @p which is on the DSQ currently iterated by @it__iter to the 6339 * priority queue of the DSQ specified by @dsq_id. The destination must be a 6340 * user DSQ as only user DSQs support priority queue. 6341 * 6342 * @p's slice and vtime are kept by default. Use 6343 * scx_bpf_dispatch_from_dsq_set_slice() and 6344 * scx_bpf_dispatch_from_dsq_set_vtime() to update. 6345 * 6346 * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See 6347 * scx_bpf_dispatch_vtime() for more information on @vtime. 
6348 */ 6349 __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter, 6350 struct task_struct *p, u64 dsq_id, 6351 u64 enq_flags) 6352 { 6353 return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter, 6354 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 6355 } 6356 6357 __bpf_kfunc_end_defs(); 6358 6359 BTF_KFUNCS_START(scx_kfunc_ids_dispatch) 6360 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) 6361 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) 6362 BTF_ID_FLAGS(func, scx_bpf_consume) 6363 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice) 6364 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime) 6365 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU) 6366 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU) 6367 BTF_KFUNCS_END(scx_kfunc_ids_dispatch) 6368 6369 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { 6370 .owner = THIS_MODULE, 6371 .set = &scx_kfunc_ids_dispatch, 6372 }; 6373 6374 __bpf_kfunc_start_defs(); 6375 6376 /** 6377 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6378 * 6379 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6380 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of 6381 * processed tasks. Can only be called from ops.cpu_release(). 6382 */ 6383 __bpf_kfunc u32 scx_bpf_reenqueue_local(void) 6384 { 6385 LIST_HEAD(tasks); 6386 u32 nr_enqueued = 0; 6387 struct rq *rq; 6388 struct task_struct *p, *n; 6389 6390 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE)) 6391 return 0; 6392 6393 rq = cpu_rq(smp_processor_id()); 6394 lockdep_assert_rq_held(rq); 6395 6396 /* 6397 * The BPF scheduler may choose to dispatch tasks back to 6398 * @rq->scx.local_dsq. Move all candidate tasks off to a private list 6399 * first to avoid processing the same tasks repeatedly. 6400 */ 6401 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, 6402 scx.dsq_list.node) { 6403 /* 6404 * If @p is being migrated, @p's current CPU may not agree with 6405 * its allowed CPUs and the migration_cpu_stop is about to 6406 * deactivate and re-activate @p anyway. Skip re-enqueueing. 6407 * 6408 * While racing sched property changes may also dequeue and 6409 * re-enqueue a migrating task while its current CPU and allowed 6410 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to 6411 * the current local DSQ for running tasks and thus are not 6412 * visible to the BPF scheduler. 6413 */ 6414 if (p->migration_pending) 6415 continue; 6416 6417 dispatch_dequeue(rq, p); 6418 list_add_tail(&p->scx.dsq_list.node, &tasks); 6419 } 6420 6421 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { 6422 list_del_init(&p->scx.dsq_list.node); 6423 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); 6424 nr_enqueued++; 6425 } 6426 6427 return nr_enqueued; 6428 } 6429 6430 __bpf_kfunc_end_defs(); 6431 6432 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release) 6433 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) 6434 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release) 6435 6436 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { 6437 .owner = THIS_MODULE, 6438 .set = &scx_kfunc_ids_cpu_release, 6439 }; 6440 6441 __bpf_kfunc_start_defs(); 6442 6443 /** 6444 * scx_bpf_create_dsq - Create a custom DSQ 6445 * @dsq_id: DSQ to create 6446 * @node: NUMA node to allocate from 6447 * 6448 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable 6449 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog. 
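 *
 * For illustration only (not part of the kernel source), a scheduler would
 * typically create its shared DSQs from ops.init(), which is sleepable.
 * SHARED_DSQ is a hypothetical id and BPF_STRUCT_OPS_SLEEPABLE() is assumed to
 * come from the SCX BPF-side headers:
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	}
 *
 * Passing -1 (NUMA_NO_NODE) lets the allocation come from any node.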
6450 */ 6451 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) 6452 { 6453 if (unlikely(node >= (int)nr_node_ids || 6454 (node < 0 && node != NUMA_NO_NODE))) 6455 return -EINVAL; 6456 return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node)); 6457 } 6458 6459 __bpf_kfunc_end_defs(); 6460 6461 BTF_KFUNCS_START(scx_kfunc_ids_unlocked) 6462 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) 6463 BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU) 6464 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU) 6465 BTF_KFUNCS_END(scx_kfunc_ids_unlocked) 6466 6467 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = { 6468 .owner = THIS_MODULE, 6469 .set = &scx_kfunc_ids_unlocked, 6470 }; 6471 6472 __bpf_kfunc_start_defs(); 6473 6474 /** 6475 * scx_bpf_kick_cpu - Trigger reschedule on a CPU 6476 * @cpu: cpu to kick 6477 * @flags: %SCX_KICK_* flags 6478 * 6479 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or 6480 * trigger rescheduling on a busy CPU. This can be called from any online 6481 * scx_ops operation and the actual kicking is performed asynchronously through 6482 * an irq work. 6483 */ 6484 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags) 6485 { 6486 struct rq *this_rq; 6487 unsigned long irq_flags; 6488 6489 if (!ops_cpu_valid(cpu, NULL)) 6490 return; 6491 6492 local_irq_save(irq_flags); 6493 6494 this_rq = this_rq(); 6495 6496 /* 6497 * While bypassing for PM ops, IRQ handling may not be online which can 6498 * lead to irq_work_queue() malfunction such as infinite busy wait for 6499 * IRQ status update. Suppress kicking. 6500 */ 6501 if (scx_rq_bypassing(this_rq)) 6502 goto out; 6503 6504 /* 6505 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting 6506 * rq locks. We can probably be smarter and avoid bouncing if called 6507 * from ops which don't hold a rq lock. 6508 */ 6509 if (flags & SCX_KICK_IDLE) { 6510 struct rq *target_rq = cpu_rq(cpu); 6511 6512 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT))) 6513 scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); 6514 6515 if (raw_spin_rq_trylock(target_rq)) { 6516 if (can_skip_idle_kick(target_rq)) { 6517 raw_spin_rq_unlock(target_rq); 6518 goto out; 6519 } 6520 raw_spin_rq_unlock(target_rq); 6521 } 6522 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle); 6523 } else { 6524 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick); 6525 6526 if (flags & SCX_KICK_PREEMPT) 6527 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt); 6528 if (flags & SCX_KICK_WAIT) 6529 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait); 6530 } 6531 6532 irq_work_queue(&this_rq->scx.kick_cpus_irq_work); 6533 out: 6534 local_irq_restore(irq_flags); 6535 } 6536 6537 /** 6538 * scx_bpf_dsq_nr_queued - Return the number of queued tasks 6539 * @dsq_id: id of the DSQ 6540 * 6541 * Return the number of tasks in the DSQ matching @dsq_id. If not found, 6542 * -%ENOENT is returned. 
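 *
 * For example (illustrative only), the depth of a particular CPU's local DSQ
 * can be queried by encoding the CPU number into the DSQ id:
 *
 *	s32 nr = scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu);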
6543 */ 6544 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) 6545 { 6546 struct scx_dispatch_q *dsq; 6547 s32 ret; 6548 6549 preempt_disable(); 6550 6551 if (dsq_id == SCX_DSQ_LOCAL) { 6552 ret = READ_ONCE(this_rq()->scx.local_dsq.nr); 6553 goto out; 6554 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 6555 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 6556 6557 if (ops_cpu_valid(cpu, NULL)) { 6558 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr); 6559 goto out; 6560 } 6561 } else { 6562 dsq = find_user_dsq(dsq_id); 6563 if (dsq) { 6564 ret = READ_ONCE(dsq->nr); 6565 goto out; 6566 } 6567 } 6568 ret = -ENOENT; 6569 out: 6570 preempt_enable(); 6571 return ret; 6572 } 6573 6574 /** 6575 * scx_bpf_destroy_dsq - Destroy a custom DSQ 6576 * @dsq_id: DSQ to destroy 6577 * 6578 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with 6579 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is 6580 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ 6581 * which doesn't exist. Can be called from any online scx_ops operations. 6582 */ 6583 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id) 6584 { 6585 destroy_dsq(dsq_id); 6586 } 6587 6588 /** 6589 * bpf_iter_scx_dsq_new - Create a DSQ iterator 6590 * @it: iterator to initialize 6591 * @dsq_id: DSQ to iterate 6592 * @flags: %SCX_DSQ_ITER_* 6593 * 6594 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk 6595 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes 6596 * tasks which are already queued when this function is invoked. 6597 */ 6598 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, 6599 u64 flags) 6600 { 6601 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6602 6603 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) > 6604 sizeof(struct bpf_iter_scx_dsq)); 6605 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) != 6606 __alignof__(struct bpf_iter_scx_dsq)); 6607 6608 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS) 6609 return -EINVAL; 6610 6611 kit->dsq = find_user_dsq(dsq_id); 6612 if (!kit->dsq) 6613 return -ENOENT; 6614 6615 INIT_LIST_HEAD(&kit->cursor.node); 6616 kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags; 6617 kit->cursor.priv = READ_ONCE(kit->dsq->seq); 6618 6619 return 0; 6620 } 6621 6622 /** 6623 * bpf_iter_scx_dsq_next - Progress a DSQ iterator 6624 * @it: iterator to progress 6625 * 6626 * Return the next task. See bpf_iter_scx_dsq_new(). 6627 */ 6628 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) 6629 { 6630 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6631 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV; 6632 struct task_struct *p; 6633 unsigned long flags; 6634 6635 if (!kit->dsq) 6636 return NULL; 6637 6638 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 6639 6640 if (list_empty(&kit->cursor.node)) 6641 p = NULL; 6642 else 6643 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list); 6644 6645 /* 6646 * Only tasks which were queued before the iteration started are 6647 * visible. This bounds BPF iterations and guarantees that vtime never 6648 * jumps in the other direction while iterating. 
6649 */ 6650 do { 6651 p = nldsq_next_task(kit->dsq, p, rev); 6652 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq))); 6653 6654 if (p) { 6655 if (rev) 6656 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node); 6657 else 6658 list_move(&kit->cursor.node, &p->scx.dsq_list.node); 6659 } else { 6660 list_del_init(&kit->cursor.node); 6661 } 6662 6663 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 6664 6665 return p; 6666 } 6667 6668 /** 6669 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator 6670 * @it: iterator to destroy 6671 * 6672 * Undo scx_iter_scx_dsq_new(). 6673 */ 6674 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) 6675 { 6676 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6677 6678 if (!kit->dsq) 6679 return; 6680 6681 if (!list_empty(&kit->cursor.node)) { 6682 unsigned long flags; 6683 6684 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 6685 list_del_init(&kit->cursor.node); 6686 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 6687 } 6688 kit->dsq = NULL; 6689 } 6690 6691 __bpf_kfunc_end_defs(); 6692 6693 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size, 6694 char *fmt, unsigned long long *data, u32 data__sz) 6695 { 6696 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true }; 6697 s32 ret; 6698 6699 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 || 6700 (data__sz && !data)) { 6701 scx_ops_error("invalid data=%p and data__sz=%u", 6702 (void *)data, data__sz); 6703 return -EINVAL; 6704 } 6705 6706 ret = copy_from_kernel_nofault(data_buf, data, data__sz); 6707 if (ret < 0) { 6708 scx_ops_error("failed to read data fields (%d)", ret); 6709 return ret; 6710 } 6711 6712 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8, 6713 &bprintf_data); 6714 if (ret < 0) { 6715 scx_ops_error("format preparation failed (%d)", ret); 6716 return ret; 6717 } 6718 6719 ret = bstr_printf(line_buf, line_size, fmt, 6720 bprintf_data.bin_args); 6721 bpf_bprintf_cleanup(&bprintf_data); 6722 if (ret < 0) { 6723 scx_ops_error("(\"%s\", %p, %u) failed to format", 6724 fmt, data, data__sz); 6725 return ret; 6726 } 6727 6728 return ret; 6729 } 6730 6731 static s32 bstr_format(struct scx_bstr_buf *buf, 6732 char *fmt, unsigned long long *data, u32 data__sz) 6733 { 6734 return __bstr_format(buf->data, buf->line, sizeof(buf->line), 6735 fmt, data, data__sz); 6736 } 6737 6738 __bpf_kfunc_start_defs(); 6739 6740 /** 6741 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler. 6742 * @exit_code: Exit value to pass to user space via struct scx_exit_info. 6743 * @fmt: error message format string 6744 * @data: format string parameters packaged using ___bpf_fill() macro 6745 * @data__sz: @data len, must end in '__sz' for the verifier 6746 * 6747 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops 6748 * disabling. 
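 *
 * BPF schedulers usually reach this through a convenience wrapper in the SCX
 * BPF-side headers (e.g. a scx_bpf_exit() macro) which packages the varargs;
 * conceptually, with a hypothetical exit code and counter:
 *
 *	scx_bpf_exit(MY_ECODE_SHUTDOWN, "stopping after %llu dispatches", nr_dispatched);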
6749 */ 6750 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt, 6751 unsigned long long *data, u32 data__sz) 6752 { 6753 unsigned long flags; 6754 6755 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6756 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6757 scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s", 6758 scx_exit_bstr_buf.line); 6759 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6760 } 6761 6762 /** 6763 * scx_bpf_error_bstr - Indicate fatal error 6764 * @fmt: error message format string 6765 * @data: format string parameters packaged using ___bpf_fill() macro 6766 * @data__sz: @data len, must end in '__sz' for the verifier 6767 * 6768 * Indicate that the BPF scheduler encountered a fatal error and initiate ops 6769 * disabling. 6770 */ 6771 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data, 6772 u32 data__sz) 6773 { 6774 unsigned long flags; 6775 6776 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6777 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6778 scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s", 6779 scx_exit_bstr_buf.line); 6780 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6781 } 6782 6783 /** 6784 * scx_bpf_dump - Generate extra debug dump specific to the BPF scheduler 6785 * @fmt: format string 6786 * @data: format string parameters packaged using ___bpf_fill() macro 6787 * @data__sz: @data len, must end in '__sz' for the verifier 6788 * 6789 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and 6790 * dump_task() to generate extra debug dump specific to the BPF scheduler. 6791 * 6792 * The extra dump may be multiple lines. A single line may be split over 6793 * multiple calls. The last line is automatically terminated. 6794 */ 6795 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, 6796 u32 data__sz) 6797 { 6798 struct scx_dump_data *dd = &scx_dump_data; 6799 struct scx_bstr_buf *buf = &dd->buf; 6800 s32 ret; 6801 6802 if (raw_smp_processor_id() != dd->cpu) { 6803 scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends"); 6804 return; 6805 } 6806 6807 /* append the formatted string to the line buf */ 6808 ret = __bstr_format(buf->data, buf->line + dd->cursor, 6809 sizeof(buf->line) - dd->cursor, fmt, data, data__sz); 6810 if (ret < 0) { 6811 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)", 6812 dd->prefix, fmt, data, data__sz, ret); 6813 return; 6814 } 6815 6816 dd->cursor += ret; 6817 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line)); 6818 6819 if (!dd->cursor) 6820 return; 6821 6822 /* 6823 * If the line buf overflowed or ends in a newline, flush it into the 6824 * dump. This is to allow the caller to generate a single line over 6825 * multiple calls. As ops_dump_flush() can also handle multiple lines in 6826 * the line buf, the only case which can lead to an unexpected 6827 * truncation is when the caller keeps generating newlines in the middle 6828 * instead of the end consecutively. Don't do that. 6829 */ 6830 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n') 6831 ops_dump_flush(); 6832 } 6833 6834 /** 6835 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU 6836 * @cpu: CPU of interest 6837 * 6838 * Return the maximum relative capacity of @cpu in relation to the most 6839 * performant CPU in the system. The return value is in the range [1, 6840 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur(). 
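 *
 * Purely as a sketch (not part of the kernel source), a scheduler might grant
 * slower CPUs proportionally longer slices by scaling against capacity, with
 * @cpu being the CPU the task is headed for:
 *
 *	u64 slice = SCX_SLICE_DFL * SCX_CPUPERF_ONE / scx_bpf_cpuperf_cap(cpu);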
6841 */ 6842 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu) 6843 { 6844 if (ops_cpu_valid(cpu, NULL)) 6845 return arch_scale_cpu_capacity(cpu); 6846 else 6847 return SCX_CPUPERF_ONE; 6848 } 6849 6850 /** 6851 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU 6852 * @cpu: CPU of interest 6853 * 6854 * Return the current relative performance of @cpu in relation to its maximum. 6855 * The return value is in the range [1, %SCX_CPUPERF_ONE]. 6856 * 6857 * The current performance level of a CPU in relation to the maximum performance 6858 * available in the system can be calculated as follows: 6859 * 6860 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE 6861 * 6862 * The result is in the range [1, %SCX_CPUPERF_ONE]. 6863 */ 6864 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu) 6865 { 6866 if (ops_cpu_valid(cpu, NULL)) 6867 return arch_scale_freq_capacity(cpu); 6868 else 6869 return SCX_CPUPERF_ONE; 6870 } 6871 6872 /** 6873 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU 6874 * @cpu: CPU of interest 6875 * @perf: target performance level [0, %SCX_CPUPERF_ONE] 6876 * @flags: %SCX_CPUPERF_* flags 6877 * 6878 * Set the target performance level of @cpu to @perf. @perf is in linear 6879 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the 6880 * schedutil cpufreq governor chooses the target frequency. 6881 * 6882 * The actual performance level chosen, CPU grouping, and the overhead and 6883 * latency of the operations are dependent on the hardware and cpufreq driver in 6884 * use. Consult hardware and cpufreq documentation for more information. The 6885 * current performance level can be monitored using scx_bpf_cpuperf_cur(). 6886 */ 6887 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf) 6888 { 6889 if (unlikely(perf > SCX_CPUPERF_ONE)) { 6890 scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu); 6891 return; 6892 } 6893 6894 if (ops_cpu_valid(cpu, NULL)) { 6895 struct rq *rq = cpu_rq(cpu); 6896 6897 rq->scx.cpuperf_target = perf; 6898 6899 rcu_read_lock_sched_notrace(); 6900 cpufreq_update_util(cpu_rq(cpu), 0); 6901 rcu_read_unlock_sched_notrace(); 6902 } 6903 } 6904 6905 /** 6906 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs 6907 * 6908 * All valid CPU IDs in the system are smaller than the returned value. 6909 */ 6910 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void) 6911 { 6912 return nr_cpu_ids; 6913 } 6914 6915 /** 6916 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask 6917 */ 6918 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void) 6919 { 6920 return cpu_possible_mask; 6921 } 6922 6923 /** 6924 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask 6925 */ 6926 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void) 6927 { 6928 return cpu_online_mask; 6929 } 6930 6931 /** 6932 * scx_bpf_put_cpumask - Release a possible/online cpumask 6933 * @cpumask: cpumask to release 6934 */ 6935 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask) 6936 { 6937 /* 6938 * Empty function body because we aren't actually acquiring or releasing 6939 * a reference to a global cpumask, which is read-only in the caller and 6940 * is never released. The acquire / release semantics here are just used 6941 * to make the cpumask is a trusted pointer in the caller. 6942 */ 6943 } 6944 6945 /** 6946 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking 6947 * per-CPU cpumask. 
6948 * 6949 * Returns NULL if idle tracking is not enabled, or running on a UP kernel. 6950 */ 6951 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void) 6952 { 6953 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 6954 scx_ops_error("built-in idle tracking is disabled"); 6955 return cpu_none_mask; 6956 } 6957 6958 #ifdef CONFIG_SMP 6959 return idle_masks.cpu; 6960 #else 6961 return cpu_none_mask; 6962 #endif 6963 } 6964 6965 /** 6966 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking, 6967 * per-physical-core cpumask. Can be used to determine if an entire physical 6968 * core is free. 6969 * 6970 * Returns NULL if idle tracking is not enabled, or running on a UP kernel. 6971 */ 6972 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void) 6973 { 6974 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 6975 scx_ops_error("built-in idle tracking is disabled"); 6976 return cpu_none_mask; 6977 } 6978 6979 #ifdef CONFIG_SMP 6980 if (sched_smt_active()) 6981 return idle_masks.smt; 6982 else 6983 return idle_masks.cpu; 6984 #else 6985 return cpu_none_mask; 6986 #endif 6987 } 6988 6989 /** 6990 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to 6991 * either the percpu, or SMT idle-tracking cpumask. 6992 */ 6993 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask) 6994 { 6995 /* 6996 * Empty function body because we aren't actually acquiring or releasing 6997 * a reference to a global idle cpumask, which is read-only in the 6998 * caller and is never released. The acquire / release semantics here 6999 * are just used to make the cpumask a trusted pointer in the caller. 7000 */ 7001 } 7002 7003 /** 7004 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state 7005 * @cpu: cpu to test and clear idle for 7006 * 7007 * Returns %true if @cpu was idle and its idle state was successfully cleared. 7008 * %false otherwise. 7009 * 7010 * Unavailable if ops.update_idle() is implemented and 7011 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set. 7012 */ 7013 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) 7014 { 7015 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 7016 scx_ops_error("built-in idle tracking is disabled"); 7017 return false; 7018 } 7019 7020 if (ops_cpu_valid(cpu, NULL)) 7021 return test_and_clear_cpu_idle(cpu); 7022 else 7023 return false; 7024 } 7025 7026 /** 7027 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu 7028 * @cpus_allowed: Allowed cpumask 7029 * @flags: %SCX_PICK_IDLE_CPU_* flags 7030 * 7031 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu 7032 * number on success. -%EBUSY if no matching cpu was found. 7033 * 7034 * Idle CPU tracking may race against CPU scheduling state transitions. For 7035 * example, this function may return -%EBUSY as CPUs are transitioning into the 7036 * idle state. If the caller then assumes that there will be dispatch events on 7037 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs 7038 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and 7039 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch 7040 * event in the near future. 7041 * 7042 * Unavailable if ops.update_idle() is implemented and 7043 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set. 
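 *
 * A rough sketch of typical usage from ops.select_cpu() (illustrative only;
 * BPF_STRUCT_OPS() is assumed to come from the SCX BPF-side headers):
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *		if (cpu >= 0) {
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *		return prev_cpu;
 *	}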
7044 */ 7045 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed, 7046 u64 flags) 7047 { 7048 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 7049 scx_ops_error("built-in idle tracking is disabled"); 7050 return -EBUSY; 7051 } 7052 7053 return scx_pick_idle_cpu(cpus_allowed, flags); 7054 } 7055 7056 /** 7057 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU 7058 * @cpus_allowed: Allowed cpumask 7059 * @flags: %SCX_PICK_IDLE_CPU_* flags 7060 * 7061 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any 7062 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu 7063 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is 7064 * empty. 7065 * 7066 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not 7067 * set, this function can't tell which CPUs are idle and will always pick any 7068 * CPU. 7069 */ 7070 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed, 7071 u64 flags) 7072 { 7073 s32 cpu; 7074 7075 if (static_branch_likely(&scx_builtin_idle_enabled)) { 7076 cpu = scx_pick_idle_cpu(cpus_allowed, flags); 7077 if (cpu >= 0) 7078 return cpu; 7079 } 7080 7081 cpu = cpumask_any_distribute(cpus_allowed); 7082 if (cpu < nr_cpu_ids) 7083 return cpu; 7084 else 7085 return -EBUSY; 7086 } 7087 7088 /** 7089 * scx_bpf_task_running - Is task currently running? 7090 * @p: task of interest 7091 */ 7092 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p) 7093 { 7094 return task_rq(p)->curr == p; 7095 } 7096 7097 /** 7098 * scx_bpf_task_cpu - CPU a task is currently associated with 7099 * @p: task of interest 7100 */ 7101 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p) 7102 { 7103 return task_cpu(p); 7104 } 7105 7106 /** 7107 * scx_bpf_cpu_rq - Fetch the rq of a CPU 7108 * @cpu: CPU of the rq 7109 */ 7110 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu) 7111 { 7112 if (!ops_cpu_valid(cpu, NULL)) 7113 return NULL; 7114 7115 return cpu_rq(cpu); 7116 } 7117 7118 /** 7119 * scx_bpf_task_cgroup - Return the sched cgroup of a task 7120 * @p: task of interest 7121 * 7122 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with 7123 * from the scheduler's POV. SCX operations should use this function to 7124 * determine @p's current cgroup as, unlike following @p->cgroups, 7125 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all 7126 * rq-locked operations. Can be called on the parameter tasks of rq-locked 7127 * operations. The restriction guarantees that @p's rq is locked by the caller. 7128 */ 7129 #ifdef CONFIG_CGROUP_SCHED 7130 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) 7131 { 7132 struct task_group *tg = p->sched_task_group; 7133 struct cgroup *cgrp = &cgrp_dfl_root.cgrp; 7134 7135 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p)) 7136 goto out; 7137 7138 /* 7139 * A task_group may either be a cgroup or an autogroup. In the latter 7140 * case, @tg->css.cgroup is %NULL. A task_group can't become the other 7141 * kind once created. 
7142 */ 7143 if (tg && tg->css.cgroup) 7144 cgrp = tg->css.cgroup; 7145 else 7146 cgrp = &cgrp_dfl_root.cgrp; 7147 out: 7148 cgroup_get(cgrp); 7149 return cgrp; 7150 } 7151 #endif 7152 7153 __bpf_kfunc_end_defs(); 7154 7155 BTF_KFUNCS_START(scx_kfunc_ids_any) 7156 BTF_ID_FLAGS(func, scx_bpf_kick_cpu) 7157 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) 7158 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) 7159 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED) 7160 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL) 7161 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY) 7162 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS) 7163 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS) 7164 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS) 7165 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap) 7166 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur) 7167 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set) 7168 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids) 7169 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE) 7170 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE) 7171 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE) 7172 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE) 7173 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE) 7174 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE) 7175 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle) 7176 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU) 7177 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU) 7178 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU) 7179 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU) 7180 BTF_ID_FLAGS(func, scx_bpf_cpu_rq) 7181 #ifdef CONFIG_CGROUP_SCHED 7182 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE) 7183 #endif 7184 BTF_KFUNCS_END(scx_kfunc_ids_any) 7185 7186 static const struct btf_kfunc_id_set scx_kfunc_set_any = { 7187 .owner = THIS_MODULE, 7188 .set = &scx_kfunc_ids_any, 7189 }; 7190 7191 static int __init scx_init(void) 7192 { 7193 int ret; 7194 7195 /* 7196 * kfunc registration can't be done from init_sched_ext_class() as 7197 * register_btf_kfunc_id_set() needs most of the system to be up. 7198 * 7199 * Some kfuncs are context-sensitive and can only be called from 7200 * specific SCX ops. They are grouped into BTF sets accordingly. 7201 * Unfortunately, BPF currently doesn't have a way of enforcing such 7202 * restrictions. Eventually, the verifier should be able to enforce 7203 * them. For now, register them the same and make each kfunc explicitly 7204 * check using scx_kf_allowed(). 
7205 */ 7206 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7207 &scx_kfunc_set_select_cpu)) || 7208 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7209 &scx_kfunc_set_enqueue_dispatch)) || 7210 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7211 &scx_kfunc_set_dispatch)) || 7212 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7213 &scx_kfunc_set_cpu_release)) || 7214 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7215 &scx_kfunc_set_unlocked)) || 7216 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, 7217 &scx_kfunc_set_unlocked)) || 7218 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7219 &scx_kfunc_set_any)) || 7220 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, 7221 &scx_kfunc_set_any)) || 7222 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, 7223 &scx_kfunc_set_any))) { 7224 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret); 7225 return ret; 7226 } 7227 7228 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops); 7229 if (ret) { 7230 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret); 7231 return ret; 7232 } 7233 7234 ret = register_pm_notifier(&scx_pm_notifier); 7235 if (ret) { 7236 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret); 7237 return ret; 7238 } 7239 7240 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj); 7241 if (!scx_kset) { 7242 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n"); 7243 return -ENOMEM; 7244 } 7245 7246 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group); 7247 if (ret < 0) { 7248 pr_err("sched_ext: Failed to add global attributes\n"); 7249 return ret; 7250 } 7251 7252 return 0; 7253 } 7254 __initcall(scx_init); 7255