/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))

enum scx_consts {
	SCX_DSP_DFL_MAX_BATCH		= 32,
	SCX_DSP_MAX_LOOPS		= 32,
	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,

	SCX_EXIT_BT_LEN			= 64,
	SCX_EXIT_MSG_LEN		= 1024,
	SCX_EXIT_DUMP_DFL_LEN		= 32768,

	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
};

enum scx_exit_kind {
	SCX_EXIT_NONE,
	SCX_EXIT_DONE,

	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */

	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
};

/*
 * An exit code can be specified when exiting with scx_bpf_exit() or
 * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
 * respectively. The codes are 64bit of the format:
 *
 *	Bits: [63  ..  48 47   ..  32 31 .. 0]
 *	      [ SYS ACT ] [ SYS RSN ] [ USR  ]
 *
 *	SYS ACT: System-defined exit actions
 *	SYS RSN: System-defined exit reasons
 *	USR    : User-defined exit codes and reasons
 *
 * Using the above, users may communicate intention and context by ORing system
 * actions and/or system reasons with a user-defined exit code.
 */
enum scx_exit_code {
	/* Reasons */
	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,

	/* Actions */
	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
};

/*
 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
 * being disabled.
 */
struct scx_exit_info {
	/* %SCX_EXIT_* - broad category of the exit reason */
	enum scx_exit_kind kind;

	/* exit code if gracefully exiting */
	s64 exit_code;

	/* textual representation of the above */
	const char *reason;

	/* backtrace if exiting due to an error */
	unsigned long *bt;
	u32 bt_len;

	/* informational message */
	char *msg;

	/* debug dump */
	char *dump;
};
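
/*
 * Example: composing an exit code as described above. This is an illustrative
 * sketch only - a BPF scheduler that wants to be restarted after a hotplug
 * event might pass the following to scx_bpf_exit(); the user-defined low bits
 * (42 here) are arbitrary and the exact BPF-side helper signature is not part
 * of this file:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 42,
 *		     "CPU %d went offline, restarting", cpu);
 */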

/* sched_ext_ops.flags */
enum scx_ops_flags {
	/*
	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
	 */
	SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,

	/*
	 * By default, if there are no other tasks to run on the CPU, ext core
	 * keeps running the current task even after its slice expires. If this
	 * flag is specified, such tasks are passed to ops.enqueue() with
	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
	 */
	SCX_OPS_ENQ_LAST = 1LLU << 1,

	/*
	 * An exiting task may schedule after PF_EXITING is set. In such cases,
	 * bpf_task_from_pid() may not be able to find the task and if the BPF
	 * scheduler depends on pid lookup for dispatching, the task will be
	 * lost leading to various issues including RCU grace period stalls.
	 *
	 * To mask this problem, by default, unhashed tasks are automatically
	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
	 * depend on pid lookups and wants to handle these tasks directly, the
	 * following flag can be used.
	 */
	SCX_OPS_ENQ_EXITING = 1LLU << 2,

	/*
	 * If set, only tasks with policy set to SCHED_EXT are attached to
	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
	 */
	SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,

	SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
			    SCX_OPS_ENQ_LAST |
			    SCX_OPS_ENQ_EXITING |
			    SCX_OPS_SWITCH_PARTIAL,
};

/* argument container for ops.init_task() */
struct scx_init_task_args {
	/*
	 * Set if ops.init_task() is being invoked on the fork path, as opposed
	 * to the scheduler transition path.
	 */
	bool fork;
};

/* argument container for ops.exit_task() */
struct scx_exit_task_args {
	/* Whether the task exited before running on sched_ext. */
	bool cancelled;
};

enum scx_cpu_preempt_reason {
	/* next task is being scheduled by &sched_class_rt */
	SCX_CPU_PREEMPT_RT,
	/* next task is being scheduled by &sched_class_dl */
	SCX_CPU_PREEMPT_DL,
	/* next task is being scheduled by &sched_class_stop */
	SCX_CPU_PREEMPT_STOP,
	/* unknown reason for SCX being preempted */
	SCX_CPU_PREEMPT_UNKNOWN,
};

/*
 * Argument container for ops->cpu_acquire(). Currently empty, but may be
 * expanded in the future.
 */
struct scx_cpu_acquire_args {};

/* argument container for ops->cpu_release() */
struct scx_cpu_release_args {
	/* the reason the CPU was preempted */
	enum scx_cpu_preempt_reason reason;

	/* the task that's going to be scheduled on the CPU */
	struct task_struct *task;
};

/*
 * Informational context provided to dump operations.
 */
struct scx_dump_ctx {
	enum scx_exit_kind	kind;
	s64			exit_code;
	const char		*reason;
	u64			at_ns;
	u64			at_jiffies;
};

/**
 * struct sched_ext_ops - Operation table for BPF scheduler implementation
 *
 * Userland can implement an arbitrary scheduling policy by implementing and
 * loading operations in this table.
 */
struct sched_ext_ops {
	/**
	 * select_cpu - Pick the target CPU for a task which is being woken up
	 * @p: task being woken up
	 * @prev_cpu: the cpu @p was on before sleeping
	 * @wake_flags: SCX_WAKE_*
	 *
	 * Decision made here isn't final. @p may be moved to any CPU while it
	 * is getting dispatched for execution later. However, as @p is not on
	 * the rq at this point, getting the eventual execution CPU right here
	 * saves a small bit of overhead down the line.
	 *
	 * If an idle CPU is returned, the CPU is kicked and will try to
	 * dispatch. While an explicit custom mechanism can be added,
	 * select_cpu() serves as the default way to wake up idle CPUs.
	 *
	 * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
	 * is dispatched, the ops.enqueue() callback will be skipped. Finally,
	 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
	 * local DSQ of whatever CPU is returned by this callback.
	 */
	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
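
	/*
	 * Example: a minimal BPF-side ops.select_cpu() that leans on the
	 * built-in idle tracking and direct-dispatches when an idle CPU is
	 * found, skipping ops.enqueue() as described above. Sketch only; the
	 * kfunc signatures are assumed from the helpers referenced elsewhere
	 * in this file and may differ:
	 *
	 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *					     &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */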

	/**
	 * enqueue - Enqueue a task on the BPF scheduler
	 * @p: task being enqueued
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
	 * or enqueue on the BPF scheduler. If not directly dispatched, the bpf
	 * scheduler owns @p and if it fails to dispatch @p, the task will
	 * stall.
	 *
	 * If @p was dispatched from ops.select_cpu(), this callback is
	 * skipped.
	 */
	void (*enqueue)(struct task_struct *p, u64 enq_flags);

	/**
	 * dequeue - Remove a task from the BPF scheduler
	 * @p: task being dequeued
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * Remove @p from the BPF scheduler. This is usually called to isolate
	 * the task while updating its scheduling properties (e.g. priority).
	 *
	 * The ext core keeps track of whether the BPF side owns a given task or
	 * not and can gracefully ignore spurious dispatches from BPF side,
	 * which makes it safe to not implement this method. However, depending
	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
	 * scheduling position not being updated across a priority change.
	 */
	void (*dequeue)(struct task_struct *p, u64 deq_flags);

	/**
	 * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs
	 * @cpu: CPU to dispatch tasks for
	 * @prev: previous task being switched out
	 *
	 * Called when a CPU's local dsq is empty. The operation should dispatch
	 * one or more tasks from the BPF scheduler into the DSQs using
	 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
	 * scx_bpf_consume().
	 *
	 * The maximum number of times scx_bpf_dispatch() can be called without
	 * an intervening scx_bpf_consume() is specified by
	 * ops.dispatch_max_batch. See the comments on top of the two functions
	 * for more details.
	 *
	 * When not %NULL, @prev is an SCX task with its slice depleted. If
	 * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
	 * ops.dispatch() returns. To keep executing @prev, return without
	 * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST.
	 */
	void (*dispatch)(s32 cpu, struct task_struct *prev);
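
	/*
	 * Example: a minimal BPF-side ops.dispatch() pairing with the scheme
	 * above - move one task from a user DSQ (a hypothetical FALLBACK_DSQ_ID
	 * created with scx_bpf_create_dsq() at init time) into the local DSQ.
	 * Sketch only; kfunc signatures are assumed:
	 *
	 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
	 *	{
	 *		scx_bpf_consume(FALLBACK_DSQ_ID);
	 *	}
	 *
	 * Returning without dispatching or consuming anything keeps running
	 * @prev if it is still runnable, as noted above.
	 */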

	/**
	 * tick - Periodic tick
	 * @p: task running currently
	 *
	 * This operation is called every 1/HZ seconds on CPUs which are
	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
	 * immediate dispatch cycle on the CPU.
	 */
	void (*tick)(struct task_struct *p);

	/**
	 * runnable - A task is becoming runnable on its associated CPU
	 * @p: task becoming runnable
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * This and the following three functions can be used to track a task's
	 * execution state transitions. A task becomes ->runnable() on a CPU,
	 * and then goes through one or more ->running() and ->stopping() pairs
	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
	 * done running on the CPU.
	 *
	 * @p is becoming runnable on the CPU because it's
	 *
	 * - waking up (%SCX_ENQ_WAKEUP)
	 * - being moved from another CPU
	 * - being restored after temporarily taken off the queue for an
	 *   attribute change.
	 *
	 * This and ->enqueue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be followed by ->enqueue()
	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
	 * task may be ->enqueue()'d without being preceded by this operation
	 * e.g. after exhausting its slice.
	 */
	void (*runnable)(struct task_struct *p, u64 enq_flags);

	/**
	 * running - A task is starting to run on its associated CPU
	 * @p: task starting to run
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 */
	void (*running)(struct task_struct *p);

	/**
	 * stopping - A task is stopping execution
	 * @p: task stopping to run
	 * @runnable: is task @p still runnable?
	 *
	 * See ->runnable() for explanation on the task state notifiers. If
	 * !@runnable, ->quiescent() will be invoked after this operation
	 * returns.
	 */
	void (*stopping)(struct task_struct *p, bool runnable);

	/**
	 * quiescent - A task is becoming not runnable on its associated CPU
	 * @p: task becoming not runnable
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 *
	 * @p is becoming quiescent on the CPU because it's
	 *
	 * - sleeping (%SCX_DEQ_SLEEP)
	 * - being moved to another CPU
	 * - being temporarily taken off the queue for an attribute change
	 *   (%SCX_DEQ_SAVE)
	 *
	 * This and ->dequeue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be preceded by ->dequeue()
	 * e.g. when @p is being dispatched to a remote CPU.
	 */
	void (*quiescent)(struct task_struct *p, u64 deq_flags);
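
	/*
	 * Example: the four notifiers above are commonly used to account CPU
	 * time. A sketch of a BPF scheduler accumulating per-task runtime in a
	 * hypothetical "struct task_ctx" (looked up via a task-local storage
	 * map, not shown here):
	 *
	 *	void BPF_STRUCT_OPS(example_running, struct task_struct *p)
	 *	{
	 *		struct task_ctx *tctx = lookup_task_ctx(p);
	 *
	 *		if (tctx)
	 *			tctx->started_at = bpf_ktime_get_ns();
	 *	}
	 *
	 *	void BPF_STRUCT_OPS(example_stopping, struct task_struct *p,
	 *			    bool runnable)
	 *	{
	 *		struct task_ctx *tctx = lookup_task_ctx(p);
	 *
	 *		if (tctx)
	 *			tctx->runtime += bpf_ktime_get_ns() - tctx->started_at;
	 *	}
	 */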

	/**
	 * yield - Yield CPU
	 * @from: yielding task
	 * @to: optional yield target task
	 *
	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
	 * The BPF scheduler should ensure that other available tasks are
	 * dispatched before the yielding task. Return value is ignored in this
	 * case.
	 *
	 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
	 * scheduler can implement the request, return %true; otherwise, %false.
	 */
	bool (*yield)(struct task_struct *from, struct task_struct *to);

	/**
	 * core_sched_before - Task ordering for core-sched
	 * @a: task A
	 * @b: task B
	 *
	 * Used by core-sched to determine the ordering between two tasks. See
	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
	 * core-sched.
	 *
	 * Both @a and @b are runnable and may or may not currently be queued on
	 * the BPF scheduler. Should return %true if @a should run before @b.
	 * %false if there's no required ordering or @b should run before @a.
	 *
	 * If not specified, the default is ordering them according to when they
	 * became runnable.
	 */
	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);

	/**
	 * set_weight - Set task weight
	 * @p: task to set weight for
	 * @weight: new weight [1..10000]
	 *
	 * Update @p's weight to @weight.
	 */
	void (*set_weight)(struct task_struct *p, u32 weight);

	/**
	 * set_cpumask - Set CPU affinity
	 * @p: task to set CPU affinity for
	 * @cpumask: cpumask of cpus that @p can run on
	 *
	 * Update @p's CPU affinity to @cpumask.
	 */
	void (*set_cpumask)(struct task_struct *p,
			    const struct cpumask *cpumask);

	/**
	 * update_idle - Update the idle state of a CPU
	 * @cpu: CPU to update the idle state for
	 * @idle: whether entering or exiting the idle state
	 *
	 * This operation is called when @cpu enters or leaves the idle
	 * state. By default, implementing this operation disables the built-in
	 * idle CPU tracking and the following helpers become unavailable:
	 *
	 * - scx_bpf_select_cpu_dfl()
	 * - scx_bpf_test_and_clear_cpu_idle()
	 * - scx_bpf_pick_idle_cpu()
	 *
	 * The user also must implement ops.select_cpu() as the default
	 * implementation relies on scx_bpf_select_cpu_dfl().
	 *
	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
	 * tracking.
	 */
	void (*update_idle)(s32 cpu, bool idle);
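
	/*
	 * Example: a BPF scheduler that keeps its own idle CPU bookkeeping
	 * while retaining the built-in tracking would pair this callback with
	 * the %SCX_OPS_KEEP_BUILTIN_IDLE flag. A sketch, assuming hypothetical
	 * helpers backed by a scheduler-private cpumask:
	 *
	 *	void BPF_STRUCT_OPS(example_update_idle, s32 cpu, bool idle)
	 *	{
	 *		if (idle)
	 *			set_idle_cpu(cpu);
	 *		else
	 *			clear_idle_cpu(cpu);
	 *	}
	 */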

	/**
	 * cpu_acquire - A CPU is becoming available to the BPF scheduler
	 * @cpu: The CPU being acquired by the BPF scheduler.
	 * @args: Acquire arguments, see the struct definition.
	 *
	 * A CPU that was previously released from the BPF scheduler is now once
	 * again under its control.
	 */
	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);

	/**
	 * cpu_release - A CPU is taken away from the BPF scheduler
	 * @cpu: The CPU being released by the BPF scheduler.
	 * @args: Release arguments, see the struct definition.
	 *
	 * The specified CPU is no longer under the control of the BPF
	 * scheduler. This could be because it was preempted by a higher
	 * priority sched_class, though there may be other reasons as well. The
	 * caller should consult @args->reason to determine the cause.
	 */
	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);

	/**
	 * init_task - Initialize a task to run in a BPF scheduler
	 * @p: task to initialize for BPF scheduling
	 * @args: init arguments, see the struct definition
	 *
	 * Either we're loading a BPF scheduler or a new task is being forked.
	 * Initialize @p for BPF scheduling. This operation may block and can
	 * be used for allocations, and is called exactly once for a task.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During a fork, it
	 * will abort that specific fork.
	 */
	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);

	/**
	 * exit_task - Exit a previously-running task from the system
	 * @p: task to exit
	 *
	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
	 * necessary cleanup for @p.
	 */
	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);

	/**
	 * enable - Enable BPF scheduling for a task
	 * @p: task to enable BPF scheduling for
	 *
	 * Enable @p for BPF scheduling. enable() is called on @p any time it
	 * enters SCX, and is always paired with a matching disable().
	 */
	void (*enable)(struct task_struct *p);

	/**
	 * disable - Disable BPF scheduling for a task
	 * @p: task to disable BPF scheduling for
	 *
	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
	 * Disable BPF scheduling for @p. A disable() call is always matched
	 * with a prior enable() call.
	 */
	void (*disable)(struct task_struct *p);

	/**
	 * dump - Dump BPF scheduler state on error
	 * @ctx: debug dump context
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
	 */
	void (*dump)(struct scx_dump_ctx *ctx);

	/**
	 * dump_cpu - Dump BPF scheduler state for a CPU on error
	 * @ctx: debug dump context
	 * @cpu: CPU to generate debug dump for
	 * @idle: @cpu is currently idle without any runnable tasks
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @cpu. If @idle is %true and this operation doesn't produce any
	 * output, @cpu is skipped for dump.
	 */
	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);

	/**
	 * dump_task - Dump BPF scheduler state for a runnable task on error
	 * @ctx: debug dump context
	 * @p: runnable task to generate debug dump for
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @p.
	 */
	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);

	/*
	 * All online ops must come before ops.cpu_online().
	 */

	/**
	 * cpu_online - A CPU became online
	 * @cpu: CPU which just came up
	 *
	 * @cpu just came online. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
	 */
	void (*cpu_online)(s32 cpu);

	/**
	 * cpu_offline - A CPU is going offline
	 * @cpu: CPU which is going offline
	 *
	 * @cpu is going offline. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
	 */
	void (*cpu_offline)(s32 cpu);

	/*
	 * All CPU hotplug ops must come before ops.init().
	 */

	/**
	 * init - Initialize the BPF scheduler
	 */
	s32 (*init)(void);

	/**
	 * exit - Clean up after the BPF scheduler
	 * @info: Exit info
	 */
	void (*exit)(struct scx_exit_info *info);

	/**
	 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
	 */
	u32 dispatch_max_batch;

	/**
	 * flags - %SCX_OPS_* flags
	 */
	u64 flags;

	/**
	 * timeout_ms - The maximum amount of time, in milliseconds, that a
	 * runnable task should be able to wait before being scheduled. The
	 * maximum timeout may not exceed the default timeout of 30 seconds.
	 *
	 * Defaults to the maximum allowed timeout value of 30 seconds.
	 */
	u32 timeout_ms;

	/**
	 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
	 * value of 32768 is used.
	 */
	u32 exit_dump_len;

	/**
	 * hotplug_seq - A sequence number that may be set by the scheduler to
	 * detect when a hotplug event has occurred during the loading process.
	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
	 * load if the sequence number does not match @scx_hotplug_seq on the
	 * enable path.
	 */
	u64 hotplug_seq;

	/**
	 * name - BPF scheduler's name
	 *
	 * Must be a non-zero valid BPF object name including only isalnum(),
	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
	 * BPF scheduler is enabled.
	 */
	char name[SCX_OPS_NAME_LEN];
};
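
/*
 * Example: the BPF side of a scheduler fills in one instance of the table
 * above. A minimal sketch (the struct_ops section name and the callback names
 * are illustrative assumptions, not part of this file):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.select_cpu		= (void *)example_select_cpu,
 *		.enqueue		= (void *)example_enqueue,
 *		.dispatch		= (void *)example_dispatch,
 *		.init			= (void *)example_init,
 *		.exit			= (void *)example_exit,
 *		.flags			= SCX_OPS_ENQ_LAST | SCX_OPS_SWITCH_PARTIAL,
 *		.timeout_ms		= 5000,
 *		.name			= "example",
 *	};
 */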

enum scx_opi {
	SCX_OPI_BEGIN			= 0,
	SCX_OPI_NORMAL_BEGIN		= 0,
	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
	SCX_OPI_END			= SCX_OP_IDX(init),
};

enum scx_wake_flags {
	/* expose select WF_* flags as enums */
	SCX_WAKE_FORK		= WF_FORK,
	SCX_WAKE_TTWU		= WF_TTWU,
	SCX_WAKE_SYNC		= WF_SYNC,
};

enum scx_enq_flags {
	/* expose select ENQUEUE_* flags as enums */
	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
	SCX_ENQ_HEAD		= ENQUEUE_HEAD,

	/* high 32bits are SCX specific */

	/*
	 * Set the following to trigger preemption when calling
	 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
	 * current task is cleared to zero and the CPU is kicked into the
	 * scheduling path. Implies %SCX_ENQ_HEAD.
	 */
	SCX_ENQ_PREEMPT		= 1LLU << 32,

	/*
	 * The task being enqueued was previously enqueued on the current CPU's
	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
	 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
	 * invoked in a ->cpu_release() callback, and the task is again
	 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
	 * task will not be scheduled on the CPU until at least the next invocation
	 * of the ->cpu_acquire() callback.
	 */
	SCX_ENQ_REENQ		= 1LLU << 40,

	/*
	 * The task being enqueued is the only task available for the cpu. By
	 * default, ext core keeps executing such tasks but when
	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
	 * %SCX_ENQ_LAST flag set.
	 *
	 * If the BPF scheduler wants to continue executing the task,
	 * ops.enqueue() should dispatch the task to %SCX_DSQ_LOCAL immediately.
	 * If the task gets queued on a different dsq or the BPF side, the BPF
	 * scheduler is responsible for triggering a follow-up scheduling event.
	 * Otherwise, execution may stall.
	 */
	SCX_ENQ_LAST		= 1LLU << 41,

	/* high 8 bits are internal */
	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,

	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
};

enum scx_deq_flags {
	/* expose select DEQUEUE_* flags as enums */
	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,

	/* high 32bits are SCX specific */

	/*
	 * The generic core-sched layer decided to execute the task even though
	 * it hasn't been dispatched yet. Dequeue from the BPF side.
	 */
	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
};

enum scx_pick_idle_cpu_flags {
	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
};

enum scx_kick_flags {
	/*
	 * Kick the target CPU if idle. Guarantees that the target CPU goes
	 * through at least one full scheduling cycle before going idle. If the
	 * target CPU can be determined to be currently not idle and going to go
	 * through a scheduling cycle before going idle, noop.
	 */
	SCX_KICK_IDLE		= 1LLU << 0,

	/*
	 * Preempt the current task and execute the dispatch path. If the
	 * current task of the target CPU is an SCX task, its ->scx.slice is
	 * cleared to zero before the scheduling path is invoked so that the
	 * task expires and the dispatch path is invoked.
	 */
	SCX_KICK_PREEMPT	= 1LLU << 1,

	/*
	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
	 * return after the target CPU finishes picking the next task.
	 */
	SCX_KICK_WAIT		= 1LLU << 2,
};
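
/*
 * Example: the flags above are passed to scx_bpf_kick_cpu() (declared further
 * down in this file). A BPF scheduler that queued work for a possibly-idle CPU
 * might do, as a sketch:
 *
 *	scx_bpf_dispatch(p, target_dsq_id, SCX_SLICE_DFL, 0);
 *	scx_bpf_kick_cpu(target_cpu, SCX_KICK_IDLE);
 *
 * where target_dsq_id and target_cpu are scheduler-specific choices, and
 * %SCX_KICK_PREEMPT would be used instead to force the remote CPU through the
 * dispatch path immediately.
 */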

enum scx_ops_enable_state {
	SCX_OPS_PREPPING,
	SCX_OPS_ENABLING,
	SCX_OPS_ENABLED,
	SCX_OPS_DISABLING,
	SCX_OPS_DISABLED,
};

static const char *scx_ops_enable_state_str[] = {
	[SCX_OPS_PREPPING]	= "prepping",
	[SCX_OPS_ENABLING]	= "enabling",
	[SCX_OPS_ENABLED]	= "enabled",
	[SCX_OPS_DISABLING]	= "disabling",
	[SCX_OPS_DISABLED]	= "disabled",
};

/*
 * sched_ext_entity->ops_state
 *
 * Used to track the task ownership between the SCX core and the BPF scheduler.
 * State transitions look as follows:
 *
 * NONE -> QUEUEING -> QUEUED -> DISPATCHING
 *   ^              |                 |
 *   |              v                 v
 *   \-------------------------------/
 *
 * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
 * sites for explanations on the conditions being waited upon and why they are
 * safe. Transitions out of them into NONE or QUEUED must store_release and the
 * waiters should load_acquire.
 *
 * Tracking scx_ops_state enables sched_ext core to reliably determine whether
 * any given task can be dispatched by the BPF scheduler at all times and thus
 * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
 * to try to dispatch any task anytime regardless of its state as the SCX core
 * can safely reject invalid dispatches.
 */
enum scx_ops_state {
	SCX_OPSS_NONE,		/* owned by the SCX core */
	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */

	/*
	 * QSEQ brands each QUEUED instance so that, when dispatch races
	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
	 * on the task being dispatched.
	 *
	 * As some 32bit archs can't do 64bit store_release/load_acquire,
	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
	 * and runs with IRQ disabled. 30 bits should be sufficient.
	 */
	SCX_OPSS_QSEQ_SHIFT	= 2,
};

/* Use macros to ensure that the type is unsigned long for the masks */
#define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
#define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
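
/*
 * Example: decoding p->scx.ops_state with the masks above. The low bits carry
 * the scx_ops_state value and the remaining bits the QSEQ brand, so a sketch
 * of a hypothetical debug helper would be:
 *
 *	static void example_decode_ops_state(struct task_struct *p)
 *	{
 *		unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *		unsigned long state = opss & SCX_OPSS_STATE_MASK;
 *		unsigned long qseq = (opss & SCX_OPSS_QSEQ_MASK) >> SCX_OPSS_QSEQ_SHIFT;
 *
 *		pr_debug("state=%lu qseq=%lu\n", state, qseq);
 *	}
 */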

/*
 * During exit, a task may schedule after losing its PIDs. When disabling the
 * BPF scheduler, we need to be able to iterate tasks in every state to
 * guarantee system safety. Maintain a dedicated task list which contains every
 * task between its fork and eventual free.
 */
static DEFINE_SPINLOCK(scx_tasks_lock);
static LIST_HEAD(scx_tasks);

/* ops enable/disable */
static struct kthread_worker *scx_ops_helper;
static DEFINE_MUTEX(scx_ops_enable_mutex);
DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
static bool scx_switching_all;
DEFINE_STATIC_KEY_FALSE(__scx_switched_all);

static struct sched_ext_ops scx_ops;
static bool scx_warned_zero_slice;

static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);

struct static_key_false scx_has_op[SCX_OPI_END] =
	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };

static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
static struct scx_exit_info *scx_exit_info;

static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);

/*
 * The maximum amount of time in jiffies that a task may be runnable without
 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
 * scx_ops_error().
 */
static unsigned long scx_watchdog_timeout;

/*
 * The last time the delayed work was run. This delayed work relies on
 * ksoftirqd being able to run to service timer interrupts, so it's possible
 * that this work itself could get wedged. To account for this, we check that
 * it's not stalled in the timer tick, and trigger an error if it is.
 */
static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;

static struct delayed_work scx_watchdog_work;

/* idle tracking */
#ifdef CONFIG_SMP
#ifdef CONFIG_CPUMASK_OFFSTACK
#define CL_ALIGNED_IF_ONSTACK
#else
#define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
#endif

static struct {
	cpumask_var_t cpu;
	cpumask_var_t smt;
} idle_masks CL_ALIGNED_IF_ONSTACK;

#endif	/* CONFIG_SMP */

/* for %SCX_KICK_WAIT */
static unsigned long __percpu *scx_kick_cpus_pnt_seqs;

/*
 * Direct dispatch marker.
 *
 * Non-NULL values are used for direct dispatch from enqueue path. A valid
 * pointer points to the task currently being enqueued. An ERR_PTR value is used
 * to indicate that direct dispatch has already happened.
 */
static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);

/* dispatch queues */
static struct scx_dispatch_q __cacheline_aligned_in_smp scx_dsq_global;

static const struct rhashtable_params dsq_hash_params = {
	.key_len		= 8,
	.key_offset		= offsetof(struct scx_dispatch_q, id),
	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
};

static struct rhashtable dsq_hash;
static LLIST_HEAD(dsqs_to_free);

/* dispatch buf */
struct scx_dsp_buf_ent {
	struct task_struct	*task;
	unsigned long		qseq;
	u64			dsq_id;
	u64			enq_flags;
};

static u32 scx_dsp_max_batch;

struct scx_dsp_ctx {
	struct rq		*rq;
	struct rq_flags		*rf;
	u32			cursor;
	u32			nr_tasks;
	struct scx_dsp_buf_ent	buf[];
};

static struct scx_dsp_ctx __percpu *scx_dsp_ctx;

/* string formatting from BPF */
struct scx_bstr_buf {
	u64			data[MAX_BPRINTF_VARARGS];
	char			line[SCX_EXIT_MSG_LEN];
};

static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
static struct scx_bstr_buf scx_exit_bstr_buf;

/* ops debug dump */
struct scx_dump_data {
	s32			cpu;
	bool			first;
	s32			cursor;
	struct seq_buf		*s;
	const char		*prefix;
	struct scx_bstr_buf	buf;
};

struct scx_dump_data scx_dump_data = {
	.cpu			= -1,
};

/* /sys/kernel/sched_ext interface */
static struct kset *scx_kset;
static struct kobject *scx_root_kobj;

#define CREATE_TRACE_POINTS
#include <trace/events/sched_ext.h>

static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
					     s64 exit_code,
					     const char *fmt, ...);

#define scx_ops_error_kind(err, fmt, args...)					\
	scx_ops_exit_kind((err), 0, fmt, ##args)

#define scx_ops_exit(code, fmt, args...)					\
	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)

#define scx_ops_error(fmt, args...)						\
	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)

#define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])

static long jiffies_delta_msecs(unsigned long at, unsigned long now)
{
	if (time_after(at, now))
		return jiffies_to_msecs(at - now);
	else
		return -(long)jiffies_to_msecs(now - at);
}

/* if the highest set bit is N, return a mask with bits [N+1, 31] set */
static u32 higher_bits(u32 flags)
{
	return ~((1 << fls(flags)) - 1);
}

/* return the mask with only the highest bit set */
static u32 highest_bit(u32 flags)
{
	int bit = fls(flags);
	return ((u64)1 << bit) >> 1;
}

static bool u32_before(u32 a, u32 b)
{
	return (s32)(a - b) < 0;
}
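
/*
 * Worked example for the two helpers above: for flags == 0x14 (bits 2 and 4
 * set), fls() returns 5, so higher_bits() yields 0xffffffe0 (bits 5..31) and
 * highest_bit() yields 0x10 (bit 4 only). These are used below to enforce that
 * scx_kf_mask nesting only proceeds toward higher bits.
 */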

/*
 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
 * whether it's running from an allowed context.
 *
 * @mask is constant, always inline to cull the mask calculations.
 */
static __always_inline void scx_kf_allow(u32 mask)
{
	/* nesting is allowed only in increasing scx_kf_mask order */
	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
		  current->scx.kf_mask, mask);
	current->scx.kf_mask |= mask;
	barrier();
}

static void scx_kf_disallow(u32 mask)
{
	barrier();
	current->scx.kf_mask &= ~mask;
}

#define SCX_CALL_OP(mask, op, args...)						\
do {										\
	if (mask) {								\
		scx_kf_allow(mask);						\
		scx_ops.op(args);						\
		scx_kf_disallow(mask);						\
	} else {								\
		scx_ops.op(args);						\
	}									\
} while (0)

#define SCX_CALL_OP_RET(mask, op, args...)					\
({										\
	__typeof__(scx_ops.op(args)) __ret;					\
	if (mask) {								\
		scx_kf_allow(mask);						\
		__ret = scx_ops.op(args);					\
		scx_kf_disallow(mask);						\
	} else {								\
		__ret = scx_ops.op(args);					\
	}									\
	__ret;									\
})

/*
 * Some kfuncs are allowed only on the tasks that are subjects of the
 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
 * restrictions, the following SCX_CALL_OP_*() variants should be used when
 * invoking scx_ops operations that take task arguments. These can only be used
 * for non-nesting operations due to the way the tasks are tracked.
 *
 * kfuncs which can only operate on such tasks can in turn use
 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
 * the specific task.
 */
#define SCX_CALL_OP_TASK(mask, op, task, args...)				\
do {										\
	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
	current->scx.kf_tasks[0] = task;					\
	SCX_CALL_OP(mask, op, task, ##args);					\
	current->scx.kf_tasks[0] = NULL;					\
} while (0)

#define SCX_CALL_OP_TASK_RET(mask, op, task, args...)				\
({										\
	__typeof__(scx_ops.op(task, ##args)) __ret;				\
	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
	current->scx.kf_tasks[0] = task;					\
	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);			\
	current->scx.kf_tasks[0] = NULL;					\
	__ret;									\
})

#define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)		\
({										\
	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
	current->scx.kf_tasks[0] = task0;					\
	current->scx.kf_tasks[1] = task1;					\
	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);		\
	current->scx.kf_tasks[0] = NULL;					\
	current->scx.kf_tasks[1] = NULL;					\
	__ret;									\
})

/* @mask is constant, always inline to cull unnecessary branches */
static __always_inline bool scx_kf_allowed(u32 mask)
{
	if (unlikely(!(current->scx.kf_mask & mask))) {
		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
			      mask, current->scx.kf_mask);
		return false;
	}

	if (unlikely((mask & SCX_KF_SLEEPABLE) && in_interrupt())) {
		scx_ops_error("sleepable kfunc called from non-sleepable context");
		return false;
	}

	/*
	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
	 * DISPATCH must not be called if we're running DEQUEUE which is nested
	 * inside ops.dispatch(). We don't need to check the SCX_KF_SLEEPABLE
	 * boundary thanks to the above in_interrupt() check.
	 */
	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
		scx_ops_error("cpu_release kfunc called from a nested operation");
		return false;
	}

	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
		scx_ops_error("dispatch kfunc called from a nested operation");
		return false;
	}

	return true;
}

/* see SCX_CALL_OP_TASK() */
static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
							struct task_struct *p)
{
	if (!scx_kf_allowed(mask))
		return false;

	if (unlikely((p != current->scx.kf_tasks[0] &&
		      p != current->scx.kf_tasks[1]))) {
		scx_ops_error("called on a task not being operated on");
		return false;
	}

	return true;
}

/**
 * nldsq_next_task - Iterate to the next task in a non-local DSQ
 * @dsq: user dsq being iterated
 * @cur: current position, %NULL to start iteration
 * @rev: walk backwards
 *
 * Returns %NULL when iteration is finished.
 */
static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
					   struct task_struct *cur, bool rev)
{
	struct list_head *list_node;
	struct scx_dsq_list_node *dsq_lnode;

	lockdep_assert_held(&dsq->lock);

	if (cur)
		list_node = &cur->scx.dsq_list.node;
	else
		list_node = &dsq->list;

	/* find the next task, need to skip BPF iteration cursors */
	do {
		if (rev)
			list_node = list_node->prev;
		else
			list_node = list_node->next;

		if (list_node == &dsq->list)
			return NULL;

		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
					 node);
	} while (dsq_lnode->is_bpf_iter_cursor);

	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
}

#define nldsq_for_each_task(p, dsq)						\
	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
	     (p) = nldsq_next_task((dsq), (p), false))


/*
 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
 * dispatch order. BPF-visible iterator is opaque and larger to allow future
 * changes without breaking backward compatibility. Can be used with
 * bpf_for_each(). See bpf_iter_scx_dsq_*().
 */
enum scx_dsq_iter_flags {
	/* iterate in the reverse dispatch order */
	SCX_DSQ_ITER_REV		= 1U << 0,

	__SCX_DSQ_ITER_ALL_FLAGS	= SCX_DSQ_ITER_REV,
};

struct bpf_iter_scx_dsq_kern {
	struct scx_dsq_list_node	cursor;
	struct scx_dispatch_q		*dsq;
	u32				dsq_seq;
	u32				flags;
} __attribute__((aligned(8)));

struct bpf_iter_scx_dsq {
	u64				__opaque[6];
} __attribute__((aligned(8)));
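
/*
 * Example: on the BPF side, the opaque iterator above is driven through
 * bpf_for_each() as noted in the comment. A sketch of walking a user DSQ in
 * dispatch order (SOME_DSQ_ID being a scheduler-defined DSQ id):
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SOME_DSQ_ID, 0) {
 *		bpf_printk("queued: %s[%d]", p->comm, p->pid);
 *	}
 *
 * Passing %SCX_DSQ_ITER_REV as the last argument would walk the DSQ in reverse
 * dispatch order instead.
 */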

/*
 * SCX task iterator.
 */
struct scx_task_iter {
	struct sched_ext_entity		cursor;
	struct task_struct		*locked;
	struct rq			*rq;
	struct rq_flags			rf;
};

/**
 * scx_task_iter_init - Initialize a task iterator
 * @iter: iterator to init
 *
 * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
 * @iter must eventually be exited with scx_task_iter_exit().
 *
 * scx_tasks_lock may be released between this and the first next() call or
 * between any two next() calls. If scx_tasks_lock is released between two
 * next() calls, the caller is responsible for ensuring that the task being
 * iterated remains accessible either through RCU read lock or obtaining a
 * reference count.
 *
 * All tasks which existed when the iteration started are guaranteed to be
 * visited as long as they still exist.
 */
static void scx_task_iter_init(struct scx_task_iter *iter)
{
	lockdep_assert_held(&scx_tasks_lock);

	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
	list_add(&iter->cursor.tasks_node, &scx_tasks);
	iter->locked = NULL;
}

/**
 * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
 * @iter: iterator to unlock rq for
 *
 * If @iter is in the middle of a locked iteration, it may be locking the rq of
 * the task currently being visited. Unlock the rq if so. This function can be
 * safely called anytime during an iteration.
 *
 * Returns %true if the rq @iter was locking is unlocked. %false if @iter was
 * not locking an rq.
 */
static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
{
	if (iter->locked) {
		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
		iter->locked = NULL;
		return true;
	} else {
		return false;
	}
}

/**
 * scx_task_iter_exit - Exit a task iterator
 * @iter: iterator to exit
 *
 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
 * If the iterator holds a task's rq lock, that rq lock is released. See
 * scx_task_iter_init() for details.
 */
static void scx_task_iter_exit(struct scx_task_iter *iter)
{
	lockdep_assert_held(&scx_tasks_lock);

	scx_task_iter_rq_unlock(iter);
	list_del_init(&iter->cursor.tasks_node);
}

/**
 * scx_task_iter_next - Next task
 * @iter: iterator to walk
 *
 * Visit the next task. See scx_task_iter_init() for details.
 */
static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
{
	struct list_head *cursor = &iter->cursor.tasks_node;
	struct sched_ext_entity *pos;

	lockdep_assert_held(&scx_tasks_lock);

	list_for_each_entry(pos, cursor, tasks_node) {
		if (&pos->tasks_node == &scx_tasks)
			return NULL;
		if (!(pos->flags & SCX_TASK_CURSOR)) {
			list_move(cursor, &pos->tasks_node);
			return container_of(pos, struct task_struct, scx);
		}
	}

	/* can't happen, should always terminate at scx_tasks above */
	BUG();
}

/**
 * scx_task_iter_next_locked - Next non-idle task with its rq locked
 * @iter: iterator to walk
 * @include_dead: Whether we should include dead tasks in the iteration
 *
 * Visit the non-idle task with its rq lock held. Allows callers to specify
 * whether they would like to filter out dead tasks. See scx_task_iter_init()
 * for details.
 */
static struct task_struct *
scx_task_iter_next_locked(struct scx_task_iter *iter, bool include_dead)
{
	struct task_struct *p;
retry:
	scx_task_iter_rq_unlock(iter);

	while ((p = scx_task_iter_next(iter))) {
		/*
		 * is_idle_task() tests %PF_IDLE which may not be set for CPUs
		 * which haven't yet been onlined. Test sched_class directly.
		 */
		if (p->sched_class != &idle_sched_class)
			break;
	}
	if (!p)
		return NULL;

	iter->rq = task_rq_lock(p, &iter->rf);
	iter->locked = p;

	/*
	 * If we see %TASK_DEAD, @p already disabled preemption, is about to do
	 * the final __schedule(), won't ever need to be scheduled again and can
	 * thus be safely ignored. If we don't see %TASK_DEAD, @p can't enter
	 * the final __schedule() while we're locking its rq and thus will stay
	 * alive until the rq is unlocked.
	 */
	if (!include_dead && READ_ONCE(p->__state) == TASK_DEAD)
		goto retry;

	return p;
}

static enum scx_ops_enable_state scx_ops_enable_state(void)
{
	return atomic_read(&scx_ops_enable_state_var);
}

static enum scx_ops_enable_state
scx_ops_set_enable_state(enum scx_ops_enable_state to)
{
	return atomic_xchg(&scx_ops_enable_state_var, to);
}

static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
					enum scx_ops_enable_state from)
{
	int from_v = from;

	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
}

static bool scx_ops_bypassing(void)
{
	return unlikely(atomic_read(&scx_ops_bypass_depth));
}

/**
 * wait_ops_state - Busy-wait the specified ops state to end
 * @p: target task
 * @opss: state to wait the end of
 *
 * Busy-wait for @p to transition out of @opss. This can only be used when the
 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
 * has load_acquire semantics to ensure that the caller can see the updates made
 * in the enqueueing and dispatching paths.
 */
static void wait_ops_state(struct task_struct *p, unsigned long opss)
{
	do {
		cpu_relax();
	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
}

/**
 * ops_cpu_valid - Verify a cpu number
 * @cpu: cpu number which came from a BPF ops
 * @where: extra information reported on error
 *
 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
 * Verify that it is in range and one of the possible cpus. If invalid, trigger
 * an ops error.
 */
static bool ops_cpu_valid(s32 cpu, const char *where)
{
	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
		return true;
	} else {
		scx_ops_error("invalid CPU %d%s%s", cpu,
			      where ? " " : "", where ?: "");
		return false;
	}
}

/**
 * ops_sanitize_err - Sanitize a -errno value
 * @ops_name: operation to blame on failure
 * @err: -errno value to sanitize
 *
 * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
 * cause misbehaviors. For example, a large negative return from
 * ops.init_task() triggers an oops when passed up the call chain because the
 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
 * handled as a pointer.
 */
static int ops_sanitize_err(const char *ops_name, s32 err)
{
	if (err < 0 && err >= -MAX_ERRNO)
		return err;

	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
	return -EPROTO;
}

/**
 * touch_core_sched - Update timestamp used for core-sched task ordering
 * @rq: rq to read clock from, must be locked
 * @p: task to update the timestamp for
 *
 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
 * exhaustion).
 */
static void touch_core_sched(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SCHED_CORE
	/*
	 * It's okay to update the timestamp spuriously. Use
	 * sched_core_disabled() which is cheaper than enabled().
	 */
	if (!sched_core_disabled())
		p->scx.core_sched_at = rq_clock_task(rq);
#endif
}

/**
 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
 * @rq: rq to read clock from, must be locked
 * @p: task being dispatched
 *
 * If the BPF scheduler implements custom core-sched ordering via
 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
 * ordering within each local DSQ. This function is called from dispatch paths
 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
 */
static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

#ifdef CONFIG_SCHED_CORE
	if (SCX_HAS_OP(core_sched_before))
		touch_core_sched(rq, p);
#endif
}

static void update_curr_scx(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 now = rq_clock_task(rq);
	u64 delta_exec;

	if (time_before_eq64(now, curr->se.exec_start))
		return;

	delta_exec = now - curr->se.exec_start;
	curr->se.exec_start = now;
	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);
	cgroup_account_cputime(curr, delta_exec);

	if (curr->scx.slice != SCX_SLICE_INF) {
		curr->scx.slice -= min(curr->scx.slice, delta_exec);
		if (!curr->scx.slice)
			touch_core_sched(rq, curr);
	}
}

static bool scx_dsq_priq_less(struct rb_node *node_a,
			      const struct rb_node *node_b)
{
	const struct task_struct *a =
		container_of(node_a, struct task_struct, scx.dsq_priq);
	const struct task_struct *b =
		container_of(node_b, struct task_struct, scx.dsq_priq);

	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
}

static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
{
	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
	WRITE_ONCE(dsq->nr, dsq->nr + delta);
}

static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
			     u64 enq_flags)
{
	bool is_local = dsq->id == SCX_DSQ_LOCAL;

	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
		     !RB_EMPTY_NODE(&p->scx.dsq_priq));

	if (!is_local) {
		raw_spin_lock(&dsq->lock);
		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
			scx_ops_error("attempting to dispatch to a destroyed dsq");
destroyed dsq"); 1461 /* fall back to the global dsq */ 1462 raw_spin_unlock(&dsq->lock); 1463 dsq = &scx_dsq_global; 1464 raw_spin_lock(&dsq->lock); 1465 } 1466 } 1467 1468 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) && 1469 (enq_flags & SCX_ENQ_DSQ_PRIQ))) { 1470 /* 1471 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from 1472 * their FIFO queues. To avoid confusion and accidentally 1473 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we 1474 * disallow any internal DSQ from doing vtime ordering of 1475 * tasks. 1476 */ 1477 scx_ops_error("cannot use vtime ordering for built-in DSQs"); 1478 enq_flags &= ~SCX_ENQ_DSQ_PRIQ; 1479 } 1480 1481 if (enq_flags & SCX_ENQ_DSQ_PRIQ) { 1482 struct rb_node *rbp; 1483 1484 /* 1485 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are 1486 * linked to both the rbtree and list on PRIQs, this can only be 1487 * tested easily when adding the first task. 1488 */ 1489 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) && 1490 nldsq_next_task(dsq, NULL, false))) 1491 scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks", 1492 dsq->id); 1493 1494 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; 1495 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); 1496 1497 /* 1498 * Find the previous task and insert after it on the list so 1499 * that @dsq->list is vtime ordered. 1500 */ 1501 rbp = rb_prev(&p->scx.dsq_priq); 1502 if (rbp) { 1503 struct task_struct *prev = 1504 container_of(rbp, struct task_struct, 1505 scx.dsq_priq); 1506 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); 1507 } else { 1508 list_add(&p->scx.dsq_list.node, &dsq->list); 1509 } 1510 } else { 1511 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */ 1512 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq))) 1513 scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks", 1514 dsq->id); 1515 1516 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) 1517 list_add(&p->scx.dsq_list.node, &dsq->list); 1518 else 1519 list_add_tail(&p->scx.dsq_list.node, &dsq->list); 1520 } 1521 1522 /* seq records the order tasks are queued, used by BPF DSQ iterator */ 1523 dsq->seq++; 1524 p->scx.dsq_seq = dsq->seq; 1525 1526 dsq_mod_nr(dsq, 1); 1527 p->scx.dsq = dsq; 1528 1529 /* 1530 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the 1531 * direct dispatch path, but we clear them here because the direct 1532 * dispatch verdict may be overridden on the enqueue path during e.g. 1533 * bypass. 1534 */ 1535 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; 1536 p->scx.ddsp_enq_flags = 0; 1537 1538 /* 1539 * We're transitioning out of QUEUEING or DISPATCHING. store_release to 1540 * match waiters' load_acquire. 

static void task_unlink_from_dsq(struct task_struct *p,
				 struct scx_dispatch_q *dsq)
{
	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
		rb_erase(&p->scx.dsq_priq, &dsq->priq);
		RB_CLEAR_NODE(&p->scx.dsq_priq);
		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
	}

	list_del_init(&p->scx.dsq_list.node);
}

static bool task_linked_on_dsq(struct task_struct *p)
{
	return !list_empty(&p->scx.dsq_list.node);
}

static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
{
	struct scx_dispatch_q *dsq = p->scx.dsq;
	bool is_local = dsq == &rq->scx.local_dsq;

	if (!dsq) {
		WARN_ON_ONCE(task_linked_on_dsq(p));
		/*
		 * When dispatching directly from the BPF scheduler to a local
		 * DSQ, the task isn't associated with any DSQ but
		 * @p->scx.holding_cpu may be set under the protection of
		 * %SCX_OPSS_DISPATCHING.
		 */
		if (p->scx.holding_cpu >= 0)
			p->scx.holding_cpu = -1;
		return;
	}

	if (!is_local)
		raw_spin_lock(&dsq->lock);

	/*
	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
	 * change underneath us.
	 */
	if (p->scx.holding_cpu < 0) {
		/* @p must still be on @dsq, dequeue */
		WARN_ON_ONCE(!task_linked_on_dsq(p));
		task_unlink_from_dsq(p, dsq);
		dsq_mod_nr(dsq, -1);
	} else {
		/*
		 * We're racing against dispatch_to_local_dsq() which already
		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
		 * the race.
		 */
		WARN_ON_ONCE(task_linked_on_dsq(p));
		p->scx.holding_cpu = -1;
	}
	p->scx.dsq = NULL;

	if (!is_local)
		raw_spin_unlock(&dsq->lock);
}

static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
{
	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
}

static struct scx_dispatch_q *find_non_local_dsq(u64 dsq_id)
{
	lockdep_assert(rcu_read_lock_any_held());

	if (dsq_id == SCX_DSQ_GLOBAL)
		return &scx_dsq_global;
	else
		return find_user_dsq(dsq_id);
}

static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
						    struct task_struct *p)
{
	struct scx_dispatch_q *dsq;

	if (dsq_id == SCX_DSQ_LOCAL)
		return &rq->scx.local_dsq;

	dsq = find_non_local_dsq(dsq_id);
	if (unlikely(!dsq)) {
		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
			      dsq_id, p->comm, p->pid);
		return &scx_dsq_global;
	}

	return dsq;
}

static void mark_direct_dispatch(struct task_struct *ddsp_task,
				 struct task_struct *p, u64 dsq_id,
				 u64 enq_flags)
{
	/*
	 * Mark that dispatch already happened from ops.select_cpu() or
	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
	 * which can never match a valid task pointer.
	 */
	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));

	/* @p must match the task on the enqueue path */
	if (unlikely(p != ddsp_task)) {
		if (IS_ERR(ddsp_task))
			scx_ops_error("%s[%d] already direct-dispatched",
				      p->comm, p->pid);
		else
			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
				      ddsp_task->comm, ddsp_task->pid,
				      p->comm, p->pid);
		return;
	}

	/*
	 * %SCX_DSQ_LOCAL_ON is not supported during direct dispatch because
	 * dispatching to the local DSQ of a different CPU requires unlocking
	 * the current rq which isn't allowed in the enqueue path. Use
	 * ops.select_cpu() to be on the target CPU and then %SCX_DSQ_LOCAL.
	 */
	if (unlikely((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON)) {
		scx_ops_error("SCX_DSQ_LOCAL_ON can't be used for direct-dispatch");
		return;
	}

	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
	WARN_ON_ONCE(p->scx.ddsp_enq_flags);

	p->scx.ddsp_dsq_id = dsq_id;
	p->scx.ddsp_enq_flags = enq_flags;
}

static void direct_dispatch(struct task_struct *p, u64 enq_flags)
{
	struct scx_dispatch_q *dsq;

	touch_core_sched_dispatch(task_rq(p), p);

	enq_flags |= (p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
	dsq = find_dsq_for_dispatch(task_rq(p), p->scx.ddsp_dsq_id, p);
	dispatch_enqueue(dsq, p, enq_flags);
}

static bool scx_rq_online(struct rq *rq)
{
	return likely(rq->scx.flags & SCX_RQ_ONLINE);
}

static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
			    int sticky_cpu)
{
	struct task_struct **ddsp_taskp;
	unsigned long qseq;

	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));

	/* rq migration */
	if (sticky_cpu == cpu_of(rq))
		goto local_norefill;

	/*
	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
	 * is offline and are just running the hotplug path. Don't bother the
	 * BPF scheduler.
	 */
	if (!scx_rq_online(rq))
		goto local;

	if (scx_ops_bypassing()) {
		if (enq_flags & SCX_ENQ_LAST)
			goto local;
		else
			goto global;
	}

	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
		goto direct;

	/* see %SCX_OPS_ENQ_EXITING */
	if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
	    unlikely(p->flags & PF_EXITING))
		goto local;

	/* see %SCX_OPS_ENQ_LAST */
	if (!static_branch_unlikely(&scx_ops_enq_last) &&
	    (enq_flags & SCX_ENQ_LAST))
		goto local;

	if (!SCX_HAS_OP(enqueue))
		goto global;

	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;

	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);

	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
	WARN_ON_ONCE(*ddsp_taskp);
	*ddsp_taskp = p;

	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);

	*ddsp_taskp = NULL;
	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
		goto direct;

	/*
	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
	 * dequeue may be waiting. The store_release matches their load_acquire.
	 */
	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
	return;

direct:
	direct_dispatch(p, enq_flags);
	return;

local:
	/*
	 * For task-ordering, slice refill must be treated as implying the end
	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
	 * higher priority it becomes from scx_prio_less()'s POV.
	 */
	touch_core_sched(rq, p);
	p->scx.slice = SCX_SLICE_DFL;
local_norefill:
	dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
	return;

global:
	touch_core_sched(rq, p);	/* see the comment in local: */
	p->scx.slice = SCX_SLICE_DFL;
	dispatch_enqueue(&scx_dsq_global, p, enq_flags);
}

static bool task_runnable(const struct task_struct *p)
{
	return !list_empty(&p->scx.runnable_node);
}

static void set_task_runnable(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);

	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
		p->scx.runnable_at = jiffies;
		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
	}

	/*
	 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
	 * appended to the runnable_list.
	 */
	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
}

static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
{
	list_del_init(&p->scx.runnable_node);
	if (reset_runnable_at)
		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
}

static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
{
	int sticky_cpu = p->scx.sticky_cpu;

	enq_flags |= rq->scx.extra_enq_flags;

	if (sticky_cpu >= 0)
		p->scx.sticky_cpu = -1;

	/*
	 * Restoring a running task will be immediately followed by
	 * set_next_task_scx() which expects the task to not be on the BPF
	 * scheduler as tasks can only start running through local DSQs. Force
	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
1846 */ 1847 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) 1848 sticky_cpu = cpu_of(rq); 1849 1850 if (p->scx.flags & SCX_TASK_QUEUED) { 1851 WARN_ON_ONCE(!task_runnable(p)); 1852 return; 1853 } 1854 1855 set_task_runnable(rq, p); 1856 p->scx.flags |= SCX_TASK_QUEUED; 1857 rq->scx.nr_running++; 1858 add_nr_running(rq, 1); 1859 1860 if (SCX_HAS_OP(runnable)) 1861 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags); 1862 1863 if (enq_flags & SCX_ENQ_WAKEUP) 1864 touch_core_sched(rq, p); 1865 1866 do_enqueue_task(rq, p, enq_flags, sticky_cpu); 1867 } 1868 1869 static void ops_dequeue(struct task_struct *p, u64 deq_flags) 1870 { 1871 unsigned long opss; 1872 1873 /* dequeue is always temporary, don't reset runnable_at */ 1874 clr_task_runnable(p, false); 1875 1876 /* acquire ensures that we see the preceding updates on QUEUED */ 1877 opss = atomic_long_read_acquire(&p->scx.ops_state); 1878 1879 switch (opss & SCX_OPSS_STATE_MASK) { 1880 case SCX_OPSS_NONE: 1881 break; 1882 case SCX_OPSS_QUEUEING: 1883 /* 1884 * QUEUEING is started and finished while holding @p's rq lock. 1885 * As we're holding the rq lock now, we shouldn't see QUEUEING. 1886 */ 1887 BUG(); 1888 case SCX_OPSS_QUEUED: 1889 if (SCX_HAS_OP(dequeue)) 1890 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags); 1891 1892 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 1893 SCX_OPSS_NONE)) 1894 break; 1895 fallthrough; 1896 case SCX_OPSS_DISPATCHING: 1897 /* 1898 * If @p is being dispatched from the BPF scheduler to a DSQ, 1899 * wait for the transfer to complete so that @p doesn't get 1900 * added to its DSQ after dequeueing is complete. 1901 * 1902 * As we're waiting on DISPATCHING with the rq locked, the 1903 * dispatching side shouldn't try to lock the rq while 1904 * DISPATCHING is set. See dispatch_to_local_dsq(). 1905 * 1906 * DISPATCHING shouldn't have qseq set and control can reach 1907 * here with NONE @opss from the above QUEUED case block. 1908 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss. 1909 */ 1910 wait_ops_state(p, SCX_OPSS_DISPATCHING); 1911 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 1912 break; 1913 } 1914 } 1915 1916 static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) 1917 { 1918 if (!(p->scx.flags & SCX_TASK_QUEUED)) { 1919 WARN_ON_ONCE(task_runnable(p)); 1920 return; 1921 } 1922 1923 ops_dequeue(p, deq_flags); 1924 1925 /* 1926 * A currently running task which is going off @rq first gets dequeued 1927 * and then stops running. As we want running <-> stopping transitions 1928 * to be contained within runnable <-> quiescent transitions, trigger 1929 * ->stopping() early here instead of in put_prev_task_scx(). 1930 * 1931 * @p may go through multiple stopping <-> running transitions between 1932 * here and put_prev_task_scx() if task attribute changes occur while 1933 * balance_scx() leaves @rq unlocked. However, they don't contain any 1934 * information meaningful to the BPF scheduler and can be suppressed by 1935 * skipping the callbacks if the task is !QUEUED. 
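	 *
	 * As an editor's aside (not part of this file): the running/stopping
	 * pair is what BPF schedulers typically hook for runtime accounting.
	 * A minimal sketch, assuming the BPF-side helpers from the scx
	 * tooling, might be:
	 *
	 *	void BPF_STRUCT_OPS(minimal_running, struct task_struct *p)
	 *	{
	 *		// e.g. stash bpf_ktime_get_ns() in task local storage
	 *	}
	 *
	 *	void BPF_STRUCT_OPS(minimal_stopping, struct task_struct *p, bool runnable)
	 *	{
	 *		// e.g. add the elapsed time to a usage counter
	 *	}
	 *
	 * Suppressing the extra transitions here keeps such accounting simple.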
1936 */ 1937 if (SCX_HAS_OP(stopping) && task_current(rq, p)) { 1938 update_curr_scx(rq); 1939 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false); 1940 } 1941 1942 if (SCX_HAS_OP(quiescent)) 1943 SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags); 1944 1945 if (deq_flags & SCX_DEQ_SLEEP) 1946 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; 1947 else 1948 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; 1949 1950 p->scx.flags &= ~SCX_TASK_QUEUED; 1951 rq->scx.nr_running--; 1952 sub_nr_running(rq, 1); 1953 1954 dispatch_dequeue(rq, p); 1955 } 1956 1957 static void yield_task_scx(struct rq *rq) 1958 { 1959 struct task_struct *p = rq->curr; 1960 1961 if (SCX_HAS_OP(yield)) 1962 SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL); 1963 else 1964 p->scx.slice = 0; 1965 } 1966 1967 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to) 1968 { 1969 struct task_struct *from = rq->curr; 1970 1971 if (SCX_HAS_OP(yield)) 1972 return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to); 1973 else 1974 return false; 1975 } 1976 1977 #ifdef CONFIG_SMP 1978 /** 1979 * move_task_to_local_dsq - Move a task from a different rq to a local DSQ 1980 * @rq: rq to move the task into, currently locked 1981 * @p: task to move 1982 * @enq_flags: %SCX_ENQ_* 1983 * 1984 * Move @p which is currently on a different rq to @rq's local DSQ. The caller 1985 * must: 1986 * 1987 * 1. Start with exclusive access to @p either through its DSQ lock or 1988 * %SCX_OPSS_DISPATCHING flag. 1989 * 1990 * 2. Set @p->scx.holding_cpu to raw_smp_processor_id(). 1991 * 1992 * 3. Remember task_rq(@p). Release the exclusive access so that we don't 1993 * deadlock with dequeue. 1994 * 1995 * 4. Lock @rq and the task_rq from #3. 1996 * 1997 * 5. Call this function. 1998 * 1999 * Returns %true if @p was successfully moved. %false after racing dequeue and 2000 * losing. 2001 */ 2002 static bool move_task_to_local_dsq(struct rq *rq, struct task_struct *p, 2003 u64 enq_flags) 2004 { 2005 struct rq *task_rq; 2006 2007 lockdep_assert_rq_held(rq); 2008 2009 /* 2010 * If dequeue got to @p while we were trying to lock both rq's, it'd 2011 * have cleared @p->scx.holding_cpu to -1. While other cpus may have 2012 * updated it to different values afterwards, as this operation can't be 2013 * preempted or recurse, @p->scx.holding_cpu can never become 2014 * raw_smp_processor_id() again before we're done. Thus, we can tell 2015 * whether we lost to dequeue by testing whether @p->scx.holding_cpu is 2016 * still raw_smp_processor_id(). 2017 * 2018 * See dispatch_dequeue() for the counterpart. 2019 */ 2020 if (unlikely(p->scx.holding_cpu != raw_smp_processor_id())) 2021 return false; 2022 2023 /* @p->rq couldn't have changed if we're still the holding cpu */ 2024 task_rq = task_rq(p); 2025 lockdep_assert_rq_held(task_rq); 2026 2027 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(rq), p->cpus_ptr)); 2028 deactivate_task(task_rq, p, 0); 2029 set_task_cpu(p, cpu_of(rq)); 2030 p->scx.sticky_cpu = cpu_of(rq); 2031 2032 /* 2033 * We want to pass scx-specific enq_flags but activate_task() will 2034 * truncate the upper 32 bit. As we own @rq, we can pass them through 2035 * @rq->scx.extra_enq_flags instead. 
2036 */ 2037 WARN_ON_ONCE(rq->scx.extra_enq_flags); 2038 rq->scx.extra_enq_flags = enq_flags; 2039 activate_task(rq, p, 0); 2040 rq->scx.extra_enq_flags = 0; 2041 2042 return true; 2043 } 2044 2045 /** 2046 * dispatch_to_local_dsq_lock - Ensure source and destination rq's are locked 2047 * @rq: current rq which is locked 2048 * @rf: rq_flags to use when unlocking @rq 2049 * @src_rq: rq to move task from 2050 * @dst_rq: rq to move task to 2051 * 2052 * We're holding @rq lock and trying to dispatch a task from @src_rq to 2053 * @dst_rq's local DSQ and thus need to lock both @src_rq and @dst_rq. Whether 2054 * @rq stays locked isn't important as long as the state is restored after 2055 * dispatch_to_local_dsq_unlock(). 2056 */ 2057 static void dispatch_to_local_dsq_lock(struct rq *rq, struct rq_flags *rf, 2058 struct rq *src_rq, struct rq *dst_rq) 2059 { 2060 rq_unpin_lock(rq, rf); 2061 2062 if (src_rq == dst_rq) { 2063 raw_spin_rq_unlock(rq); 2064 raw_spin_rq_lock(dst_rq); 2065 } else if (rq == src_rq) { 2066 double_lock_balance(rq, dst_rq); 2067 rq_repin_lock(rq, rf); 2068 } else if (rq == dst_rq) { 2069 double_lock_balance(rq, src_rq); 2070 rq_repin_lock(rq, rf); 2071 } else { 2072 raw_spin_rq_unlock(rq); 2073 double_rq_lock(src_rq, dst_rq); 2074 } 2075 } 2076 2077 /** 2078 * dispatch_to_local_dsq_unlock - Undo dispatch_to_local_dsq_lock() 2079 * @rq: current rq which is locked 2080 * @rf: rq_flags to use when unlocking @rq 2081 * @src_rq: rq to move task from 2082 * @dst_rq: rq to move task to 2083 * 2084 * Unlock @src_rq and @dst_rq and ensure that @rq is locked on return. 2085 */ 2086 static void dispatch_to_local_dsq_unlock(struct rq *rq, struct rq_flags *rf, 2087 struct rq *src_rq, struct rq *dst_rq) 2088 { 2089 if (src_rq == dst_rq) { 2090 raw_spin_rq_unlock(dst_rq); 2091 raw_spin_rq_lock(rq); 2092 rq_repin_lock(rq, rf); 2093 } else if (rq == src_rq) { 2094 double_unlock_balance(rq, dst_rq); 2095 } else if (rq == dst_rq) { 2096 double_unlock_balance(rq, src_rq); 2097 } else { 2098 double_rq_unlock(src_rq, dst_rq); 2099 raw_spin_rq_lock(rq); 2100 rq_repin_lock(rq, rf); 2101 } 2102 } 2103 #endif /* CONFIG_SMP */ 2104 2105 static void consume_local_task(struct rq *rq, struct scx_dispatch_q *dsq, 2106 struct task_struct *p) 2107 { 2108 lockdep_assert_held(&dsq->lock); /* released on return */ 2109 2110 /* @dsq is locked and @p is on this rq */ 2111 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 2112 task_unlink_from_dsq(p, dsq); 2113 list_add_tail(&p->scx.dsq_list.node, &rq->scx.local_dsq.list); 2114 dsq_mod_nr(dsq, -1); 2115 dsq_mod_nr(&rq->scx.local_dsq, 1); 2116 p->scx.dsq = &rq->scx.local_dsq; 2117 raw_spin_unlock(&dsq->lock); 2118 } 2119 2120 #ifdef CONFIG_SMP 2121 /* 2122 * Similar to kernel/sched/core.c::is_cpu_allowed() but we're testing whether @p 2123 * can be pulled to @rq. 
2124 */ 2125 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq) 2126 { 2127 int cpu = cpu_of(rq); 2128 2129 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 2130 return false; 2131 if (unlikely(is_migration_disabled(p))) 2132 return false; 2133 if (!(p->flags & PF_KTHREAD) && unlikely(!task_cpu_possible(cpu, p))) 2134 return false; 2135 if (!scx_rq_online(rq)) 2136 return false; 2137 return true; 2138 } 2139 2140 static bool consume_remote_task(struct rq *rq, struct rq_flags *rf, 2141 struct scx_dispatch_q *dsq, 2142 struct task_struct *p, struct rq *task_rq) 2143 { 2144 bool moved = false; 2145 2146 lockdep_assert_held(&dsq->lock); /* released on return */ 2147 2148 /* 2149 * @dsq is locked and @p is on a remote rq. @p is currently protected by 2150 * @dsq->lock. We want to pull @p to @rq but may deadlock if we grab 2151 * @task_rq while holding @dsq and @rq locks. As dequeue can't drop the 2152 * rq lock or fail, do a little dancing from our side. See 2153 * move_task_to_local_dsq(). 2154 */ 2155 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 2156 task_unlink_from_dsq(p, dsq); 2157 dsq_mod_nr(dsq, -1); 2158 p->scx.holding_cpu = raw_smp_processor_id(); 2159 raw_spin_unlock(&dsq->lock); 2160 2161 rq_unpin_lock(rq, rf); 2162 double_lock_balance(rq, task_rq); 2163 rq_repin_lock(rq, rf); 2164 2165 moved = move_task_to_local_dsq(rq, p, 0); 2166 2167 double_unlock_balance(rq, task_rq); 2168 2169 return moved; 2170 } 2171 #else /* CONFIG_SMP */ 2172 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq) { return false; } 2173 static bool consume_remote_task(struct rq *rq, struct rq_flags *rf, 2174 struct scx_dispatch_q *dsq, 2175 struct task_struct *p, struct rq *task_rq) { return false; } 2176 #endif /* CONFIG_SMP */ 2177 2178 static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf, 2179 struct scx_dispatch_q *dsq) 2180 { 2181 struct task_struct *p; 2182 retry: 2183 /* 2184 * The caller can't expect to successfully consume a task if the task's 2185 * addition to @dsq isn't guaranteed to be visible somehow. Test 2186 * @dsq->list without locking and skip if it seems empty. 2187 */ 2188 if (list_empty(&dsq->list)) 2189 return false; 2190 2191 raw_spin_lock(&dsq->lock); 2192 2193 nldsq_for_each_task(p, dsq) { 2194 struct rq *task_rq = task_rq(p); 2195 2196 if (rq == task_rq) { 2197 consume_local_task(rq, dsq, p); 2198 return true; 2199 } 2200 2201 if (task_can_run_on_remote_rq(p, rq)) { 2202 if (likely(consume_remote_task(rq, rf, dsq, p, task_rq))) 2203 return true; 2204 goto retry; 2205 } 2206 } 2207 2208 raw_spin_unlock(&dsq->lock); 2209 return false; 2210 } 2211 2212 enum dispatch_to_local_dsq_ret { 2213 DTL_DISPATCHED, /* successfully dispatched */ 2214 DTL_LOST, /* lost race to dequeue */ 2215 DTL_NOT_LOCAL, /* destination is not a local DSQ */ 2216 DTL_INVALID, /* invalid local dsq_id */ 2217 }; 2218 2219 /** 2220 * dispatch_to_local_dsq - Dispatch a task to a local dsq 2221 * @rq: current rq which is locked 2222 * @rf: rq_flags to use when unlocking @rq 2223 * @dsq_id: destination dsq ID 2224 * @p: task to dispatch 2225 * @enq_flags: %SCX_ENQ_* 2226 * 2227 * We're holding @rq lock and want to dispatch @p to the local DSQ identified by 2228 * @dsq_id. This function performs all the synchronization dancing needed 2229 * because local DSQs are protected with rq locks. 2230 * 2231 * The caller must have exclusive ownership of @p (e.g. through 2232 * %SCX_OPSS_DISPATCHING). 
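 *
 * As an editor's illustration (not part of this file): a BPF scheduler
 * usually reaches this path from ops.dispatch() with a call along the lines
 * of
 *
 *	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target_cpu, SCX_SLICE_DFL, 0);
 *
 * where target_cpu is a hypothetical CPU number it picked, while plain
 * %SCX_DSQ_LOCAL targets the local DSQ of the rq being dispatched for.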
2233 */ 2234 static enum dispatch_to_local_dsq_ret 2235 dispatch_to_local_dsq(struct rq *rq, struct rq_flags *rf, u64 dsq_id, 2236 struct task_struct *p, u64 enq_flags) 2237 { 2238 struct rq *src_rq = task_rq(p); 2239 struct rq *dst_rq; 2240 2241 /* 2242 * We're synchronized against dequeue through DISPATCHING. As @p can't 2243 * be dequeued, its task_rq and cpus_allowed are stable too. 2244 */ 2245 if (dsq_id == SCX_DSQ_LOCAL) { 2246 dst_rq = rq; 2247 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 2248 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 2249 2250 if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict")) 2251 return DTL_INVALID; 2252 dst_rq = cpu_rq(cpu); 2253 } else { 2254 return DTL_NOT_LOCAL; 2255 } 2256 2257 /* if dispatching to @rq that @p is already on, no lock dancing needed */ 2258 if (rq == src_rq && rq == dst_rq) { 2259 dispatch_enqueue(&dst_rq->scx.local_dsq, p, 2260 enq_flags | SCX_ENQ_CLEAR_OPSS); 2261 return DTL_DISPATCHED; 2262 } 2263 2264 #ifdef CONFIG_SMP 2265 if (cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)) { 2266 struct rq *locked_dst_rq = dst_rq; 2267 bool dsp; 2268 2269 /* 2270 * @p is on a possibly remote @src_rq which we need to lock to 2271 * move the task. If dequeue is in progress, it'd be locking 2272 * @src_rq and waiting on DISPATCHING, so we can't grab @src_rq 2273 * lock while holding DISPATCHING. 2274 * 2275 * As DISPATCHING guarantees that @p is wholly ours, we can 2276 * pretend that we're moving from a DSQ and use the same 2277 * mechanism - mark the task under transfer with holding_cpu, 2278 * release DISPATCHING and then follow the same protocol. 2279 */ 2280 p->scx.holding_cpu = raw_smp_processor_id(); 2281 2282 /* store_release ensures that dequeue sees the above */ 2283 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 2284 2285 dispatch_to_local_dsq_lock(rq, rf, src_rq, locked_dst_rq); 2286 2287 /* 2288 * We don't require the BPF scheduler to avoid dispatching to 2289 * offline CPUs mostly for convenience but also because CPUs can 2290 * go offline between scx_bpf_dispatch() calls and here. If @p 2291 * is destined to an offline CPU, queue it on its current CPU 2292 * instead, which should always be safe. As this is an allowed 2293 * behavior, don't trigger an ops error. 2294 */ 2295 if (!scx_rq_online(dst_rq)) 2296 dst_rq = src_rq; 2297 2298 if (src_rq == dst_rq) { 2299 /* 2300 * As @p is staying on the same rq, there's no need to 2301 * go through the full deactivate/activate cycle. 2302 * Optimize by abbreviating the operations in 2303 * move_task_to_local_dsq(). 2304 */ 2305 dsp = p->scx.holding_cpu == raw_smp_processor_id(); 2306 if (likely(dsp)) { 2307 p->scx.holding_cpu = -1; 2308 dispatch_enqueue(&dst_rq->scx.local_dsq, p, 2309 enq_flags); 2310 } 2311 } else { 2312 dsp = move_task_to_local_dsq(dst_rq, p, enq_flags); 2313 } 2314 2315 /* if the destination CPU is idle, wake it up */ 2316 if (dsp && sched_class_above(p->sched_class, 2317 dst_rq->curr->sched_class)) 2318 resched_curr(dst_rq); 2319 2320 dispatch_to_local_dsq_unlock(rq, rf, src_rq, locked_dst_rq); 2321 2322 return dsp ? 
DTL_DISPATCHED : DTL_LOST; 2323 } 2324 #endif /* CONFIG_SMP */ 2325 2326 scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]", 2327 cpu_of(dst_rq), p->comm, p->pid); 2328 return DTL_INVALID; 2329 } 2330 2331 /** 2332 * finish_dispatch - Asynchronously finish dispatching a task 2333 * @rq: current rq which is locked 2334 * @rf: rq_flags to use when unlocking @rq 2335 * @p: task to finish dispatching 2336 * @qseq_at_dispatch: qseq when @p started getting dispatched 2337 * @dsq_id: destination DSQ ID 2338 * @enq_flags: %SCX_ENQ_* 2339 * 2340 * Dispatching to local DSQs may need to wait for queueing to complete or 2341 * require rq lock dancing. As we don't wanna do either while inside 2342 * ops.dispatch() to avoid locking order inversion, we split dispatching into 2343 * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the 2344 * task and its qseq. Once ops.dispatch() returns, this function is called to 2345 * finish up. 2346 * 2347 * There is no guarantee that @p is still valid for dispatching or even that it 2348 * was valid in the first place. Make sure that the task is still owned by the 2349 * BPF scheduler and claim the ownership before dispatching. 2350 */ 2351 static void finish_dispatch(struct rq *rq, struct rq_flags *rf, 2352 struct task_struct *p, 2353 unsigned long qseq_at_dispatch, 2354 u64 dsq_id, u64 enq_flags) 2355 { 2356 struct scx_dispatch_q *dsq; 2357 unsigned long opss; 2358 2359 touch_core_sched_dispatch(rq, p); 2360 retry: 2361 /* 2362 * No need for _acquire here. @p is accessed only after a successful 2363 * try_cmpxchg to DISPATCHING. 2364 */ 2365 opss = atomic_long_read(&p->scx.ops_state); 2366 2367 switch (opss & SCX_OPSS_STATE_MASK) { 2368 case SCX_OPSS_DISPATCHING: 2369 case SCX_OPSS_NONE: 2370 /* someone else already got to it */ 2371 return; 2372 case SCX_OPSS_QUEUED: 2373 /* 2374 * If qseq doesn't match, @p has gone through at least one 2375 * dispatch/dequeue and re-enqueue cycle between 2376 * scx_bpf_dispatch() and here and we have no claim on it. 2377 */ 2378 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch) 2379 return; 2380 2381 /* 2382 * While we know @p is accessible, we don't yet have a claim on 2383 * it - the BPF scheduler is allowed to dispatch tasks 2384 * spuriously and there can be a racing dequeue attempt. Let's 2385 * claim @p by atomically transitioning it from QUEUED to 2386 * DISPATCHING. 2387 */ 2388 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 2389 SCX_OPSS_DISPATCHING))) 2390 break; 2391 goto retry; 2392 case SCX_OPSS_QUEUEING: 2393 /* 2394 * do_enqueue_task() is in the process of transferring the task 2395 * to the BPF scheduler while holding @p's rq lock. As we aren't 2396 * holding any kernel or BPF resource that the enqueue path may 2397 * depend upon, it's safe to wait. 
2398 */ 2399 wait_ops_state(p, opss); 2400 goto retry; 2401 } 2402 2403 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); 2404 2405 switch (dispatch_to_local_dsq(rq, rf, dsq_id, p, enq_flags)) { 2406 case DTL_DISPATCHED: 2407 break; 2408 case DTL_LOST: 2409 break; 2410 case DTL_INVALID: 2411 dsq_id = SCX_DSQ_GLOBAL; 2412 fallthrough; 2413 case DTL_NOT_LOCAL: 2414 dsq = find_dsq_for_dispatch(cpu_rq(raw_smp_processor_id()), 2415 dsq_id, p); 2416 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); 2417 break; 2418 } 2419 } 2420 2421 static void flush_dispatch_buf(struct rq *rq, struct rq_flags *rf) 2422 { 2423 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2424 u32 u; 2425 2426 for (u = 0; u < dspc->cursor; u++) { 2427 struct scx_dsp_buf_ent *ent = &dspc->buf[u]; 2428 2429 finish_dispatch(rq, rf, ent->task, ent->qseq, ent->dsq_id, 2430 ent->enq_flags); 2431 } 2432 2433 dspc->nr_tasks += dspc->cursor; 2434 dspc->cursor = 0; 2435 } 2436 2437 static int balance_one(struct rq *rq, struct task_struct *prev, 2438 struct rq_flags *rf, bool local) 2439 { 2440 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2441 bool prev_on_scx = prev->sched_class == &ext_sched_class; 2442 int nr_loops = SCX_DSP_MAX_LOOPS; 2443 bool has_tasks = false; 2444 2445 lockdep_assert_rq_held(rq); 2446 rq->scx.flags |= SCX_RQ_BALANCING; 2447 2448 if (static_branch_unlikely(&scx_ops_cpu_preempt) && 2449 unlikely(rq->scx.cpu_released)) { 2450 /* 2451 * If the previous sched_class for the current CPU was not SCX, 2452 * notify the BPF scheduler that it again has control of the 2453 * core. This callback complements ->cpu_release(), which is 2454 * emitted in scx_next_task_picked(). 2455 */ 2456 if (SCX_HAS_OP(cpu_acquire)) 2457 SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL); 2458 rq->scx.cpu_released = false; 2459 } 2460 2461 if (prev_on_scx) { 2462 WARN_ON_ONCE(local && (prev->scx.flags & SCX_TASK_BAL_KEEP)); 2463 update_curr_scx(rq); 2464 2465 /* 2466 * If @prev is runnable & has slice left, it has priority and 2467 * fetching more just increases latency for the fetched tasks. 2468 * Tell put_prev_task_scx() to put @prev on local_dsq. If the 2469 * BPF scheduler wants to handle this explicitly, it should 2470 * implement ->cpu_released(). 2471 * 2472 * See scx_ops_disable_workfn() for the explanation on the 2473 * bypassing test. 2474 * 2475 * When balancing a remote CPU for core-sched, there won't be a 2476 * following put_prev_task_scx() call and we don't own 2477 * %SCX_TASK_BAL_KEEP. Instead, pick_task_scx() will test the 2478 * same conditions later and pick @rq->curr accordingly. 2479 */ 2480 if ((prev->scx.flags & SCX_TASK_QUEUED) && 2481 prev->scx.slice && !scx_ops_bypassing()) { 2482 if (local) 2483 prev->scx.flags |= SCX_TASK_BAL_KEEP; 2484 goto has_tasks; 2485 } 2486 } 2487 2488 /* if there already are tasks to run, nothing to do */ 2489 if (rq->scx.local_dsq.nr) 2490 goto has_tasks; 2491 2492 if (consume_dispatch_q(rq, rf, &scx_dsq_global)) 2493 goto has_tasks; 2494 2495 if (!SCX_HAS_OP(dispatch) || scx_ops_bypassing() || !scx_rq_online(rq)) 2496 goto out; 2497 2498 dspc->rq = rq; 2499 dspc->rf = rf; 2500 2501 /* 2502 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock, 2503 * the local DSQ might still end up empty after a successful 2504 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch() 2505 * produced some tasks, retry. The BPF scheduler may depend on this 2506 * looping behavior to simplify its implementation. 
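	 *
	 * For illustration only (editor's sketch, not part of this file), a
	 * minimal ops.dispatch() that relies on this loop might be, with
	 * MY_DSQ being a hypothetical user DSQ:
	 *
	 *	void BPF_STRUCT_OPS(minimal_dispatch, s32 cpu, struct task_struct *prev)
	 *	{
	 *		scx_bpf_consume(MY_DSQ);
	 *	}
	 *
	 * scx_bpf_consume() moves a task from MY_DSQ to the local DSQ; if it
	 * finds nothing and no tasks were buffered, the loop below terminates.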
2507 */ 2508 do { 2509 dspc->nr_tasks = 0; 2510 2511 SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq), 2512 prev_on_scx ? prev : NULL); 2513 2514 flush_dispatch_buf(rq, rf); 2515 2516 if (rq->scx.local_dsq.nr) 2517 goto has_tasks; 2518 if (consume_dispatch_q(rq, rf, &scx_dsq_global)) 2519 goto has_tasks; 2520 2521 /* 2522 * ops.dispatch() can trap us in this loop by repeatedly 2523 * dispatching ineligible tasks. Break out once in a while to 2524 * allow the watchdog to run. As IRQ can't be enabled in 2525 * balance(), we want to complete this scheduling cycle and then 2526 * start a new one. IOW, we want to call resched_curr() on the 2527 * next, most likely idle, task, not the current one. Use 2528 * scx_bpf_kick_cpu() for deferred kicking. 2529 */ 2530 if (unlikely(!--nr_loops)) { 2531 scx_bpf_kick_cpu(cpu_of(rq), 0); 2532 break; 2533 } 2534 } while (dspc->nr_tasks); 2535 2536 goto out; 2537 2538 has_tasks: 2539 has_tasks = true; 2540 out: 2541 rq->scx.flags &= ~SCX_RQ_BALANCING; 2542 return has_tasks; 2543 } 2544 2545 static int balance_scx(struct rq *rq, struct task_struct *prev, 2546 struct rq_flags *rf) 2547 { 2548 int ret; 2549 2550 ret = balance_one(rq, prev, rf, true); 2551 2552 #ifdef CONFIG_SCHED_SMT 2553 /* 2554 * When core-sched is enabled, this ops.balance() call will be followed 2555 * by put_prev_scx() and pick_task_scx() on this CPU and pick_task_scx() 2556 * on the SMT siblings. Balance the siblings too. 2557 */ 2558 if (sched_core_enabled(rq)) { 2559 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); 2560 int scpu; 2561 2562 for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) { 2563 struct rq *srq = cpu_rq(scpu); 2564 struct rq_flags srf; 2565 struct task_struct *sprev = srq->curr; 2566 2567 /* 2568 * While core-scheduling, rq lock is shared among 2569 * siblings but the debug annotations and rq clock 2570 * aren't. Do pinning dance to transfer the ownership. 2571 */ 2572 WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq)); 2573 rq_unpin_lock(rq, rf); 2574 rq_pin_lock(srq, &srf); 2575 2576 update_rq_clock(srq); 2577 balance_one(srq, sprev, &srf, false); 2578 2579 rq_unpin_lock(srq, &srf); 2580 rq_repin_lock(rq, rf); 2581 } 2582 } 2583 #endif 2584 return ret; 2585 } 2586 2587 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) 2588 { 2589 if (p->scx.flags & SCX_TASK_QUEUED) { 2590 /* 2591 * Core-sched might decide to execute @p before it is 2592 * dispatched. Call ops_dequeue() to notify the BPF scheduler. 2593 */ 2594 ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC); 2595 dispatch_dequeue(rq, p); 2596 } 2597 2598 p->se.exec_start = rq_clock_task(rq); 2599 2600 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2601 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED)) 2602 SCX_CALL_OP_TASK(SCX_KF_REST, running, p); 2603 2604 clr_task_runnable(p, true); 2605 2606 /* 2607 * @p is getting newly scheduled or got kicked after someone updated its 2608 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick(). 2609 */ 2610 if ((p->scx.slice == SCX_SLICE_INF) != 2611 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { 2612 if (p->scx.slice == SCX_SLICE_INF) 2613 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; 2614 else 2615 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; 2616 2617 sched_update_tick_dependency(rq); 2618 2619 /* 2620 * For now, let's refresh the load_avgs just when transitioning 2621 * in and out of nohz. In the future, we might want to add a 2622 * mechanism which calls the following periodically on 2623 * tick-stopped CPUs. 
2624 */ 2625 update_other_load_avgs(rq); 2626 } 2627 } 2628 2629 static void put_prev_task_scx(struct rq *rq, struct task_struct *p) 2630 { 2631 #ifndef CONFIG_SMP 2632 /* 2633 * UP workaround. 2634 * 2635 * Because SCX may transfer tasks across CPUs during dispatch, dispatch 2636 * is performed from its balance operation which isn't called in UP. 2637 * Let's work around by calling it from the operations which come right 2638 * after. 2639 * 2640 * 1. If the prev task is on SCX, pick_next_task() calls 2641 * .put_prev_task() right after. As .put_prev_task() is also called 2642 * from other places, we need to distinguish the calls which can be 2643 * done by looking at the previous task's state - if still queued or 2644 * dequeued with %SCX_DEQ_SLEEP, the caller must be pick_next_task(). 2645 * This case is handled here. 2646 * 2647 * 2. If the prev task is not on SCX, the first following call into SCX 2648 * will be .pick_next_task(), which is covered by calling 2649 * balance_scx() from pick_next_task_scx(). 2650 * 2651 * Note that we can't merge the first case into the second as 2652 * balance_scx() must be called before the previous SCX task goes 2653 * through put_prev_task_scx(). 2654 * 2655 * As UP doesn't transfer tasks around, balance_scx() doesn't need @rf. 2656 * Pass in %NULL. 2657 */ 2658 if (p->scx.flags & (SCX_TASK_QUEUED | SCX_TASK_DEQD_FOR_SLEEP)) 2659 balance_scx(rq, p, NULL); 2660 #endif 2661 2662 update_curr_scx(rq); 2663 2664 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2665 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED)) 2666 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true); 2667 2668 /* 2669 * If we're being called from put_prev_task_balance(), balance_scx() may 2670 * have decided that @p should keep running. 2671 */ 2672 if (p->scx.flags & SCX_TASK_BAL_KEEP) { 2673 p->scx.flags &= ~SCX_TASK_BAL_KEEP; 2674 set_task_runnable(rq, p); 2675 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); 2676 return; 2677 } 2678 2679 if (p->scx.flags & SCX_TASK_QUEUED) { 2680 set_task_runnable(rq, p); 2681 2682 /* 2683 * If @p has slice left and balance_scx() didn't tag it for 2684 * keeping, @p is getting preempted by a higher priority 2685 * scheduler class or core-sched forcing a different task. Leave 2686 * it at the head of the local DSQ. 2687 */ 2688 if (p->scx.slice && !scx_ops_bypassing()) { 2689 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); 2690 return; 2691 } 2692 2693 /* 2694 * If we're in the pick_next_task path, balance_scx() should 2695 * have already populated the local DSQ if there are any other 2696 * available tasks. If empty, tell ops.enqueue() that @p is the 2697 * only one available for this cpu. ops.enqueue() should put it 2698 * on the local DSQ so that the subsequent pick_next_task_scx() 2699 * can find the task unless it wants to trigger a separate 2700 * follow-up scheduling event. 
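		 *
		 * Editor's illustration (not part of this file): a scheduler
		 * which sets %SCX_OPS_ENQ_LAST might handle this in
		 * ops.enqueue() roughly as follows, MY_DSQ being a
		 * hypothetical user DSQ:
		 *
		 *	if (enq_flags & SCX_ENQ_LAST)
		 *		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
		 *	else
		 *		scx_bpf_dispatch(p, MY_DSQ, SCX_SLICE_DFL, enq_flags);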
		 */
		if (list_empty(&rq->scx.local_dsq.list))
			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
		else
			do_enqueue_task(rq, p, 0, -1);
	}
}

static struct task_struct *first_local_task(struct rq *rq)
{
	return list_first_entry_or_null(&rq->scx.local_dsq.list,
					struct task_struct, scx.dsq_list.node);
}

static struct task_struct *pick_next_task_scx(struct rq *rq)
{
	struct task_struct *p;

#ifndef CONFIG_SMP
	/* UP workaround - see the comment at the head of put_prev_task_scx() */
	if (unlikely(rq->curr->sched_class != &ext_sched_class))
		balance_scx(rq, rq->curr, NULL);
#endif

	p = first_local_task(rq);
	if (!p)
		return NULL;

	set_next_task_scx(rq, p, true);

	if (unlikely(!p->scx.slice)) {
		if (!scx_ops_bypassing() && !scx_warned_zero_slice) {
			printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_next_task_scx()\n",
					p->comm, p->pid);
			scx_warned_zero_slice = true;
		}
		p->scx.slice = SCX_SLICE_DFL;
	}

	return p;
}

#ifdef CONFIG_SCHED_CORE
/**
 * scx_prio_less - Task ordering for core-sched
 * @a: task A
 * @b: task B
 *
 * Core-sched is implemented as an additional scheduling layer on top of the
 * usual sched_class'es and needs to find out the expected task ordering. For
 * SCX, core-sched calls this function to interrogate the task ordering.
 *
 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
 * to implement the default task ordering. The older the timestamp, the higher
 * priority the task - the global FIFO ordering matching the default scheduling
 * behavior.
 *
 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
 * implement FIFO ordering within each local DSQ. See pick_task_scx().
 */
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi)
{
	/*
	 * The const qualifiers are dropped from task_struct pointers when
	 * calling ops.core_sched_before(). Accesses are controlled by the
	 * verifier.
	 */
	if (SCX_HAS_OP(core_sched_before) && !scx_ops_bypassing())
		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
					      (struct task_struct *)a,
					      (struct task_struct *)b);
	else
		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
}

/**
 * pick_task_scx - Pick a candidate task for core-sched
 * @rq: rq to pick the candidate task from
 *
 * Core-sched calls this function on each SMT sibling to determine the next
 * tasks to run on the SMT siblings. balance_one() has been called on all
 * siblings and put_prev_task_scx() has been called only for the current CPU.
 *
 * As put_prev_task_scx() hasn't been called on remote CPUs, we can't just look
 * at the first task in the local dsq. @rq->curr has to be considered explicitly
 * to mimic %SCX_TASK_BAL_KEEP.
 */
static struct task_struct *pick_task_scx(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct task_struct *first = first_local_task(rq);

	if (curr->scx.flags & SCX_TASK_QUEUED) {
		/* is curr the only runnable task? */
		if (!first)
			return curr;

		/*
		 * Does curr trump first?
We can always go by core_sched_at for 2801 * this comparison as it represents global FIFO ordering when 2802 * the default core-sched ordering is used and local-DSQ FIFO 2803 * ordering otherwise. 2804 * 2805 * We can have a task with an earlier timestamp on the DSQ. For 2806 * example, when a current task is preempted by a sibling 2807 * picking a different cookie, the task would be requeued at the 2808 * head of the local DSQ with an earlier timestamp than the 2809 * core-sched picked next task. Besides, the BPF scheduler may 2810 * dispatch any tasks to the local DSQ anytime. 2811 */ 2812 if (curr->scx.slice && time_before64(curr->scx.core_sched_at, 2813 first->scx.core_sched_at)) 2814 return curr; 2815 } 2816 2817 return first; /* this may be %NULL */ 2818 } 2819 #endif /* CONFIG_SCHED_CORE */ 2820 2821 static enum scx_cpu_preempt_reason 2822 preempt_reason_from_class(const struct sched_class *class) 2823 { 2824 #ifdef CONFIG_SMP 2825 if (class == &stop_sched_class) 2826 return SCX_CPU_PREEMPT_STOP; 2827 #endif 2828 if (class == &dl_sched_class) 2829 return SCX_CPU_PREEMPT_DL; 2830 if (class == &rt_sched_class) 2831 return SCX_CPU_PREEMPT_RT; 2832 return SCX_CPU_PREEMPT_UNKNOWN; 2833 } 2834 2835 static void switch_class_scx(struct rq *rq, struct task_struct *next) 2836 { 2837 const struct sched_class *next_class = next->sched_class; 2838 2839 if (!scx_enabled()) 2840 return; 2841 #ifdef CONFIG_SMP 2842 /* 2843 * Pairs with the smp_load_acquire() issued by a CPU in 2844 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a 2845 * resched. 2846 */ 2847 smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); 2848 #endif 2849 if (!static_branch_unlikely(&scx_ops_cpu_preempt)) 2850 return; 2851 2852 /* 2853 * The callback is conceptually meant to convey that the CPU is no 2854 * longer under the control of SCX. Therefore, don't invoke the callback 2855 * if the next class is below SCX (in which case the BPF scheduler has 2856 * actively decided not to schedule any tasks on the CPU). 2857 */ 2858 if (sched_class_above(&ext_sched_class, next_class)) 2859 return; 2860 2861 /* 2862 * At this point we know that SCX was preempted by a higher priority 2863 * sched_class, so invoke the ->cpu_release() callback if we have not 2864 * done so already. We only send the callback once between SCX being 2865 * preempted, and it regaining control of the CPU. 2866 * 2867 * ->cpu_release() complements ->cpu_acquire(), which is emitted the 2868 * next time that balance_scx() is invoked. 2869 */ 2870 if (!rq->scx.cpu_released) { 2871 if (SCX_HAS_OP(cpu_release)) { 2872 struct scx_cpu_release_args args = { 2873 .reason = preempt_reason_from_class(next_class), 2874 .task = next, 2875 }; 2876 2877 SCX_CALL_OP(SCX_KF_CPU_RELEASE, 2878 cpu_release, cpu_of(rq), &args); 2879 } 2880 rq->scx.cpu_released = true; 2881 } 2882 } 2883 2884 #ifdef CONFIG_SMP 2885 2886 static bool test_and_clear_cpu_idle(int cpu) 2887 { 2888 #ifdef CONFIG_SCHED_SMT 2889 /* 2890 * SMT mask should be cleared whether we can claim @cpu or not. The SMT 2891 * cluster is not wholly idle either way. This also prevents 2892 * scx_pick_idle_cpu() from getting caught in an infinite loop. 2893 */ 2894 if (sched_smt_active()) { 2895 const struct cpumask *smt = cpu_smt_mask(cpu); 2896 2897 /* 2898 * If offline, @cpu is not its own sibling and 2899 * scx_pick_idle_cpu() can get caught in an infinite loop as 2900 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu 2901 * is eventually cleared. 
2902 */ 2903 if (cpumask_intersects(smt, idle_masks.smt)) 2904 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt); 2905 else if (cpumask_test_cpu(cpu, idle_masks.smt)) 2906 __cpumask_clear_cpu(cpu, idle_masks.smt); 2907 } 2908 #endif 2909 return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu); 2910 } 2911 2912 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) 2913 { 2914 int cpu; 2915 2916 retry: 2917 if (sched_smt_active()) { 2918 cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed); 2919 if (cpu < nr_cpu_ids) 2920 goto found; 2921 2922 if (flags & SCX_PICK_IDLE_CORE) 2923 return -EBUSY; 2924 } 2925 2926 cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed); 2927 if (cpu >= nr_cpu_ids) 2928 return -EBUSY; 2929 2930 found: 2931 if (test_and_clear_cpu_idle(cpu)) 2932 return cpu; 2933 else 2934 goto retry; 2935 } 2936 2937 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, 2938 u64 wake_flags, bool *found) 2939 { 2940 s32 cpu; 2941 2942 *found = false; 2943 2944 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 2945 scx_ops_error("built-in idle tracking is disabled"); 2946 return prev_cpu; 2947 } 2948 2949 /* 2950 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is 2951 * under utilized, wake up @p to the local DSQ of the waker. Checking 2952 * only for an empty local DSQ is insufficient as it could give the 2953 * wakee an unfair advantage when the system is oversaturated. 2954 * Checking only for the presence of idle CPUs is also insufficient as 2955 * the local DSQ of the waker could have tasks piled up on it even if 2956 * there is an idle core elsewhere on the system. 2957 */ 2958 cpu = smp_processor_id(); 2959 if ((wake_flags & SCX_WAKE_SYNC) && p->nr_cpus_allowed > 1 && 2960 !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) && 2961 cpu_rq(cpu)->scx.local_dsq.nr == 0) { 2962 if (cpumask_test_cpu(cpu, p->cpus_ptr)) 2963 goto cpu_found; 2964 } 2965 2966 if (p->nr_cpus_allowed == 1) { 2967 if (test_and_clear_cpu_idle(prev_cpu)) { 2968 cpu = prev_cpu; 2969 goto cpu_found; 2970 } else { 2971 return prev_cpu; 2972 } 2973 } 2974 2975 /* 2976 * If CPU has SMT, any wholly idle CPU is likely a better pick than 2977 * partially idle @prev_cpu. 2978 */ 2979 if (sched_smt_active()) { 2980 if (cpumask_test_cpu(prev_cpu, idle_masks.smt) && 2981 test_and_clear_cpu_idle(prev_cpu)) { 2982 cpu = prev_cpu; 2983 goto cpu_found; 2984 } 2985 2986 cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE); 2987 if (cpu >= 0) 2988 goto cpu_found; 2989 } 2990 2991 if (test_and_clear_cpu_idle(prev_cpu)) { 2992 cpu = prev_cpu; 2993 goto cpu_found; 2994 } 2995 2996 cpu = scx_pick_idle_cpu(p->cpus_ptr, 0); 2997 if (cpu >= 0) 2998 goto cpu_found; 2999 3000 return prev_cpu; 3001 3002 cpu_found: 3003 *found = true; 3004 return cpu; 3005 } 3006 3007 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) 3008 { 3009 /* 3010 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it 3011 * can be a good migration opportunity with low cache and memory 3012 * footprint. Returning a CPU different than @prev_cpu triggers 3013 * immediate rq migration. However, for SCX, as the current rq 3014 * association doesn't dictate where the task is going to run, this 3015 * doesn't fit well. If necessary, we can later add a dedicated method 3016 * which can decide to preempt self to force it through the regular 3017 * scheduling path. 
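	 *
	 * As an editor's sketch (not part of this file), the common BPF-side
	 * pattern mirrors the default path below: pick a CPU with the built-in
	 * helper and direct-dispatch when it was idle:
	 *
	 *	s32 BPF_STRUCT_OPS(minimal_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}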
3018 */ 3019 if (unlikely(wake_flags & WF_EXEC)) 3020 return prev_cpu; 3021 3022 if (SCX_HAS_OP(select_cpu)) { 3023 s32 cpu; 3024 struct task_struct **ddsp_taskp; 3025 3026 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 3027 WARN_ON_ONCE(*ddsp_taskp); 3028 *ddsp_taskp = p; 3029 3030 cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU, 3031 select_cpu, p, prev_cpu, wake_flags); 3032 *ddsp_taskp = NULL; 3033 if (ops_cpu_valid(cpu, "from ops.select_cpu()")) 3034 return cpu; 3035 else 3036 return prev_cpu; 3037 } else { 3038 bool found; 3039 s32 cpu; 3040 3041 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found); 3042 if (found) { 3043 p->scx.slice = SCX_SLICE_DFL; 3044 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; 3045 } 3046 return cpu; 3047 } 3048 } 3049 3050 static void set_cpus_allowed_scx(struct task_struct *p, 3051 struct affinity_context *ac) 3052 { 3053 set_cpus_allowed_common(p, ac); 3054 3055 /* 3056 * The effective cpumask is stored in @p->cpus_ptr which may temporarily 3057 * differ from the configured one in @p->cpus_mask. Always tell the bpf 3058 * scheduler the effective one. 3059 * 3060 * Fine-grained memory write control is enforced by BPF making the const 3061 * designation pointless. Cast it away when calling the operation. 3062 */ 3063 if (SCX_HAS_OP(set_cpumask)) 3064 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, 3065 (struct cpumask *)p->cpus_ptr); 3066 } 3067 3068 static void reset_idle_masks(void) 3069 { 3070 /* 3071 * Consider all online cpus idle. Should converge to the actual state 3072 * quickly. 3073 */ 3074 cpumask_copy(idle_masks.cpu, cpu_online_mask); 3075 cpumask_copy(idle_masks.smt, cpu_online_mask); 3076 } 3077 3078 void __scx_update_idle(struct rq *rq, bool idle) 3079 { 3080 int cpu = cpu_of(rq); 3081 3082 if (SCX_HAS_OP(update_idle)) { 3083 SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle); 3084 if (!static_branch_unlikely(&scx_builtin_idle_enabled)) 3085 return; 3086 } 3087 3088 if (idle) 3089 cpumask_set_cpu(cpu, idle_masks.cpu); 3090 else 3091 cpumask_clear_cpu(cpu, idle_masks.cpu); 3092 3093 #ifdef CONFIG_SCHED_SMT 3094 if (sched_smt_active()) { 3095 const struct cpumask *smt = cpu_smt_mask(cpu); 3096 3097 if (idle) { 3098 /* 3099 * idle_masks.smt handling is racy but that's fine as 3100 * it's only for optimization and self-correcting. 3101 */ 3102 for_each_cpu(cpu, smt) { 3103 if (!cpumask_test_cpu(cpu, idle_masks.cpu)) 3104 return; 3105 } 3106 cpumask_or(idle_masks.smt, idle_masks.smt, smt); 3107 } else { 3108 cpumask_andnot(idle_masks.smt, idle_masks.smt, smt); 3109 } 3110 } 3111 #endif 3112 } 3113 3114 static void handle_hotplug(struct rq *rq, bool online) 3115 { 3116 int cpu = cpu_of(rq); 3117 3118 atomic_long_inc(&scx_hotplug_seq); 3119 3120 if (online && SCX_HAS_OP(cpu_online)) 3121 SCX_CALL_OP(SCX_KF_SLEEPABLE, cpu_online, cpu); 3122 else if (!online && SCX_HAS_OP(cpu_offline)) 3123 SCX_CALL_OP(SCX_KF_SLEEPABLE, cpu_offline, cpu); 3124 else 3125 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 3126 "cpu %d going %s, exiting scheduler", cpu, 3127 online ? 
"online" : "offline"); 3128 } 3129 3130 void scx_rq_activate(struct rq *rq) 3131 { 3132 handle_hotplug(rq, true); 3133 } 3134 3135 void scx_rq_deactivate(struct rq *rq) 3136 { 3137 handle_hotplug(rq, false); 3138 } 3139 3140 static void rq_online_scx(struct rq *rq) 3141 { 3142 rq->scx.flags |= SCX_RQ_ONLINE; 3143 } 3144 3145 static void rq_offline_scx(struct rq *rq) 3146 { 3147 rq->scx.flags &= ~SCX_RQ_ONLINE; 3148 } 3149 3150 #else /* CONFIG_SMP */ 3151 3152 static bool test_and_clear_cpu_idle(int cpu) { return false; } 3153 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; } 3154 static void reset_idle_masks(void) {} 3155 3156 #endif /* CONFIG_SMP */ 3157 3158 static bool check_rq_for_timeouts(struct rq *rq) 3159 { 3160 struct task_struct *p; 3161 struct rq_flags rf; 3162 bool timed_out = false; 3163 3164 rq_lock_irqsave(rq, &rf); 3165 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { 3166 unsigned long last_runnable = p->scx.runnable_at; 3167 3168 if (unlikely(time_after(jiffies, 3169 last_runnable + scx_watchdog_timeout))) { 3170 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); 3171 3172 scx_ops_error_kind(SCX_EXIT_ERROR_STALL, 3173 "%s[%d] failed to run for %u.%03us", 3174 p->comm, p->pid, 3175 dur_ms / 1000, dur_ms % 1000); 3176 timed_out = true; 3177 break; 3178 } 3179 } 3180 rq_unlock_irqrestore(rq, &rf); 3181 3182 return timed_out; 3183 } 3184 3185 static void scx_watchdog_workfn(struct work_struct *work) 3186 { 3187 int cpu; 3188 3189 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 3190 3191 for_each_online_cpu(cpu) { 3192 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) 3193 break; 3194 3195 cond_resched(); 3196 } 3197 queue_delayed_work(system_unbound_wq, to_delayed_work(work), 3198 scx_watchdog_timeout / 2); 3199 } 3200 3201 void scx_tick(struct rq *rq) 3202 { 3203 unsigned long last_check; 3204 3205 if (!scx_enabled()) 3206 return; 3207 3208 last_check = READ_ONCE(scx_watchdog_timestamp); 3209 if (unlikely(time_after(jiffies, 3210 last_check + READ_ONCE(scx_watchdog_timeout)))) { 3211 u32 dur_ms = jiffies_to_msecs(jiffies - last_check); 3212 3213 scx_ops_error_kind(SCX_EXIT_ERROR_STALL, 3214 "watchdog failed to check in for %u.%03us", 3215 dur_ms / 1000, dur_ms % 1000); 3216 } 3217 3218 update_other_load_avgs(rq); 3219 } 3220 3221 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) 3222 { 3223 update_curr_scx(rq); 3224 3225 /* 3226 * While disabling, always resched and refresh core-sched timestamp as 3227 * we can't trust the slice management or ops.core_sched_before(). 
3228 */ 3229 if (scx_ops_bypassing()) { 3230 curr->scx.slice = 0; 3231 touch_core_sched(rq, curr); 3232 } else if (SCX_HAS_OP(tick)) { 3233 SCX_CALL_OP(SCX_KF_REST, tick, curr); 3234 } 3235 3236 if (!curr->scx.slice) 3237 resched_curr(rq); 3238 } 3239 3240 static enum scx_task_state scx_get_task_state(const struct task_struct *p) 3241 { 3242 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT; 3243 } 3244 3245 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state) 3246 { 3247 enum scx_task_state prev_state = scx_get_task_state(p); 3248 bool warn = false; 3249 3250 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS)); 3251 3252 switch (state) { 3253 case SCX_TASK_NONE: 3254 break; 3255 case SCX_TASK_INIT: 3256 warn = prev_state != SCX_TASK_NONE; 3257 break; 3258 case SCX_TASK_READY: 3259 warn = prev_state == SCX_TASK_NONE; 3260 break; 3261 case SCX_TASK_ENABLED: 3262 warn = prev_state != SCX_TASK_READY; 3263 break; 3264 default: 3265 warn = true; 3266 return; 3267 } 3268 3269 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]", 3270 prev_state, state, p->comm, p->pid); 3271 3272 p->scx.flags &= ~SCX_TASK_STATE_MASK; 3273 p->scx.flags |= state << SCX_TASK_STATE_SHIFT; 3274 } 3275 3276 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork) 3277 { 3278 int ret; 3279 3280 p->scx.disallow = false; 3281 3282 if (SCX_HAS_OP(init_task)) { 3283 struct scx_init_task_args args = { 3284 .fork = fork, 3285 }; 3286 3287 ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, init_task, p, &args); 3288 if (unlikely(ret)) { 3289 ret = ops_sanitize_err("init_task", ret); 3290 return ret; 3291 } 3292 } 3293 3294 scx_set_task_state(p, SCX_TASK_INIT); 3295 3296 if (p->scx.disallow) { 3297 struct rq *rq; 3298 struct rq_flags rf; 3299 3300 rq = task_rq_lock(p, &rf); 3301 3302 /* 3303 * We're either in fork or load path and @p->policy will be 3304 * applied right after. Reverting @p->policy here and rejecting 3305 * %SCHED_EXT transitions from scx_check_setscheduler() 3306 * guarantees that if ops.init_task() sets @p->disallow, @p can 3307 * never be in SCX. 3308 */ 3309 if (p->policy == SCHED_EXT) { 3310 p->policy = SCHED_NORMAL; 3311 atomic_long_inc(&scx_nr_rejected); 3312 } 3313 3314 task_rq_unlock(rq, p, &rf); 3315 } 3316 3317 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 3318 return 0; 3319 } 3320 3321 static void scx_ops_enable_task(struct task_struct *p) 3322 { 3323 u32 weight; 3324 3325 lockdep_assert_rq_held(task_rq(p)); 3326 3327 /* 3328 * Set the weight before calling ops.enable() so that the scheduler 3329 * doesn't see a stale value if they inspect the task struct. 
3330 */ 3331 if (task_has_idle_policy(p)) 3332 weight = WEIGHT_IDLEPRIO; 3333 else 3334 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; 3335 3336 p->scx.weight = sched_weight_to_cgroup(weight); 3337 3338 if (SCX_HAS_OP(enable)) 3339 SCX_CALL_OP_TASK(SCX_KF_REST, enable, p); 3340 scx_set_task_state(p, SCX_TASK_ENABLED); 3341 3342 if (SCX_HAS_OP(set_weight)) 3343 SCX_CALL_OP(SCX_KF_REST, set_weight, p, p->scx.weight); 3344 } 3345 3346 static void scx_ops_disable_task(struct task_struct *p) 3347 { 3348 lockdep_assert_rq_held(task_rq(p)); 3349 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); 3350 3351 if (SCX_HAS_OP(disable)) 3352 SCX_CALL_OP(SCX_KF_REST, disable, p); 3353 scx_set_task_state(p, SCX_TASK_READY); 3354 } 3355 3356 static void scx_ops_exit_task(struct task_struct *p) 3357 { 3358 struct scx_exit_task_args args = { 3359 .cancelled = false, 3360 }; 3361 3362 lockdep_assert_rq_held(task_rq(p)); 3363 3364 switch (scx_get_task_state(p)) { 3365 case SCX_TASK_NONE: 3366 return; 3367 case SCX_TASK_INIT: 3368 args.cancelled = true; 3369 break; 3370 case SCX_TASK_READY: 3371 break; 3372 case SCX_TASK_ENABLED: 3373 scx_ops_disable_task(p); 3374 break; 3375 default: 3376 WARN_ON_ONCE(true); 3377 return; 3378 } 3379 3380 if (SCX_HAS_OP(exit_task)) 3381 SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args); 3382 scx_set_task_state(p, SCX_TASK_NONE); 3383 } 3384 3385 void init_scx_entity(struct sched_ext_entity *scx) 3386 { 3387 /* 3388 * init_idle() calls this function again after fork sequence is 3389 * complete. Don't touch ->tasks_node as it's already linked. 3390 */ 3391 memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node)); 3392 3393 INIT_LIST_HEAD(&scx->dsq_list.node); 3394 RB_CLEAR_NODE(&scx->dsq_priq); 3395 scx->sticky_cpu = -1; 3396 scx->holding_cpu = -1; 3397 INIT_LIST_HEAD(&scx->runnable_node); 3398 scx->runnable_at = jiffies; 3399 scx->ddsp_dsq_id = SCX_DSQ_INVALID; 3400 scx->slice = SCX_SLICE_DFL; 3401 } 3402 3403 void scx_pre_fork(struct task_struct *p) 3404 { 3405 /* 3406 * BPF scheduler enable/disable paths want to be able to iterate and 3407 * update all tasks which can become complex when racing forks. As 3408 * enable/disable are very cold paths, let's use a percpu_rwsem to 3409 * exclude forks. 3410 */ 3411 percpu_down_read(&scx_fork_rwsem); 3412 } 3413 3414 int scx_fork(struct task_struct *p) 3415 { 3416 percpu_rwsem_assert_held(&scx_fork_rwsem); 3417 3418 if (scx_enabled()) 3419 return scx_ops_init_task(p, task_group(p), true); 3420 else 3421 return 0; 3422 } 3423 3424 void scx_post_fork(struct task_struct *p) 3425 { 3426 if (scx_enabled()) { 3427 scx_set_task_state(p, SCX_TASK_READY); 3428 3429 /* 3430 * Enable the task immediately if it's running on sched_ext. 3431 * Otherwise, it'll be enabled in switching_to_scx() if and 3432 * when it's ever configured to run with a SCHED_EXT policy. 
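		 *
		 * (Editor's note, not part of this file: switching a task to
		 * SCHED_EXT from userspace is just a policy change, e.g.
		 *
		 *	struct sched_param param = { .sched_priority = 0 };
		 *	sched_setscheduler(pid, SCHED_EXT, &param);
		 *
		 * with SCHED_EXT taken from include/uapi/linux/sched.h.)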
3433 */ 3434 if (p->sched_class == &ext_sched_class) { 3435 struct rq_flags rf; 3436 struct rq *rq; 3437 3438 rq = task_rq_lock(p, &rf); 3439 scx_ops_enable_task(p); 3440 task_rq_unlock(rq, p, &rf); 3441 } 3442 } 3443 3444 spin_lock_irq(&scx_tasks_lock); 3445 list_add_tail(&p->scx.tasks_node, &scx_tasks); 3446 spin_unlock_irq(&scx_tasks_lock); 3447 3448 percpu_up_read(&scx_fork_rwsem); 3449 } 3450 3451 void scx_cancel_fork(struct task_struct *p) 3452 { 3453 if (scx_enabled()) { 3454 struct rq *rq; 3455 struct rq_flags rf; 3456 3457 rq = task_rq_lock(p, &rf); 3458 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY); 3459 scx_ops_exit_task(p); 3460 task_rq_unlock(rq, p, &rf); 3461 } 3462 3463 percpu_up_read(&scx_fork_rwsem); 3464 } 3465 3466 void sched_ext_free(struct task_struct *p) 3467 { 3468 unsigned long flags; 3469 3470 spin_lock_irqsave(&scx_tasks_lock, flags); 3471 list_del_init(&p->scx.tasks_node); 3472 spin_unlock_irqrestore(&scx_tasks_lock, flags); 3473 3474 /* 3475 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY -> 3476 * ENABLED transitions can't race us. Disable ops for @p. 3477 */ 3478 if (scx_get_task_state(p) != SCX_TASK_NONE) { 3479 struct rq_flags rf; 3480 struct rq *rq; 3481 3482 rq = task_rq_lock(p, &rf); 3483 scx_ops_exit_task(p); 3484 task_rq_unlock(rq, p, &rf); 3485 } 3486 } 3487 3488 static void reweight_task_scx(struct rq *rq, struct task_struct *p, 3489 const struct load_weight *lw) 3490 { 3491 lockdep_assert_rq_held(task_rq(p)); 3492 3493 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); 3494 if (SCX_HAS_OP(set_weight)) 3495 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight); 3496 } 3497 3498 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio) 3499 { 3500 } 3501 3502 static void switching_to_scx(struct rq *rq, struct task_struct *p) 3503 { 3504 scx_ops_enable_task(p); 3505 3506 /* 3507 * set_cpus_allowed_scx() is not called while @p is associated with a 3508 * different scheduler class. Keep the BPF scheduler up-to-date. 3509 */ 3510 if (SCX_HAS_OP(set_cpumask)) 3511 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, 3512 (struct cpumask *)p->cpus_ptr); 3513 } 3514 3515 static void switched_from_scx(struct rq *rq, struct task_struct *p) 3516 { 3517 scx_ops_disable_task(p); 3518 } 3519 3520 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} 3521 static void switched_to_scx(struct rq *rq, struct task_struct *p) {} 3522 3523 int scx_check_setscheduler(struct task_struct *p, int policy) 3524 { 3525 lockdep_assert_rq_held(task_rq(p)); 3526 3527 /* if disallow, reject transitioning into SCX */ 3528 if (scx_enabled() && READ_ONCE(p->scx.disallow) && 3529 p->policy != policy && policy == SCHED_EXT) 3530 return -EACCES; 3531 3532 return 0; 3533 } 3534 3535 #ifdef CONFIG_NO_HZ_FULL 3536 bool scx_can_stop_tick(struct rq *rq) 3537 { 3538 struct task_struct *p = rq->curr; 3539 3540 if (scx_ops_bypassing()) 3541 return false; 3542 3543 if (p->sched_class != &ext_sched_class) 3544 return true; 3545 3546 /* 3547 * @rq can dispatch from different DSQs, so we can't tell whether it 3548 * needs the tick or not by looking at nr_running. Allow stopping ticks 3549 * iff the BPF scheduler indicated so. See set_next_task_scx(). 3550 */ 3551 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; 3552 } 3553 #endif 3554 3555 /* 3556 * Omitted operations: 3557 * 3558 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task 3559 * isn't tied to the CPU at that point. 
Preemption is implemented by resetting 3560 * the victim task's slice to 0 and triggering reschedule on the target CPU. 3561 * 3562 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient. 3563 * 3564 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of 3565 * their current sched_class. Call them directly from sched core instead. 3566 * 3567 * - task_woken: Unnecessary. 3568 */ 3569 DEFINE_SCHED_CLASS(ext) = { 3570 .enqueue_task = enqueue_task_scx, 3571 .dequeue_task = dequeue_task_scx, 3572 .yield_task = yield_task_scx, 3573 .yield_to_task = yield_to_task_scx, 3574 3575 .wakeup_preempt = wakeup_preempt_scx, 3576 3577 .pick_next_task = pick_next_task_scx, 3578 3579 .put_prev_task = put_prev_task_scx, 3580 .set_next_task = set_next_task_scx, 3581 3582 .switch_class = switch_class_scx, 3583 3584 #ifdef CONFIG_SMP 3585 .balance = balance_scx, 3586 .select_task_rq = select_task_rq_scx, 3587 .set_cpus_allowed = set_cpus_allowed_scx, 3588 3589 .rq_online = rq_online_scx, 3590 .rq_offline = rq_offline_scx, 3591 #endif 3592 3593 #ifdef CONFIG_SCHED_CORE 3594 .pick_task = pick_task_scx, 3595 #endif 3596 3597 .task_tick = task_tick_scx, 3598 3599 .switching_to = switching_to_scx, 3600 .switched_from = switched_from_scx, 3601 .switched_to = switched_to_scx, 3602 .reweight_task = reweight_task_scx, 3603 .prio_changed = prio_changed_scx, 3604 3605 .update_curr = update_curr_scx, 3606 3607 #ifdef CONFIG_UCLAMP_TASK 3608 .uclamp_enabled = 1, 3609 #endif 3610 }; 3611 3612 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id) 3613 { 3614 memset(dsq, 0, sizeof(*dsq)); 3615 3616 raw_spin_lock_init(&dsq->lock); 3617 INIT_LIST_HEAD(&dsq->list); 3618 dsq->id = dsq_id; 3619 } 3620 3621 static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node) 3622 { 3623 struct scx_dispatch_q *dsq; 3624 int ret; 3625 3626 if (dsq_id & SCX_DSQ_FLAG_BUILTIN) 3627 return ERR_PTR(-EINVAL); 3628 3629 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node); 3630 if (!dsq) 3631 return ERR_PTR(-ENOMEM); 3632 3633 init_dsq(dsq, dsq_id); 3634 3635 ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node, 3636 dsq_hash_params); 3637 if (ret) { 3638 kfree(dsq); 3639 return ERR_PTR(ret); 3640 } 3641 return dsq; 3642 } 3643 3644 static void free_dsq_irq_workfn(struct irq_work *irq_work) 3645 { 3646 struct llist_node *to_free = llist_del_all(&dsqs_to_free); 3647 struct scx_dispatch_q *dsq, *tmp_dsq; 3648 3649 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node) 3650 kfree_rcu(dsq, rcu); 3651 } 3652 3653 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn); 3654 3655 static void destroy_dsq(u64 dsq_id) 3656 { 3657 struct scx_dispatch_q *dsq; 3658 unsigned long flags; 3659 3660 rcu_read_lock(); 3661 3662 dsq = find_user_dsq(dsq_id); 3663 if (!dsq) 3664 goto out_unlock_rcu; 3665 3666 raw_spin_lock_irqsave(&dsq->lock, flags); 3667 3668 if (dsq->nr) { 3669 scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)", 3670 dsq->id, dsq->nr); 3671 goto out_unlock_dsq; 3672 } 3673 3674 if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params)) 3675 goto out_unlock_dsq; 3676 3677 /* 3678 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from 3679 * queueing more tasks. As this function can be called from anywhere, 3680 * freeing is bounced through an irq work to avoid nesting RCU 3681 * operations inside scheduler locks. 
3682 */ 3683 dsq->id = SCX_DSQ_INVALID; 3684 llist_add(&dsq->free_node, &dsqs_to_free); 3685 irq_work_queue(&free_dsq_irq_work); 3686 3687 out_unlock_dsq: 3688 raw_spin_unlock_irqrestore(&dsq->lock, flags); 3689 out_unlock_rcu: 3690 rcu_read_unlock(); 3691 } 3692 3693 3694 /******************************************************************************** 3695 * Sysfs interface and ops enable/disable. 3696 */ 3697 3698 #define SCX_ATTR(_name) \ 3699 static struct kobj_attribute scx_attr_##_name = { \ 3700 .attr = { .name = __stringify(_name), .mode = 0444 }, \ 3701 .show = scx_attr_##_name##_show, \ 3702 } 3703 3704 static ssize_t scx_attr_state_show(struct kobject *kobj, 3705 struct kobj_attribute *ka, char *buf) 3706 { 3707 return sysfs_emit(buf, "%s\n", 3708 scx_ops_enable_state_str[scx_ops_enable_state()]); 3709 } 3710 SCX_ATTR(state); 3711 3712 static ssize_t scx_attr_switch_all_show(struct kobject *kobj, 3713 struct kobj_attribute *ka, char *buf) 3714 { 3715 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all)); 3716 } 3717 SCX_ATTR(switch_all); 3718 3719 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj, 3720 struct kobj_attribute *ka, char *buf) 3721 { 3722 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected)); 3723 } 3724 SCX_ATTR(nr_rejected); 3725 3726 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj, 3727 struct kobj_attribute *ka, char *buf) 3728 { 3729 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq)); 3730 } 3731 SCX_ATTR(hotplug_seq); 3732 3733 static struct attribute *scx_global_attrs[] = { 3734 &scx_attr_state.attr, 3735 &scx_attr_switch_all.attr, 3736 &scx_attr_nr_rejected.attr, 3737 &scx_attr_hotplug_seq.attr, 3738 NULL, 3739 }; 3740 3741 static const struct attribute_group scx_global_attr_group = { 3742 .attrs = scx_global_attrs, 3743 }; 3744 3745 static void scx_kobj_release(struct kobject *kobj) 3746 { 3747 kfree(kobj); 3748 } 3749 3750 static ssize_t scx_attr_ops_show(struct kobject *kobj, 3751 struct kobj_attribute *ka, char *buf) 3752 { 3753 return sysfs_emit(buf, "%s\n", scx_ops.name); 3754 } 3755 SCX_ATTR(ops); 3756 3757 static struct attribute *scx_sched_attrs[] = { 3758 &scx_attr_ops.attr, 3759 NULL, 3760 }; 3761 ATTRIBUTE_GROUPS(scx_sched); 3762 3763 static const struct kobj_type scx_ktype = { 3764 .release = scx_kobj_release, 3765 .sysfs_ops = &kobj_sysfs_ops, 3766 .default_groups = scx_sched_groups, 3767 }; 3768 3769 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 3770 { 3771 return add_uevent_var(env, "SCXOPS=%s", scx_ops.name); 3772 } 3773 3774 static const struct kset_uevent_ops scx_uevent_ops = { 3775 .uevent = scx_uevent, 3776 }; 3777 3778 /* 3779 * Used by sched_fork() and __setscheduler_prio() to pick the matching 3780 * sched_class. dl/rt are already handled. 3781 */ 3782 bool task_should_scx(struct task_struct *p) 3783 { 3784 if (!scx_enabled() || 3785 unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING)) 3786 return false; 3787 if (READ_ONCE(scx_switching_all)) 3788 return true; 3789 return p->policy == SCHED_EXT; 3790 } 3791 3792 /** 3793 * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress 3794 * 3795 * Bypassing guarantees that all runnable tasks make forward progress without 3796 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might 3797 * be held by tasks that the BPF scheduler is forgetting to run, which 3798 * unfortunately also excludes toggling the static branches. 
3799 * 3800 * Let's work around by overriding a couple ops and modifying behaviors based on 3801 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue 3802 * to force global FIFO scheduling. 3803 * 3804 * a. ops.enqueue() is ignored and tasks are queued in simple global FIFO order. 3805 * 3806 * b. ops.dispatch() is ignored. 3807 * 3808 * c. balance_scx() never sets %SCX_TASK_BAL_KEEP as the slice value can't be 3809 * trusted. Whenever a tick triggers, the running task is rotated to the tail 3810 * of the queue with core_sched_at touched. 3811 * 3812 * d. pick_next_task() suppresses zero slice warning. 3813 * 3814 * e. scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM 3815 * operations. 3816 * 3817 * f. scx_prio_less() reverts to the default core_sched_at order. 3818 */ 3819 static void scx_ops_bypass(bool bypass) 3820 { 3821 int depth, cpu; 3822 3823 if (bypass) { 3824 depth = atomic_inc_return(&scx_ops_bypass_depth); 3825 WARN_ON_ONCE(depth <= 0); 3826 if (depth != 1) 3827 return; 3828 } else { 3829 depth = atomic_dec_return(&scx_ops_bypass_depth); 3830 WARN_ON_ONCE(depth < 0); 3831 if (depth != 0) 3832 return; 3833 } 3834 3835 /* 3836 * We need to guarantee that no tasks are on the BPF scheduler while 3837 * bypassing. Either we see enabled or the enable path sees the 3838 * increased bypass_depth before moving tasks to SCX. 3839 */ 3840 if (!scx_enabled()) 3841 return; 3842 3843 /* 3844 * No task property is changing. We just need to make sure all currently 3845 * queued tasks are re-queued according to the new scx_ops_bypassing() 3846 * state. As an optimization, walk each rq's runnable_list instead of 3847 * the scx_tasks list. 3848 * 3849 * This function can't trust the scheduler and thus can't use 3850 * cpus_read_lock(). Walk all possible CPUs instead of online. 3851 */ 3852 for_each_possible_cpu(cpu) { 3853 struct rq *rq = cpu_rq(cpu); 3854 struct rq_flags rf; 3855 struct task_struct *p, *n; 3856 3857 rq_lock_irqsave(rq, &rf); 3858 3859 /* 3860 * The use of list_for_each_entry_safe_reverse() is required 3861 * because each task is going to be removed from and added back 3862 * to the runnable_list during iteration. Because they're added 3863 * to the tail of the list, safe reverse iteration can still 3864 * visit all nodes. 
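 *
 * Illustrative sketch of the pattern (not the exact code below): because
 * each entry is re-added at the tail, it ends up behind the reverse
 * cursor and is not revisited, while the saved @n keeps the walk valid:
 *
 *        list_for_each_entry_safe_reverse(p, n, head, node) {
 *                list_del_init(&p->node);
 *                list_add_tail(&p->node, head);
 *        }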
3865 */ 3866 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, 3867 scx.runnable_node) { 3868 struct sched_enq_and_set_ctx ctx; 3869 3870 /* cycling deq/enq is enough, see the function comment */ 3871 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 3872 sched_enq_and_set_task(&ctx); 3873 } 3874 3875 rq_unlock_irqrestore(rq, &rf); 3876 3877 /* kick to restore ticks */ 3878 resched_cpu(cpu); 3879 } 3880 } 3881 3882 static void free_exit_info(struct scx_exit_info *ei) 3883 { 3884 kfree(ei->dump); 3885 kfree(ei->msg); 3886 kfree(ei->bt); 3887 kfree(ei); 3888 } 3889 3890 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) 3891 { 3892 struct scx_exit_info *ei; 3893 3894 ei = kzalloc(sizeof(*ei), GFP_KERNEL); 3895 if (!ei) 3896 return NULL; 3897 3898 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL); 3899 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); 3900 ei->dump = kzalloc(exit_dump_len, GFP_KERNEL); 3901 3902 if (!ei->bt || !ei->msg || !ei->dump) { 3903 free_exit_info(ei); 3904 return NULL; 3905 } 3906 3907 return ei; 3908 } 3909 3910 static const char *scx_exit_reason(enum scx_exit_kind kind) 3911 { 3912 switch (kind) { 3913 case SCX_EXIT_UNREG: 3914 return "Scheduler unregistered from user space"; 3915 case SCX_EXIT_UNREG_BPF: 3916 return "Scheduler unregistered from BPF"; 3917 case SCX_EXIT_UNREG_KERN: 3918 return "Scheduler unregistered from the main kernel"; 3919 case SCX_EXIT_SYSRQ: 3920 return "disabled by sysrq-S"; 3921 case SCX_EXIT_ERROR: 3922 return "runtime error"; 3923 case SCX_EXIT_ERROR_BPF: 3924 return "scx_bpf_error"; 3925 case SCX_EXIT_ERROR_STALL: 3926 return "runnable task stall"; 3927 default: 3928 return "<UNKNOWN>"; 3929 } 3930 } 3931 3932 static void scx_ops_disable_workfn(struct kthread_work *work) 3933 { 3934 struct scx_exit_info *ei = scx_exit_info; 3935 struct scx_task_iter sti; 3936 struct task_struct *p; 3937 struct rhashtable_iter rht_iter; 3938 struct scx_dispatch_q *dsq; 3939 int i, kind; 3940 3941 kind = atomic_read(&scx_exit_kind); 3942 while (true) { 3943 /* 3944 * NONE indicates that a new scx_ops has been registered since 3945 * disable was scheduled - don't kill the new ops. DONE 3946 * indicates that the ops has already been disabled. 3947 */ 3948 if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE) 3949 return; 3950 if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE)) 3951 break; 3952 } 3953 ei->kind = kind; 3954 ei->reason = scx_exit_reason(ei->kind); 3955 3956 /* guarantee forward progress by bypassing scx_ops */ 3957 scx_ops_bypass(true); 3958 3959 switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) { 3960 case SCX_OPS_DISABLING: 3961 WARN_ONCE(true, "sched_ext: duplicate disabling instance?"); 3962 break; 3963 case SCX_OPS_DISABLED: 3964 pr_warn("sched_ext: ops error detected without ops (%s)\n", 3965 scx_exit_info->msg); 3966 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != 3967 SCX_OPS_DISABLING); 3968 goto done; 3969 default: 3970 break; 3971 } 3972 3973 /* 3974 * Here, every runnable task is guaranteed to make forward progress and 3975 * we can safely use blocking synchronization constructs. Actually 3976 * disable ops. 3977 */ 3978 mutex_lock(&scx_ops_enable_mutex); 3979 3980 static_branch_disable(&__scx_switched_all); 3981 WRITE_ONCE(scx_switching_all, false); 3982 3983 /* 3984 * Avoid racing against fork. See scx_ops_enable() for explanation on 3985 * the locking order. 
3986 */ 3987 percpu_down_write(&scx_fork_rwsem); 3988 cpus_read_lock(); 3989 3990 spin_lock_irq(&scx_tasks_lock); 3991 scx_task_iter_init(&sti); 3992 /* 3993 * Invoke scx_ops_exit_task() on all non-idle tasks, including 3994 * TASK_DEAD tasks. Because dead tasks may have a nonzero refcount, 3995 * we may not have invoked sched_ext_free() on them by the time a 3996 * scheduler is disabled. We must therefore exit the task here, or we'd 3997 * fail to invoke ops.exit_task(), as the scheduler will have been 3998 * unloaded by the time the task is subsequently exited on the 3999 * sched_ext_free() path. 4000 */ 4001 while ((p = scx_task_iter_next_locked(&sti, true))) { 4002 const struct sched_class *old_class = p->sched_class; 4003 struct sched_enq_and_set_ctx ctx; 4004 4005 if (READ_ONCE(p->__state) != TASK_DEAD) { 4006 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, 4007 &ctx); 4008 4009 p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL); 4010 __setscheduler_prio(p, p->prio); 4011 check_class_changing(task_rq(p), p, old_class); 4012 4013 sched_enq_and_set_task(&ctx); 4014 4015 check_class_changed(task_rq(p), p, old_class, p->prio); 4016 } 4017 scx_ops_exit_task(p); 4018 } 4019 scx_task_iter_exit(&sti); 4020 spin_unlock_irq(&scx_tasks_lock); 4021 4022 /* no task is on scx, turn off all the switches and flush in-progress calls */ 4023 static_branch_disable_cpuslocked(&__scx_ops_enabled); 4024 for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++) 4025 static_branch_disable_cpuslocked(&scx_has_op[i]); 4026 static_branch_disable_cpuslocked(&scx_ops_enq_last); 4027 static_branch_disable_cpuslocked(&scx_ops_enq_exiting); 4028 static_branch_disable_cpuslocked(&scx_ops_cpu_preempt); 4029 static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); 4030 synchronize_rcu(); 4031 4032 cpus_read_unlock(); 4033 percpu_up_write(&scx_fork_rwsem); 4034 4035 if (ei->kind >= SCX_EXIT_ERROR) { 4036 printk(KERN_ERR "sched_ext: BPF scheduler \"%s\" errored, disabling\n", scx_ops.name); 4037 4038 if (ei->msg[0] == '\0') 4039 printk(KERN_ERR "sched_ext: %s\n", ei->reason); 4040 else 4041 printk(KERN_ERR "sched_ext: %s (%s)\n", ei->reason, ei->msg); 4042 4043 stack_trace_print(ei->bt, ei->bt_len, 2); 4044 } 4045 4046 if (scx_ops.exit) 4047 SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei); 4048 4049 cancel_delayed_work_sync(&scx_watchdog_work); 4050 4051 /* 4052 * Delete the kobject from the hierarchy eagerly in addition to just 4053 * dropping a reference. Otherwise, if the object is deleted 4054 * asynchronously, sysfs could observe an object of the same name still 4055 * in the hierarchy when another scheduler is loaded. 
4056 */ 4057 kobject_del(scx_root_kobj); 4058 kobject_put(scx_root_kobj); 4059 scx_root_kobj = NULL; 4060 4061 memset(&scx_ops, 0, sizeof(scx_ops)); 4062 4063 rhashtable_walk_enter(&dsq_hash, &rht_iter); 4064 do { 4065 rhashtable_walk_start(&rht_iter); 4066 4067 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq)) 4068 destroy_dsq(dsq->id); 4069 4070 rhashtable_walk_stop(&rht_iter); 4071 } while (dsq == ERR_PTR(-EAGAIN)); 4072 rhashtable_walk_exit(&rht_iter); 4073 4074 free_percpu(scx_dsp_ctx); 4075 scx_dsp_ctx = NULL; 4076 scx_dsp_max_batch = 0; 4077 4078 free_exit_info(scx_exit_info); 4079 scx_exit_info = NULL; 4080 4081 mutex_unlock(&scx_ops_enable_mutex); 4082 4083 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != 4084 SCX_OPS_DISABLING); 4085 done: 4086 scx_ops_bypass(false); 4087 } 4088 4089 static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn); 4090 4091 static void schedule_scx_ops_disable_work(void) 4092 { 4093 struct kthread_worker *helper = READ_ONCE(scx_ops_helper); 4094 4095 /* 4096 * We may be called spuriously before the first bpf_sched_ext_reg(). If 4097 * scx_ops_helper isn't set up yet, there's nothing to do. 4098 */ 4099 if (helper) 4100 kthread_queue_work(helper, &scx_ops_disable_work); 4101 } 4102 4103 static void scx_ops_disable(enum scx_exit_kind kind) 4104 { 4105 int none = SCX_EXIT_NONE; 4106 4107 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)) 4108 kind = SCX_EXIT_ERROR; 4109 4110 atomic_try_cmpxchg(&scx_exit_kind, &none, kind); 4111 4112 schedule_scx_ops_disable_work(); 4113 } 4114 4115 static void dump_newline(struct seq_buf *s) 4116 { 4117 trace_sched_ext_dump(""); 4118 4119 /* @s may be zero sized and seq_buf triggers WARN if so */ 4120 if (s->size) 4121 seq_buf_putc(s, '\n'); 4122 } 4123 4124 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...) 4125 { 4126 va_list args; 4127 4128 #ifdef CONFIG_TRACEPOINTS 4129 if (trace_sched_ext_dump_enabled()) { 4130 /* protected by scx_dump_state()::dump_lock */ 4131 static char line_buf[SCX_EXIT_MSG_LEN]; 4132 4133 va_start(args, fmt); 4134 vscnprintf(line_buf, sizeof(line_buf), fmt, args); 4135 va_end(args); 4136 4137 trace_sched_ext_dump(line_buf); 4138 } 4139 #endif 4140 /* @s may be zero sized and seq_buf triggers WARN if so */ 4141 if (s->size) { 4142 va_start(args, fmt); 4143 seq_buf_vprintf(s, fmt, args); 4144 va_end(args); 4145 4146 seq_buf_putc(s, '\n'); 4147 } 4148 } 4149 4150 static void dump_stack_trace(struct seq_buf *s, const char *prefix, 4151 const unsigned long *bt, unsigned int len) 4152 { 4153 unsigned int i; 4154 4155 for (i = 0; i < len; i++) 4156 dump_line(s, "%s%pS", prefix, (void *)bt[i]); 4157 } 4158 4159 static void ops_dump_init(struct seq_buf *s, const char *prefix) 4160 { 4161 struct scx_dump_data *dd = &scx_dump_data; 4162 4163 lockdep_assert_irqs_disabled(); 4164 4165 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */ 4166 dd->first = true; 4167 dd->cursor = 0; 4168 dd->s = s; 4169 dd->prefix = prefix; 4170 } 4171 4172 static void ops_dump_flush(void) 4173 { 4174 struct scx_dump_data *dd = &scx_dump_data; 4175 char *line = dd->buf.line; 4176 4177 if (!dd->cursor) 4178 return; 4179 4180 /* 4181 * There's something to flush and this is the first line. Insert a blank 4182 * line to distinguish ops dump. 4183 */ 4184 if (dd->first) { 4185 dump_newline(dd->s); 4186 dd->first = false; 4187 } 4188 4189 /* 4190 * There may be multiple lines in $line. Scan and emit each line 4191 * separately. 
4192 */ 4193 while (true) { 4194 char *end = line; 4195 char c; 4196 4197 while (*end != '\n' && *end != '\0') 4198 end++; 4199 4200 /* 4201 * If $line overflowed, it may not have newline at the end. 4202 * Always emit with a newline. 4203 */ 4204 c = *end; 4205 *end = '\0'; 4206 dump_line(dd->s, "%s%s", dd->prefix, line); 4207 if (c == '\0') 4208 break; 4209 4210 /* move to the next line */ 4211 end++; 4212 if (*end == '\0') 4213 break; 4214 line = end; 4215 } 4216 4217 dd->cursor = 0; 4218 } 4219 4220 static void ops_dump_exit(void) 4221 { 4222 ops_dump_flush(); 4223 scx_dump_data.cpu = -1; 4224 } 4225 4226 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, 4227 struct task_struct *p, char marker) 4228 { 4229 static unsigned long bt[SCX_EXIT_BT_LEN]; 4230 char dsq_id_buf[19] = "(n/a)"; 4231 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); 4232 unsigned int bt_len; 4233 4234 if (p->scx.dsq) 4235 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx", 4236 (unsigned long long)p->scx.dsq->id); 4237 4238 dump_newline(s); 4239 dump_line(s, " %c%c %s[%d] %+ldms", 4240 marker, task_state_to_char(p), p->comm, p->pid, 4241 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); 4242 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu", 4243 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, 4244 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, 4245 ops_state >> SCX_OPSS_QSEQ_SHIFT); 4246 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu", 4247 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf, 4248 p->scx.dsq_vtime); 4249 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr)); 4250 4251 if (SCX_HAS_OP(dump_task)) { 4252 ops_dump_init(s, " "); 4253 SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p); 4254 ops_dump_exit(); 4255 } 4256 4257 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); 4258 if (bt_len) { 4259 dump_newline(s); 4260 dump_stack_trace(s, " ", bt, bt_len); 4261 } 4262 } 4263 4264 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len) 4265 { 4266 static DEFINE_SPINLOCK(dump_lock); 4267 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n"; 4268 struct scx_dump_ctx dctx = { 4269 .kind = ei->kind, 4270 .exit_code = ei->exit_code, 4271 .reason = ei->reason, 4272 .at_ns = ktime_get_ns(), 4273 .at_jiffies = jiffies, 4274 }; 4275 struct seq_buf s; 4276 unsigned long flags; 4277 char *buf; 4278 int cpu; 4279 4280 spin_lock_irqsave(&dump_lock, flags); 4281 4282 seq_buf_init(&s, ei->dump, dump_len); 4283 4284 if (ei->kind == SCX_EXIT_NONE) { 4285 dump_line(&s, "Debug dump triggered by %s", ei->reason); 4286 } else { 4287 dump_line(&s, "%s[%d] triggered exit kind %d:", 4288 current->comm, current->pid, ei->kind); 4289 dump_line(&s, " %s (%s)", ei->reason, ei->msg); 4290 dump_newline(&s); 4291 dump_line(&s, "Backtrace:"); 4292 dump_stack_trace(&s, " ", ei->bt, ei->bt_len); 4293 } 4294 4295 if (SCX_HAS_OP(dump)) { 4296 ops_dump_init(&s, ""); 4297 SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx); 4298 ops_dump_exit(); 4299 } 4300 4301 dump_newline(&s); 4302 dump_line(&s, "CPU states"); 4303 dump_line(&s, "----------"); 4304 4305 for_each_possible_cpu(cpu) { 4306 struct rq *rq = cpu_rq(cpu); 4307 struct rq_flags rf; 4308 struct task_struct *p; 4309 struct seq_buf ns; 4310 size_t avail, used; 4311 bool idle; 4312 4313 rq_lock(rq, &rf); 4314 4315 idle = list_empty(&rq->scx.runnable_list) && 4316 rq->curr->sched_class == &idle_sched_class; 4317 4318 if (idle && !SCX_HAS_OP(dump_cpu)) 4319 goto 
next; 4320 4321 /* 4322 * We don't yet know whether ops.dump_cpu() will produce output 4323 * and we may want to skip the default CPU dump if it doesn't. 4324 * Use a nested seq_buf to generate the standard dump so that we 4325 * can decide whether to commit later. 4326 */ 4327 avail = seq_buf_get_buf(&s, &buf); 4328 seq_buf_init(&ns, buf, avail); 4329 4330 dump_newline(&ns); 4331 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu", 4332 cpu, rq->scx.nr_running, rq->scx.flags, 4333 rq->scx.cpu_released, rq->scx.ops_qseq, 4334 rq->scx.pnt_seq); 4335 dump_line(&ns, " curr=%s[%d] class=%ps", 4336 rq->curr->comm, rq->curr->pid, 4337 rq->curr->sched_class); 4338 if (!cpumask_empty(rq->scx.cpus_to_kick)) 4339 dump_line(&ns, " cpus_to_kick : %*pb", 4340 cpumask_pr_args(rq->scx.cpus_to_kick)); 4341 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle)) 4342 dump_line(&ns, " idle_to_kick : %*pb", 4343 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle)); 4344 if (!cpumask_empty(rq->scx.cpus_to_preempt)) 4345 dump_line(&ns, " cpus_to_preempt: %*pb", 4346 cpumask_pr_args(rq->scx.cpus_to_preempt)); 4347 if (!cpumask_empty(rq->scx.cpus_to_wait)) 4348 dump_line(&ns, " cpus_to_wait : %*pb", 4349 cpumask_pr_args(rq->scx.cpus_to_wait)); 4350 4351 used = seq_buf_used(&ns); 4352 if (SCX_HAS_OP(dump_cpu)) { 4353 ops_dump_init(&ns, " "); 4354 SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle); 4355 ops_dump_exit(); 4356 } 4357 4358 /* 4359 * If idle && nothing generated by ops.dump_cpu(), there's 4360 * nothing interesting. Skip. 4361 */ 4362 if (idle && used == seq_buf_used(&ns)) 4363 goto next; 4364 4365 /* 4366 * $s may already have overflowed when $ns was created. If so, 4367 * calling commit on it will trigger BUG. 4368 */ 4369 if (avail) { 4370 seq_buf_commit(&s, seq_buf_used(&ns)); 4371 if (seq_buf_has_overflowed(&ns)) 4372 seq_buf_set_overflow(&s); 4373 } 4374 4375 if (rq->curr->sched_class == &ext_sched_class) 4376 scx_dump_task(&s, &dctx, rq->curr, '*'); 4377 4378 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) 4379 scx_dump_task(&s, &dctx, p, ' '); 4380 next: 4381 rq_unlock(rq, &rf); 4382 } 4383 4384 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker)) 4385 memcpy(ei->dump + dump_len - sizeof(trunc_marker), 4386 trunc_marker, sizeof(trunc_marker)); 4387 4388 spin_unlock_irqrestore(&dump_lock, flags); 4389 } 4390 4391 static void scx_ops_error_irq_workfn(struct irq_work *irq_work) 4392 { 4393 struct scx_exit_info *ei = scx_exit_info; 4394 4395 if (ei->kind >= SCX_EXIT_ERROR) 4396 scx_dump_state(ei, scx_ops.exit_dump_len); 4397 4398 schedule_scx_ops_disable_work(); 4399 } 4400 4401 static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn); 4402 4403 static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind, 4404 s64 exit_code, 4405 const char *fmt, ...) 4406 { 4407 struct scx_exit_info *ei = scx_exit_info; 4408 int none = SCX_EXIT_NONE; 4409 va_list args; 4410 4411 if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind)) 4412 return; 4413 4414 ei->exit_code = exit_code; 4415 4416 if (kind >= SCX_EXIT_ERROR) 4417 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1); 4418 4419 va_start(args, fmt); 4420 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args); 4421 va_end(args); 4422 4423 /* 4424 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again 4425 * in scx_ops_disable_workfn(). 
4426 */ 4427 ei->kind = kind; 4428 ei->reason = scx_exit_reason(ei->kind); 4429 4430 irq_work_queue(&scx_ops_error_irq_work); 4431 } 4432 4433 static struct kthread_worker *scx_create_rt_helper(const char *name) 4434 { 4435 struct kthread_worker *helper; 4436 4437 helper = kthread_create_worker(0, name); 4438 if (helper) 4439 sched_set_fifo(helper->task); 4440 return helper; 4441 } 4442 4443 static void check_hotplug_seq(const struct sched_ext_ops *ops) 4444 { 4445 unsigned long long global_hotplug_seq; 4446 4447 /* 4448 * If a hotplug event has occurred between when a scheduler was 4449 * initialized, and when we were able to attach, exit and notify user 4450 * space about it. 4451 */ 4452 if (ops->hotplug_seq) { 4453 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq); 4454 if (ops->hotplug_seq != global_hotplug_seq) { 4455 scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 4456 "expected hotplug seq %llu did not match actual %llu", 4457 ops->hotplug_seq, global_hotplug_seq); 4458 } 4459 } 4460 } 4461 4462 static int validate_ops(const struct sched_ext_ops *ops) 4463 { 4464 /* 4465 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the 4466 * ops.enqueue() callback isn't implemented. 4467 */ 4468 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) { 4469 scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); 4470 return -EINVAL; 4471 } 4472 4473 return 0; 4474 } 4475 4476 static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link) 4477 { 4478 struct scx_task_iter sti; 4479 struct task_struct *p; 4480 unsigned long timeout; 4481 int i, cpu, ret; 4482 4483 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN), 4484 cpu_possible_mask)) { 4485 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation"); 4486 return -EINVAL; 4487 } 4488 4489 mutex_lock(&scx_ops_enable_mutex); 4490 4491 if (!scx_ops_helper) { 4492 WRITE_ONCE(scx_ops_helper, 4493 scx_create_rt_helper("sched_ext_ops_helper")); 4494 if (!scx_ops_helper) { 4495 ret = -ENOMEM; 4496 goto err_unlock; 4497 } 4498 } 4499 4500 if (scx_ops_enable_state() != SCX_OPS_DISABLED) { 4501 ret = -EBUSY; 4502 goto err_unlock; 4503 } 4504 4505 scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL); 4506 if (!scx_root_kobj) { 4507 ret = -ENOMEM; 4508 goto err_unlock; 4509 } 4510 4511 scx_root_kobj->kset = scx_kset; 4512 ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root"); 4513 if (ret < 0) 4514 goto err; 4515 4516 scx_exit_info = alloc_exit_info(ops->exit_dump_len); 4517 if (!scx_exit_info) { 4518 ret = -ENOMEM; 4519 goto err_del; 4520 } 4521 4522 /* 4523 * Set scx_ops, transition to PREPPING and clear exit info to arm the 4524 * disable path. Failure triggers full disabling from here on. 4525 */ 4526 scx_ops = *ops; 4527 4528 WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_PREPPING) != 4529 SCX_OPS_DISABLED); 4530 4531 atomic_set(&scx_exit_kind, SCX_EXIT_NONE); 4532 scx_warned_zero_slice = false; 4533 4534 atomic_long_set(&scx_nr_rejected, 0); 4535 4536 for_each_possible_cpu(cpu) 4537 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE; 4538 4539 /* 4540 * Keep CPUs stable during enable so that the BPF scheduler can track 4541 * online CPUs by watching ->on/offline_cpu() after ->init(). 
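 *
 * For example, a BPF scheduler may mirror hotplug state into its own
 * bookkeeping from ops.cpu_online()/ops.cpu_offline(). Illustrative
 * sketch only - "online_cpus" and the BPF_STRUCT_OPS() helper macro are
 * assumptions borrowed from the example schedulers, not part of this file:
 *
 *        void BPF_STRUCT_OPS(myscx_cpu_online, s32 cpu)
 *        {
 *                __sync_fetch_and_or(&online_cpus[cpu / 64], 1LLU << (cpu % 64));
 *        }
 *
 *        void BPF_STRUCT_OPS(myscx_cpu_offline, s32 cpu)
 *        {
 *                __sync_fetch_and_and(&online_cpus[cpu / 64], ~(1LLU << (cpu % 64)));
 *        }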
4542 */
4543 cpus_read_lock();
4544 
4545 if (scx_ops.init) {
4546 ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, init);
4547 if (ret) {
4548 ret = ops_sanitize_err("init", ret);
4549 goto err_disable_unlock_cpus;
4550 }
4551 }
4552 
4553 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
4554 if (((void (**)(void))ops)[i])
4555 static_branch_enable_cpuslocked(&scx_has_op[i]);
4556 
4557 cpus_read_unlock();
4558 
4559 ret = validate_ops(ops);
4560 if (ret)
4561 goto err_disable;
4562 
4563 WARN_ON_ONCE(scx_dsp_ctx);
4564 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
4565 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
4566 scx_dsp_max_batch),
4567 __alignof__(struct scx_dsp_ctx));
4568 if (!scx_dsp_ctx) {
4569 ret = -ENOMEM;
4570 goto err_disable;
4571 }
4572 
4573 if (ops->timeout_ms)
4574 timeout = msecs_to_jiffies(ops->timeout_ms);
4575 else
4576 timeout = SCX_WATCHDOG_MAX_TIMEOUT;
4577 
4578 WRITE_ONCE(scx_watchdog_timeout, timeout);
4579 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
4580 queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
4581 scx_watchdog_timeout / 2);
4582 
4583 /*
4584 * Lock out forks before opening the floodgate so that they don't wander
4585 * into the operations prematurely.
4586 *
4587 * We don't need to keep the CPUs stable but grab cpus_read_lock() to
4588 * ease future locking changes for cgroup support.
4589 *
4590 * Note that cpu_hotplug_lock must nest inside scx_fork_rwsem due to the
4591 * following dependency chain:
4592 *
4593 * scx_fork_rwsem --> pernet_ops_rwsem --> cpu_hotplug_lock
4594 */
4595 percpu_down_write(&scx_fork_rwsem);
4596 cpus_read_lock();
4597 
4598 check_hotplug_seq(ops);
4599 
4600 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
4601 if (((void (**)(void))ops)[i])
4602 static_branch_enable_cpuslocked(&scx_has_op[i]);
4603 
4604 if (ops->flags & SCX_OPS_ENQ_LAST)
4605 static_branch_enable_cpuslocked(&scx_ops_enq_last);
4606 
4607 if (ops->flags & SCX_OPS_ENQ_EXITING)
4608 static_branch_enable_cpuslocked(&scx_ops_enq_exiting);
4609 if (scx_ops.cpu_acquire || scx_ops.cpu_release)
4610 static_branch_enable_cpuslocked(&scx_ops_cpu_preempt);
4611 
4612 if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
4613 reset_idle_masks();
4614 static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
4615 } else {
4616 static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
4617 }
4618 
4619 static_branch_enable_cpuslocked(&__scx_ops_enabled);
4620 
4621 /*
4622 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
4623 * preventing new tasks from being added. No need to exclude tasks
4624 * leaving as sched_ext_free() can handle both prepped and enabled
4625 * tasks. Prep all tasks first and then enable them with preemption
4626 * disabled.
4627 */ 4628 spin_lock_irq(&scx_tasks_lock); 4629 4630 scx_task_iter_init(&sti); 4631 while ((p = scx_task_iter_next_locked(&sti, false))) { 4632 get_task_struct(p); 4633 scx_task_iter_rq_unlock(&sti); 4634 spin_unlock_irq(&scx_tasks_lock); 4635 4636 ret = scx_ops_init_task(p, task_group(p), false); 4637 if (ret) { 4638 put_task_struct(p); 4639 spin_lock_irq(&scx_tasks_lock); 4640 scx_task_iter_exit(&sti); 4641 spin_unlock_irq(&scx_tasks_lock); 4642 pr_err("sched_ext: ops.init_task() failed (%d) for %s[%d] while loading\n", 4643 ret, p->comm, p->pid); 4644 goto err_disable_unlock_all; 4645 } 4646 4647 put_task_struct(p); 4648 spin_lock_irq(&scx_tasks_lock); 4649 } 4650 scx_task_iter_exit(&sti); 4651 4652 /* 4653 * All tasks are prepped but are still ops-disabled. Ensure that 4654 * %current can't be scheduled out and switch everyone. 4655 * preempt_disable() is necessary because we can't guarantee that 4656 * %current won't be starved if scheduled out while switching. 4657 */ 4658 preempt_disable(); 4659 4660 /* 4661 * From here on, the disable path must assume that tasks have ops 4662 * enabled and need to be recovered. 4663 * 4664 * Transition to ENABLING fails iff the BPF scheduler has already 4665 * triggered scx_bpf_error(). Returning an error code here would lose 4666 * the recorded error information. Exit indicating success so that the 4667 * error is notified through ops.exit() with all the details. 4668 */ 4669 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLING, SCX_OPS_PREPPING)) { 4670 preempt_enable(); 4671 spin_unlock_irq(&scx_tasks_lock); 4672 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE); 4673 ret = 0; 4674 goto err_disable_unlock_all; 4675 } 4676 4677 /* 4678 * We're fully committed and can't fail. The PREPPED -> ENABLED 4679 * transitions here are synchronized against sched_ext_free() through 4680 * scx_tasks_lock. 
4681 */ 4682 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL)); 4683 4684 scx_task_iter_init(&sti); 4685 while ((p = scx_task_iter_next_locked(&sti, false))) { 4686 const struct sched_class *old_class = p->sched_class; 4687 struct sched_enq_and_set_ctx ctx; 4688 4689 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); 4690 4691 scx_set_task_state(p, SCX_TASK_READY); 4692 __setscheduler_prio(p, p->prio); 4693 check_class_changing(task_rq(p), p, old_class); 4694 4695 sched_enq_and_set_task(&ctx); 4696 4697 check_class_changed(task_rq(p), p, old_class, p->prio); 4698 } 4699 scx_task_iter_exit(&sti); 4700 4701 spin_unlock_irq(&scx_tasks_lock); 4702 preempt_enable(); 4703 cpus_read_unlock(); 4704 percpu_up_write(&scx_fork_rwsem); 4705 4706 /* see above ENABLING transition for the explanation on exiting with 0 */ 4707 if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) { 4708 WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE); 4709 ret = 0; 4710 goto err_disable; 4711 } 4712 4713 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL)) 4714 static_branch_enable(&__scx_switched_all); 4715 4716 kobject_uevent(scx_root_kobj, KOBJ_ADD); 4717 mutex_unlock(&scx_ops_enable_mutex); 4718 4719 return 0; 4720 4721 err_del: 4722 kobject_del(scx_root_kobj); 4723 err: 4724 kobject_put(scx_root_kobj); 4725 scx_root_kobj = NULL; 4726 if (scx_exit_info) { 4727 free_exit_info(scx_exit_info); 4728 scx_exit_info = NULL; 4729 } 4730 err_unlock: 4731 mutex_unlock(&scx_ops_enable_mutex); 4732 return ret; 4733 4734 err_disable_unlock_all: 4735 percpu_up_write(&scx_fork_rwsem); 4736 err_disable_unlock_cpus: 4737 cpus_read_unlock(); 4738 err_disable: 4739 mutex_unlock(&scx_ops_enable_mutex); 4740 /* must be fully disabled before returning */ 4741 scx_ops_disable(SCX_EXIT_ERROR); 4742 kthread_flush_work(&scx_ops_disable_work); 4743 return ret; 4744 } 4745 4746 4747 /******************************************************************************** 4748 * bpf_struct_ops plumbing. 4749 */ 4750 #include <linux/bpf_verifier.h> 4751 #include <linux/bpf.h> 4752 #include <linux/btf.h> 4753 4754 extern struct btf *btf_vmlinux; 4755 static const struct btf_type *task_struct_type; 4756 static u32 task_struct_type_id; 4757 4758 static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size, 4759 enum bpf_access_type type, 4760 const struct bpf_prog *prog, 4761 struct bpf_insn_access_aux *info) 4762 { 4763 struct btf *btf = bpf_get_btf_vmlinux(); 4764 const struct bpf_struct_ops_desc *st_ops_desc; 4765 const struct btf_member *member; 4766 const struct btf_type *t; 4767 u32 btf_id, member_idx; 4768 const char *mname; 4769 4770 /* struct_ops op args are all sequential, 64-bit numbers */ 4771 if (off != arg_n * sizeof(__u64)) 4772 return false; 4773 4774 /* btf_id should be the type id of struct sched_ext_ops */ 4775 btf_id = prog->aux->attach_btf_id; 4776 st_ops_desc = bpf_struct_ops_find(btf, btf_id); 4777 if (!st_ops_desc) 4778 return false; 4779 4780 /* BTF type of struct sched_ext_ops */ 4781 t = st_ops_desc->type; 4782 4783 member_idx = prog->expected_attach_type; 4784 if (member_idx >= btf_type_vlen(t)) 4785 return false; 4786 4787 /* 4788 * Get the member name of this struct_ops program, which corresponds to 4789 * a field in struct sched_ext_ops. For example, the member name of the 4790 * dispatch struct_ops program (callback) is "dispatch". 
4791 */ 4792 member = &btf_type_member(t)[member_idx]; 4793 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 4794 4795 if (!strcmp(mname, op)) { 4796 /* 4797 * The value is a pointer to a type (struct task_struct) given 4798 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED), 4799 * however, can be a NULL (PTR_MAYBE_NULL). The BPF program 4800 * should check the pointer to make sure it is not NULL before 4801 * using it, or the verifier will reject the program. 4802 * 4803 * Longer term, this is something that should be addressed by 4804 * BTF, and be fully contained within the verifier. 4805 */ 4806 info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED; 4807 info->btf = btf_vmlinux; 4808 info->btf_id = task_struct_type_id; 4809 4810 return true; 4811 } 4812 4813 return false; 4814 } 4815 4816 static bool bpf_scx_is_valid_access(int off, int size, 4817 enum bpf_access_type type, 4818 const struct bpf_prog *prog, 4819 struct bpf_insn_access_aux *info) 4820 { 4821 if (type != BPF_READ) 4822 return false; 4823 if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) || 4824 set_arg_maybe_null("yield", 1, off, size, type, prog, info)) 4825 return true; 4826 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 4827 return false; 4828 if (off % size != 0) 4829 return false; 4830 4831 return btf_ctx_access(off, size, type, prog, info); 4832 } 4833 4834 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, 4835 const struct bpf_reg_state *reg, int off, 4836 int size) 4837 { 4838 const struct btf_type *t; 4839 4840 t = btf_type_by_id(reg->btf, reg->btf_id); 4841 if (t == task_struct_type) { 4842 if (off >= offsetof(struct task_struct, scx.slice) && 4843 off + size <= offsetofend(struct task_struct, scx.slice)) 4844 return SCALAR_VALUE; 4845 if (off >= offsetof(struct task_struct, scx.dsq_vtime) && 4846 off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) 4847 return SCALAR_VALUE; 4848 if (off >= offsetof(struct task_struct, scx.disallow) && 4849 off + size <= offsetofend(struct task_struct, scx.disallow)) 4850 return SCALAR_VALUE; 4851 } 4852 4853 return -EACCES; 4854 } 4855 4856 static const struct bpf_func_proto * 4857 bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 4858 { 4859 switch (func_id) { 4860 case BPF_FUNC_task_storage_get: 4861 return &bpf_task_storage_get_proto; 4862 case BPF_FUNC_task_storage_delete: 4863 return &bpf_task_storage_delete_proto; 4864 default: 4865 return bpf_base_func_proto(func_id, prog); 4866 } 4867 } 4868 4869 static const struct bpf_verifier_ops bpf_scx_verifier_ops = { 4870 .get_func_proto = bpf_scx_get_func_proto, 4871 .is_valid_access = bpf_scx_is_valid_access, 4872 .btf_struct_access = bpf_scx_btf_struct_access, 4873 }; 4874 4875 static int bpf_scx_init_member(const struct btf_type *t, 4876 const struct btf_member *member, 4877 void *kdata, const void *udata) 4878 { 4879 const struct sched_ext_ops *uops = udata; 4880 struct sched_ext_ops *ops = kdata; 4881 u32 moff = __btf_member_bit_offset(t, member) / 8; 4882 int ret; 4883 4884 switch (moff) { 4885 case offsetof(struct sched_ext_ops, dispatch_max_batch): 4886 if (*(u32 *)(udata + moff) > INT_MAX) 4887 return -E2BIG; 4888 ops->dispatch_max_batch = *(u32 *)(udata + moff); 4889 return 1; 4890 case offsetof(struct sched_ext_ops, flags): 4891 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS) 4892 return -EINVAL; 4893 ops->flags = *(u64 *)(udata + moff); 4894 return 1; 4895 case offsetof(struct sched_ext_ops, name): 4896 ret = 
bpf_obj_name_cpy(ops->name, uops->name, 4897 sizeof(ops->name)); 4898 if (ret < 0) 4899 return ret; 4900 if (ret == 0) 4901 return -EINVAL; 4902 return 1; 4903 case offsetof(struct sched_ext_ops, timeout_ms): 4904 if (msecs_to_jiffies(*(u32 *)(udata + moff)) > 4905 SCX_WATCHDOG_MAX_TIMEOUT) 4906 return -E2BIG; 4907 ops->timeout_ms = *(u32 *)(udata + moff); 4908 return 1; 4909 case offsetof(struct sched_ext_ops, exit_dump_len): 4910 ops->exit_dump_len = 4911 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN; 4912 return 1; 4913 case offsetof(struct sched_ext_ops, hotplug_seq): 4914 ops->hotplug_seq = *(u64 *)(udata + moff); 4915 return 1; 4916 } 4917 4918 return 0; 4919 } 4920 4921 static int bpf_scx_check_member(const struct btf_type *t, 4922 const struct btf_member *member, 4923 const struct bpf_prog *prog) 4924 { 4925 u32 moff = __btf_member_bit_offset(t, member) / 8; 4926 4927 switch (moff) { 4928 case offsetof(struct sched_ext_ops, init_task): 4929 case offsetof(struct sched_ext_ops, cpu_online): 4930 case offsetof(struct sched_ext_ops, cpu_offline): 4931 case offsetof(struct sched_ext_ops, init): 4932 case offsetof(struct sched_ext_ops, exit): 4933 break; 4934 default: 4935 if (prog->sleepable) 4936 return -EINVAL; 4937 } 4938 4939 return 0; 4940 } 4941 4942 static int bpf_scx_reg(void *kdata, struct bpf_link *link) 4943 { 4944 return scx_ops_enable(kdata, link); 4945 } 4946 4947 static void bpf_scx_unreg(void *kdata, struct bpf_link *link) 4948 { 4949 scx_ops_disable(SCX_EXIT_UNREG); 4950 kthread_flush_work(&scx_ops_disable_work); 4951 } 4952 4953 static int bpf_scx_init(struct btf *btf) 4954 { 4955 u32 type_id; 4956 4957 type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT); 4958 if (type_id < 0) 4959 return -EINVAL; 4960 task_struct_type = btf_type_by_id(btf, type_id); 4961 task_struct_type_id = type_id; 4962 4963 return 0; 4964 } 4965 4966 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link) 4967 { 4968 /* 4969 * sched_ext does not support updating the actively-loaded BPF 4970 * scheduler, as registering a BPF scheduler can always fail if the 4971 * scheduler returns an error code for e.g. ops.init(), ops.init_task(), 4972 * etc. Similarly, we can always race with unregistration happening 4973 * elsewhere, such as with sysrq. 
4974 */ 4975 return -EOPNOTSUPP; 4976 } 4977 4978 static int bpf_scx_validate(void *kdata) 4979 { 4980 return 0; 4981 } 4982 4983 static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; } 4984 static void enqueue_stub(struct task_struct *p, u64 enq_flags) {} 4985 static void dequeue_stub(struct task_struct *p, u64 enq_flags) {} 4986 static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {} 4987 static void runnable_stub(struct task_struct *p, u64 enq_flags) {} 4988 static void running_stub(struct task_struct *p) {} 4989 static void stopping_stub(struct task_struct *p, bool runnable) {} 4990 static void quiescent_stub(struct task_struct *p, u64 deq_flags) {} 4991 static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; } 4992 static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; } 4993 static void set_weight_stub(struct task_struct *p, u32 weight) {} 4994 static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {} 4995 static void update_idle_stub(s32 cpu, bool idle) {} 4996 static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {} 4997 static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {} 4998 static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; } 4999 static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {} 5000 static void enable_stub(struct task_struct *p) {} 5001 static void disable_stub(struct task_struct *p) {} 5002 static void cpu_online_stub(s32 cpu) {} 5003 static void cpu_offline_stub(s32 cpu) {} 5004 static s32 init_stub(void) { return -EINVAL; } 5005 static void exit_stub(struct scx_exit_info *info) {} 5006 5007 static struct sched_ext_ops __bpf_ops_sched_ext_ops = { 5008 .select_cpu = select_cpu_stub, 5009 .enqueue = enqueue_stub, 5010 .dequeue = dequeue_stub, 5011 .dispatch = dispatch_stub, 5012 .runnable = runnable_stub, 5013 .running = running_stub, 5014 .stopping = stopping_stub, 5015 .quiescent = quiescent_stub, 5016 .yield = yield_stub, 5017 .core_sched_before = core_sched_before_stub, 5018 .set_weight = set_weight_stub, 5019 .set_cpumask = set_cpumask_stub, 5020 .update_idle = update_idle_stub, 5021 .cpu_acquire = cpu_acquire_stub, 5022 .cpu_release = cpu_release_stub, 5023 .init_task = init_task_stub, 5024 .exit_task = exit_task_stub, 5025 .enable = enable_stub, 5026 .disable = disable_stub, 5027 .cpu_online = cpu_online_stub, 5028 .cpu_offline = cpu_offline_stub, 5029 .init = init_stub, 5030 .exit = exit_stub, 5031 }; 5032 5033 static struct bpf_struct_ops bpf_sched_ext_ops = { 5034 .verifier_ops = &bpf_scx_verifier_ops, 5035 .reg = bpf_scx_reg, 5036 .unreg = bpf_scx_unreg, 5037 .check_member = bpf_scx_check_member, 5038 .init_member = bpf_scx_init_member, 5039 .init = bpf_scx_init, 5040 .update = bpf_scx_update, 5041 .validate = bpf_scx_validate, 5042 .name = "sched_ext_ops", 5043 .owner = THIS_MODULE, 5044 .cfi_stubs = &__bpf_ops_sched_ext_ops 5045 }; 5046 5047 5048 /******************************************************************************** 5049 * System integration and init. 
5050 */ 5051 5052 static void sysrq_handle_sched_ext_reset(u8 key) 5053 { 5054 if (scx_ops_helper) 5055 scx_ops_disable(SCX_EXIT_SYSRQ); 5056 else 5057 pr_info("sched_ext: BPF scheduler not yet used\n"); 5058 } 5059 5060 static const struct sysrq_key_op sysrq_sched_ext_reset_op = { 5061 .handler = sysrq_handle_sched_ext_reset, 5062 .help_msg = "reset-sched-ext(S)", 5063 .action_msg = "Disable sched_ext and revert all tasks to CFS", 5064 .enable_mask = SYSRQ_ENABLE_RTNICE, 5065 }; 5066 5067 static void sysrq_handle_sched_ext_dump(u8 key) 5068 { 5069 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" }; 5070 5071 if (scx_enabled()) 5072 scx_dump_state(&ei, 0); 5073 } 5074 5075 static const struct sysrq_key_op sysrq_sched_ext_dump_op = { 5076 .handler = sysrq_handle_sched_ext_dump, 5077 .help_msg = "dump-sched-ext(D)", 5078 .action_msg = "Trigger sched_ext debug dump", 5079 .enable_mask = SYSRQ_ENABLE_RTNICE, 5080 }; 5081 5082 static bool can_skip_idle_kick(struct rq *rq) 5083 { 5084 lockdep_assert_rq_held(rq); 5085 5086 /* 5087 * We can skip idle kicking if @rq is going to go through at least one 5088 * full SCX scheduling cycle before going idle. Just checking whether 5089 * curr is not idle is insufficient because we could be racing 5090 * balance_one() trying to pull the next task from a remote rq, which 5091 * may fail, and @rq may become idle afterwards. 5092 * 5093 * The race window is small and we don't and can't guarantee that @rq is 5094 * only kicked while idle anyway. Skip only when sure. 5095 */ 5096 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_BALANCING); 5097 } 5098 5099 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) 5100 { 5101 struct rq *rq = cpu_rq(cpu); 5102 struct scx_rq *this_scx = &this_rq->scx; 5103 bool should_wait = false; 5104 unsigned long flags; 5105 5106 raw_spin_rq_lock_irqsave(rq, flags); 5107 5108 /* 5109 * During CPU hotplug, a CPU may depend on kicking itself to make 5110 * forward progress. Allow kicking self regardless of online state. 
5111 */ 5112 if (cpu_online(cpu) || cpu == cpu_of(this_rq)) { 5113 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) { 5114 if (rq->curr->sched_class == &ext_sched_class) 5115 rq->curr->scx.slice = 0; 5116 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5117 } 5118 5119 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { 5120 pseqs[cpu] = rq->scx.pnt_seq; 5121 should_wait = true; 5122 } 5123 5124 resched_curr(rq); 5125 } else { 5126 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5127 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5128 } 5129 5130 raw_spin_rq_unlock_irqrestore(rq, flags); 5131 5132 return should_wait; 5133 } 5134 5135 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq) 5136 { 5137 struct rq *rq = cpu_rq(cpu); 5138 unsigned long flags; 5139 5140 raw_spin_rq_lock_irqsave(rq, flags); 5141 5142 if (!can_skip_idle_kick(rq) && 5143 (cpu_online(cpu) || cpu == cpu_of(this_rq))) 5144 resched_curr(rq); 5145 5146 raw_spin_rq_unlock_irqrestore(rq, flags); 5147 } 5148 5149 static void kick_cpus_irq_workfn(struct irq_work *irq_work) 5150 { 5151 struct rq *this_rq = this_rq(); 5152 struct scx_rq *this_scx = &this_rq->scx; 5153 unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); 5154 bool should_wait = false; 5155 s32 cpu; 5156 5157 for_each_cpu(cpu, this_scx->cpus_to_kick) { 5158 should_wait |= kick_one_cpu(cpu, this_rq, pseqs); 5159 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); 5160 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5161 } 5162 5163 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) { 5164 kick_one_cpu_if_idle(cpu, this_rq); 5165 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5166 } 5167 5168 if (!should_wait) 5169 return; 5170 5171 for_each_cpu(cpu, this_scx->cpus_to_wait) { 5172 unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq; 5173 5174 if (cpu != cpu_of(this_rq)) { 5175 /* 5176 * Pairs with smp_store_release() issued by this CPU in 5177 * scx_next_task_picked() on the resched path. 5178 * 5179 * We busy-wait here to guarantee that no other task can 5180 * be scheduled on our core before the target CPU has 5181 * entered the resched path. 5182 */ 5183 while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu]) 5184 cpu_relax(); 5185 } 5186 5187 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5188 } 5189 } 5190 5191 /** 5192 * print_scx_info - print out sched_ext scheduler state 5193 * @log_lvl: the log level to use when printing 5194 * @p: target task 5195 * 5196 * If a sched_ext scheduler is enabled, print the name and state of the 5197 * scheduler. If @p is on sched_ext, print further information about the task. 5198 * 5199 * This function can be safely called on any task as long as the task_struct 5200 * itself is accessible. While safe, this function isn't synchronized and may 5201 * print out mixups or garbages of limited length. 5202 */ 5203 void print_scx_info(const char *log_lvl, struct task_struct *p) 5204 { 5205 enum scx_ops_enable_state state = scx_ops_enable_state(); 5206 const char *all = READ_ONCE(scx_switching_all) ? "+all" : ""; 5207 char runnable_at_buf[22] = "?"; 5208 struct sched_class *class; 5209 unsigned long runnable_at; 5210 5211 if (state == SCX_OPS_DISABLED) 5212 return; 5213 5214 /* 5215 * Carefully check if the task was running on sched_ext, and then 5216 * carefully copy the time it's been runnable, and its state. 
5217 */
5218 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5219 class != &ext_sched_class) {
5220 printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
5221 scx_ops_enable_state_str[state], all);
5222 return;
5223 }
5224 
5225 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5226 sizeof(runnable_at)))
5227 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5228 jiffies_delta_msecs(runnable_at, jiffies));
5229 
5230 /* print everything onto one line to conserve console space */
5231 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5232 log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
5233 runnable_at_buf);
5234 }
5235 
5236 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5237 {
5238 /*
5239 * SCX schedulers often have userspace components which are sometimes
5240 * involved in critical scheduling paths. PM operations involve freezing
5241 * userspace which can lead to scheduling misbehaviors including stalls.
5242 * Let's bypass while PM operations are in progress.
5243 */
5244 switch (event) {
5245 case PM_HIBERNATION_PREPARE:
5246 case PM_SUSPEND_PREPARE:
5247 case PM_RESTORE_PREPARE:
5248 scx_ops_bypass(true);
5249 break;
5250 case PM_POST_HIBERNATION:
5251 case PM_POST_SUSPEND:
5252 case PM_POST_RESTORE:
5253 scx_ops_bypass(false);
5254 break;
5255 }
5256 
5257 return NOTIFY_OK;
5258 }
5259 
5260 static struct notifier_block scx_pm_notifier = {
5261 .notifier_call = scx_pm_handler,
5262 };
5263 
5264 void __init init_sched_ext_class(void)
5265 {
5266 s32 cpu, v;
5267 
5268 /*
5269 * The following is to prevent the compiler from optimizing out the enum
5270 * definitions so that BPF scheduler implementations can use them
5271 * through the generated vmlinux.h.
5272 */
5273 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT);
5274 
5275 BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
5276 init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL);
5277 #ifdef CONFIG_SMP
5278 BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
5279 BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
5280 #endif
5281 scx_kick_cpus_pnt_seqs =
5282 __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
5283 __alignof__(scx_kick_cpus_pnt_seqs[0]));
5284 BUG_ON(!scx_kick_cpus_pnt_seqs);
5285 
5286 for_each_possible_cpu(cpu) {
5287 struct rq *rq = cpu_rq(cpu);
5288 
5289 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5290 INIT_LIST_HEAD(&rq->scx.runnable_list);
5291 
5292 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
5293 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
5294 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
5295 BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
5296 init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
5297 
5298 if (cpu_online(cpu))
5299 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5300 }
5301 
5302 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5303 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5304 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5305 }
5306 
5307 
5308 /********************************************************************************
5309 * Helpers that can be called from the BPF scheduler.
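 *
 * The kfuncs below are meant to be called from BPF scheduler programs. An
 * illustrative sketch of the BPF side (assuming __ksym prototypes as
 * provided by the example schedulers' headers; MY_DSQ and the ops names
 * are hypothetical):
 *
 *        s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
 *
 *        enum { MY_DSQ = 0 };
 *
 *        s32 BPF_STRUCT_OPS_SLEEPABLE(myscx_init)
 *        {
 *                /* -1 == NUMA_NO_NODE; see scx_bpf_create_dsq() below */
 *                return scx_bpf_create_dsq(MY_DSQ, -1);
 *        }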
5310 */ 5311 #include <linux/btf_ids.h> 5312 5313 __bpf_kfunc_start_defs(); 5314 5315 /** 5316 * scx_bpf_create_dsq - Create a custom DSQ 5317 * @dsq_id: DSQ to create 5318 * @node: NUMA node to allocate from 5319 * 5320 * Create a custom DSQ identified by @dsq_id. Can be called from ops.init() and 5321 * ops.init_task(). 5322 */ 5323 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) 5324 { 5325 if (!scx_kf_allowed(SCX_KF_SLEEPABLE)) 5326 return -EINVAL; 5327 5328 if (unlikely(node >= (int)nr_node_ids || 5329 (node < 0 && node != NUMA_NO_NODE))) 5330 return -EINVAL; 5331 return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node)); 5332 } 5333 5334 __bpf_kfunc_end_defs(); 5335 5336 BTF_KFUNCS_START(scx_kfunc_ids_sleepable) 5337 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) 5338 BTF_KFUNCS_END(scx_kfunc_ids_sleepable) 5339 5340 static const struct btf_kfunc_id_set scx_kfunc_set_sleepable = { 5341 .owner = THIS_MODULE, 5342 .set = &scx_kfunc_ids_sleepable, 5343 }; 5344 5345 __bpf_kfunc_start_defs(); 5346 5347 /** 5348 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu() 5349 * @p: task_struct to select a CPU for 5350 * @prev_cpu: CPU @p was on previously 5351 * @wake_flags: %SCX_WAKE_* flags 5352 * @is_idle: out parameter indicating whether the returned CPU is idle 5353 * 5354 * Can only be called from ops.select_cpu() if the built-in CPU selection is 5355 * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set. 5356 * @p, @prev_cpu and @wake_flags match ops.select_cpu(). 5357 * 5358 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is 5359 * currently idle and thus a good candidate for direct dispatching. 5360 */ 5361 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, 5362 u64 wake_flags, bool *is_idle) 5363 { 5364 if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) { 5365 *is_idle = false; 5366 return prev_cpu; 5367 } 5368 #ifdef CONFIG_SMP 5369 return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle); 5370 #else 5371 *is_idle = false; 5372 return prev_cpu; 5373 #endif 5374 } 5375 5376 __bpf_kfunc_end_defs(); 5377 5378 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu) 5379 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU) 5380 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu) 5381 5382 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = { 5383 .owner = THIS_MODULE, 5384 .set = &scx_kfunc_ids_select_cpu, 5385 }; 5386 5387 static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags) 5388 { 5389 if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) 5390 return false; 5391 5392 lockdep_assert_irqs_disabled(); 5393 5394 if (unlikely(!p)) { 5395 scx_ops_error("called with NULL task"); 5396 return false; 5397 } 5398 5399 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) { 5400 scx_ops_error("invalid enq_flags 0x%llx", enq_flags); 5401 return false; 5402 } 5403 5404 return true; 5405 } 5406 5407 static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags) 5408 { 5409 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5410 struct task_struct *ddsp_task; 5411 5412 ddsp_task = __this_cpu_read(direct_dispatch_task); 5413 if (ddsp_task) { 5414 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags); 5415 return; 5416 } 5417 5418 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) { 5419 scx_ops_error("dispatch buffer overflow"); 5420 return; 5421 } 5422 5423 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){ 5424 .task = p, 5425 .qseq = atomic_long_read(&p->scx.ops_state) & 
SCX_OPSS_QSEQ_MASK, 5426 .dsq_id = dsq_id, 5427 .enq_flags = enq_flags, 5428 }; 5429 } 5430 5431 __bpf_kfunc_start_defs(); 5432 5433 /** 5434 * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ 5435 * @p: task_struct to dispatch 5436 * @dsq_id: DSQ to dispatch to 5437 * @slice: duration @p can run for in nsecs 5438 * @enq_flags: SCX_ENQ_* 5439 * 5440 * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe 5441 * to call this function spuriously. Can be called from ops.enqueue(), 5442 * ops.select_cpu(), and ops.dispatch(). 5443 * 5444 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch 5445 * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be 5446 * used to target the local DSQ of a CPU other than the enqueueing one. Use 5447 * ops.select_cpu() to be on the target CPU in the first place. 5448 * 5449 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p 5450 * will be directly dispatched to the corresponding dispatch queue after 5451 * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be 5452 * dispatched to the local DSQ of the CPU returned by ops.select_cpu(). 5453 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the 5454 * task is dispatched. 5455 * 5456 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id 5457 * and this function can be called up to ops.dispatch_max_batch times to dispatch 5458 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the 5459 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter. 5460 * 5461 * This function doesn't have any locking restrictions and may be called under 5462 * BPF locks (in the future when BPF introduces more flexible locking). 5463 * 5464 * @p is allowed to run for @slice. The scheduling path is triggered on slice 5465 * exhaustion. If zero, the current residual slice is maintained. If 5466 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with 5467 * scx_bpf_kick_cpu() to trigger scheduling. 5468 */ 5469 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, 5470 u64 enq_flags) 5471 { 5472 if (!scx_dispatch_preamble(p, enq_flags)) 5473 return; 5474 5475 if (slice) 5476 p->scx.slice = slice; 5477 else 5478 p->scx.slice = p->scx.slice ?: 1; 5479 5480 scx_dispatch_commit(p, dsq_id, enq_flags); 5481 } 5482 5483 /** 5484 * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ 5485 * @p: task_struct to dispatch 5486 * @dsq_id: DSQ to dispatch to 5487 * @slice: duration @p can run for in nsecs 5488 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ 5489 * @enq_flags: SCX_ENQ_* 5490 * 5491 * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id. 5492 * Tasks queued into the priority queue are ordered by @vtime and always 5493 * consumed after the tasks in the FIFO queue. All other aspects are identical 5494 * to scx_bpf_dispatch(). 5495 * 5496 * @vtime ordering is according to time_before64() which considers wrapping. A 5497 * numerically larger vtime may indicate an earlier position in the ordering and 5498 * vice-versa.
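 *
 * A minimal sketch of weighted vtime dispatch from ops.enqueue(), assuming a
 * hypothetical shared DSQ ID VTIME_DSQ, a global vtime_now that the scheduler
 * advances elsewhere (e.g. from ops.running()), and the BPF_STRUCT_OPS()
 * helper macro from the example tooling:
 *
 *	void BPF_STRUCT_OPS(vtime_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		u64 vtime = p->scx.dsq_vtime;
 *
 *		if ((s64)(vtime_now - vtime) > SCX_SLICE_DFL)
 *			vtime = vtime_now - SCX_SLICE_DFL;
 *
 *		scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, vtime,
 *				       enq_flags);
 *	}
 *
 * Clamping the vtime to at most one slice behind vtime_now keeps a task which
 * slept for a long time from monopolizing the CPU once it wakes up.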
5499 */ 5500 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, 5501 u64 slice, u64 vtime, u64 enq_flags) 5502 { 5503 if (!scx_dispatch_preamble(p, enq_flags)) 5504 return; 5505 5506 if (slice) 5507 p->scx.slice = slice; 5508 else 5509 p->scx.slice = p->scx.slice ?: 1; 5510 5511 p->scx.dsq_vtime = vtime; 5512 5513 scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 5514 } 5515 5516 __bpf_kfunc_end_defs(); 5517 5518 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) 5519 BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU) 5520 BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU) 5521 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) 5522 5523 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = { 5524 .owner = THIS_MODULE, 5525 .set = &scx_kfunc_ids_enqueue_dispatch, 5526 }; 5527 5528 __bpf_kfunc_start_defs(); 5529 5530 /** 5531 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots 5532 * 5533 * Can only be called from ops.dispatch(). 5534 */ 5535 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void) 5536 { 5537 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 5538 return 0; 5539 5540 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor); 5541 } 5542 5543 /** 5544 * scx_bpf_dispatch_cancel - Cancel the latest dispatch 5545 * 5546 * Cancel the latest dispatch. Can be called multiple times to cancel further 5547 * dispatches. Can only be called from ops.dispatch(). 5548 */ 5549 __bpf_kfunc void scx_bpf_dispatch_cancel(void) 5550 { 5551 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5552 5553 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 5554 return; 5555 5556 if (dspc->cursor > 0) 5557 dspc->cursor--; 5558 else 5559 scx_ops_error("dispatch buffer underflow"); 5560 } 5561 5562 /** 5563 * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ 5564 * @dsq_id: DSQ to consume 5565 * 5566 * Consume a task from the non-local DSQ identified by @dsq_id and transfer it 5567 * to the current CPU's local DSQ for execution. Can only be called from 5568 * ops.dispatch(). 5569 * 5570 * This function flushes the in-flight dispatches from scx_bpf_dispatch() before 5571 * trying to consume the specified DSQ. It may also grab rq locks and thus can't 5572 * be called under any BPF locks. 5573 * 5574 * Returns %true if a task has been consumed, %false if there isn't any task to 5575 * consume. 5576 */ 5577 __bpf_kfunc bool scx_bpf_consume(u64 dsq_id) 5578 { 5579 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5580 struct scx_dispatch_q *dsq; 5581 5582 if (!scx_kf_allowed(SCX_KF_DISPATCH)) 5583 return false; 5584 5585 flush_dispatch_buf(dspc->rq, dspc->rf); 5586 5587 dsq = find_non_local_dsq(dsq_id); 5588 if (unlikely(!dsq)) { 5589 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id); 5590 return false; 5591 } 5592 5593 if (consume_dispatch_q(dspc->rq, dspc->rf, dsq)) { 5594 /* 5595 * A successfully consumed task can be dequeued before it starts 5596 * running while the CPU is trying to migrate other dispatched 5597 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty 5598 * local DSQ. 
5599 */ 5600 dspc->nr_tasks++; 5601 return true; 5602 } else { 5603 return false; 5604 } 5605 } 5606 5607 __bpf_kfunc_end_defs(); 5608 5609 BTF_KFUNCS_START(scx_kfunc_ids_dispatch) 5610 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) 5611 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) 5612 BTF_ID_FLAGS(func, scx_bpf_consume) 5613 BTF_KFUNCS_END(scx_kfunc_ids_dispatch) 5614 5615 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { 5616 .owner = THIS_MODULE, 5617 .set = &scx_kfunc_ids_dispatch, 5618 }; 5619 5620 __bpf_kfunc_start_defs(); 5621 5622 /** 5623 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 5624 * 5625 * Iterate over all of the tasks currently enqueued on the local DSQ of the 5626 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of 5627 * processed tasks. Can only be called from ops.cpu_release(). 5628 */ 5629 __bpf_kfunc u32 scx_bpf_reenqueue_local(void) 5630 { 5631 u32 nr_enqueued, i; 5632 struct rq *rq; 5633 5634 if (!scx_kf_allowed(SCX_KF_CPU_RELEASE)) 5635 return 0; 5636 5637 rq = cpu_rq(smp_processor_id()); 5638 lockdep_assert_rq_held(rq); 5639 5640 /* 5641 * Get the number of tasks on the local DSQ before iterating over it to 5642 * pull off tasks. The enqueue callback below can signal that it wants 5643 * the task to stay on the local DSQ, and we want to prevent the BPF 5644 * scheduler from causing us to loop indefinitely. 5645 */ 5646 nr_enqueued = rq->scx.local_dsq.nr; 5647 for (i = 0; i < nr_enqueued; i++) { 5648 struct task_struct *p; 5649 5650 p = first_local_task(rq); 5651 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != 5652 SCX_OPSS_NONE); 5653 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); 5654 WARN_ON_ONCE(p->scx.holding_cpu != -1); 5655 dispatch_dequeue(rq, p); 5656 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); 5657 } 5658 5659 return nr_enqueued; 5660 } 5661 5662 __bpf_kfunc_end_defs(); 5663 5664 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release) 5665 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) 5666 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release) 5667 5668 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { 5669 .owner = THIS_MODULE, 5670 .set = &scx_kfunc_ids_cpu_release, 5671 }; 5672 5673 __bpf_kfunc_start_defs(); 5674 5675 /** 5676 * scx_bpf_kick_cpu - Trigger reschedule on a CPU 5677 * @cpu: cpu to kick 5678 * @flags: %SCX_KICK_* flags 5679 * 5680 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or 5681 * trigger rescheduling on a busy CPU. This can be called from any online 5682 * scx_ops operation and the actual kicking is performed asynchronously through 5683 * an irq work. 5684 */ 5685 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags) 5686 { 5687 struct rq *this_rq; 5688 unsigned long irq_flags; 5689 5690 if (!ops_cpu_valid(cpu, NULL)) 5691 return; 5692 5693 /* 5694 * While bypassing for PM ops, IRQ handling may not be online which can 5695 * lead to irq_work_queue() malfunction such as infinite busy wait for 5696 * IRQ status update. Suppress kicking. 5697 */ 5698 if (scx_ops_bypassing()) 5699 return; 5700 5701 local_irq_save(irq_flags); 5702 5703 this_rq = this_rq(); 5704 5705 /* 5706 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting 5707 * rq locks. We can probably be smarter and avoid bouncing if called 5708 * from ops which don't hold a rq lock. 
5709 */ 5710 if (flags & SCX_KICK_IDLE) { 5711 struct rq *target_rq = cpu_rq(cpu); 5712 5713 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT))) 5714 scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); 5715 5716 if (raw_spin_rq_trylock(target_rq)) { 5717 if (can_skip_idle_kick(target_rq)) { 5718 raw_spin_rq_unlock(target_rq); 5719 goto out; 5720 } 5721 raw_spin_rq_unlock(target_rq); 5722 } 5723 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle); 5724 } else { 5725 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick); 5726 5727 if (flags & SCX_KICK_PREEMPT) 5728 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt); 5729 if (flags & SCX_KICK_WAIT) 5730 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait); 5731 } 5732 5733 irq_work_queue(&this_rq->scx.kick_cpus_irq_work); 5734 out: 5735 local_irq_restore(irq_flags); 5736 } 5737 5738 /** 5739 * scx_bpf_dsq_nr_queued - Return the number of queued tasks 5740 * @dsq_id: id of the DSQ 5741 * 5742 * Return the number of tasks in the DSQ matching @dsq_id. If not found, 5743 * -%ENOENT is returned. 5744 */ 5745 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) 5746 { 5747 struct scx_dispatch_q *dsq; 5748 s32 ret; 5749 5750 preempt_disable(); 5751 5752 if (dsq_id == SCX_DSQ_LOCAL) { 5753 ret = READ_ONCE(this_rq()->scx.local_dsq.nr); 5754 goto out; 5755 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 5756 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 5757 5758 if (ops_cpu_valid(cpu, NULL)) { 5759 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr); 5760 goto out; 5761 } 5762 } else { 5763 dsq = find_non_local_dsq(dsq_id); 5764 if (dsq) { 5765 ret = READ_ONCE(dsq->nr); 5766 goto out; 5767 } 5768 } 5769 ret = -ENOENT; 5770 out: 5771 preempt_enable(); 5772 return ret; 5773 } 5774 5775 /** 5776 * scx_bpf_destroy_dsq - Destroy a custom DSQ 5777 * @dsq_id: DSQ to destroy 5778 * 5779 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with 5780 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is 5781 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ 5782 * which doesn't exist. Can be called from any online scx_ops operations. 5783 */ 5784 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id) 5785 { 5786 destroy_dsq(dsq_id); 5787 } 5788 5789 /** 5790 * bpf_iter_scx_dsq_new - Create a DSQ iterator 5791 * @it: iterator to initialize 5792 * @dsq_id: DSQ to iterate 5793 * @flags: %SCX_DSQ_ITER_* 5794 * 5795 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk 5796 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes 5797 * tasks which are already queued when this function is invoked. 5798 */ 5799 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, 5800 u64 flags) 5801 { 5802 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 5803 5804 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) > 5805 sizeof(struct bpf_iter_scx_dsq)); 5806 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) != 5807 __alignof__(struct bpf_iter_scx_dsq)); 5808 5809 if (flags & ~__SCX_DSQ_ITER_ALL_FLAGS) 5810 return -EINVAL; 5811 5812 kit->dsq = find_non_local_dsq(dsq_id); 5813 if (!kit->dsq) 5814 return -ENOENT; 5815 5816 INIT_LIST_HEAD(&kit->cursor.node); 5817 kit->cursor.is_bpf_iter_cursor = true; 5818 kit->dsq_seq = READ_ONCE(kit->dsq->seq); 5819 kit->flags = flags; 5820 5821 return 0; 5822 } 5823 5824 /** 5825 * bpf_iter_scx_dsq_next - Progress a DSQ iterator 5826 * @it: iterator to progress 5827 * 5828 * Return the next task. 
See bpf_iter_scx_dsq_new(). 5829 */ 5830 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) 5831 { 5832 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 5833 bool rev = kit->flags & SCX_DSQ_ITER_REV; 5834 struct task_struct *p; 5835 unsigned long flags; 5836 5837 if (!kit->dsq) 5838 return NULL; 5839 5840 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 5841 5842 if (list_empty(&kit->cursor.node)) 5843 p = NULL; 5844 else 5845 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list); 5846 5847 /* 5848 * Only tasks which were queued before the iteration started are 5849 * visible. This bounds BPF iterations and guarantees that vtime never 5850 * jumps in the other direction while iterating. 5851 */ 5852 do { 5853 p = nldsq_next_task(kit->dsq, p, rev); 5854 } while (p && unlikely(u32_before(kit->dsq_seq, p->scx.dsq_seq))); 5855 5856 if (p) { 5857 if (rev) 5858 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node); 5859 else 5860 list_move(&kit->cursor.node, &p->scx.dsq_list.node); 5861 } else { 5862 list_del_init(&kit->cursor.node); 5863 } 5864 5865 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 5866 5867 return p; 5868 } 5869 5870 /** 5871 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator 5872 * @it: iterator to destroy 5873 * 5874 * Undo bpf_iter_scx_dsq_new(). 5875 */ 5876 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) 5877 { 5878 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 5879 5880 if (!kit->dsq) 5881 return; 5882 5883 if (!list_empty(&kit->cursor.node)) { 5884 unsigned long flags; 5885 5886 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 5887 list_del_init(&kit->cursor.node); 5888 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 5889 } 5890 kit->dsq = NULL; 5891 } 5892 5893 __bpf_kfunc_end_defs(); 5894 5895 static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size, 5896 char *fmt, unsigned long long *data, u32 data__sz) 5897 { 5898 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true }; 5899 s32 ret; 5900 5901 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 || 5902 (data__sz && !data)) { 5903 scx_ops_error("invalid data=%p and data__sz=%u", 5904 (void *)data, data__sz); 5905 return -EINVAL; 5906 } 5907 5908 ret = copy_from_kernel_nofault(data_buf, data, data__sz); 5909 if (ret < 0) { 5910 scx_ops_error("failed to read data fields (%d)", ret); 5911 return ret; 5912 } 5913 5914 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8, 5915 &bprintf_data); 5916 if (ret < 0) { 5917 scx_ops_error("format preparation failed (%d)", ret); 5918 return ret; 5919 } 5920 5921 ret = bstr_printf(line_buf, line_size, fmt, 5922 bprintf_data.bin_args); 5923 bpf_bprintf_cleanup(&bprintf_data); 5924 if (ret < 0) { 5925 scx_ops_error("(\"%s\", %p, %u) failed to format", 5926 fmt, data, data__sz); 5927 return ret; 5928 } 5929 5930 return ret; 5931 } 5932 5933 static s32 bstr_format(struct scx_bstr_buf *buf, 5934 char *fmt, unsigned long long *data, u32 data__sz) 5935 { 5936 return __bstr_format(buf->data, buf->line, sizeof(buf->line), 5937 fmt, data, data__sz); 5938 } 5939 5940 __bpf_kfunc_start_defs(); 5941 5942 /** 5943 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler. 5944 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
5945 * @fmt: error message format string 5946 * @data: format string parameters packaged using ___bpf_fill() macro 5947 * @data__sz: @data len, must end in '__sz' for the verifier 5948 * 5949 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops 5950 * disabling. 5951 */ 5952 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt, 5953 unsigned long long *data, u32 data__sz) 5954 { 5955 unsigned long flags; 5956 5957 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 5958 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 5959 scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s", 5960 scx_exit_bstr_buf.line); 5961 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 5962 } 5963 5964 /** 5965 * scx_bpf_error_bstr - Indicate fatal error 5966 * @fmt: error message format string 5967 * @data: format string parameters packaged using ___bpf_fill() macro 5968 * @data__sz: @data len, must end in '__sz' for the verifier 5969 * 5970 * Indicate that the BPF scheduler encountered a fatal error and initiate ops 5971 * disabling. 5972 */ 5973 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data, 5974 u32 data__sz) 5975 { 5976 unsigned long flags; 5977 5978 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 5979 if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 5980 scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s", 5981 scx_exit_bstr_buf.line); 5982 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 5983 } 5984 5985 /** 5986 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler 5987 * @fmt: format string 5988 * @data: format string parameters packaged using ___bpf_fill() macro 5989 * @data__sz: @data len, must end in '__sz' for the verifier 5990 * 5991 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and 5992 * dump_task() to generate extra debug dump specific to the BPF scheduler. 5993 * 5994 * The extra dump may be multiple lines. A single line may be split over 5995 * multiple calls. The last line is automatically terminated. 5996 */ 5997 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, 5998 u32 data__sz) 5999 { 6000 struct scx_dump_data *dd = &scx_dump_data; 6001 struct scx_bstr_buf *buf = &dd->buf; 6002 s32 ret; 6003 6004 if (raw_smp_processor_id() != dd->cpu) { 6005 scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends"); 6006 return; 6007 } 6008 6009 /* append the formatted string to the line buf */ 6010 ret = __bstr_format(buf->data, buf->line + dd->cursor, 6011 sizeof(buf->line) - dd->cursor, fmt, data, data__sz); 6012 if (ret < 0) { 6013 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)", 6014 dd->prefix, fmt, data, data__sz, ret); 6015 return; 6016 } 6017 6018 dd->cursor += ret; 6019 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line)); 6020 6021 if (!dd->cursor) 6022 return; 6023 6024 /* 6025 * If the line buf overflowed or ends in a newline, flush it into the 6026 * dump. This is to allow the caller to generate a single line over 6027 * multiple calls. As ops_dump_flush() can also handle multiple lines in 6028 * the line buf, the only case which can lead to an unexpected 6029 * truncation is when the caller keeps generating newlines in the middle 6030 * of a line instead of at the end. Don't do that.
6031 */ 6032 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n') 6033 ops_dump_flush(); 6034 } 6035 6036 /** 6037 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU 6038 * @cpu: CPU of interest 6039 * 6040 * Return the maximum relative capacity of @cpu in relation to the most 6041 * performant CPU in the system. The return value is in the range [1, 6042 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur(). 6043 */ 6044 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu) 6045 { 6046 if (ops_cpu_valid(cpu, NULL)) 6047 return arch_scale_cpu_capacity(cpu); 6048 else 6049 return SCX_CPUPERF_ONE; 6050 } 6051 6052 /** 6053 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU 6054 * @cpu: CPU of interest 6055 * 6056 * Return the current relative performance of @cpu in relation to its maximum. 6057 * The return value is in the range [1, %SCX_CPUPERF_ONE]. 6058 * 6059 * The current performance level of a CPU in relation to the maximum performance 6060 * available in the system can be calculated as follows: 6061 * 6062 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE 6063 * 6064 * The result is in the range [1, %SCX_CPUPERF_ONE]. 6065 */ 6066 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu) 6067 { 6068 if (ops_cpu_valid(cpu, NULL)) 6069 return arch_scale_freq_capacity(cpu); 6070 else 6071 return SCX_CPUPERF_ONE; 6072 } 6073 6074 /** 6075 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU 6076 * @cpu: CPU of interest 6077 * @perf: target performance level [0, %SCX_CPUPERF_ONE] 6079 * 6080 * Set the target performance level of @cpu to @perf. @perf is in linear 6081 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the 6082 * schedutil cpufreq governor chooses the target frequency. 6083 * 6084 * The actual performance level chosen, CPU grouping, and the overhead and 6085 * latency of the operations are dependent on the hardware and cpufreq driver in 6086 * use. Consult hardware and cpufreq documentation for more information. The 6087 * current performance level can be monitored using scx_bpf_cpuperf_cur(). 6088 */ 6089 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf) 6090 { 6091 if (unlikely(perf > SCX_CPUPERF_ONE)) { 6092 scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu); 6093 return; 6094 } 6095 6096 if (ops_cpu_valid(cpu, NULL)) { 6097 struct rq *rq = cpu_rq(cpu); 6098 6099 rq->scx.cpuperf_target = perf; 6100 6101 rcu_read_lock_sched_notrace(); 6102 cpufreq_update_util(cpu_rq(cpu), 0); 6103 rcu_read_unlock_sched_notrace(); 6104 } 6105 } 6106 6107 /** 6108 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs 6109 * 6110 * All valid CPU IDs in the system are smaller than the returned value.
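 *
 * A minimal sketch of walking every possible CPU from the BPF side, e.g. to
 * aggregate hypothetical per-CPU statistics. The bpf_for() helper and
 * my_cpu_stat() are assumptions (the former comes from the common BPF
 * headers, the latter stands in for whatever per-CPU lookup the scheduler
 * implements); bpf_cpumask_test_cpu() is the generic BPF cpumask kfunc:
 *
 *	const struct cpumask *possible = scx_bpf_get_possible_cpumask();
 *	u32 nr = scx_bpf_nr_cpu_ids();
 *	u64 total = 0;
 *	s32 cpu;
 *
 *	bpf_for(cpu, 0, nr) {
 *		if (bpf_cpumask_test_cpu(cpu, possible))
 *			total += my_cpu_stat(cpu);
 *	}
 *	scx_bpf_put_cpumask(possible);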
6111 */ 6112 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void) 6113 { 6114 return nr_cpu_ids; 6115 } 6116 6117 /** 6118 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask 6119 */ 6120 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void) 6121 { 6122 return cpu_possible_mask; 6123 } 6124 6125 /** 6126 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask 6127 */ 6128 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void) 6129 { 6130 return cpu_online_mask; 6131 } 6132 6133 /** 6134 * scx_bpf_put_cpumask - Release a possible/online cpumask 6135 * @cpumask: cpumask to release 6136 */ 6137 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask) 6138 { 6139 /* 6140 * Empty function body because we aren't actually acquiring or releasing 6141 * a reference to a global cpumask, which is read-only in the caller and 6142 * is never released. The acquire / release semantics here are just used 6143 * to make the cpumask a trusted pointer in the caller. 6144 */ 6145 } 6146 6147 /** 6148 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking 6149 * per-CPU cpumask. 6150 * 6151 * Returns an empty cpumask if idle tracking is not enabled, or running on a UP kernel. 6152 */ 6153 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void) 6154 { 6155 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 6156 scx_ops_error("built-in idle tracking is disabled"); 6157 return cpu_none_mask; 6158 } 6159 6160 #ifdef CONFIG_SMP 6161 return idle_masks.cpu; 6162 #else 6163 return cpu_none_mask; 6164 #endif 6165 } 6166 6167 /** 6168 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking, 6169 * per-physical-core cpumask. Can be used to determine if an entire physical 6170 * core is free. 6171 * 6172 * Returns an empty cpumask if idle tracking is not enabled, or running on a UP kernel. 6173 */ 6174 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void) 6175 { 6176 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 6177 scx_ops_error("built-in idle tracking is disabled"); 6178 return cpu_none_mask; 6179 } 6180 6181 #ifdef CONFIG_SMP 6182 if (sched_smt_active()) 6183 return idle_masks.smt; 6184 else 6185 return idle_masks.cpu; 6186 #else 6187 return cpu_none_mask; 6188 #endif 6189 } 6190 6191 /** 6192 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to 6193 * either the percpu, or SMT idle-tracking cpumask. 6194 */ 6195 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask) 6196 { 6197 /* 6198 * Empty function body because we aren't actually acquiring or releasing 6199 * a reference to a global idle cpumask, which is read-only in the 6200 * caller and is never released. The acquire / release semantics here 6201 * are just used to make the cpumask a trusted pointer in the caller. 6202 */ 6203 } 6204 6205 /** 6206 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state 6207 * @cpu: cpu to test and clear idle for 6208 * 6209 * Returns %true if @cpu was idle and its idle state was successfully cleared. 6210 * %false otherwise. 6211 * 6212 * Unavailable if ops.update_idle() is implemented and 6213 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
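 *
 * A minimal sketch of a custom ops.select_cpu() which keeps a waking task on
 * its previous CPU when that CPU is still idle and otherwise asks for any
 * allowed idle CPU, directly dispatching to the local DSQ of the chosen CPU
 * in both cases. The BPF_STRUCT_OPS() macro and SCX_SLICE_DFL usage follow
 * the example tooling; bpf_cpumask_test_cpu() is the generic BPF cpumask
 * kfunc; the callback name is illustrative:
 *
 *	s32 BPF_STRUCT_OPS(idle_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu;
 *
 *		if (bpf_cpumask_test_cpu(prev_cpu, p->cpus_ptr) &&
 *		    scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return prev_cpu;
 *		}
 *
 *		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *		if (cpu >= 0) {
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *
 *		return prev_cpu;
 *	}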
6214 */ 6215 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) 6216 { 6217 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 6218 scx_ops_error("built-in idle tracking is disabled"); 6219 return false; 6220 } 6221 6222 if (ops_cpu_valid(cpu, NULL)) 6223 return test_and_clear_cpu_idle(cpu); 6224 else 6225 return false; 6226 } 6227 6228 /** 6229 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu 6230 * @cpus_allowed: Allowed cpumask 6231 * @flags: %SCX_PICK_IDLE_CPU_* flags 6232 * 6233 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu 6234 * number on success. -%EBUSY if no matching cpu was found. 6235 * 6236 * Idle CPU tracking may race against CPU scheduling state transitions. For 6237 * example, this function may return -%EBUSY as CPUs are transitioning into the 6238 * idle state. If the caller then assumes that there will be dispatch events on 6239 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs 6240 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and 6241 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch 6242 * event in the near future. 6243 * 6244 * Unavailable if ops.update_idle() is implemented and 6245 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set. 6246 */ 6247 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed, 6248 u64 flags) 6249 { 6250 if (!static_branch_likely(&scx_builtin_idle_enabled)) { 6251 scx_ops_error("built-in idle tracking is disabled"); 6252 return -EBUSY; 6253 } 6254 6255 return scx_pick_idle_cpu(cpus_allowed, flags); 6256 } 6257 6258 /** 6259 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU 6260 * @cpus_allowed: Allowed cpumask 6261 * @flags: %SCX_PICK_IDLE_CPU_* flags 6262 * 6263 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any 6264 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu 6265 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is 6266 * empty. 6267 * 6268 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not 6269 * set, this function can't tell which CPUs are idle and will always pick any 6270 * CPU. 6271 */ 6272 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed, 6273 u64 flags) 6274 { 6275 s32 cpu; 6276 6277 if (static_branch_likely(&scx_builtin_idle_enabled)) { 6278 cpu = scx_pick_idle_cpu(cpus_allowed, flags); 6279 if (cpu >= 0) 6280 return cpu; 6281 } 6282 6283 cpu = cpumask_any_distribute(cpus_allowed); 6284 if (cpu < nr_cpu_ids) 6285 return cpu; 6286 else 6287 return -EBUSY; 6288 } 6289 6290 /** 6291 * scx_bpf_task_running - Is task currently running? 
6292 * @p: task of interest 6293 */ 6294 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p) 6295 { 6296 return task_rq(p)->curr == p; 6297 } 6298 6299 /** 6300 * scx_bpf_task_cpu - CPU a task is currently associated with 6301 * @p: task of interest 6302 */ 6303 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p) 6304 { 6305 return task_cpu(p); 6306 } 6307 6308 /** 6309 * scx_bpf_cpu_rq - Fetch the rq of a CPU 6310 * @cpu: CPU of the rq 6311 */ 6312 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu) 6313 { 6314 if (!ops_cpu_valid(cpu, NULL)) 6315 return NULL; 6316 6317 return cpu_rq(cpu); 6318 } 6319 6320 __bpf_kfunc_end_defs(); 6321 6322 BTF_KFUNCS_START(scx_kfunc_ids_any) 6323 BTF_ID_FLAGS(func, scx_bpf_kick_cpu) 6324 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) 6325 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) 6326 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED) 6327 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL) 6328 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY) 6329 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS) 6330 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS) 6331 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS) 6332 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap) 6333 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur) 6334 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set) 6335 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids) 6336 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE) 6337 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE) 6338 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE) 6339 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE) 6340 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE) 6341 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE) 6342 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle) 6343 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU) 6344 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU) 6345 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU) 6346 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU) 6347 BTF_ID_FLAGS(func, scx_bpf_cpu_rq) 6348 BTF_KFUNCS_END(scx_kfunc_ids_any) 6349 6350 static const struct btf_kfunc_id_set scx_kfunc_set_any = { 6351 .owner = THIS_MODULE, 6352 .set = &scx_kfunc_ids_any, 6353 }; 6354 6355 static int __init scx_init(void) 6356 { 6357 int ret; 6358 6359 /* 6360 * kfunc registration can't be done from init_sched_ext_class() as 6361 * register_btf_kfunc_id_set() needs most of the system to be up. 6362 * 6363 * Some kfuncs are context-sensitive and can only be called from 6364 * specific SCX ops. They are grouped into BTF sets accordingly. 6365 * Unfortunately, BPF currently doesn't have a way of enforcing such 6366 * restrictions. Eventually, the verifier should be able to enforce 6367 * them. For now, register them the same and make each kfunc explicitly 6368 * check using scx_kf_allowed(). 
6369 */ 6370 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 6371 &scx_kfunc_set_sleepable)) || 6372 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 6373 &scx_kfunc_set_select_cpu)) || 6374 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 6375 &scx_kfunc_set_enqueue_dispatch)) || 6376 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 6377 &scx_kfunc_set_dispatch)) || 6378 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 6379 &scx_kfunc_set_cpu_release)) || 6380 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 6381 &scx_kfunc_set_any)) || 6382 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, 6383 &scx_kfunc_set_any)) || 6384 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, 6385 &scx_kfunc_set_any))) { 6386 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret); 6387 return ret; 6388 } 6389 6390 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops); 6391 if (ret) { 6392 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret); 6393 return ret; 6394 } 6395 6396 ret = register_pm_notifier(&scx_pm_notifier); 6397 if (ret) { 6398 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret); 6399 return ret; 6400 } 6401 6402 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj); 6403 if (!scx_kset) { 6404 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n"); 6405 return -ENOMEM; 6406 } 6407 6408 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group); 6409 if (ret < 0) { 6410 pr_err("sched_ext: Failed to add global attributes\n"); 6411 return ret; 6412 } 6413 6414 return 0; 6415 } 6416 __initcall(scx_init); 6417
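/*
 * For illustration: on the BPF side, the operation table registered above is
 * implemented as a struct_ops map. A minimal, hypothetical skeleton using
 * libbpf's SEC(".struct_ops.link") convention might look like the following,
 * with each callback being a BPF program built on the kfuncs declared in this
 * file:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops minimal_ops = {
 *		.select_cpu	= (void *)minimal_select_cpu,
 *		.enqueue	= (void *)minimal_enqueue,
 *		.dispatch	= (void *)minimal_dispatch,
 *		.init		= (void *)minimal_init,
 *		.name		= "minimal",
 *	};
 */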