// SPDX-License-Identifier: GPL-2.0
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Built-in idle CPU tracking policy.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Andrea Righi <arighi@nvidia.com>
 */
#include "ext_idle.h"

/* Enable/disable built-in idle CPU selection policy */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);

/* Enable/disable per-node idle cpumasks */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);

#ifdef CONFIG_SMP
/* Enable/disable LLC aware optimizations */
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);

/* Enable/disable NUMA aware optimizations */
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);

/*
 * cpumasks to track idle CPUs within each NUMA node.
 *
 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is not enabled, a single global cpumask
 * is used to track all the idle CPUs in the system.
 */
struct scx_idle_cpus {
	cpumask_var_t cpu;
	cpumask_var_t smt;
};

/*
 * Global host-wide idle cpumasks (used when SCX_OPS_BUILTIN_IDLE_PER_NODE
 * is not enabled).
 */
static struct scx_idle_cpus scx_idle_global_masks;

/*
 * Per-node idle cpumasks.
 */
static struct scx_idle_cpus **scx_idle_node_masks;

/*
 * Return the idle masks associated with a target @node.
 *
 * NUMA_NO_NODE identifies the global idle cpumask.
 */
static struct scx_idle_cpus *idle_cpumask(int node)
{
	return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];
}

/*
 * Returns the NUMA node ID associated with a @cpu, or NUMA_NO_NODE if
 * per-node idle cpumasks are disabled.
 */
static int scx_cpu_node_if_enabled(int cpu)
{
	if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
		return NUMA_NO_NODE;

	return cpu_to_node(cpu);
}

bool scx_idle_test_and_clear_cpu(int cpu)
{
	int node = scx_cpu_node_if_enabled(cpu);
	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

#ifdef CONFIG_SCHED_SMT
	/*
	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
	 * cluster is not wholly idle either way. This also prevents
	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
	 */
	if (sched_smt_active()) {
		const struct cpumask *smt = cpu_smt_mask(cpu);
		struct cpumask *idle_smts = idle_cpumask(node)->smt;

		/*
		 * If offline, @cpu is not its own sibling and
		 * scx_pick_idle_cpu() can get caught in an infinite loop as
		 * @cpu is never cleared from the idle SMT mask. Ensure that
		 * @cpu is eventually cleared.
		 *
		 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
		 * reduce memory writes, which may help alleviate cache
		 * coherence pressure.
		 */
		if (cpumask_intersects(smt, idle_smts))
			cpumask_andnot(idle_smts, idle_smts, smt);
		else if (cpumask_test_cpu(cpu, idle_smts))
			__cpumask_clear_cpu(cpu, idle_smts);
	}
#endif

	return cpumask_test_and_clear_cpu(cpu, idle_cpus);
}
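
/*
 * Example (illustrative only, not part of the upstream sources): with
 * SCX_OPS_BUILTIN_IDLE_PER_NODE enabled on a two-node machine where CPU 12
 * sits on node 1, claiming CPU 12 only touches node 1's masks:
 *
 *	scx_idle_test_and_clear_cpu(12);
 *	  node = scx_cpu_node_if_enabled(12);	// == 1
 *	  // clears CPU 12 in idle_cpumask(1)->cpu and its SMT siblings in
 *	  // idle_cpumask(1)->smt; idle_cpumask(0) and the global masks are
 *	  // never written, avoiding cross-node cache-line bouncing.
 *
 * Without the flag, node is NUMA_NO_NODE and the single global mask pair
 * is updated instead.
 */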

/*
 * Pick an idle CPU in a specific NUMA node.
 */
static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	int cpu;

retry:
	if (sched_smt_active()) {
		cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
		if (cpu < nr_cpu_ids)
			goto found;

		if (flags & SCX_PICK_IDLE_CORE)
			return -EBUSY;
	}

	cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
	if (cpu >= nr_cpu_ids)
		return -EBUSY;

found:
	if (scx_idle_test_and_clear_cpu(cpu))
		return cpu;
	else
		goto retry;
}

/*
 * Tracks nodes that have not yet been visited when searching for an idle
 * CPU across all available nodes.
 */
static DEFINE_PER_CPU(nodemask_t, per_cpu_unvisited);

/*
 * Search for an idle CPU across all nodes, excluding @node.
 */
static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	nodemask_t *unvisited;
	s32 cpu = -EBUSY;

	preempt_disable();
	unvisited = this_cpu_ptr(&per_cpu_unvisited);

	/*
	 * Restrict the search to the online nodes (excluding the current
	 * node that has been visited already).
	 */
	nodes_copy(*unvisited, node_states[N_ONLINE]);
	node_clear(node, *unvisited);

	/*
	 * Traverse all nodes in order of increasing distance, starting
	 * from @node.
	 *
	 * This loop is O(N^2), with N being the number of NUMA nodes,
	 * which might be quite expensive in large NUMA systems. However,
	 * this complexity comes into play only when a scheduler enables
	 * SCX_OPS_BUILTIN_IDLE_PER_NODE and it's requesting an idle CPU
	 * without specifying a target NUMA node, so it shouldn't be a
	 * bottleneck in most cases.
	 *
	 * As a future optimization we may want to cache the list of nodes
	 * in a per-node array, instead of actually traversing them every
	 * time.
	 */
	for_each_node_numadist(node, *unvisited) {
		cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
		if (cpu >= 0)
			break;
	}
	preempt_enable();

	return cpu;
}

/*
 * Find an idle CPU in the system, starting from @node.
 */
s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	s32 cpu;

	/*
	 * Always search in the starting node first (this is an
	 * optimization that can save some cycles even when the search is
	 * not limited to a single node).
	 */
	cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
	if (cpu >= 0)
		return cpu;

	/*
	 * Stop the search if we are using only a single global cpumask
	 * (NUMA_NO_NODE) or if the search is restricted to the first node
	 * only.
	 */
	if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)
		return -EBUSY;

	/*
	 * Extend the search to the other online nodes.
	 */
	return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags);
}
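
/*
 * Example (illustrative): how the %SCX_PICK_IDLE_* flags shape a search.
 * The calls below are sketches, assuming a task @p and its home @node:
 *
 *	// Fully idle core only, in @node first, then by increasing distance:
 *	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, SCX_PICK_IDLE_CORE);
 *
 *	// Any idle CPU, but never leave @node:
 *	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, SCX_PICK_IDLE_IN_NODE);
 *
 * A negative return (-EBUSY) means no CPU matching the constraints could
 * be claimed.
 */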

/*
 * Return the number of CPUs in the same LLC domain of @cpu (or zero if the LLC
 * domain is not defined).
 */
static unsigned int llc_weight(s32 cpu)
{
	struct sched_domain *sd;

	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	if (!sd)
		return 0;

	return sd->span_weight;
}

/*
 * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
 * domain is not defined).
 */
static struct cpumask *llc_span(s32 cpu)
{
	struct sched_domain *sd;

	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	if (!sd)
		return NULL;

	return sched_domain_span(sd);
}

/*
 * Return the number of CPUs in the same NUMA domain of @cpu (or zero if the
 * NUMA domain is not defined).
 */
static unsigned int numa_weight(s32 cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	sd = rcu_dereference(per_cpu(sd_numa, cpu));
	if (!sd)
		return 0;
	sg = sd->groups;
	if (!sg)
		return 0;

	return sg->group_weight;
}

/*
 * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
 * domain is not defined).
 */
static struct cpumask *numa_span(s32 cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	sd = rcu_dereference(per_cpu(sd_numa, cpu));
	if (!sd)
		return NULL;
	sg = sd->groups;
	if (!sg)
		return NULL;

	return sched_group_span(sg);
}

/*
 * Return true if the LLC domains do not perfectly overlap with the NUMA
 * domains, false otherwise.
 */
static bool llc_numa_mismatch(void)
{
	int cpu;

	/*
	 * We need to scan all online CPUs to verify whether their scheduling
	 * domains overlap.
	 *
	 * While it is rare to encounter architectures with asymmetric NUMA
	 * topologies, CPU hotplugging or virtualized environments can result
	 * in asymmetric configurations.
	 *
	 * For example:
	 *
	 *  NUMA 0:
	 *    - LLC 0: cpu0..cpu7
	 *    - LLC 1: cpu8..cpu15 [offline]
	 *
	 *  NUMA 1:
	 *    - LLC 0: cpu16..cpu23
	 *    - LLC 1: cpu24..cpu31
	 *
	 * In this case, if we only checked the first online CPU (cpu0), we
	 * would incorrectly conclude that the LLC and NUMA domains fully
	 * overlap, when in fact NUMA 1 has two distinct LLC domains.
	 */
	for_each_online_cpu(cpu)
		if (llc_weight(cpu) != numa_weight(cpu))
			return true;

	return false;
}
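
/*
 * Example (illustrative): on the asymmetric topology sketched above,
 * scanning only cpu0 would compare llc_weight(0) == 8 against
 * numa_weight(0) == 8 and miss the mismatch, while scanning cpu16 finds
 * llc_weight(16) == 8 vs numa_weight(16) == 16 and correctly returns true.
 */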

/*
 * Initialize topology-aware scheduling.
 *
 * Detect if the system has multiple LLC or multiple NUMA domains and enable
 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
 * selection policy.
 *
 * Assumption: the kernel's internal topology representation assumes that each
 * CPU belongs to a single LLC domain, and that each LLC domain is entirely
 * contained within a single NUMA node.
 */
void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
{
	bool enable_llc = false, enable_numa = false;
	unsigned int nr_cpus;
	s32 cpu = cpumask_first(cpu_online_mask);

	/*
	 * Enable LLC domain optimization only when there are multiple LLC
	 * domains among the online CPUs. If all online CPUs are part of a
	 * single LLC domain, the idle CPU selection logic can choose any
	 * online CPU without bias.
	 *
	 * Note that it is sufficient to check the LLC domain of the first
	 * online CPU to determine whether a single LLC domain includes all
	 * CPUs.
	 */
	rcu_read_lock();
	nr_cpus = llc_weight(cpu);
	if (nr_cpus > 0) {
		if (nr_cpus < num_online_cpus())
			enable_llc = true;
		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
	}

	/*
	 * Enable NUMA optimization only when there are multiple NUMA domains
	 * among the online CPUs and the NUMA domains don't perfectly overlap
	 * with the LLC domains.
	 *
	 * If all CPUs belong to the same NUMA node and the same LLC domain,
	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
	 * for an idle CPU in the same domain twice is redundant.
	 *
	 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
	 * optimization, as we would naturally select idle CPUs within
	 * specific NUMA nodes by querying the corresponding per-node cpumask.
	 */
	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
		nr_cpus = numa_weight(cpu);
		if (nr_cpus > 0) {
			if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
				enable_numa = true;
			pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
				 cpumask_pr_args(numa_span(cpu)), nr_cpus);
		}
	}
	rcu_read_unlock();

	pr_debug("sched_ext: LLC idle selection %s\n",
		 str_enabled_disabled(enable_llc));
	pr_debug("sched_ext: NUMA idle selection %s\n",
		 str_enabled_disabled(enable_numa));

	if (enable_llc)
		static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
	else
		static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
	if (enable_numa)
		static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
	else
		static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
}
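
/*
 * Example (illustrative): outcomes of the detection above on a few common
 * topologies, assuming SCX_OPS_BUILTIN_IDLE_PER_NODE is not set:
 *
 *	single socket, single LLC:		enable_llc=false enable_numa=false
 *	single socket, multiple LLCs:		enable_llc=true  enable_numa=false
 *	multiple sockets, one LLC per socket
 *	(LLC == NUMA):				enable_llc=true  enable_numa=false
 *	multiple sockets, multiple LLCs per
 *	socket:					enable_llc=true  enable_numa=true
 */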

/*
 * Built-in CPU idle selection policy:
 *
 * 1. Prioritize full-idle cores:
 *   - always prioritize CPUs from fully idle cores (both logical CPUs are
 *     idle) to avoid interference caused by SMT.
 *
 * 2. Reuse the same CPU:
 *   - prefer the last used CPU to take advantage of cached data (L1, L2) and
 *     branch prediction optimizations.
 *
 * 3. Pick a CPU within the same LLC (Last-Level Cache):
 *   - if the above conditions aren't met, pick a CPU that shares the same LLC
 *     to maintain cache locality.
 *
 * 4. Pick a CPU within the same NUMA node, if enabled:
 *   - choose a CPU from the same NUMA node to reduce memory access latency.
 *
 * 5. Pick any idle CPU usable by the task.
 *
 * Steps 3 and 4 are performed only if the system has, respectively,
 * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
 * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
 *
 * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
 * begin in @prev_cpu's node and proceed to other nodes in order of
 * increasing distance.
 *
 * Return the picked CPU if idle, or a negative value otherwise.
 *
 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
 * we never call ops.select_cpu() for them, see select_task_rq().
 */
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
{
	const struct cpumask *llc_cpus = NULL;
	const struct cpumask *numa_cpus = NULL;
	int node = scx_cpu_node_if_enabled(prev_cpu);
	s32 cpu;

	/*
	 * This is necessary to protect llc_cpus.
	 */
	rcu_read_lock();

	/*
	 * Determine the scheduling domain only if the task is allowed to run
	 * on all CPUs.
	 *
	 * This is done primarily for efficiency, as it avoids the overhead of
	 * updating a cpumask every time we need to select an idle CPU (which
	 * can be costly in large SMP systems), but it also aligns logically:
	 * if a task's scheduling domain is restricted by user-space (through
	 * CPU affinity), the task will simply use the flat scheduling domain
	 * defined by user-space.
	 */
	if (p->nr_cpus_allowed >= num_possible_cpus()) {
		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
			numa_cpus = numa_span(prev_cpu);

		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
			llc_cpus = llc_span(prev_cpu);
	}

	/*
	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
	 */
	if (wake_flags & SCX_WAKE_SYNC) {
		int waker_node;

		/*
		 * If the waker's CPU is cache affine and prev_cpu is idle,
		 * then avoid a migration.
		 */
		cpu = smp_processor_id();
		if (cpus_share_cache(cpu, prev_cpu) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}

		/*
		 * If the waker's local DSQ is empty, and the system is under
		 * utilized, try to wake up @p to the local DSQ of the waker.
		 *
		 * Checking only for an empty local DSQ is insufficient as it
		 * could give the wakee an unfair advantage when the system is
		 * oversaturated.
		 *
		 * Checking only for the presence of idle CPUs is also
		 * insufficient as the local DSQ of the waker could have tasks
		 * piled up on it even if there is an idle core elsewhere on
		 * the system.
		 */
		waker_node = cpu_to_node(cpu);
		if (!(current->flags & PF_EXITING) &&
		    cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
			if (cpumask_test_cpu(cpu, p->cpus_ptr))
				goto out_unlock;
		}
	}

	/*
	 * If CPU has SMT, any wholly idle CPU is likely a better pick than
	 * partially idle @prev_cpu.
	 */
	if (sched_smt_active()) {
		/*
		 * Keep using @prev_cpu if it's part of a fully idle core.
		 */
		if (cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}

		/*
		 * Search for any fully idle core in the same LLC domain.
		 */
		if (llc_cpus) {
			cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
			if (cpu >= 0)
				goto out_unlock;
		}

		/*
		 * Search for any fully idle core in the same NUMA node.
		 */
		if (numa_cpus) {
			cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
			if (cpu >= 0)
				goto out_unlock;
		}

		/*
		 * Search for any full-idle core usable by the task.
		 *
		 * If the node-aware idle CPU selection policy is enabled
		 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always
		 * begin in prev_cpu's node and proceed to other nodes in
		 * order of increasing distance.
		 */
		cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
		if (cpu >= 0)
			goto out_unlock;

		/*
		 * Give up if we're strictly looking for a full-idle SMT
		 * core.
		 */
		if (flags & SCX_PICK_IDLE_CORE) {
			cpu = prev_cpu;
			goto out_unlock;
		}
	}

	/*
	 * Use @prev_cpu if it's idle.
	 */
	if (scx_idle_test_and_clear_cpu(prev_cpu)) {
		cpu = prev_cpu;
		goto out_unlock;
	}

	/*
	 * Search for any idle CPU in the same LLC domain.
	 */
	if (llc_cpus) {
		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
		if (cpu >= 0)
			goto out_unlock;
	}

	/*
	 * Search for any idle CPU in the same NUMA node.
	 */
	if (numa_cpus) {
		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
		if (cpu >= 0)
			goto out_unlock;
	}

	/*
	 * Search for any idle CPU usable by the task.
	 *
	 * If the node-aware idle CPU selection policy is enabled
	 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always begin
	 * in prev_cpu's node and proceed to other nodes in order of
	 * increasing distance.
	 */
	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);

out_unlock:
	rcu_read_unlock();

	return cpu;
}
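
/*
 * Example (illustrative): wakeup of @p that last ran on cpu4, on an SMT-2
 * system with the LLC and NUMA optimizations enabled, without
 * %SCX_WAKE_SYNC and with no special @flags. Assuming cpu4's sibling is
 * busy, the search above proceeds roughly as:
 *
 *	1. cpu4's core is not fully idle		-> skip
 *	2. fully idle core in cpu4's LLC		-> return if found
 *	3. fully idle core in cpu4's NUMA node		-> return if found
 *	4. fully idle core anywhere in p->cpus_ptr	-> return if found
 *	5. cpu4 itself, if idle			-> return if claimed
 *	6. any idle CPU in LLC, then NUMA node,
 *	   then anywhere in p->cpus_ptr		-> return if found
 *	7. otherwise					-> -EBUSY
 */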

/*
 * Initialize global and per-node idle cpumasks.
 */
void scx_idle_init_masks(void)
{
	int node;

	/* Allocate global idle cpumasks */
	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));

	/* Allocate per-node idle cpumasks */
	scx_idle_node_masks = kcalloc(num_possible_nodes(),
				      sizeof(*scx_idle_node_masks), GFP_KERNEL);
	BUG_ON(!scx_idle_node_masks);

	for_each_node(node) {
		scx_idle_node_masks[node] = kzalloc_node(sizeof(**scx_idle_node_masks),
							 GFP_KERNEL, node);
		BUG_ON(!scx_idle_node_masks[node]);

		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->cpu, GFP_KERNEL, node));
		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->smt, GFP_KERNEL, node));
	}
}

static void update_builtin_idle(int cpu, bool idle)
{
	int node = scx_cpu_node_if_enabled(cpu);
	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

	assign_cpu(cpu, idle_cpus, idle);

#ifdef CONFIG_SCHED_SMT
	if (sched_smt_active()) {
		const struct cpumask *smt = cpu_smt_mask(cpu);
		struct cpumask *idle_smts = idle_cpumask(node)->smt;

		if (idle) {
			/*
			 * idle_smt handling is racy but that's fine as it's
			 * only for optimization and self-correcting.
			 */
			if (!cpumask_subset(smt, idle_cpus))
				return;
			cpumask_or(idle_smts, idle_smts, smt);
		} else {
			cpumask_andnot(idle_smts, idle_smts, smt);
		}
	}
#endif
}
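
/*
 * Example (illustrative): SMT-2 core with siblings cpu2/cpu3, per the
 * update_builtin_idle() logic above:
 *
 *	cpu2 idle, cpu3 busy  -> idle_cpus: cpu2 set
 *	                         idle_smts: unchanged (core not fully idle)
 *	cpu3 also goes idle   -> idle_cpus: cpu2 and cpu3 set
 *	                         idle_smts: cpu2 and cpu3 set (whole core idle)
 *	cpu2 becomes busy     -> idle_cpus: cpu2 cleared
 *	                         idle_smts: cpu2 and cpu3 both cleared
 */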

/*
 * Update the idle state of a CPU to @idle.
 *
 * If @do_notify is true, ops.update_idle() is invoked to notify the scx
 * scheduler of an actual idle state transition (idle to busy or vice
 * versa). If @do_notify is false, only the idle state in the idle masks is
 * refreshed without invoking ops.update_idle().
 *
 * This distinction is necessary, because an idle CPU can be "reserved" and
 * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
 * busy even if no tasks are dispatched. In this case, the CPU may return
 * to idle without a true state transition. Refreshing the idle masks
 * without invoking ops.update_idle() ensures accurate idle state tracking
 * while avoiding unnecessary updates and maintaining balanced state
 * transitions.
 */
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
	int cpu = cpu_of(rq);

	lockdep_assert_rq_held(rq);

	/*
	 * Trigger ops.update_idle() only when transitioning from a task to
	 * the idle thread and vice versa.
	 *
	 * Idle transitions are indicated by do_notify being set to true,
	 * managed by put_prev_task_idle()/set_next_task_idle().
	 */
	if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);

	/*
	 * Update the idle masks:
	 * - for real idle transitions (do_notify == true)
	 * - for idle-to-idle transitions (indicated by the previous task
	 *   being the idle thread, managed by pick_task_idle())
	 *
	 * Skip updating idle masks if the previous task is not the idle
	 * thread, since set_next_task_idle() has already handled it when
	 * transitioning from a task to the idle thread (calling this
	 * function with do_notify == true).
	 *
	 * In this way we can avoid updating the idle masks twice,
	 * unnecessarily.
	 */
	if (static_branch_likely(&scx_builtin_idle_enabled))
		if (do_notify || is_idle_task(rq->curr))
			update_builtin_idle(cpu, idle);
}

static void reset_idle_masks(struct sched_ext_ops *ops)
{
	int node;

	/*
	 * Consider all online cpus idle. Should converge to the actual state
	 * quickly.
	 */
	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);
		return;
	}

	for_each_node(node) {
		const struct cpumask *node_mask = cpumask_of_node(node);

		cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
		cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
	}
}
#endif	/* CONFIG_SMP */

void scx_idle_enable(struct sched_ext_ops *ops)
{
	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
		static_branch_enable(&scx_builtin_idle_enabled);
	else
		static_branch_disable(&scx_builtin_idle_enabled);

	if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
		static_branch_enable(&scx_builtin_idle_per_node);
	else
		static_branch_disable(&scx_builtin_idle_per_node);

#ifdef CONFIG_SMP
	reset_idle_masks(ops);
#endif
}

void scx_idle_disable(void)
{
	static_branch_disable(&scx_builtin_idle_enabled);
	static_branch_disable(&scx_builtin_idle_per_node);
}
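
/*
 * Example (illustrative): whether built-in idle tracking stays enabled,
 * per the scx_idle_enable() logic above:
 *
 *	ops.update_idle()	SCX_OPS_KEEP_BUILTIN_IDLE	builtin tracking
 *	not implemented		(any)				enabled
 *	implemented		not set				disabled
 *	implemented		set				enabled
 *
 * A scheduler that implements ops.update_idle() but still wants to use
 * scx_bpf_pick_idle_cpu() and friends must therefore set
 * %SCX_OPS_KEEP_BUILTIN_IDLE.
 */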

/********************************************************************************
 * Helpers that can be called from the BPF scheduler.
 */

static int validate_node(int node)
{
	if (!static_branch_likely(&scx_builtin_idle_per_node)) {
		scx_ops_error("per-node idle tracking is disabled");
		return -EOPNOTSUPP;
	}

	/* Return no entry for NUMA_NO_NODE (not a critical scx error) */
	if (node == NUMA_NO_NODE)
		return -ENOENT;

	/* Make sure node is in a valid range */
	if (node < 0 || node >= nr_node_ids) {
		scx_ops_error("invalid node %d", node);
		return -EINVAL;
	}

	/* Make sure the node is part of the set of possible nodes */
	if (!node_possible(node)) {
		scx_ops_error("unavailable node %d", node);
		return -EINVAL;
	}

	return node;
}

__bpf_kfunc_start_defs();

static bool check_builtin_idle_enabled(void)
{
	if (static_branch_likely(&scx_builtin_idle_enabled))
		return true;

	scx_ops_error("built-in idle tracking is disabled");
	return false;
}

/**
 * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
 * trigger an error if @cpu is invalid
 * @cpu: target CPU
 */
__bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
{
#ifdef CONFIG_NUMA
	if (!ops_cpu_valid(cpu, NULL))
		return NUMA_NO_NODE;

	return cpu_to_node(cpu);
#else
	return 0;
#endif
}

/**
 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
 * @p: task_struct to select a CPU for
 * @prev_cpu: CPU @p was on previously
 * @wake_flags: %SCX_WAKE_* flags
 * @is_idle: out parameter indicating whether the returned CPU is idle
 *
 * Can only be called from ops.select_cpu() if the built-in CPU selection is
 * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
 * @p, @prev_cpu and @wake_flags match ops.select_cpu().
 *
 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
 * currently idle and thus a good candidate for direct dispatching.
 */
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
				       u64 wake_flags, bool *is_idle)
{
#ifdef CONFIG_SMP
	s32 cpu;
#endif
	if (!ops_cpu_valid(prev_cpu, NULL))
		goto prev_cpu;

	if (!check_builtin_idle_enabled())
		goto prev_cpu;

	if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
		goto prev_cpu;

#ifdef CONFIG_SMP
	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
	if (cpu >= 0) {
		*is_idle = true;
		return cpu;
	}
#endif

prev_cpu:
	*is_idle = false;
	return prev_cpu;
}
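
/*
 * Example (illustrative): a minimal BPF-side ops.select_cpu() built on top
 * of this kfunc, following the direct-dispatch pattern described in
 * Documentation/scheduler/sched-ext.rst (the struct_ops name below is made
 * up for the sketch):
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 *
 * When @is_idle is set the picked CPU has already been claimed, so
 * inserting directly into its local DSQ is safe and skips a separate
 * ops.enqueue() round trip.
 */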

/**
 * scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the
 * idle-tracking per-CPU cpumask of a target NUMA node.
 * @node: target NUMA node
 *
 * Returns an empty cpumask if idle tracking is not enabled, if @node is
 * not valid, or if running on a UP kernel. In this case the actual error
 * will be reported to the BPF scheduler via scx_ops_error().
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
{
	node = validate_node(node);
	if (node < 0)
		return cpu_none_mask;

#ifdef CONFIG_SMP
	return idle_cpumask(node)->cpu;
#else
	return cpu_none_mask;
#endif
}

/**
 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
 * per-CPU cpumask.
 *
 * Returns an empty mask if idle tracking is not enabled, or if running on
 * a UP kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
		scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
		return cpu_none_mask;
	}

	if (!check_builtin_idle_enabled())
		return cpu_none_mask;

#ifdef CONFIG_SMP
	return idle_cpumask(NUMA_NO_NODE)->cpu;
#else
	return cpu_none_mask;
#endif
}

/**
 * scx_bpf_get_idle_smtmask_node - Get a referenced kptr to the
 * idle-tracking, per-physical-core cpumask of a target NUMA node. Can be
 * used to determine if an entire physical core is free.
 * @node: target NUMA node
 *
 * Returns an empty cpumask if idle tracking is not enabled, if @node is
 * not valid, or if running on a UP kernel. In this case the actual error
 * will be reported to the BPF scheduler via scx_ops_error().
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
{
	node = validate_node(node);
	if (node < 0)
		return cpu_none_mask;

#ifdef CONFIG_SMP
	if (sched_smt_active())
		return idle_cpumask(node)->smt;
	else
		return idle_cpumask(node)->cpu;
#else
	return cpu_none_mask;
#endif
}

/**
 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
 * per-physical-core cpumask. Can be used to determine if an entire physical
 * core is free.
 *
 * Returns an empty mask if idle tracking is not enabled, or if running on
 * a UP kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
		scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
		return cpu_none_mask;
	}

	if (!check_builtin_idle_enabled())
		return cpu_none_mask;

#ifdef CONFIG_SMP
	if (sched_smt_active())
		return idle_cpumask(NUMA_NO_NODE)->smt;
	else
		return idle_cpumask(NUMA_NO_NODE)->cpu;
#else
	return cpu_none_mask;
#endif
}

/**
 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
 * either the percpu, or SMT idle-tracking cpumask.
 * @idle_mask: &cpumask to use
 */
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
	/*
	 * Empty function body because we aren't actually acquiring or releasing
	 * a reference to a global idle cpumask, which is read-only in the
	 * caller and is never released. The acquire / release semantics here
	 * are just used to make the cpumask a trusted pointer in the caller.
	 */
}

/**
 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
 * @cpu: cpu to test and clear idle for
 *
 * Returns %true if @cpu was idle and its idle state was successfully cleared.
 * %false otherwise.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 */
__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
{
	if (!check_builtin_idle_enabled())
		return false;

	if (ops_cpu_valid(cpu, NULL))
		return scx_idle_test_and_clear_cpu(cpu);
	else
		return false;
}
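
/*
 * Example (illustrative): typical BPF-side acquire/use/release pattern for
 * the idle cpumask kfuncs above. bpf_cpumask_test_cpu() is the generic BPF
 * cpumask kfunc and @target_cpu is a placeholder:
 *
 *	const struct cpumask *idle;
 *	bool was_idle;
 *
 *	idle = scx_bpf_get_idle_cpumask();
 *	was_idle = bpf_cpumask_test_cpu(target_cpu, idle);
 *	scx_bpf_put_idle_cpumask(idle);
 *
 * Note that testing the mask is only a hint: actually claiming the CPU
 * still requires scx_bpf_test_and_clear_cpu_idle().
 */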

/**
 * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from @node
 * @cpus_allowed: Allowed cpumask
 * @node: target NUMA node
 * @flags: %SCX_PICK_IDLE_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
 *
 * Returns the picked idle cpu number on success, or -%EBUSY if no matching
 * cpu was found.
 *
 * The search starts from @node and proceeds to other online NUMA nodes in
 * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
 * in which case the search is limited to the target @node).
 *
 * Always returns an error if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set, or if
 * %SCX_OPS_BUILTIN_IDLE_PER_NODE is not set.
 */
__bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
					   int node, u64 flags)
{
	node = validate_node(node);
	if (node < 0)
		return node;

	return scx_pick_idle_cpu(cpus_allowed, node, flags);
}

/**
 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
 * number on success, or -%EBUSY if no matching cpu was found.
 *
 * Idle CPU tracking may race against CPU scheduling state transitions. For
 * example, this function may return -%EBUSY as CPUs are transitioning into the
 * idle state. If the caller then assumes that there will be dispatch events on
 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
 * event in the near future.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 *
 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
 * scx_bpf_pick_idle_cpu_node() instead.
 */
__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
				      u64 flags)
{
	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
		scx_ops_error("per-node idle tracking is enabled");
		return -EBUSY;
	}

	if (!check_builtin_idle_enabled())
		return -EBUSY;

	return scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
}
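
/*
 * Example (illustrative): handling the -EBUSY race described above from a
 * BPF scheduler. If no idle CPU can be claimed, fall back to any allowed
 * CPU and kick it so that at least one dispatch event is guaranteed:
 *
 *	s32 cpu;
 *
 *	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *	if (cpu < 0) {
 *		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 *		if (cpu >= 0)
 *			scx_bpf_kick_cpu(cpu, 0);
 *	}
 */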

/**
 * scx_bpf_pick_any_cpu_node - Pick and claim an idle cpu if available
 * or pick any CPU from @node
 * @cpus_allowed: Allowed cpumask
 * @node: target NUMA node
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
 * empty.
 *
 * The search starts from @node and proceeds to other online NUMA nodes in
 * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
 * in which case the search is limited to the target @node, regardless of
 * the CPU idle state).
 *
 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
 * set, this function can't tell which CPUs are idle and will always pick any
 * CPU.
 */
__bpf_kfunc s32 scx_bpf_pick_any_cpu_node(const struct cpumask *cpus_allowed,
					  int node, u64 flags)
{
	s32 cpu;

	node = validate_node(node);
	if (node < 0)
		return node;

	cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
	if (cpu >= 0)
		return cpu;

	if (flags & SCX_PICK_IDLE_IN_NODE)
		cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
	else
		cpu = cpumask_any_distribute(cpus_allowed);
	if (cpu < nr_cpu_ids)
		return cpu;
	else
		return -EBUSY;
}

/**
 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
 * empty.
 *
 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
 * set, this function can't tell which CPUs are idle and will always pick any
 * CPU.
 *
 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
 * scx_bpf_pick_any_cpu_node() instead.
 */
__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
				     u64 flags)
{
	s32 cpu;

	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
		scx_ops_error("per-node idle tracking is enabled");
		return -EBUSY;
	}

	if (static_branch_likely(&scx_builtin_idle_enabled)) {
		cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
		if (cpu >= 0)
			return cpu;
	}

	cpu = cpumask_any_distribute(cpus_allowed);
	if (cpu < nr_cpu_ids)
		return cpu;
	else
		return -EBUSY;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_idle)
BTF_ID_FLAGS(func, scx_bpf_cpu_node)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask_node, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask_node, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_idle)

static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
	.owner		= THIS_MODULE,
	.set		= &scx_kfunc_ids_idle,
};

BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)

static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
	.owner		= THIS_MODULE,
	.set		= &scx_kfunc_ids_select_cpu,
};

int scx_idle_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_select_cpu) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle);

	return ret;
}