// SPDX-License-Identifier: GPL-2.0
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Built-in idle CPU tracking policy.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Andrea Righi <arighi@nvidia.com>
 */
#include "ext_idle.h"

/* Enable/disable built-in idle CPU selection policy */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);

/* Enable/disable per-node idle cpumasks */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);

/* Enable/disable LLC aware optimizations */
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);

/* Enable/disable NUMA aware optimizations */
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);

/*
 * cpumasks to track idle CPUs within each NUMA node.
 *
 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is not enabled, a single global cpumask
 * is used to track all the idle CPUs in the system.
 */
struct scx_idle_cpus {
	cpumask_var_t cpu;
	cpumask_var_t smt;
};

/*
 * Global host-wide idle cpumasks (used when SCX_OPS_BUILTIN_IDLE_PER_NODE
 * is not enabled).
 */
static struct scx_idle_cpus scx_idle_global_masks;

/*
 * Per-node idle cpumasks.
 */
static struct scx_idle_cpus **scx_idle_node_masks;

/*
 * Local per-CPU cpumasks (used to generate temporary idle cpumasks).
 */
static DEFINE_PER_CPU(cpumask_var_t, local_idle_cpumask);
static DEFINE_PER_CPU(cpumask_var_t, local_llc_idle_cpumask);
static DEFINE_PER_CPU(cpumask_var_t, local_numa_idle_cpumask);

/*
 * Return the idle masks associated with a target @node.
 *
 * NUMA_NO_NODE identifies the global idle cpumask.
 */
static struct scx_idle_cpus *idle_cpumask(int node)
{
	return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];
}

/*
 * Returns the NUMA node ID associated with a @cpu, or NUMA_NO_NODE if
 * per-node idle cpumasks are disabled.
 */
static int scx_cpu_node_if_enabled(int cpu)
{
	if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
		return NUMA_NO_NODE;

	return cpu_to_node(cpu);
}

static bool scx_idle_test_and_clear_cpu(int cpu)
{
	int node = scx_cpu_node_if_enabled(cpu);
	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

#ifdef CONFIG_SCHED_SMT
	/*
	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
	 * cluster is not wholly idle either way. This also prevents
	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
	 */
	if (sched_smt_active()) {
		const struct cpumask *smt = cpu_smt_mask(cpu);
		struct cpumask *idle_smts = idle_cpumask(node)->smt;

		/*
		 * If offline, @cpu is not its own sibling and
		 * scx_pick_idle_cpu() can get caught in an infinite loop as
		 * @cpu is never cleared from the idle SMT mask. Ensure that
		 * @cpu is eventually cleared.
		 *
		 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
		 * reduce memory writes, which may help alleviate cache
		 * coherence pressure.
		 */
		if (cpumask_intersects(smt, idle_smts))
			cpumask_andnot(idle_smts, idle_smts, smt);
		else if (cpumask_test_cpu(cpu, idle_smts))
			__cpumask_clear_cpu(cpu, idle_smts);
	}
#endif

	return cpumask_test_and_clear_cpu(cpu, idle_cpus);
}

/*
 * Pick an idle CPU in a specific NUMA node.
 */
static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	int cpu;

retry:
	if (sched_smt_active()) {
		cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
		if (cpu < nr_cpu_ids)
			goto found;

		if (flags & SCX_PICK_IDLE_CORE)
			return -EBUSY;
	}

	cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
	if (cpu >= nr_cpu_ids)
		return -EBUSY;

found:
	if (scx_idle_test_and_clear_cpu(cpu))
		return cpu;
	else
		goto retry;
}

#ifdef CONFIG_NUMA
/*
 * Tracks nodes that have not yet been visited when searching for an idle
 * CPU across all available nodes.
 */
static DEFINE_PER_CPU(nodemask_t, per_cpu_unvisited);

/*
 * Search for an idle CPU across all nodes, excluding @node.
 */
static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	nodemask_t *unvisited;
	s32 cpu = -EBUSY;

	preempt_disable();
	unvisited = this_cpu_ptr(&per_cpu_unvisited);

	/*
	 * Restrict the search to the online nodes (excluding the current
	 * node that has been visited already).
	 */
	nodes_copy(*unvisited, node_states[N_ONLINE]);
	node_clear(node, *unvisited);

	/*
	 * Traverse all nodes in order of increasing distance, starting
	 * from @node.
	 *
	 * This loop is O(N^2), with N being the number of NUMA nodes,
	 * which might be quite expensive in large NUMA systems. However,
	 * this complexity comes into play only when a scheduler enables
	 * SCX_OPS_BUILTIN_IDLE_PER_NODE and it's requesting an idle CPU
	 * without specifying a target NUMA node, so it shouldn't be a
	 * bottleneck in most cases.
	 *
	 * As a future optimization we may want to cache the list of nodes
	 * in a per-node array, instead of actually traversing them every
	 * time.
	 */
	for_each_node_numadist(node, *unvisited) {
		cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
		if (cpu >= 0)
			break;
	}
	preempt_enable();

	return cpu;
}
#else
static inline s32
pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	return -EBUSY;
}
#endif

/*
 * Find an idle CPU in the system, starting from @node.
 */
static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	s32 cpu;

	/*
	 * Always search in the starting node first (this is an
	 * optimization that can save some cycles even when the search is
	 * not limited to a single node).
	 */
	cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
	if (cpu >= 0)
		return cpu;

	/*
	 * Stop the search if we are using only a single global cpumask
	 * (NUMA_NO_NODE) or if the search is restricted to the first node
	 * only.
	 */
	if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)
		return -EBUSY;

	/*
	 * Extend the search to the other online nodes.
	 */
	return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags);
}

/*
 * Return the number of CPUs in the same LLC domain of @cpu (or zero if the LLC
 * domain is not defined).
 */
static unsigned int llc_weight(s32 cpu)
{
	struct sched_domain *sd;

	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	if (!sd)
		return 0;

	return sd->span_weight;
}

/*
 * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
 * domain is not defined).
 */
static struct cpumask *llc_span(s32 cpu)
{
	struct sched_domain *sd;

	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	if (!sd)
		return NULL;

	return sched_domain_span(sd);
}

/*
 * Return the number of CPUs in the same NUMA domain of @cpu (or zero if the
 * NUMA domain is not defined).
 */
static unsigned int numa_weight(s32 cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	sd = rcu_dereference(per_cpu(sd_numa, cpu));
	if (!sd)
		return 0;
	sg = sd->groups;
	if (!sg)
		return 0;

	return sg->group_weight;
}

/*
 * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
 * domain is not defined).
 */
static struct cpumask *numa_span(s32 cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	sd = rcu_dereference(per_cpu(sd_numa, cpu));
	if (!sd)
		return NULL;
	sg = sd->groups;
	if (!sg)
		return NULL;

	return sched_group_span(sg);
}

/*
 * Return true if the LLC domains do not perfectly overlap with the NUMA
 * domains, false otherwise.
 */
static bool llc_numa_mismatch(void)
{
	int cpu;

	/*
	 * We need to scan all online CPUs to verify whether their scheduling
	 * domains overlap.
	 *
	 * While it is rare to encounter architectures with asymmetric NUMA
	 * topologies, CPU hotplugging or virtualized environments can result
	 * in asymmetric configurations.
	 *
	 * For example:
	 *
	 *  NUMA 0:
	 *    - LLC 0: cpu0..cpu7
	 *    - LLC 1: cpu8..cpu15 [offline]
	 *
	 *  NUMA 1:
	 *    - LLC 0: cpu16..cpu23
	 *    - LLC 1: cpu24..cpu31
	 *
	 * In this case, if we only check the first online CPU (cpu0), we might
	 * incorrectly assume that the LLC and NUMA domains are fully
	 * overlapping, which is not the case (as NUMA 1 has two distinct LLC
	 * domains).
	 */
	for_each_online_cpu(cpu)
		if (llc_weight(cpu) != numa_weight(cpu))
			return true;

	return false;
}

/*
 * Initialize topology-aware scheduling.
 *
 * Detect if the system has multiple LLC or multiple NUMA domains and enable
 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
 * selection policy.
 *
 * Assumption: the kernel's internal topology representation assumes that each
 * CPU belongs to a single LLC domain, and that each LLC domain is entirely
 * contained within a single NUMA node.
 */
void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
{
	bool enable_llc = false, enable_numa = false;
	unsigned int nr_cpus;
	s32 cpu = cpumask_first(cpu_online_mask);

	/*
	 * Enable LLC domain optimization only when there are multiple LLC
	 * domains among the online CPUs. If all online CPUs are part of a
	 * single LLC domain, the idle CPU selection logic can choose any
	 * online CPU without bias.
	 *
	 * Note that it is sufficient to check the LLC domain of the first
	 * online CPU to determine whether a single LLC domain includes all
	 * CPUs.
	 */
	rcu_read_lock();
	nr_cpus = llc_weight(cpu);
	if (nr_cpus > 0) {
		if (nr_cpus < num_online_cpus())
			enable_llc = true;
		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
	}

	/*
	 * Enable NUMA optimization only when there are multiple NUMA domains
	 * among the online CPUs and the NUMA domains don't perfectly overlap
	 * with the LLC domains.
	 *
	 * If all CPUs belong to the same NUMA node and the same LLC domain,
	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
	 * for an idle CPU in the same domain twice is redundant.
	 *
	 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
	 * optimization, as we would naturally select idle CPUs within
	 * specific NUMA nodes by querying the corresponding per-node cpumask.
	 */
	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
		nr_cpus = numa_weight(cpu);
		if (nr_cpus > 0) {
			if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
				enable_numa = true;
			pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
				 cpumask_pr_args(numa_span(cpu)), nr_cpus);
		}
	}
	rcu_read_unlock();

	pr_debug("sched_ext: LLC idle selection %s\n",
		 str_enabled_disabled(enable_llc));
	pr_debug("sched_ext: NUMA idle selection %s\n",
		 str_enabled_disabled(enable_numa));

	if (enable_llc)
		static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
	else
		static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
	if (enable_numa)
		static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
	else
		static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
}

/*
 * Return true if @p can run on all possible CPUs, false otherwise.
 */
static inline bool task_affinity_all(const struct task_struct *p)
{
	return p->nr_cpus_allowed >= num_possible_cpus();
}

/*
 * Built-in CPU idle selection policy:
 *
 * 1. Prioritize full-idle cores:
 *   - always prioritize CPUs from fully idle cores (both logical CPUs are
 *     idle) to avoid interference caused by SMT.
 *
 * 2. Reuse the same CPU:
 *   - prefer the last used CPU to take advantage of cached data (L1, L2) and
 *     branch prediction optimizations.
 *
 * 3. Pick a CPU within the same LLC (Last-Level Cache):
 *   - if the above conditions aren't met, pick a CPU that shares the same
 *     LLC, if the LLC domain is a subset of @cpus_allowed, to maintain
 *     cache locality.
 *
 * 4. Pick a CPU within the same NUMA node, if enabled:
 *   - choose a CPU from the same NUMA node, if the node cpumask is a
 *     subset of @cpus_allowed, to reduce memory access latency.
 *
 * 5. Pick any idle CPU within the @cpus_allowed domain.
 *
 * Steps 3 and 4 are performed only if the system has, respectively,
 * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
 * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
 *
 * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
 * begin in @prev_cpu's node and proceed to other nodes in order of
 * increasing distance.
 *
 * Return the picked CPU if idle, or a negative value otherwise.
 *
 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
 * we never call ops.select_cpu() for them, see select_task_rq().
 */
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
		       const struct cpumask *cpus_allowed, u64 flags)
{
	const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
	const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
	int node = scx_cpu_node_if_enabled(prev_cpu);
	bool is_prev_allowed;
	s32 cpu;

	preempt_disable();

	/*
	 * Check whether @prev_cpu is still within the allowed set. If not,
	 * we can still try selecting a nearby CPU.
	 */
	is_prev_allowed = cpumask_test_cpu(prev_cpu, allowed);

	/*
	 * Determine the subset of CPUs usable by @p within @cpus_allowed.
	 */
	if (allowed != p->cpus_ptr) {
		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_idle_cpumask);

		if (task_affinity_all(p)) {
			allowed = cpus_allowed;
		} else if (cpumask_and(local_cpus, cpus_allowed, p->cpus_ptr)) {
			allowed = local_cpus;
		} else {
			cpu = -EBUSY;
			goto out_enable;
		}
	}

	/*
	 * This is necessary to protect llc_cpus.
	 */
	rcu_read_lock();

	/*
	 * Determine the subset of CPUs that the task can use in its
	 * current LLC and node.
	 *
	 * If the task can run on all CPUs, use the node and LLC cpumasks
	 * directly.
	 */
	if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa)) {
		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_numa_idle_cpumask);
		const struct cpumask *cpus = numa_span(prev_cpu);

		if (allowed == p->cpus_ptr && task_affinity_all(p))
			numa_cpus = cpus;
		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
			numa_cpus = local_cpus;
	}

	if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_llc_idle_cpumask);
		const struct cpumask *cpus = llc_span(prev_cpu);

		if (allowed == p->cpus_ptr && task_affinity_all(p))
			llc_cpus = cpus;
		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
			llc_cpus = local_cpus;
	}

	/*
	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
	 */
	if (wake_flags & SCX_WAKE_SYNC) {
		int waker_node;

		/*
		 * If the waker's CPU is cache affine and prev_cpu is idle,
		 * then avoid a migration.
		 */
		cpu = smp_processor_id();
		if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}

		/*
		 * If the waker's local DSQ is empty, and the system is
		 * underutilized, try to wake up @p on the local DSQ of the
		 * waker.
		 *
		 * Checking only for an empty local DSQ is insufficient as it
		 * could give the wakee an unfair advantage when the system is
		 * oversaturated.
		 *
		 * Checking only for the presence of idle CPUs is also
		 * insufficient as the local DSQ of the waker could have tasks
		 * piled up on it even if there is an idle core elsewhere on
		 * the system.
		 */
		waker_node = scx_cpu_node_if_enabled(cpu);
		if (!(current->flags & PF_EXITING) &&
		    cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
			if (cpumask_test_cpu(cpu, allowed))
				goto out_unlock;
		}
	}

	/*
	 * If CPU has SMT, any wholly idle CPU is likely a better pick than
	 * partially idle @prev_cpu.
	 */
	if (sched_smt_active()) {
		/*
		 * Keep using @prev_cpu if it's part of a fully idle core.
		 */
		if (is_prev_allowed &&
		    cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}

		/*
		 * Search for any fully idle core in the same LLC domain.
		 */
		if (llc_cpus) {
			cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
			if (cpu >= 0)
				goto out_unlock;
		}

		/*
		 * Search for any fully idle core in the same NUMA node.
		 */
		if (numa_cpus) {
			cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
			if (cpu >= 0)
				goto out_unlock;
		}

		/*
		 * Search for any full-idle core usable by the task.
		 *
		 * If the node-aware idle CPU selection policy is enabled
		 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always
		 * begin in prev_cpu's node and proceed to other nodes in
		 * order of increasing distance.
		 */
		cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
		if (cpu >= 0)
			goto out_unlock;

		/*
		 * Give up if we're strictly looking for a full-idle SMT
		 * core.
		 */
		if (flags & SCX_PICK_IDLE_CORE) {
			cpu = -EBUSY;
			goto out_unlock;
		}
	}

	/*
	 * Use @prev_cpu if it's idle.
	 */
	if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
		cpu = prev_cpu;
		goto out_unlock;
	}

	/*
	 * Search for any idle CPU in the same LLC domain.
	 */
	if (llc_cpus) {
		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
		if (cpu >= 0)
			goto out_unlock;
	}

	/*
	 * Search for any idle CPU in the same NUMA node.
	 */
	if (numa_cpus) {
		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
		if (cpu >= 0)
			goto out_unlock;
	}

	/*
	 * Search for any idle CPU usable by the task.
	 *
	 * If the node-aware idle CPU selection policy is enabled
	 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always begin
	 * in prev_cpu's node and proceed to other nodes in order of
	 * increasing distance.
	 */
	cpu = scx_pick_idle_cpu(allowed, node, flags);

out_unlock:
	rcu_read_unlock();
out_enable:
	preempt_enable();

	return cpu;
}

/*
 * Initialize global and per-node idle cpumasks.
 */
void scx_idle_init_masks(void)
{
	int i;

	/* Allocate global idle cpumasks */
	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));

	/* Allocate per-node idle cpumasks (use nr_node_ids for non-contiguous NUMA nodes) */
	scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks, nr_node_ids);
	BUG_ON(!scx_idle_node_masks);

	for_each_node(i) {
		scx_idle_node_masks[i] = kzalloc_node(sizeof(**scx_idle_node_masks),
						      GFP_KERNEL, i);
		BUG_ON(!scx_idle_node_masks[i]);

		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i));
		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt, GFP_KERNEL, i));
	}

	/* Allocate local per-cpu idle cpumasks */
	for_each_possible_cpu(i) {
		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_idle_cpumask, i),
					       GFP_KERNEL, cpu_to_node(i)));
		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_llc_idle_cpumask, i),
					       GFP_KERNEL, cpu_to_node(i)));
		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_numa_idle_cpumask, i),
					       GFP_KERNEL, cpu_to_node(i)));
	}
}

static void update_builtin_idle(int cpu, bool idle)
{
	int node = scx_cpu_node_if_enabled(cpu);
	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

	assign_cpu(cpu, idle_cpus, idle);

#ifdef CONFIG_SCHED_SMT
	if (sched_smt_active()) {
		const struct cpumask *smt = cpu_smt_mask(cpu);
		struct cpumask *idle_smts = idle_cpumask(node)->smt;

		if (idle) {
			/*
			 * idle_smt handling is racy but that's fine as it's
			 * only for optimization and self-correcting.
			 */
			if (!cpumask_subset(smt, idle_cpus))
				return;
			cpumask_or(idle_smts, idle_smts, smt);
		} else {
			cpumask_andnot(idle_smts, idle_smts, smt);
		}
	}
#endif
}

/*
 * Update the idle state of a CPU to @idle.
 *
 * If @do_notify is true, ops.update_idle() is invoked to notify the scx
 * scheduler of an actual idle state transition (idle to busy or vice
 * versa). If @do_notify is false, only the idle state in the idle masks is
 * refreshed without invoking ops.update_idle().
 *
 * This distinction is necessary, because an idle CPU can be "reserved" and
 * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
 * busy even if no tasks are dispatched. In this case, the CPU may return
 * to idle without a true state transition. Refreshing the idle masks
 * without invoking ops.update_idle() ensures accurate idle state tracking
 * while avoiding unnecessary updates and maintaining balanced state
 * transitions.
 */
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
	struct scx_sched *sch = scx_root;
	int cpu = cpu_of(rq);

	lockdep_assert_rq_held(rq);

	/*
	 * Update the idle masks:
	 * - for real idle transitions (do_notify == true)
	 * - for idle-to-idle transitions (indicated by the previous task
	 *   being the idle thread, managed by pick_task_idle())
	 *
	 * Skip updating idle masks if the previous task is not the idle
	 * thread, since set_next_task_idle() has already handled it when
	 * transitioning from a task to the idle thread (calling this
	 * function with do_notify == true).
	 *
	 * In this way we can avoid updating the idle masks twice,
	 * unnecessarily.
	 */
	if (static_branch_likely(&scx_builtin_idle_enabled))
		if (do_notify || is_idle_task(rq->curr))
			update_builtin_idle(cpu, idle);

	/*
	 * Trigger ops.update_idle() only when transitioning from a task to
	 * the idle thread and vice versa.
	 *
	 * Idle transitions are indicated by do_notify being set to true,
	 * managed by put_prev_task_idle()/set_next_task_idle().
	 *
	 * This must come after builtin idle update so that BPF schedulers can
	 * create interlocking between ops.update_idle() and ops.enqueue() -
	 * either enqueue() sees the idle bit or update_idle() sees the task
	 * that enqueue() queued.
	 */
	if (SCX_HAS_OP(sch, update_idle) && do_notify && !scx_rq_bypassing(rq))
		SCX_CALL_OP(sch, SCX_KF_REST, update_idle, rq, cpu_of(rq), idle);
}

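/*
 * Illustrative sketch (not part of this file): a BPF scheduler that wants to
 * receive idle notifications while keeping the built-in idle tracking sets
 * %SCX_OPS_KEEP_BUILTIN_IDLE and implements ops.update_idle(). A minimal
 * BPF-side skeleton, assuming the usual scx common.bpf.h helpers
 * (BPF_STRUCT_OPS(), SCX_OPS_DEFINE()); the "example" names and the map
 * usage are hypothetical:
 *
 *	void BPF_STRUCT_OPS(example_update_idle, s32 cpu, bool idle)
 *	{
 *		// e.g. record the transition in a BPF map for the
 *		// scheduler's own idle accounting
 *	}
 *
 *	SCX_OPS_DEFINE(example_ops,
 *		       .update_idle	= (void *)example_update_idle,
 *		       .flags		= SCX_OPS_KEEP_BUILTIN_IDLE,
 *		       .name		= "example");
 *
 * Without %SCX_OPS_KEEP_BUILTIN_IDLE, implementing ops.update_idle() disables
 * scx_builtin_idle_enabled (see scx_idle_enable() below) and the idle kfuncs
 * in this file become unavailable.
 */
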
static void reset_idle_masks(struct sched_ext_ops *ops)
{
	int node;

	/*
	 * Consider all online cpus idle. Should converge to the actual state
	 * quickly.
	 */
	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);
		return;
	}

	for_each_node(node) {
		const struct cpumask *node_mask = cpumask_of_node(node);

		cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
		cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
	}
}

void scx_idle_enable(struct sched_ext_ops *ops)
{
	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
		static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
	else
		static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);

	if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
		static_branch_enable_cpuslocked(&scx_builtin_idle_per_node);
	else
		static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);

	reset_idle_masks(ops);
}

void scx_idle_disable(void)
{
	static_branch_disable(&scx_builtin_idle_enabled);
	static_branch_disable(&scx_builtin_idle_per_node);
}

/********************************************************************************
 * Helpers that can be called from the BPF scheduler.
 */

static int validate_node(struct scx_sched *sch, int node)
{
	if (!static_branch_likely(&scx_builtin_idle_per_node)) {
		scx_error(sch, "per-node idle tracking is disabled");
		return -EOPNOTSUPP;
	}

	/* Return no entry for NUMA_NO_NODE (not a critical scx error) */
	if (node == NUMA_NO_NODE)
		return -ENOENT;

	/* Make sure node is in a valid range */
	if (node < 0 || node >= nr_node_ids) {
		scx_error(sch, "invalid node %d", node);
		return -EINVAL;
	}

	/* Make sure the node is part of the set of possible nodes */
	if (!node_possible(node)) {
		scx_error(sch, "unavailable node %d", node);
		return -EINVAL;
	}

	return node;
}

__bpf_kfunc_start_defs();

static bool check_builtin_idle_enabled(struct scx_sched *sch)
{
	if (static_branch_likely(&scx_builtin_idle_enabled))
		return true;

	scx_error(sch, "built-in idle tracking is disabled");
	return false;
}

/*
 * Determine whether @p is a migration-disabled task in the context of BPF
 * code.
 *
 * We can't simply check whether @p->migration_disabled is set in a
 * sched_ext callback, because the BPF prolog (__bpf_prog_enter) may disable
 * migration for the current task while running BPF code.
 *
 * Since the BPF prolog calls migrate_disable() only when CONFIG_PREEMPT_RCU
 * is enabled (via rcu_read_lock_dont_migrate()), migration_disabled == 1 for
 * the current task is ambiguous only in that case: it could be from the BPF
 * prolog rather than a real migrate_disable() call.
 *
 * Without CONFIG_PREEMPT_RCU, the BPF prolog never calls migrate_disable(),
 * so migration_disabled == 1 always means the task is truly
 * migration-disabled.
 *
 * Therefore, when migration_disabled == 1 and CONFIG_PREEMPT_RCU is enabled,
 * check whether @p is the current task or not: if it is, then migration was
 * not disabled before entering the callback, otherwise migration was disabled.
 *
 * Returns true if @p is migration-disabled, false otherwise.
 */
static bool is_bpf_migration_disabled(const struct task_struct *p)
{
	if (p->migration_disabled == 1) {
		if (IS_ENABLED(CONFIG_PREEMPT_RCU))
			return p != current;
		return true;
	}
	return p->migration_disabled;
}

static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
				 s32 prev_cpu, u64 wake_flags,
				 const struct cpumask *allowed, u64 flags)
{
	struct rq *rq;
	struct rq_flags rf;
	s32 cpu;

	if (!ops_cpu_valid(sch, prev_cpu, NULL))
		return -EINVAL;

	if (!check_builtin_idle_enabled(sch))
		return -EBUSY;

	/*
	 * If called from an unlocked context, acquire the task's rq lock,
	 * so that we can safely access p->cpus_ptr and p->nr_cpus_allowed.
	 *
	 * Otherwise, allow this kfunc to be used only from ops.select_cpu()
	 * and ops.enqueue().
	 */
	if (scx_kf_allowed_if_unlocked()) {
		rq = task_rq_lock(p, &rf);
	} else {
		if (!scx_kf_allowed(sch, SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
			return -EPERM;
		rq = scx_locked_rq();
	}

	/*
	 * Validate locking correctness to access p->cpus_ptr and
	 * p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
	 * otherwise, assert that p->pi_lock is held.
	 */
	if (!rq)
		lockdep_assert_held(&p->pi_lock);

	/*
	 * This may also be called from ops.enqueue(), so we need to handle
	 * per-CPU tasks as well. For these tasks, we can skip all idle CPU
	 * selection optimizations and simply check whether the previously
	 * used CPU is idle and within the allowed cpumask.
	 */
	if (p->nr_cpus_allowed == 1 || is_bpf_migration_disabled(p)) {
		if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
		    scx_idle_test_and_clear_cpu(prev_cpu))
			cpu = prev_cpu;
		else
			cpu = -EBUSY;
	} else {
		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
					 allowed ?: p->cpus_ptr, flags);
	}

	if (scx_kf_allowed_if_unlocked())
		task_rq_unlock(rq, p, &rf);

	return cpu;
}

/**
 * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
 *		      trigger an error if @cpu is invalid
 * @cpu: target CPU
 */
__bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch) || !ops_cpu_valid(sch, cpu, NULL))
		return NUMA_NO_NODE;
	return cpu_to_node(cpu);
}

/**
 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
 * @p: task_struct to select a CPU for
 * @prev_cpu: CPU @p was on previously
 * @wake_flags: %SCX_WAKE_* flags
 * @is_idle: out parameter indicating whether the returned CPU is idle
 *
 * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
 * context such as a BPF test_run() call, as long as built-in CPU selection
 * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
 * is set.
 *
 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
 * currently idle and thus a good candidate for direct dispatching.
 */
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
				       u64 wake_flags, bool *is_idle)
{
	struct scx_sched *sch;
	s32 cpu;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

	cpu = select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags, NULL, 0);
	if (cpu >= 0) {
		*is_idle = true;
		return cpu;
	}
	*is_idle = false;
	return prev_cpu;
}

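/*
 * Illustrative sketch (not part of this file): the typical BPF-side use of
 * scx_bpf_select_cpu_dfl(), mirroring the scx_simple example scheduler. When
 * the picked CPU is idle, the task is dispatched directly to the local DSQ so
 * that ops.enqueue() can be skipped. This assumes the usual scx BPF-side
 * helpers (BPF_STRUCT_OPS(), scx_bpf_dsq_insert(), SCX_DSQ_LOCAL,
 * SCX_SLICE_DFL); the "example" name is hypothetical:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *
 *		return cpu;
 *	}
 */
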
struct scx_bpf_select_cpu_and_args {
	/* @p and @cpus_allowed can't be packed together as KF_RCU is not transitive */
	s32 prev_cpu;
	u64 wake_flags;
	u64 flags;
};

/**
 * __scx_bpf_select_cpu_and - Arg-wrapped CPU selection with cpumask
 * @p: task_struct to select a CPU for
 * @cpus_allowed: cpumask of allowed CPUs
 * @args: struct containing the rest of the arguments
 * @args->prev_cpu: CPU @p was on previously
 * @args->wake_flags: %SCX_WAKE_* flags
 * @args->flags: %SCX_PICK_IDLE* flags
 *
 * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument
 * limit. BPF programs should use scx_bpf_select_cpu_and() which is provided
 * as an inline wrapper in common.bpf.h.
 *
 * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
 * context such as a BPF test_run() call, as long as built-in CPU selection
 * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
 * is set.
 *
 * @p, @args->prev_cpu and @args->wake_flags match ops.select_cpu().
 *
 * Returns the selected idle CPU, which will be automatically awakened upon
 * returning from ops.select_cpu() and can be used for direct dispatch, or
 * a negative value if no idle CPU is available.
 */
__bpf_kfunc s32
__scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
			 struct scx_bpf_select_cpu_and_args *args)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

	return select_cpu_from_kfunc(sch, p, args->prev_cpu, args->wake_flags,
				     cpus_allowed, args->flags);
}

/*
 * COMPAT: Will be removed in v6.22.
 */
__bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
				       const struct cpumask *cpus_allowed, u64 flags)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

	return select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags,
				     cpus_allowed, flags);
}

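/*
 * Illustrative sketch (not part of this file): restricting the built-in idle
 * selection to a scheduler-provided cpumask and to fully idle cores via the
 * scx_bpf_select_cpu_and() inline wrapper from common.bpf.h. The
 * "example_big_cpumask" below is a hypothetical const struct cpumask *
 * maintained by the BPF scheduler; %SCX_PICK_IDLE_CORE limits the search to
 * wholly idle SMT cores:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
 *					     example_big_cpumask,
 *					     SCX_PICK_IDLE_CORE);
 *		if (cpu >= 0) {
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *
 *		return prev_cpu;
 *	}
 */
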
/**
 * scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the
 * idle-tracking per-CPU cpumask of a target NUMA node.
 * @node: target NUMA node
 *
 * Returns an empty cpumask if idle tracking is not enabled, if @node is
 * not valid, or if running on a UP kernel. In this case the actual error will
 * be reported to the BPF scheduler via scx_error().
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return cpu_none_mask;

	node = validate_node(sch, node);
	if (node < 0)
		return cpu_none_mask;

	return idle_cpumask(node)->cpu;
}

/**
 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
 * per-CPU cpumask.
 *
 * Returns an empty mask if idle tracking is not enabled, or if running on a
 * UP kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return cpu_none_mask;

	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
		scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
		return cpu_none_mask;
	}

	if (!check_builtin_idle_enabled(sch))
		return cpu_none_mask;

	return idle_cpumask(NUMA_NO_NODE)->cpu;
}

/**
 * scx_bpf_get_idle_smtmask_node - Get a referenced kptr to the
 * idle-tracking, per-physical-core cpumask of a target NUMA node. Can be
 * used to determine if an entire physical core is free.
 * @node: target NUMA node
 *
 * Returns an empty cpumask if idle tracking is not enabled, if @node is
 * not valid, or if running on a UP kernel. In this case the actual error will
 * be reported to the BPF scheduler via scx_error().
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return cpu_none_mask;

	node = validate_node(sch, node);
	if (node < 0)
		return cpu_none_mask;

	if (sched_smt_active())
		return idle_cpumask(node)->smt;
	else
		return idle_cpumask(node)->cpu;
}

/**
 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
 * per-physical-core cpumask. Can be used to determine if an entire physical
 * core is free.
 *
 * Returns an empty mask if idle tracking is not enabled, or if running on a
 * UP kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return cpu_none_mask;

	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
		scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
		return cpu_none_mask;
	}

	if (!check_builtin_idle_enabled(sch))
		return cpu_none_mask;

	if (sched_smt_active())
		return idle_cpumask(NUMA_NO_NODE)->smt;
	else
		return idle_cpumask(NUMA_NO_NODE)->cpu;
}

/**
 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
 * either the per-CPU or the SMT idle-tracking cpumask.
 * @idle_mask: &cpumask to use
 */
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
	/*
	 * Empty function body because we aren't actually acquiring or releasing
	 * a reference to a global idle cpumask, which is read-only in the
	 * caller and is never released. The acquire / release semantics here
	 * are just used to make the cpumask a trusted pointer in the caller.
	 */
}

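/*
 * Illustrative sketch (not part of this file): the acquire/release pattern the
 * verifier expects when a BPF scheduler inspects the idle masks, e.g. to check
 * whether @prev_cpu sits on a fully idle core before preferring it. Every
 * scx_bpf_get_idle_*() call must be paired with scx_bpf_put_idle_cpumask();
 * bpf_cpumask_test_cpu() is the generic BPF cpumask kfunc:
 *
 *	const struct cpumask *idle_smt;
 *	bool core_idle;
 *
 *	idle_smt = scx_bpf_get_idle_smtmask();
 *	core_idle = bpf_cpumask_test_cpu(prev_cpu, idle_smt);
 *	scx_bpf_put_idle_cpumask(idle_smt);
 */
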
/**
 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
 * @cpu: cpu to test and clear idle for
 *
 * Returns %true if @cpu was idle and its idle state was successfully cleared.
 * %false otherwise.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 */
__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return false;

	if (!check_builtin_idle_enabled(sch))
		return false;

	if (!ops_cpu_valid(sch, cpu, NULL))
		return false;

	return scx_idle_test_and_clear_cpu(cpu);
}

/**
 * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from @node
 * @cpus_allowed: Allowed cpumask
 * @node: target NUMA node
 * @flags: %SCX_PICK_IDLE_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
 *
 * Returns the picked idle cpu number on success, or -%EBUSY if no matching
 * cpu was found.
 *
 * The search starts from @node and proceeds to other online NUMA nodes in
 * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
 * in which case the search is limited to the target @node).
 *
 * Always returns an error if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set, or if
 * %SCX_OPS_BUILTIN_IDLE_PER_NODE is not set.
 */
__bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
					   int node, u64 flags)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

	node = validate_node(sch, node);
	if (node < 0)
		return node;

	return scx_pick_idle_cpu(cpus_allowed, node, flags);
}

/**
 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
 * number on success. -%EBUSY if no matching cpu was found.
 *
 * Idle CPU tracking may race against CPU scheduling state transitions. For
 * example, this function may return -%EBUSY as CPUs are transitioning into the
 * idle state. If the caller then assumes that there will be dispatch events on
 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
 * event in the near future.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 *
 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
 * scx_bpf_pick_idle_cpu_node() instead.
 */
__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
				      u64 flags)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
		scx_error(sch, "per-node idle tracking is enabled");
		return -EBUSY;
	}

	if (!check_builtin_idle_enabled(sch))
		return -EBUSY;

	return scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
}

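/*
 * Illustrative sketch (not part of this file): the fallback pattern suggested
 * above for paths that must guarantee a future dispatch event. If no idle CPU
 * can be claimed, pick any allowed CPU and kick it with scx_bpf_kick_cpu()
 * (defined in ext.c). On %SCX_OPS_BUILTIN_IDLE_PER_NODE schedulers the
 * _node() variants would be used instead. The "example" helper name is
 * hypothetical:
 *
 *	static s32 example_pick_target(struct task_struct *p)
 *	{
 *		s32 cpu;
 *
 *		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *		if (cpu < 0) {
 *			cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 *			if (cpu >= 0)
 *				scx_bpf_kick_cpu(cpu, 0);
 *		}
 *
 *		return cpu;
 *	}
 */
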
/**
 * scx_bpf_pick_any_cpu_node - Pick and claim an idle cpu if available
 *			       or pick any CPU from @node
 * @cpus_allowed: Allowed cpumask
 * @node: target NUMA node
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
 * empty.
 *
 * The search starts from @node and proceeds to other online NUMA nodes in
 * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
 * in which case the search is limited to the target @node, regardless of
 * the CPU idle state).
 *
 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
 * set, this function can't tell which CPUs are idle and will always pick any
 * CPU.
 */
__bpf_kfunc s32 scx_bpf_pick_any_cpu_node(const struct cpumask *cpus_allowed,
					  int node, u64 flags)
{
	struct scx_sched *sch;
	s32 cpu;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

	node = validate_node(sch, node);
	if (node < 0)
		return node;

	cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
	if (cpu >= 0)
		return cpu;

	if (flags & SCX_PICK_IDLE_IN_NODE)
		cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
	else
		cpu = cpumask_any_distribute(cpus_allowed);
	if (cpu < nr_cpu_ids)
		return cpu;
	else
		return -EBUSY;
}

/**
 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_CPU_* flags
 *
 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
 * empty.
 *
 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
 * set, this function can't tell which CPUs are idle and will always pick any
 * CPU.
 *
 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
 * scx_bpf_pick_any_cpu_node() instead.
 */
__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
				     u64 flags)
{
	struct scx_sched *sch;
	s32 cpu;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
		scx_error(sch, "per-node idle tracking is enabled");
		return -EBUSY;
	}

	if (static_branch_likely(&scx_builtin_idle_enabled)) {
		cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
		if (cpu >= 0)
			return cpu;
	}

	cpu = cpumask_any_distribute(cpus_allowed);
	if (cpu < nr_cpu_ids)
		return cpu;
	else
		return -EBUSY;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_idle)
BTF_ID_FLAGS(func, scx_bpf_cpu_node)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask_node, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask_node, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
BTF_ID_FLAGS(func, __scx_bpf_select_cpu_and, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_idle)

static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
	.owner			= THIS_MODULE,
	.set			= &scx_kfunc_ids_idle,
};

int scx_idle_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle);

	return ret;
}