1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Built-in idle CPU tracking policy.
6 *
7 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
8 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
9 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
10 * Copyright (c) 2024 Andrea Righi <arighi@nvidia.com>
11 */
12 #include "ext_idle.h"
13
14 /* Enable/disable built-in idle CPU selection policy */
15 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
16
17 /* Enable/disable per-node idle cpumasks */
18 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
19
20 /* Enable/disable LLC aware optimizations */
21 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
22
23 /* Enable/disable NUMA aware optimizations */
24 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
25
26 /*
27 * cpumasks to track idle CPUs within each NUMA node.
28 *
29 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is not enabled, a single global cpumask
30 * is used to track all the idle CPUs in the system.
31 */
32 struct scx_idle_cpus {
33 cpumask_var_t cpu;
34 cpumask_var_t smt;
35 };
36
37 /*
38 * Global host-wide idle cpumasks (used when SCX_OPS_BUILTIN_IDLE_PER_NODE
39 * is not enabled).
40 */
41 static struct scx_idle_cpus scx_idle_global_masks;
42
43 /*
44 * Per-node idle cpumasks.
45 */
46 static struct scx_idle_cpus **scx_idle_node_masks;
47
48 /*
49 * Local per-CPU cpumasks (used to generate temporary idle cpumasks).
50 */
51 static DEFINE_PER_CPU(cpumask_var_t, local_idle_cpumask);
52 static DEFINE_PER_CPU(cpumask_var_t, local_llc_idle_cpumask);
53 static DEFINE_PER_CPU(cpumask_var_t, local_numa_idle_cpumask);
54
55 /*
56 * Return the idle masks associated with a target @node.
57 *
58 * NUMA_NO_NODE identifies the global idle cpumask.
59 */
60 static struct scx_idle_cpus *idle_cpumask(int node)
61 {
62 return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];
63 }
64
65 /*
66 * Returns the NUMA node ID associated with a @cpu, or NUMA_NO_NODE if
67 * per-node idle cpumasks are disabled.
68 */
69 static int scx_cpu_node_if_enabled(int cpu)
70 {
71 if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
72 return NUMA_NO_NODE;
73
74 return cpu_to_node(cpu);
75 }
76
77 static bool scx_idle_test_and_clear_cpu(int cpu)
78 {
79 int node = scx_cpu_node_if_enabled(cpu);
80 struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
81
82 #ifdef CONFIG_SCHED_SMT
83 /*
84 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
85 * cluster is not wholly idle either way. This also prevents
86 * scx_pick_idle_cpu() from getting caught in an infinite loop.
87 */
88 if (sched_smt_active()) {
89 const struct cpumask *smt = cpu_smt_mask(cpu);
90 struct cpumask *idle_smts = idle_cpumask(node)->smt;
91
92 /*
93 * If offline, @cpu is not its own sibling and
94 * scx_pick_idle_cpu() can get caught in an infinite loop as
95 * @cpu is never cleared from the idle SMT mask. Ensure that
96 * @cpu is eventually cleared.
97 *
98 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
99 * reduce memory writes, which may help alleviate cache
100 * coherence pressure.
101 */
102 if (cpumask_intersects(smt, idle_smts))
103 cpumask_andnot(idle_smts, idle_smts, smt);
104 else if (cpumask_test_cpu(cpu, idle_smts))
105 __cpumask_clear_cpu(cpu, idle_smts);
106 }
107 #endif
108
109 return cpumask_test_and_clear_cpu(cpu, idle_cpus);
110 }
111
112 /*
113 * Pick an idle CPU in a specific NUMA node.
114 */
115 static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
116 {
117 int cpu;
118
119 retry:
120 if (sched_smt_active()) {
121 cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
122 if (cpu < nr_cpu_ids)
123 goto found;
124
125 if (flags & SCX_PICK_IDLE_CORE)
126 return -EBUSY;
127 }
128
129 cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
130 if (cpu >= nr_cpu_ids)
131 return -EBUSY;
132
133 found:
134 if (scx_idle_test_and_clear_cpu(cpu))
135 return cpu;
136 else
137 goto retry;
138 }
139
140 #ifdef CONFIG_NUMA
141 /*
142 * Tracks nodes that have not yet been visited when searching for an idle
143 * CPU across all available nodes.
144 */
145 static DEFINE_PER_CPU(nodemask_t, per_cpu_unvisited);
146
147 /*
148 * Search for an idle CPU across all nodes, excluding @node.
149 */
150 static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
151 {
152 nodemask_t *unvisited;
153 s32 cpu = -EBUSY;
154
155 preempt_disable();
156 unvisited = this_cpu_ptr(&per_cpu_unvisited);
157
158 /*
159 * Restrict the search to the online nodes (excluding the current
160 * node that has been visited already).
161 */
162 nodes_copy(*unvisited, node_states[N_ONLINE]);
163 node_clear(node, *unvisited);
164
165 /*
166 * Traverse all nodes in order of increasing distance, starting
167 * from @node.
168 *
169 * This loop is O(N^2), with N being the amount of NUMA nodes,
170 * which might be quite expensive in large NUMA systems. However,
171 * this complexity comes into play only when a scheduler enables
172 * SCX_OPS_BUILTIN_IDLE_PER_NODE and it's requesting an idle CPU
173 * without specifying a target NUMA node, so it shouldn't be a
174 * bottleneck in most cases.
175 *
176 * As a future optimization we may want to cache the list of nodes
177 * in a per-node array, instead of actually traversing them every
178 * time.
179 */
180 for_each_node_numadist(node, *unvisited) {
181 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
182 if (cpu >= 0)
183 break;
184 }
185 preempt_enable();
186
187 return cpu;
188 }
189 #else
190 static inline s32
191 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
192 {
193 return -EBUSY;
194 }
195 #endif
196
197 /*
198 * Find an idle CPU in the system, starting from @node.
199 */
200 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
201 {
202 s32 cpu;
203
204 /*
205 * Always search in the starting node first (this is an
206 * optimization that can save some cycles even when the search is
207 * not limited to a single node).
208 */
209 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
210 if (cpu >= 0)
211 return cpu;
212
213 /*
214 * Stop the search if we are using only a single global cpumask
215 * (NUMA_NO_NODE) or if the search is restricted to the first node
216 * only.
217 */
218 if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)
219 return -EBUSY;
220
221 /*
222 * Extend the search to the other online nodes.
223 */
224 return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags);
225 }
226
227 /*
228 * Return the number of CPUs in the same LLC domain as @cpu (or zero if the LLC
229 * domain is not defined).
230 */
231 static unsigned int llc_weight(s32 cpu)
232 {
233 struct sched_domain *sd;
234
235 sd = rcu_dereference(per_cpu(sd_llc, cpu));
236 if (!sd)
237 return 0;
238
239 return sd->span_weight;
240 }
241
242 /*
243 * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
244 * domain is not defined).
245 */
246 static struct cpumask *llc_span(s32 cpu)
247 {
248 struct sched_domain *sd;
249
250 sd = rcu_dereference(per_cpu(sd_llc, cpu));
251 if (!sd)
252 return NULL;
253
254 return sched_domain_span(sd);
255 }
256
257 /*
258 * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
259 * NUMA domain is not defined).
260 */
261 static unsigned int numa_weight(s32 cpu)
262 {
263 struct sched_domain *sd;
264 struct sched_group *sg;
265
266 sd = rcu_dereference(per_cpu(sd_numa, cpu));
267 if (!sd)
268 return 0;
269 sg = sd->groups;
270 if (!sg)
271 return 0;
272
273 return sg->group_weight;
274 }
275
276 /*
277 * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
278 * domain is not defined).
279 */
280 static struct cpumask *numa_span(s32 cpu)
281 {
282 struct sched_domain *sd;
283 struct sched_group *sg;
284
285 sd = rcu_dereference(per_cpu(sd_numa, cpu));
286 if (!sd)
287 return NULL;
288 sg = sd->groups;
289 if (!sg)
290 return NULL;
291
292 return sched_group_span(sg);
293 }
294
295 /*
296 * Return true if the LLC domains do not perfectly overlap with the NUMA
297 * domains, false otherwise.
298 */
299 static bool llc_numa_mismatch(void)
300 {
301 int cpu;
302
303 /*
304 * We need to scan all online CPUs to verify whether their scheduling
305 * domains overlap.
306 *
307 * While it is rare to encounter architectures with asymmetric NUMA
308 * topologies, CPU hotplugging or virtualized environments can result
309 * in asymmetric configurations.
310 *
311 * For example:
312 *
313 * NUMA 0:
314 * - LLC 0: cpu0..cpu7
315 * - LLC 1: cpu8..cpu15 [offline]
316 *
317 * NUMA 1:
318 * - LLC 0: cpu16..cpu23
319 * - LLC 1: cpu24..cpu31
320 *
321 * In this case, if we only check the first online CPU (cpu0), we might
322 * incorrectly assume that the LLC and NUMA domains are fully
323 * overlapping, which is incorrect (as NUMA 1 has two distinct LLC
324 * domains).
325 */
326 for_each_online_cpu(cpu)
327 if (llc_weight(cpu) != numa_weight(cpu))
328 return true;
329
330 return false;
331 }
332
333 /*
334 * Initialize topology-aware scheduling.
335 *
336 * Detect if the system has multiple LLC or multiple NUMA domains and enable
337 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
338 * selection policy.
339 *
340 * Assumption: the kernel's internal topology representation assumes that each
341 * CPU belongs to a single LLC domain, and that each LLC domain is entirely
342 * contained within a single NUMA node.
343 */
344 void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
345 {
346 bool enable_llc = false, enable_numa = false;
347 unsigned int nr_cpus;
348 s32 cpu = cpumask_first(cpu_online_mask);
349
350 /*
351 * Enable LLC domain optimization only when there are multiple LLC
352 * domains among the online CPUs. If all online CPUs are part of a
353 * single LLC domain, the idle CPU selection logic can choose any
354 * online CPU without bias.
355 *
356 * Note that it is sufficient to check the LLC domain of the first
357 * online CPU to determine whether a single LLC domain includes all
358 * CPUs.
359 */
360 rcu_read_lock();
361 nr_cpus = llc_weight(cpu);
362 if (nr_cpus > 0) {
363 if (nr_cpus < num_online_cpus())
364 enable_llc = true;
365 pr_debug("sched_ext: LLC=%*pb weight=%u\n",
366 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
367 }
368
369 /*
370 * Enable NUMA optimization only when there are multiple NUMA domains
371 * among the online CPUs and the NUMA domains don't perfectly overlap
372 * with the LLC domains.
373 *
374 * If all CPUs belong to the same NUMA node and the same LLC domain,
375 * enabling both NUMA and LLC optimizations is unnecessary, as checking
376 * for an idle CPU in the same domain twice is redundant.
377 *
378 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
379 * optimization, as we would naturally select idle CPUs within
380 * specific NUMA nodes by querying the corresponding per-node cpumask.
381 */
382 if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
383 nr_cpus = numa_weight(cpu);
384 if (nr_cpus > 0) {
385 if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
386 enable_numa = true;
387 pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
388 cpumask_pr_args(numa_span(cpu)), nr_cpus);
389 }
390 }
391 rcu_read_unlock();
392
393 pr_debug("sched_ext: LLC idle selection %s\n",
394 str_enabled_disabled(enable_llc));
395 pr_debug("sched_ext: NUMA idle selection %s\n",
396 str_enabled_disabled(enable_numa));
397
398 if (enable_llc)
399 static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
400 else
401 static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
402 if (enable_numa)
403 static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
404 else
405 static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
406 }
407
408 /*
409 * Return true if @p can run on all possible CPUs, false otherwise.
410 */
411 static inline bool task_affinity_all(const struct task_struct *p)
412 {
413 return p->nr_cpus_allowed >= num_possible_cpus();
414 }
415
416 /*
417 * Built-in CPU idle selection policy:
418 *
419 * 1. Prioritize full-idle cores:
420 * - always prioritize CPUs from fully idle cores (both logical CPUs are
421 * idle) to avoid interference caused by SMT.
422 *
423 * 2. Reuse the same CPU:
424 * - prefer the last used CPU to take advantage of cached data (L1, L2) and
425 * branch prediction optimizations.
426 *
427 * 3. Prefer @prev_cpu's SMT sibling:
428 * - if @prev_cpu is busy and no fully idle core is available, try to
429 * place the task on an idle SMT sibling of @prev_cpu; keeping the
430 * task on the same core makes migration cheaper, preserves L1 cache
431 * locality and reduces wakeup latency.
432 *
433 * 4. Pick a CPU within the same LLC (Last-Level Cache):
434 * - if the above conditions aren't met, pick a CPU that shares the same
435 * LLC, if the LLC domain is a subset of @cpus_allowed, to maintain
436 * cache locality.
437 *
438 * 5. Pick a CPU within the same NUMA node, if enabled:
439 * - choose a CPU from the same NUMA node, if the node cpumask is a
440 * subset of @cpus_allowed, to reduce memory access latency.
441 *
442 * 6. Pick any idle CPU within the @cpus_allowed domain.
443 *
444 * Step 4 and 5 are performed only if the system has, respectively,
445 * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
446 * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
447 *
448 * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
449 * begin in @prev_cpu's node and proceed to other nodes in order of
450 * increasing distance.
451 *
452 * Return the picked CPU if idle, or a negative value otherwise.
453 *
454 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
455 * we never call ops.select_cpu() for them, see select_task_rq().
456 */
457 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
458 const struct cpumask *cpus_allowed, u64 flags)
459 {
460 const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
461 const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
462 int node = scx_cpu_node_if_enabled(prev_cpu);
463 bool is_prev_allowed;
464 s32 cpu;
465
466 preempt_disable();
467
468 /*
469 * Determine the subset of CPUs usable by @p within @cpus_allowed.
470 */
471 if (allowed != p->cpus_ptr) {
472 struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_idle_cpumask);
473
474 if (task_affinity_all(p)) {
475 allowed = cpus_allowed;
476 } else if (cpumask_and(local_cpus, cpus_allowed, p->cpus_ptr)) {
477 allowed = local_cpus;
478 } else {
479 cpu = -EBUSY;
480 goto out_enable;
481 }
482 }
483
484 /*
485 * Check whether @prev_cpu is still within the allowed set. If not,
486 * we can still try selecting a nearby CPU.
487 */
488 is_prev_allowed = cpumask_test_cpu(prev_cpu, allowed);
489
490 /*
491 * This is necessary to protect llc_cpus.
492 */
493 rcu_read_lock();
494
495 /*
496 * Determine the subset of CPUs that the task can use in its
497 * current LLC and node.
498 *
499 * If the task can run on all CPUs, use the node and LLC cpumasks
500 * directly.
501 */
502 if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa)) {
503 struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_numa_idle_cpumask);
504 const struct cpumask *cpus = numa_span(prev_cpu);
505
506 if (allowed == p->cpus_ptr && task_affinity_all(p))
507 numa_cpus = cpus;
508 else if (cpus && cpumask_and(local_cpus, allowed, cpus))
509 numa_cpus = local_cpus;
510 }
511
512 if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
513 struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_llc_idle_cpumask);
514 const struct cpumask *cpus = llc_span(prev_cpu);
515
516 if (allowed == p->cpus_ptr && task_affinity_all(p))
517 llc_cpus = cpus;
518 else if (cpus && cpumask_and(local_cpus, allowed, cpus))
519 llc_cpus = local_cpus;
520 }
521
522 /*
523 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
524 */
525 if (wake_flags & SCX_WAKE_SYNC) {
526 int waker_node;
527
528 /*
529 * If the waker's CPU is cache affine and prev_cpu is idle,
530 * then avoid a migration.
531 */
532 cpu = smp_processor_id();
533 if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
534 scx_idle_test_and_clear_cpu(prev_cpu)) {
535 cpu = prev_cpu;
536 goto out_unlock;
537 }
538
539 /*
540 * If the waker's local DSQ is empty, and the system is under
541 * utilized, try to wake up @p to the local DSQ of the waker.
542 *
543 * Checking only for an empty local DSQ is insufficient as it
544 * could give the wakee an unfair advantage when the system is
545 * oversaturated.
546 *
547 * Checking only for the presence of idle CPUs is also
548 * insufficient as the local DSQ of the waker could have tasks
549 * piled up on it even if there is an idle core elsewhere on
550 * the system.
551 */
552 waker_node = scx_cpu_node_if_enabled(cpu);
553 if (!(current->flags & PF_EXITING) &&
554 cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
555 (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
556 !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
557 if (cpumask_test_cpu(cpu, allowed))
558 goto out_unlock;
559 }
560 }
561
562 /*
563 * If CPU has SMT, any wholly idle CPU is likely a better pick than
564 * partially idle @prev_cpu.
565 */
566 if (sched_smt_active()) {
567 /*
568 * Keep using @prev_cpu if it's part of a fully idle core.
569 */
570 if (is_prev_allowed &&
571 cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
572 scx_idle_test_and_clear_cpu(prev_cpu)) {
573 cpu = prev_cpu;
574 goto out_unlock;
575 }
576
577 /*
578 * Search for any fully idle core in the same LLC domain.
579 */
580 if (llc_cpus) {
581 cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
582 if (cpu >= 0)
583 goto out_unlock;
584 }
585
586 /*
587 * Search for any fully idle core in the same NUMA node.
588 */
589 if (numa_cpus) {
590 cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
591 if (cpu >= 0)
592 goto out_unlock;
593 }
594
595 /*
596 * Search for any full-idle core usable by the task.
597 *
598 * If the node-aware idle CPU selection policy is enabled
599 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always
600 * begin in prev_cpu's node and proceed to other nodes in
601 * order of increasing distance.
602 */
603 cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
604 if (cpu >= 0)
605 goto out_unlock;
606
607 /*
608 * Give up if we're strictly looking for a full-idle SMT
609 * core.
610 */
611 if (flags & SCX_PICK_IDLE_CORE) {
612 cpu = -EBUSY;
613 goto out_unlock;
614 }
615 }
616
617 /*
618 * Use @prev_cpu if it's idle.
619 */
620 if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
621 cpu = prev_cpu;
622 goto out_unlock;
623 }
624
625 #ifdef CONFIG_SCHED_SMT
626 /*
627 * Use @prev_cpu's sibling if it's idle.
628 */
629 if (sched_smt_active()) {
630 for_each_cpu_and(cpu, cpu_smt_mask(prev_cpu), allowed) {
631 if (cpu == prev_cpu)
632 continue;
633 if (scx_idle_test_and_clear_cpu(cpu))
634 goto out_unlock;
635 }
636 }
637 #endif
638
639 /*
640 * Search for any idle CPU in the same LLC domain.
641 */
642 if (llc_cpus) {
643 cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
644 if (cpu >= 0)
645 goto out_unlock;
646 }
647
648 /*
649 * Search for any idle CPU in the same NUMA node.
650 */
651 if (numa_cpus) {
652 cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
653 if (cpu >= 0)
654 goto out_unlock;
655 }
656
657 /*
658 * Search for any idle CPU usable by the task.
659 *
660 * If the node-aware idle CPU selection policy is enabled
661 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always begin
662 * in prev_cpu's node and proceed to other nodes in order of
663 * increasing distance.
664 */
665 cpu = scx_pick_idle_cpu(allowed, node, flags);
666
667 out_unlock:
668 rcu_read_unlock();
669 out_enable:
670 preempt_enable();
671
672 return cpu;
673 }
674
675 /*
676 * Initialize global and per-node idle cpumasks.
677 */
678 void scx_idle_init_masks(void)
679 {
680 int i;
681
682 /* Allocate global idle cpumasks */
683 BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
684 BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));
685
686 /* Allocate per-node idle cpumasks (use nr_node_ids for non-contiguous NUMA nodes) */
687 scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks, nr_node_ids);
688 BUG_ON(!scx_idle_node_masks);
689
690 for_each_node(i) {
691 scx_idle_node_masks[i] = kzalloc_node(sizeof(**scx_idle_node_masks),
692 GFP_KERNEL, i);
693 BUG_ON(!scx_idle_node_masks[i]);
694
695 BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i));
696 BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt, GFP_KERNEL, i));
697 }
698
699 /* Allocate local per-cpu idle cpumasks */
700 for_each_possible_cpu(i) {
701 BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_idle_cpumask, i),
702 GFP_KERNEL, cpu_to_node(i)));
703 BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_llc_idle_cpumask, i),
704 GFP_KERNEL, cpu_to_node(i)));
705 BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_numa_idle_cpumask, i),
706 GFP_KERNEL, cpu_to_node(i)));
707 }
708 }
709
710 static void update_builtin_idle(int cpu, bool idle)
711 {
712 int node = scx_cpu_node_if_enabled(cpu);
713 struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
714
715 assign_cpu(cpu, idle_cpus, idle);
716
717 #ifdef CONFIG_SCHED_SMT
718 if (sched_smt_active()) {
719 const struct cpumask *smt = cpu_smt_mask(cpu);
720 struct cpumask *idle_smts = idle_cpumask(node)->smt;
721
722 if (idle) {
723 /*
724 * idle_smt handling is racy but that's fine as it's
725 * only for optimization and self-correcting.
726 */
727 if (!cpumask_subset(smt, idle_cpus))
728 return;
729 cpumask_or(idle_smts, idle_smts, smt);
730 } else {
731 cpumask_andnot(idle_smts, idle_smts, smt);
732 }
733 }
734 #endif
735 }
736
737 /*
738 * Update the idle state of a CPU to @idle.
739 *
740 * If @do_notify is true, ops.update_idle() is invoked to notify the scx
741 * scheduler of an actual idle state transition (idle to busy or vice
742 * versa). If @do_notify is false, only the idle state in the idle masks is
743 * refreshed without invoking ops.update_idle().
744 *
745 * This distinction is necessary, because an idle CPU can be "reserved" and
746 * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
747 * busy even if no tasks are dispatched. In this case, the CPU may return
748 * to idle without a true state transition. Refreshing the idle masks
749 * without invoking ops.update_idle() ensures accurate idle state tracking
750 * while avoiding unnecessary updates and maintaining balanced state
751 * transitions.
752 */
753 void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
754 {
755 struct scx_sched *sch = scx_root;
756 int cpu = cpu_of(rq);
757
758 lockdep_assert_rq_held(rq);
759
760 /*
761 * Update the idle masks:
762 * - for real idle transitions (do_notify == true)
763 * - for idle-to-idle transitions (indicated by the previous task
764 * being the idle thread, managed by pick_task_idle())
765 *
766 * Skip updating idle masks if the previous task is not the idle
767 * thread, since set_next_task_idle() has already handled it when
768 * transitioning from a task to the idle thread (calling this
769 * function with do_notify == true).
770 *
771 * In this way we can avoid updating the idle masks twice,
772 * unnecessarily.
773 */
774 if (static_branch_likely(&scx_builtin_idle_enabled))
775 if (do_notify || is_idle_task(rq->curr))
776 update_builtin_idle(cpu, idle);
777
778 /*
779 * Trigger ops.update_idle() only when transitioning from a task to
780 * the idle thread and vice versa.
781 *
782 * Idle transitions are indicated by do_notify being set to true,
783 * managed by put_prev_task_idle()/set_next_task_idle().
784 *
785 * This must come after builtin idle update so that BPF schedulers can
786 * create interlocking between ops.update_idle() and ops.enqueue() -
787 * either enqueue() sees the idle bit or update_idle() sees the task
788 * that enqueue() queued.
789 */
790 if (SCX_HAS_OP(sch, update_idle) && do_notify &&
791 !scx_bypassing(sch, cpu_of(rq)))
792 SCX_CALL_OP(sch, update_idle, rq, cpu_of(rq), idle);
793 }
794
795 static void reset_idle_masks(struct sched_ext_ops *ops)
796 {
797 int node;
798
799 /*
800 * Consider all online cpus idle. Should converge to the actual state
801 * quickly.
802 */
803 if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
804 cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
805 cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);
806 return;
807 }
808
809 for_each_node(node) {
810 const struct cpumask *node_mask = cpumask_of_node(node);
811
812 cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
813 cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
814 }
815 }
816
817 void scx_idle_enable(struct sched_ext_ops *ops)
818 {
819 if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
820 static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
821 else
822 static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
823
824 if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
825 static_branch_enable_cpuslocked(&scx_builtin_idle_per_node);
826 else
827 static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
828
829 reset_idle_masks(ops);
830 }
831
832 void scx_idle_disable(void)
833 {
834 static_branch_disable(&scx_builtin_idle_enabled);
835 static_branch_disable(&scx_builtin_idle_per_node);
836 }
837
838 /********************************************************************************
839 * Helpers that can be called from the BPF scheduler.
840 */
841
842 static int validate_node(struct scx_sched *sch, int node)
843 {
844 if (!static_branch_likely(&scx_builtin_idle_per_node)) {
845 scx_error(sch, "per-node idle tracking is disabled");
846 return -EOPNOTSUPP;
847 }
848
849 /* Return no entry for NUMA_NO_NODE (not a critical scx error) */
850 if (node == NUMA_NO_NODE)
851 return -ENOENT;
852
853 /* Make sure node is in a valid range */
854 if (node < 0 || node >= nr_node_ids) {
855 scx_error(sch, "invalid node %d", node);
856 return -EINVAL;
857 }
858
859 /* Make sure the node is part of the set of possible nodes */
860 if (!node_possible(node)) {
861 scx_error(sch, "unavailable node %d", node);
862 return -EINVAL;
863 }
864
865 return node;
866 }
867
868 __bpf_kfunc_start_defs();
869
870 static bool check_builtin_idle_enabled(struct scx_sched *sch)
871 {
872 if (static_branch_likely(&scx_builtin_idle_enabled))
873 return true;
874
875 scx_error(sch, "built-in idle tracking is disabled");
876 return false;
877 }
878
879 /*
880 * Determine whether @p is a migration-disabled task in the context of BPF
881 * code.
882 *
883 * We can't simply check whether @p->migration_disabled is set in a
884 * sched_ext callback, because the BPF prolog (__bpf_prog_enter) may disable
885 * migration for the current task while running BPF code.
886 *
887 * Since the BPF prolog calls migrate_disable() only when CONFIG_PREEMPT_RCU
888 * is enabled (via rcu_read_lock_dont_migrate()), migration_disabled == 1 for
889 * the current task is ambiguous only in that case: it could be from the BPF
890 * prolog rather than a real migrate_disable() call.
891 *
892 * Without CONFIG_PREEMPT_RCU, the BPF prolog never calls migrate_disable(),
893 * so migration_disabled == 1 always means the task is truly
894 * migration-disabled.
895 *
896 * Therefore, when migration_disabled == 1 and CONFIG_PREEMPT_RCU is enabled,
897 * check whether @p is the current task or not: if it is, then migration was
898 * not disabled before entering the callback, otherwise migration was disabled.
899 *
900 * Returns true if @p is migration-disabled, false otherwise.
901 */
902 static bool is_bpf_migration_disabled(const struct task_struct *p)
903 {
904 if (p->migration_disabled == 1) {
905 if (IS_ENABLED(CONFIG_PREEMPT_RCU))
906 return p != current;
907 return true;
908 }
909 return p->migration_disabled;
910 }
911
912 static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
913 s32 prev_cpu, u64 wake_flags,
914 const struct cpumask *allowed, u64 flags)
915 {
916 unsigned long irq_flags;
917 bool we_locked = false;
918 s32 cpu;
919
920 if (!ops_cpu_valid(sch, prev_cpu, NULL))
921 return -EINVAL;
922
923 if (!check_builtin_idle_enabled(sch))
924 return -EBUSY;
925
926 /*
927 * Accessing p->cpus_ptr / p->nr_cpus_allowed needs either @p's rq
928 * lock or @p's pi_lock. Three cases:
929 *
930 * - inside ops.select_cpu(): try_to_wake_up() holds the wake-up
931 * task's pi_lock; the wake-up task is recorded in kf_tasks[0]
932 * by SCX_CALL_OP_TASK_RET().
933 * - other rq-locked SCX op: scx_locked_rq() points at the held rq.
934 * - truly unlocked (UNLOCKED ops, SYSCALL, non-SCX struct_ops):
935 * nothing held, take pi_lock ourselves.
936 *
937 * In the first two cases, BPF schedulers may pass an arbitrary task
938 * that the held lock doesn't cover. Refuse those.
939 */
940 if (this_rq()->scx.in_select_cpu) {
941 if (!scx_kf_arg_task_ok(sch, p))
942 return -EINVAL;
943 lockdep_assert_held(&p->pi_lock);
944 } else if (scx_locked_rq()) {
945 if (task_rq(p) != scx_locked_rq())
946 goto cross_task;
947 } else {
948 raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
949 we_locked = true;
950 }
951
952 /*
953 * This may also be called from ops.enqueue(), so we need to handle
954 * per-CPU tasks as well. For these tasks, we can skip all idle CPU
955 * selection optimizations and simply check whether the previously
956 * used CPU is idle and within the allowed cpumask.
957 */
958 if (p->nr_cpus_allowed == 1 || is_bpf_migration_disabled(p)) {
959 if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
960 scx_idle_test_and_clear_cpu(prev_cpu))
961 cpu = prev_cpu;
962 else
963 cpu = -EBUSY;
964 } else {
965 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
966 allowed ?: p->cpus_ptr, flags);
967 }
968
969 if (we_locked)
970 raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
971
972 return cpu;
973
974 cross_task:
975 scx_error(sch, "select_cpu kfunc called cross-task on %s[%d]",
976 p->comm, p->pid);
977 return -EINVAL;
978 }
979
980 /**
981 * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
982 * trigger an error if @cpu is invalid
983 * @cpu: target CPU
984 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
985 */
986 __bpf_kfunc s32 scx_bpf_cpu_node(s32 cpu, const struct bpf_prog_aux *aux)
987 {
988 struct scx_sched *sch;
989
990 guard(rcu)();
991
992 sch = scx_prog_sched(aux);
993 if (unlikely(!sch) || !ops_cpu_valid(sch, cpu, NULL))
994 return NUMA_NO_NODE;
995 return cpu_to_node(cpu);
996 }
997
998 /**
999 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
1000 * @p: task_struct to select a CPU for
1001 * @prev_cpu: CPU @p was on previously
1002 * @wake_flags: %SCX_WAKE_* flags
1003 * @is_idle: out parameter indicating whether the returned CPU is idle
1004 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1005 *
1006 * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
1007 * context such as a BPF test_run() call, as long as built-in CPU selection
1008 * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
1009 * is set.
1010 *
1011 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
1012 * currently idle and thus a good candidate for direct dispatching.
1013 */
1014 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
1015 u64 wake_flags, bool *is_idle,
1016 const struct bpf_prog_aux *aux)
1017 {
1018 struct scx_sched *sch;
1019 s32 cpu;
1020
1021 guard(rcu)();
1022
1023 sch = scx_prog_sched(aux);
1024 if (unlikely(!sch))
1025 return -ENODEV;
1026
1027 cpu = select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags, NULL, 0);
1028 if (cpu >= 0) {
1029 *is_idle = true;
1030 return cpu;
1031 }
1032 *is_idle = false;
1033 return prev_cpu;
1034 }
1035
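/*
 * Illustrative sketch (not part of this file): how a minimal BPF scheduler's
 * ops.select_cpu() might use scx_bpf_select_cpu_dfl(). From BPF the implicit
 * @aux argument is hidden, so the kfunc is called with four arguments. The
 * direct dispatch below assumes the scx_bpf_dsq_insert() kfunc and the
 * BPF_STRUCT_OPS() / SCX_SLICE_DFL helpers from the scx BPF headers.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *
 *		return cpu;
 *	}
 */
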
1036 struct scx_bpf_select_cpu_and_args {
1037 /* @p and @cpus_allowed can't be packed together as KF_RCU is not transitive */
1038 s32 prev_cpu;
1039 u64 wake_flags;
1040 u64 flags;
1041 };
1042
1043 /**
1044 * __scx_bpf_select_cpu_and - Arg-wrapped CPU selection with cpumask
1045 * @p: task_struct to select a CPU for
1046 * @cpus_allowed: cpumask of allowed CPUs
1047 * @args: struct containing the rest of the arguments
1048 * @args->prev_cpu: CPU @p was on previously
1049 * @args->wake_flags: %SCX_WAKE_* flags
1050 * @args->flags: %SCX_PICK_IDLE* flags
1051 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1052 *
1053 * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument
1054 * limit. BPF programs should use scx_bpf_select_cpu_and() which is provided
1055 * as an inline wrapper in common.bpf.h.
1056 *
1057 * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
1058 * context such as a BPF test_run() call, as long as built-in CPU selection
1059 * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
1060 * is set.
1061 *
1062 * @p, @args->prev_cpu and @args->wake_flags match ops.select_cpu().
1063 *
1064 * Returns the selected idle CPU, which will be automatically awakened upon
1065 * returning from ops.select_cpu() and can be used for direct dispatch, or
1066 * a negative value if no idle CPU is available.
1067 */
1068 __bpf_kfunc s32
1069 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
1070 struct scx_bpf_select_cpu_and_args *args,
1071 const struct bpf_prog_aux *aux)
1072 {
1073 struct scx_sched *sch;
1074
1075 guard(rcu)();
1076
1077 sch = scx_prog_sched(aux);
1078 if (unlikely(!sch))
1079 return -ENODEV;
1080
1081 return select_cpu_from_kfunc(sch, p, args->prev_cpu, args->wake_flags,
1082 cpus_allowed, args->flags);
1083 }
1084
1085 /*
1086 * COMPAT: Will be removed in v6.22.
1087 */
1088 __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
1089 const struct cpumask *cpus_allowed, u64 flags)
1090 {
1091 struct scx_sched *sch;
1092
1093 guard(rcu)();
1094
1095 sch = rcu_dereference(scx_root);
1096 if (unlikely(!sch))
1097 return -ENODEV;
1098
1099 #ifdef CONFIG_EXT_SUB_SCHED
1100 /*
1101 * Disallow if any sub-scheds are attached. There is no way to tell
1102 * which scheduler called us, just error out @p's scheduler.
1103 */
1104 if (unlikely(!list_empty(&sch->children))) {
1105 scx_error(scx_task_sched(p), "__scx_bpf_select_cpu_and() must be used");
1106 return -EINVAL;
1107 }
1108 #endif
1109
1110 return select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags,
1111 cpus_allowed, flags);
1112 }
1113
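/*
 * Illustrative sketch (not part of this file): calling the
 * scx_bpf_select_cpu_and() inline wrapper from common.bpf.h, restricting the
 * search to @p's affinity and to fully idle SMT cores. The picked CPU is
 * awakened on return from ops.select_cpu(), so the task can be dispatched
 * directly; kfunc and macro names outside this file are assumptions based on
 * the scx BPF headers.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
 *					     p->cpus_ptr, SCX_PICK_IDLE_CORE);
 *		if (cpu >= 0) {
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *
 *		return prev_cpu;
 *	}
 */
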
1114 /**
1115 * scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the
1116 * idle-tracking per-CPU cpumask of a target NUMA node.
1117 * @node: target NUMA node
1118 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1119 *
1120 * Returns an empty cpumask if idle tracking is not enabled, if @node is
1121 * not valid, or running on a UP kernel. In this case the actual error will
1122 * be reported to the BPF scheduler via scx_error().
1123 */
1124 __bpf_kfunc const struct cpumask *
1125 scx_bpf_get_idle_cpumask_node(s32 node, const struct bpf_prog_aux *aux)
1126 {
1127 struct scx_sched *sch;
1128
1129 guard(rcu)();
1130
1131 sch = scx_prog_sched(aux);
1132 if (unlikely(!sch))
1133 return cpu_none_mask;
1134
1135 node = validate_node(sch, node);
1136 if (node < 0)
1137 return cpu_none_mask;
1138
1139 return idle_cpumask(node)->cpu;
1140 }
1141
1142 /**
1143 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
1144 * per-CPU cpumask.
1145 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1146 *
1147 * Returns an empty mask if idle tracking is not enabled, or running on a
1148 * UP kernel.
1149 */
1150 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(const struct bpf_prog_aux *aux)
1151 {
1152 struct scx_sched *sch;
1153
1154 guard(rcu)();
1155
1156 sch = scx_prog_sched(aux);
1157 if (unlikely(!sch))
1158 return cpu_none_mask;
1159
1160 if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
1161 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
1162 return cpu_none_mask;
1163 }
1164
1165 if (!check_builtin_idle_enabled(sch))
1166 return cpu_none_mask;
1167
1168 return idle_cpumask(NUMA_NO_NODE)->cpu;
1169 }
1170
1171 /**
1172 * scx_bpf_get_idle_smtmask_node - Get a referenced kptr to the
1173 * idle-tracking, per-physical-core cpumask of a target NUMA node. Can be
1174 * used to determine if an entire physical core is free.
1175 * @node: target NUMA node
1176 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1177 *
1178 * Returns an empty cpumask if idle tracking is not enabled, if @node is
1179 * not valid, or running on a UP kernel. In this case the actual error will
1180 * be reported to the BPF scheduler via scx_error().
1181 */
1182 __bpf_kfunc const struct cpumask *
1183 scx_bpf_get_idle_smtmask_node(s32 node, const struct bpf_prog_aux *aux)
1184 {
1185 struct scx_sched *sch;
1186
1187 guard(rcu)();
1188
1189 sch = scx_prog_sched(aux);
1190 if (unlikely(!sch))
1191 return cpu_none_mask;
1192
1193 node = validate_node(sch, node);
1194 if (node < 0)
1195 return cpu_none_mask;
1196
1197 if (sched_smt_active())
1198 return idle_cpumask(node)->smt;
1199 else
1200 return idle_cpumask(node)->cpu;
1201 }
1202
1203 /**
1204 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
1205 * per-physical-core cpumask. Can be used to determine if an entire physical
1206 * core is free.
1207 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1208 *
1209 * Returns an empty mask if idle tracking is not enabled, or running on a
1210 * UP kernel.
1211 */
1212 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(const struct bpf_prog_aux *aux)
1213 {
1214 struct scx_sched *sch;
1215
1216 guard(rcu)();
1217
1218 sch = scx_prog_sched(aux);
1219 if (unlikely(!sch))
1220 return cpu_none_mask;
1221
1222 if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
1223 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
1224 return cpu_none_mask;
1225 }
1226
1227 if (!check_builtin_idle_enabled(sch))
1228 return cpu_none_mask;
1229
1230 if (sched_smt_active())
1231 return idle_cpumask(NUMA_NO_NODE)->smt;
1232 else
1233 return idle_cpumask(NUMA_NO_NODE)->cpu;
1234 }
1235
1236 /**
1237 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
1238 * either the percpu, or SMT idle-tracking cpumask.
1239 * @idle_mask: &cpumask to use
1240 */
1241 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
1242 {
1243 /*
1244 * Empty function body because we aren't actually acquiring or releasing
1245 * a reference to a global idle cpumask, which is read-only in the
1246 * caller and is never released. The acquire / release semantics here
1247 * are just used to make the cpumask a trusted pointer in the caller.
1248 */
1249 }
1250
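/*
 * Illustrative sketch (not part of this file): the expected acquire / use /
 * release pattern from BPF code. bpf_cpumask_test_cpu() is the generic BPF
 * cpumask kfunc; as noted above, the "release" only drops the trusted
 * pointer, the underlying mask is never freed.
 *
 *	const struct cpumask *idle;
 *	bool prev_is_idle;
 *
 *	idle = scx_bpf_get_idle_cpumask();
 *	prev_is_idle = bpf_cpumask_test_cpu(prev_cpu, idle);
 *	scx_bpf_put_idle_cpumask(idle);
 */
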
1251 /**
1252 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
1253 * @cpu: cpu to test and clear idle for
1254 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1255 *
1256 * Returns %true if @cpu was idle and its idle state was successfully cleared.
1257 * %false otherwise.
1258 *
1259 * Unavailable if ops.update_idle() is implemented and
1260 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
1261 */
1262 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu, const struct bpf_prog_aux *aux)
1263 {
1264 struct scx_sched *sch;
1265
1266 guard(rcu)();
1267
1268 sch = scx_prog_sched(aux);
1269 if (unlikely(!sch))
1270 return false;
1271
1272 if (!check_builtin_idle_enabled(sch))
1273 return false;
1274
1275 if (!ops_cpu_valid(sch, cpu, NULL))
1276 return false;
1277
1278 return scx_idle_test_and_clear_cpu(cpu);
1279 }
1280
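/*
 * Illustrative sketch (not part of this file): reserving @prev_cpu from
 * ops.enqueue() and waking it up. SCX_DSQ_LOCAL_ON, scx_bpf_dsq_insert() and
 * scx_bpf_kick_cpu() are assumed from the wider sched_ext API.
 *
 *	if (scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | prev_cpu,
 *				   SCX_SLICE_DFL, 0);
 *		scx_bpf_kick_cpu(prev_cpu, SCX_KICK_IDLE);
 *	}
 */
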
1281 /**
1282 * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from @node
1283 * @cpus_allowed: Allowed cpumask
1284 * @node: target NUMA node
1285 * @flags: %SCX_PICK_IDLE_* flags
1286 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1287 *
1288 * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
1289 *
1290 * Returns the picked idle cpu number on success, or -%EBUSY if no matching
1291 * cpu was found.
1292 *
1293 * The search starts from @node and proceeds to other online NUMA nodes in
1294 * order of increasing distance (unless SCX_PICK_IDLE_IN_NODE is specified,
1295 * in which case the search is limited to the target @node).
1296 *
1297 * Always returns an error if ops.update_idle() is implemented and
1298 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set, or if
1299 * %SCX_OPS_BUILTIN_IDLE_PER_NODE is not set.
1300 */
1301 __bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
1302 s32 node, u64 flags,
1303 const struct bpf_prog_aux *aux)
1304 {
1305 struct scx_sched *sch;
1306
1307 guard(rcu)();
1308
1309 sch = scx_prog_sched(aux);
1310 if (unlikely(!sch))
1311 return -ENODEV;
1312
1313 node = validate_node(sch, node);
1314 if (node < 0)
1315 return node;
1316
1317 return scx_pick_idle_cpu(cpus_allowed, node, flags);
1318 }
1319
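/*
 * Illustrative sketch (not part of this file): restricting the search to the
 * NUMA node of @prev_cpu, using scx_bpf_cpu_node() defined above.
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu_node(p->cpus_ptr,
 *					     scx_bpf_cpu_node(prev_cpu),
 *					     SCX_PICK_IDLE_IN_NODE);
 */
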
1320 /**
1321 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
1322 * @cpus_allowed: Allowed cpumask
1323 * @flags: %SCX_PICK_IDLE_CPU_* flags
1324 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1325 *
1326 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
1327 * number on success. -%EBUSY if no matching cpu was found.
1328 *
1329 * Idle CPU tracking may race against CPU scheduling state transitions. For
1330 * example, this function may return -%EBUSY as CPUs are transitioning into the
1331 * idle state. If the caller then assumes that there will be dispatch events on
1332 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
1333 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
1334 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
1335 * event in the near future.
1336 *
1337 * Unavailable if ops.update_idle() is implemented and
1338 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
1339 *
1340 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
1341 * scx_bpf_pick_idle_cpu_node() instead.
1342 */
1343 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
1344 u64 flags, const struct bpf_prog_aux *aux)
1345 {
1346 struct scx_sched *sch;
1347
1348 guard(rcu)();
1349
1350 sch = scx_prog_sched(aux);
1351 if (unlikely(!sch))
1352 return -ENODEV;
1353
1354 if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
1355 scx_error(sch, "per-node idle tracking is enabled");
1356 return -EBUSY;
1357 }
1358
1359 if (!check_builtin_idle_enabled(sch))
1360 return -EBUSY;
1361
1362 return scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
1363 }
1364
1365 /**
1366 * scx_bpf_pick_any_cpu_node - Pick and claim an idle cpu if available
1367 * or pick any CPU from @node
1368 * @cpus_allowed: Allowed cpumask
1369 * @node: target NUMA node
1370 * @flags: %SCX_PICK_IDLE_CPU_* flags
1371 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1372 *
1373 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
1374 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
1375 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
1376 * empty.
1377 *
1378 * The search starts from @node and proceeds to other online NUMA nodes in
1379 * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
1380 * in which case the search is limited to the target @node, regardless of
1381 * the CPU idle state).
1382 *
1383 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
1384 * set, this function can't tell which CPUs are idle and will always pick any
1385 * CPU.
1386 */
1387 __bpf_kfunc s32 scx_bpf_pick_any_cpu_node(const struct cpumask *cpus_allowed,
1388 s32 node, u64 flags,
1389 const struct bpf_prog_aux *aux)
1390 {
1391 struct scx_sched *sch;
1392 s32 cpu;
1393
1394 guard(rcu)();
1395
1396 sch = scx_prog_sched(aux);
1397 if (unlikely(!sch))
1398 return -ENODEV;
1399
1400 node = validate_node(sch, node);
1401 if (node < 0)
1402 return node;
1403
1404 cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
1405 if (cpu >= 0)
1406 return cpu;
1407
1408 if (flags & SCX_PICK_IDLE_IN_NODE)
1409 cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
1410 else
1411 cpu = cpumask_any_distribute(cpus_allowed);
1412 if (cpu < nr_cpu_ids)
1413 return cpu;
1414 else
1415 return -EBUSY;
1416 }
1417
1418 /**
1419 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
1420 * @cpus_allowed: Allowed cpumask
1421 * @flags: %SCX_PICK_IDLE_CPU_* flags
1422 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
1423 *
1424 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
1425 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
1426 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
1427 * empty.
1428 *
1429 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
1430 * set, this function can't tell which CPUs are idle and will always pick any
1431 * CPU.
1432 *
1433 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
1434 * scx_bpf_pick_any_cpu_node() instead.
1435 */
1436 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
1437 u64 flags, const struct bpf_prog_aux *aux)
1438 {
1439 struct scx_sched *sch;
1440 s32 cpu;
1441
1442 guard(rcu)();
1443
1444 sch = scx_prog_sched(aux);
1445 if (unlikely(!sch))
1446 return -ENODEV;
1447
1448 if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
1449 scx_error(sch, "per-node idle tracking is enabled");
1450 return -EBUSY;
1451 }
1452
1453 if (static_branch_likely(&scx_builtin_idle_enabled)) {
1454 cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
1455 if (cpu >= 0)
1456 return cpu;
1457 }
1458
1459 cpu = cpumask_any_distribute(cpus_allowed);
1460 if (cpu < nr_cpu_ids)
1461 return cpu;
1462 else
1463 return -EBUSY;
1464 }
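
/*
 * Illustrative sketch (not part of this file): the fallback pattern suggested
 * above for ops.enqueue(), guaranteeing a dispatch event even when the idle
 * tracking races with CPUs going idle. scx_bpf_dsq_insert() and
 * scx_bpf_kick_cpu() are assumed from the wider sched_ext API.
 *
 *	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *	if (cpu < 0)
 *		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 *	if (cpu >= 0) {
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0);
 *		scx_bpf_kick_cpu(cpu, 0);
 *	}
 */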
1465
1466 __bpf_kfunc_end_defs();
1467
1468 BTF_KFUNCS_START(scx_kfunc_ids_idle)
1469 BTF_ID_FLAGS(func, scx_bpf_cpu_node, KF_IMPLICIT_ARGS)
1470 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask_node, KF_IMPLICIT_ARGS | KF_ACQUIRE)
1471 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_IMPLICIT_ARGS | KF_ACQUIRE)
1472 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask_node, KF_IMPLICIT_ARGS | KF_ACQUIRE)
1473 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_IMPLICIT_ARGS | KF_ACQUIRE)
1474 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
1475 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle, KF_IMPLICIT_ARGS)
1476 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_IMPLICIT_ARGS | KF_RCU)
1477 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_IMPLICIT_ARGS | KF_RCU)
1478 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_IMPLICIT_ARGS | KF_RCU)
1479 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_IMPLICIT_ARGS | KF_RCU)
1480 BTF_KFUNCS_END(scx_kfunc_ids_idle)
1481
1482 static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
1483 .owner = THIS_MODULE,
1484 .set = &scx_kfunc_ids_idle,
1485 .filter = scx_kfunc_context_filter,
1486 };
1487
1488 /*
1489 * The select_cpu kfuncs internally call task_rq_lock() when invoked from an
1490 * rq-unlocked context, and thus cannot be safely called from arbitrary tracing
1491 * contexts where @p's pi_lock state is unknown. Keep them out of
1492 * BPF_PROG_TYPE_TRACING by registering them in their own set which is exposed
1493 * only to STRUCT_OPS and SYSCALL programs.
1494 *
1495 * These kfuncs are also members of scx_kfunc_ids_unlocked (see ext.c) because
1496 * they're callable from unlocked contexts in addition to ops.select_cpu() and
1497 * ops.enqueue().
1498 */
1499 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
1500 BTF_ID_FLAGS(func, __scx_bpf_select_cpu_and, KF_IMPLICIT_ARGS | KF_RCU)
1501 BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
1502 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_IMPLICIT_ARGS | KF_RCU)
1503 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
1504
1505 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
1506 .owner = THIS_MODULE,
1507 .set = &scx_kfunc_ids_select_cpu,
1508 .filter = scx_kfunc_context_filter,
1509 };
1510
1511 int scx_idle_init(void)
1512 {
1513 int ret;
1514
1515 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
1516 register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
1517 register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle) ||
1518 register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_select_cpu) ||
1519 register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_select_cpu);
1520
1521 return ret;
1522 }
1523