/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A CPU0 scheduler.
 *
 * This scheduler queues all tasks to a shared DSQ and only dispatches them on
 * CPU0 in FIFO order. This is useful for testing bypass behavior when many
 * tasks are concentrated on a single CPU. If the load balancer doesn't work,
 * bypass mode can trigger task hangs or RCU stalls as the queue is long and
 * there's only one CPU working on it.
 *
 * It also demonstrates the following niceties:
 *
 * - Statistics tracking how many tasks are queued to local and CPU0 DSQs.
 * - Termination notification for userspace.
 *
 * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

const volatile u32 nr_cpus = 32;	/* !0 for veristat, set during init */

UEI_DEFINE(uei);

/*
 * We create a custom DSQ with ID 0 that we dispatch to and consume from on
 * CPU0.
 */
#define DSQ_CPU0 0

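/*
 * Per-CPU counters for the two queueing paths ([local, cpu0]). Each CPU bumps
 * its own copy locklessly; a reader is expected to sum the values across all
 * CPUs, as sketched at the end of this file.
 */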
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
	__uint(max_entries, 2);			/* [local, cpu0] */
} stats SEC(".maps");

static void stat_inc(u32 idx)
{
	u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx);
	if (cnt_p)
		(*cnt_p)++;
}

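/*
 * Always pick CPU0 on wakeup. Tasks which can't run on CPU0, e.g. due to
 * their allowed cpumask, are queued on their current CPU by cpu0_enqueue()
 * below.
 */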
s32 BPF_STRUCT_OPS(cpu0_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	return 0;
}

void BPF_STRUCT_OPS(cpu0_enqueue, struct task_struct *p, u64 enq_flags)
{
	/*
	 * select_cpu() always picks CPU0. If @p is not on CPU0, it can't run
	 * on CPU0. Queue it on whichever CPU it's currently on.
	 */
	if (scx_bpf_task_cpu(p) != 0) {
		stat_inc(0);	/* count local queueing */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
		return;
	}

	stat_inc(1);	/* count cpu0 queueing */
	scx_bpf_dsq_insert(p, DSQ_CPU0, SCX_SLICE_DFL, enq_flags);
}

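/*
 * Only CPU0 consumes the shared DSQ. Every other CPU runs just the tasks
 * sitting in its own local DSQ.
 */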
void BPF_STRUCT_OPS(cpu0_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == 0)
		scx_bpf_dsq_move_to_local(DSQ_CPU0);
}

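/*
 * Create the shared DSQ on scheduler load. The second argument is the NUMA
 * node; -1 means no node preference.
 */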
s32 BPF_STRUCT_OPS_SLEEPABLE(cpu0_init)
{
	return scx_bpf_create_dsq(DSQ_CPU0, -1);
}

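/*
 * Record the exit info so that the userspace loader can report why the
 * scheduler was unloaded.
 */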
void BPF_STRUCT_OPS(cpu0_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

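/*
 * Register the callbacks. Operations left unspecified here fall back to the
 * sched_ext defaults.
 */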
SCX_OPS_DEFINE(cpu0_ops,
	       .select_cpu		= (void *)cpu0_select_cpu,
	       .enqueue			= (void *)cpu0_enqueue,
	       .dispatch		= (void *)cpu0_dispatch,
	       .init			= (void *)cpu0_init,
	       .exit			= (void *)cpu0_exit,
	       .name			= "cpu0");
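
/*
 * For reference, a minimal sketch of how a userspace loader could read the
 * stats map above and fold the per-CPU counts into totals. This is
 * illustrative only: the skeleton header name and the read_stats() helper are
 * assumptions modeled on scx_simple.c in this directory, not part of this
 * scheduler.
 *
 *	#include <string.h>
 *	#include <bpf/bpf.h>
 *	#include "scx_cpu0.bpf.skel.h"	(hypothetical generated skeleton)
 *
 *	static void read_stats(struct scx_cpu0 *skel, __u64 *stats)
 *	{
 *		int nr_cpus = libbpf_num_possible_cpus();
 *		__u64 cnts[2][nr_cpus];
 *		__u32 idx;
 *
 *		memset(stats, 0, sizeof(stats[0]) * 2);
 *
 *		For a BPF_MAP_TYPE_PERCPU_ARRAY, lookup fills in one value per
 *		possible CPU; sum them for the scheduler-wide count.
 *
 *		for (idx = 0; idx < 2; idx++) {
 *			int fd = bpf_map__fd(skel->maps.stats);
 *			int cpu;
 *
 *			if (bpf_map_lookup_elem(fd, &idx, cnts[idx]) < 0)
 *				continue;
 *			for (cpu = 0; cpu < nr_cpus; cpu++)
 *				stats[idx] += cnts[idx][cpu];
 *		}
 *	}
 */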