/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A scheduler that validates that enqueue flags are properly stored and
 * applied at dispatch time when a task is directly dispatched from
 * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
 * making the test a very basic vtime scheduler.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 */
12
13 #include <scx/common.bpf.h>
14
char _license[] SEC("license") = "GPL";

/*
 * Set to true by ops.dispatch() once a task has been consumed from
 * VTIME_DSQ; read by the user space side of the test to verify that the
 * direct dispatch from ops.select_cpu() landed on the DSQ.
 */
volatile bool consumed;

/* Global virtual clock; advanced monotonically in ops.running(). */
static u64 vtime_now;

/* ID of the single shared DSQ that all tasks are vtime-dispatched to. */
#define VTIME_DSQ 0
22
vtime_before(u64 a,u64 b)23 static inline bool vtime_before(u64 a, u64 b)
24 {
25 return (s64)(a - b) < 0;
26 }
27
task_vtime(const struct task_struct * p)28 static inline u64 task_vtime(const struct task_struct *p)
29 {
30 u64 vtime = p->scx.dsq_vtime;
31
32 if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
33 return vtime_now - SCX_SLICE_DFL;
34 else
35 return vtime;
36 }
37
BPF_STRUCT_OPS(select_cpu_vtime_select_cpu,struct task_struct * p,s32 prev_cpu,u64 wake_flags)38 s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
39 s32 prev_cpu, u64 wake_flags)
40 {
41 s32 cpu;
42
43 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
44 if (cpu >= 0)
45 goto ddsp;
46
47 cpu = prev_cpu;
48 scx_bpf_test_and_clear_cpu_idle(cpu);
49 ddsp:
50 scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
51 return cpu;
52 }
53
/*
 * Pull the next task from the shared vtime DSQ. Flag that at least one
 * task was actually consumed so user space can confirm the direct-dispatch
 * path worked end to end.
 */
void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
{
	bool moved = scx_bpf_consume(VTIME_DSQ);

	if (moved)
		consumed = true;
}
59
/*
 * A task is starting to run: ratchet the global clock forward to its
 * vtime. The clock only ever advances (monotonic under wraparound thanks
 * to vtime_before()).
 */
void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p)
{
	u64 task_vt = p->scx.dsq_vtime;

	if (vtime_before(vtime_now, task_vt))
		vtime_now = task_vt;
}
65
/*
 * A task is coming off the CPU: charge it for the slice it consumed,
 * scaled inversely by its weight (100 is the default weight, so a
 * default-weight task is charged exactly its used time).
 */
void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p,
		    bool runnable)
{
	u64 used = SCX_SLICE_DFL - p->scx.slice;

	p->scx.dsq_vtime += used * 100 / p->scx.weight;
}
71
/*
 * A task is joining the scheduler: seed its vtime from the current global
 * clock so it starts competing on equal footing with running tasks.
 */
void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p)
{
	p->scx.dsq_vtime = vtime_now;
}
76
BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)77 s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)
78 {
79 return scx_bpf_create_dsq(VTIME_DSQ, -1);
80 }
81
/*
 * struct_ops map registering the test scheduler. No ops.enqueue() is
 * needed: every task is direct-dispatched from ops.select_cpu().
 * timeout_ms keeps the watchdog short so a broken dispatch path fails the
 * test quickly instead of hanging.
 */
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_vtime_ops = {
	.select_cpu = (void *) select_cpu_vtime_select_cpu,
	.dispatch = (void *) select_cpu_vtime_dispatch,
	.running = (void *) select_cpu_vtime_running,
	.stopping = (void *) select_cpu_vtime_stopping,
	.enable = (void *) select_cpu_vtime_enable,
	.init = (void *) select_cpu_vtime_init,
	.name = "select_cpu_vtime",
	.timeout_ms = 1000U,
};
93