1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
4 * Copyright (c) 2023 David Vernet <dvernet@meta.com>
5 * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
6 */
7 #include <signal.h>
8 #include <stdio.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <bpf/bpf.h>
12 #include <scx/common.h>
13 #include <sys/wait.h>
14 #include "scx_test.h"
15 #include "init_enable_count.bpf.skel.h"
16
17 #define SCHED_EXT 7
18
/*
 * Attach the init_enable_count BPF scheduler and verify the counters it
 * keeps for the ops.init_task()/exit_task()/enable()/disable() callbacks.
 * When @global is true the scheduler manages all tasks; otherwise
 * SCX_OPS_SWITCH_PARTIAL restricts it to tasks explicitly switched to
 * SCHED_EXT.  Returns SCX_TEST_PASS on success, or fails via SCX_FAIL_IF.
 */
static enum scx_test_status run_test(bool global)
{
	struct init_enable_count *skel;
	struct bpf_link *link;
	const u32 num_children = 5, num_pre_forks = 1024;
	int ret, i, status;
	struct sched_param param = {};
	pid_t pids[num_pre_forks];
	int pipe_fds[2];

	SCX_FAIL_IF(pipe(pipe_fds) < 0, "Failed to create pipe");

	skel = init_enable_count__open();
	SCX_FAIL_IF(!skel, "Failed to open");
	SCX_ENUM_INIT(skel);

	/* Partial mode: only tasks explicitly set to SCHED_EXT are managed. */
	if (!global)
		skel->struct_ops.init_enable_count_ops->flags |= SCX_OPS_SWITCH_PARTIAL;

	SCX_FAIL_IF(init_enable_count__load(skel), "Failed to load skel");

	/*
	 * Fork a bunch of children before we attach the scheduler so that we
	 * ensure (at least in practical terms) that there are more tasks that
	 * transition from SCHED_OTHER -> SCHED_EXT than there are tasks that
	 * take the fork() path either below or in other processes.
	 *
	 * All children will block on read() on the pipe until the parent closes
	 * the write end after attaching the scheduler, which signals all of
	 * them to exit simultaneously. Auto-reap so we don't have to wait on
	 * them.
	 */
	signal(SIGCHLD, SIG_IGN);
	for (i = 0; i < num_pre_forks; i++) {
		pid_t pid = fork();

		SCX_FAIL_IF(pid < 0, "Failed to fork child");
		if (pid == 0) {
			char buf;

			/* Child: block until the parent closes the write end. */
			close(pipe_fds[1]);
			if (read(pipe_fds[0], &buf, 1) < 0)
				exit(1);
			close(pipe_fds[0]);
			exit(0);
		}
	}
	/* Parent no longer needs the read end. */
	close(pipe_fds[0]);

	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* Signal all pre-forked children to exit. */
	close(pipe_fds[1]);
	signal(SIGCHLD, SIG_DFL);

	/*
	 * After detaching, every pre-forked child should have been seen by
	 * ops.init_task() and ops.exit_task() at least once.
	 */
	bpf_link__destroy(link);
	SCX_GE(skel->bss->init_task_cnt, num_pre_forks);
	SCX_GE(skel->bss->exit_task_cnt, num_pre_forks);

	/* Re-attach for the fork()-path portion of the test. */
	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* SCHED_EXT children */
	for (i = 0; i < num_children; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");

		if (pids[i] == 0) {
			ret = sched_setscheduler(0, SCHED_EXT, &param);
			SCX_BUG_ON(ret, "Failed to set sched to sched_ext");

			/*
			 * Reset to SCHED_OTHER for half of them. Counts for
			 * everything should still be the same regardless, as
			 * ops.disable() is invoked even if a task is still on
			 * SCHED_EXT before it exits.
			 */
			if (i % 2 == 0) {
				ret = sched_setscheduler(0, SCHED_OTHER, &param);
				SCX_BUG_ON(ret, "Failed to reset sched to normal");
			}
			exit(0);
		}
	}
	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for SCX child\n");

		SCX_FAIL_IF(status != 0, "SCX child %d exited with status %d\n", i,
			    status);
	}

	/* SCHED_OTHER children */
	for (i = 0; i < num_children; i++) {
		/*
		 * NOTE(review): unlike the loop above, fork() failure is not
		 * checked here; a -1 pid would make the waitpid() below reap
		 * an arbitrary child — confirm whether this is intentional.
		 */
		pids[i] = fork();
		if (pids[i] == 0)
			exit(0);
	}

	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for normal child\n");

		SCX_FAIL_IF(status != 0, "Normal child %d exited with status %d\n", i,
			    status);
	}

	bpf_link__destroy(link);

	/* Both batches of children must have hit init_task/exit_task. */
	SCX_GE(skel->bss->init_task_cnt, 2 * num_children);
	SCX_GE(skel->bss->exit_task_cnt, 2 * num_children);

	if (global) {
		/* Global mode: both batches are enabled/disabled. */
		SCX_GE(skel->bss->enable_cnt, 2 * num_children);
		SCX_GE(skel->bss->disable_cnt, 2 * num_children);
	} else {
		/* Partial mode: only the explicit SCHED_EXT children count. */
		SCX_EQ(skel->bss->enable_cnt, num_children);
		SCX_EQ(skel->bss->disable_cnt, num_children);
	}
	/*
	 * We forked a ton of tasks before we attached the scheduler above, so
	 * this should be fine. Technically it could be flaky if a ton of forks
	 * are happening at the same time in other processes, but that should
	 * be exceedingly unlikely.
	 */
	SCX_GT(skel->bss->init_transition_cnt, skel->bss->init_fork_cnt);
	SCX_GE(skel->bss->init_fork_cnt, 2 * num_children);

	init_enable_count__destroy(skel);

	return SCX_TEST_PASS;
}
152
run(void * ctx)153 static enum scx_test_status run(void *ctx)
154 {
155 enum scx_test_status status;
156
157 status = run_test(true);
158 if (status != SCX_TEST_PASS)
159 return status;
160
161 return run_test(false);
162 }
163
/* Test descriptor, registered with the scx selftest runner below. */
struct scx_test init_enable_count = {
	.name = "init_enable_count",
	.description = "Verify we correctly count the occurrences of init, "
		       "enable, etc callbacks.",
	.run = run,
};
REGISTER_SCX_TEST(&init_enable_count)
171