/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2023 David Vernet <dvernet@meta.com>
 * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include "scx_test.h"
#include "init_enable_count.bpf.skel.h"

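/*
 * The SCHED_EXT policy number may not be exported by the system's sched.h
 * headers yet, so define it locally.
 */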
#define SCHED_EXT 7

static enum scx_test_status run_test(bool global)
{
	struct init_enable_count *skel;
	struct bpf_link *link;
	const u32 num_children = 5, num_pre_forks = 1024;
	int ret, i, status;
	struct sched_param param = {};
	pid_t pids[num_pre_forks];
	int pipe_fds[2];

	SCX_FAIL_IF(pipe(pipe_fds) < 0, "Failed to create pipe");

	skel = init_enable_count__open();
	SCX_FAIL_IF(!skel, "Failed to open");
	SCX_ENUM_INIT(skel);

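	/*
	 * Without SCX_OPS_SWITCH_PARTIAL, attaching switches all eligible
	 * tasks to the scheduler; with it, only tasks that explicitly opt in
	 * via sched_setscheduler(SCHED_EXT) are switched.
	 */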
	if (!global)
		skel->struct_ops.init_enable_count_ops->flags |= SCX_OPS_SWITCH_PARTIAL;

	SCX_FAIL_IF(init_enable_count__load(skel), "Failed to load skel");

	/*
	 * Fork a bunch of children before we attach the scheduler so that we
	 * ensure (at least in practical terms) that there are more tasks that
	 * transition from SCHED_OTHER -> SCHED_EXT than there are tasks that
	 * take the fork() path either below or in other processes.
	 *
	 * All children will block on read() on the pipe until the parent closes
	 * the write end after attaching the scheduler, which signals all of
	 * them to exit simultaneously. Auto-reap so we don't have to wait on
	 * them.
	 */
	signal(SIGCHLD, SIG_IGN);
	for (i = 0; i < num_pre_forks; i++) {
		pid_t pid = fork();

		SCX_FAIL_IF(pid < 0, "Failed to fork child");
		if (pid == 0) {
			char buf;

			close(pipe_fds[1]);
			read(pipe_fds[0], &buf, 1);
			close(pipe_fds[0]);
			exit(0);
		}
	}
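	/* The parent only needs the write end of the pipe. */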
	close(pipe_fds[0]);

	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* Signal all pre-forked children to exit. */
	close(pipe_fds[1]);
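	/* Restore default SIGCHLD handling so waitpid() works below. */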
	signal(SIGCHLD, SIG_DFL);

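	/*
	 * Detach the scheduler. Each pre-forked child should by now have been
	 * counted by ops.init_task(), and by ops.exit_task() either on exit
	 * or when the scheduler is detached.
	 */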
	bpf_link__destroy(link);
	SCX_GE(skel->bss->init_task_cnt, num_pre_forks);
	SCX_GE(skel->bss->exit_task_cnt, num_pre_forks);

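	/* Re-attach so the children forked below exercise the fork() path. */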
	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* SCHED_EXT children */
	for (i = 0; i < num_children; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");

		if (pids[i] == 0) {
			ret = sched_setscheduler(0, SCHED_EXT, &param);
			SCX_BUG_ON(ret, "Failed to set sched to sched_ext");

			/*
			 * Reset to SCHED_OTHER for half of them. The counts
			 * should come out the same either way, as ops.disable()
			 * is invoked both when a task switches back and when a
			 * task that is still on SCHED_EXT exits.
			 */
			if (i % 2 == 0) {
				ret = sched_setscheduler(0, SCHED_OTHER, &param);
				SCX_BUG_ON(ret, "Failed to reset sched to normal");
			}
			exit(0);
		}
	}
	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for SCX child\n");

		SCX_FAIL_IF(status != 0, "SCX child %d exited with status %d\n", i,
			    status);
	}

	/* SCHED_OTHER children */
	for (i = 0; i < num_children; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
		if (pids[i] == 0)
			exit(0);
	}

	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for normal child\n");

		SCX_FAIL_IF(status != 0, "Normal child %d exited with status %d\n", i,
			    status);
	}

	bpf_link__destroy(link);

	SCX_GE(skel->bss->init_task_cnt, 2 * num_children);
	SCX_GE(skel->bss->exit_task_cnt, 2 * num_children);

	if (global) {
		SCX_GE(skel->bss->enable_cnt, 2 * num_children);
		SCX_GE(skel->bss->disable_cnt, 2 * num_children);
	} else {
		SCX_EQ(skel->bss->enable_cnt, num_children);
		SCX_EQ(skel->bss->disable_cnt, num_children);
	}
	/*
	 * We pre-forked far more tasks before attaching the scheduler than we
	 * fork afterwards, so init_transition_cnt should comfortably exceed
	 * init_fork_cnt. Technically this could be flaky if a flood of forks
	 * happens in other processes at the same time, but that should be
	 * exceedingly unlikely.
	 */
	SCX_GT(skel->bss->init_transition_cnt, skel->bss->init_fork_cnt);
	SCX_GE(skel->bss->init_fork_cnt, 2 * num_children);

	init_enable_count__destroy(skel);

	return SCX_TEST_PASS;
}

static enum scx_test_status run(void *ctx)
{
	enum scx_test_status status;

	status = run_test(true);
	if (status != SCX_TEST_PASS)
		return status;

	return run_test(false);
}

struct scx_test init_enable_count = {
	.name = "init_enable_count",
	.description = "Verify we correctly count the occurrences of the init, "
		       "enable, etc. callbacks.",
	.run = run,
};
REGISTER_SCX_TEST(&init_enable_count)