/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2023 David Vernet <dvernet@meta.com>
 * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
 */
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include "scx_test.h"
#include "init_enable_count.bpf.skel.h"

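/*
 * The SCHED_EXT policy number is not (yet) exposed through the userspace
 * sched.h headers, so define it locally. Assumption: this matches the
 * kernel's definition of SCHED_EXT.
 */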
#define SCHED_EXT 7

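/*
 * Open and load the init_enable_count skeleton. When @global is false, set
 * SCX_OPS_SWITCH_PARTIAL so that only tasks which explicitly switch to
 * SCHED_EXT are scheduled by the BPF scheduler.
 */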
static struct init_enable_count *
open_load_prog(bool global)
{
	struct init_enable_count *skel;

	skel = init_enable_count__open();
	SCX_BUG_ON(!skel, "Failed to open skel");

	if (!global)
		skel->struct_ops.init_enable_count_ops->flags |= SCX_OPS_SWITCH_PARTIAL;

	SCX_BUG_ON(init_enable_count__load(skel), "Failed to load skel");

	return skel;
}

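/*
 * Fork batches of children before and after attaching the scheduler, then
 * check that the ops.init_task(), ops.exit_task(), ops.enable() and
 * ops.disable() counts recorded on the BPF side line up with what @global
 * (full switch) vs. partial mode implies.
 */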
static enum scx_test_status run_test(bool global)
{
	struct init_enable_count *skel;
	struct bpf_link *link;
	const u32 num_children = 5, num_pre_forks = 1024;
	int ret, i, status;
	struct sched_param param = {};
	pid_t pids[num_pre_forks];

	skel = open_load_prog(global);

	/*
	 * Fork a bunch of children before we attach the scheduler so that we
	 * ensure (at least in practical terms) that there are more tasks that
	 * transition from SCHED_OTHER -> SCHED_EXT than there are tasks that
	 * take the fork() path either below or in other processes.
	 */
	for (i = 0; i < num_pre_forks; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
		if (pids[i] == 0) {
			/* Stay alive until the scheduler has been attached. */
			sleep(1);
			exit(0);
		}
	}

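	/*
	 * Attaching the scheduler invokes ops.init_task() on every task
	 * already in the system, including (in practical terms) the children
	 * sleeping above.
	 */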
	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	for (i = 0; i < num_pre_forks; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for pre-forked child\n");

		SCX_FAIL_IF(status != 0, "Pre-forked child %d exited with status %d\n", i,
			    status);
	}

	bpf_link__destroy(link);
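	/*
	 * By this point every pre-forked child was initialized at attach
	 * time and has exited, so both counts must cover at least the
	 * pre-forked children.
	 */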
	SCX_GE(skel->bss->init_task_cnt, num_pre_forks);
	SCX_GE(skel->bss->exit_task_cnt, num_pre_forks);

	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* SCHED_EXT children */
	for (i = 0; i < num_children; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");

		if (pids[i] == 0) {
			ret = sched_setscheduler(0, SCHED_EXT, &param);
			SCX_BUG_ON(ret, "Failed to set sched to sched_ext");

			/*
			 * Reset half of them back to SCHED_OTHER. The counts
			 * should be the same either way, as ops.disable() is
			 * invoked whether a task leaves SCHED_EXT via
			 * sched_setscheduler() or by exiting while still on
			 * SCHED_EXT.
			 */
			if (i % 2 == 0) {
				ret = sched_setscheduler(0, SCHED_OTHER, &param);
				SCX_BUG_ON(ret, "Failed to reset sched to normal");
			}
			exit(0);
		}
	}
	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for SCX child\n");

		SCX_FAIL_IF(status != 0, "SCX child %d exited with status %d\n", i,
			    status);
	}

	/* SCHED_OTHER children */
	for (i = 0; i < num_children; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
		if (pids[i] == 0)
			exit(0);
	}

	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for normal child\n");

		SCX_FAIL_IF(status != 0, "Normal child %d exited with status %d\n", i,
			    status);
	}

	bpf_link__destroy(link);

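	/*
	 * All 2 * num_children children above were created while the
	 * scheduler was attached, so each of them took the fork() path
	 * through ops.init_task() and, on exit, ops.exit_task().
	 */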
	SCX_GE(skel->bss->init_task_cnt, 2 * num_children);
	SCX_GE(skel->bss->exit_task_cnt, 2 * num_children);

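	/*
	 * In global mode, every task in the system runs on SCHED_EXT, so
	 * unrelated tasks can also bump the enable/disable counts. With
	 * SCX_OPS_SWITCH_PARTIAL, only the children that explicitly called
	 * sched_setscheduler(SCHED_EXT) are ever enabled, so the counts must
	 * match exactly.
	 */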
	if (global) {
		SCX_GE(skel->bss->enable_cnt, 2 * num_children);
		SCX_GE(skel->bss->disable_cnt, 2 * num_children);
	} else {
		SCX_EQ(skel->bss->enable_cnt, num_children);
		SCX_EQ(skel->bss->disable_cnt, num_children);
	}
	/*
	 * We forked a ton of tasks before attaching the scheduler above, so
	 * the transition count should comfortably exceed the fork count.
	 * Technically this could be flaky if a ton of forks happen
	 * concurrently in other processes, but that should be exceedingly
	 * unlikely.
	 */
	SCX_GT(skel->bss->init_transition_cnt, skel->bss->init_fork_cnt);
	SCX_GE(skel->bss->init_fork_cnt, 2 * num_children);

	init_enable_count__destroy(skel);

	return SCX_TEST_PASS;
}

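/*
 * Exercise both modes: a full (global) switch of all tasks, then partial
 * mode where only tasks that opt in run on SCHED_EXT.
 */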
static enum scx_test_status run(void *ctx)
{
	enum scx_test_status status;

	status = run_test(true);
	if (status != SCX_TEST_PASS)
		return status;

	return run_test(false);
}

struct scx_test init_enable_count = {
	.name = "init_enable_count",
	.description = "Verify that the init, enable, etc. callbacks are "
		       "invoked the correct number of times.",
	.run = run,
};
REGISTER_SCX_TEST(&init_enable_count)
167