/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2023 David Vernet <dvernet@meta.com>
 * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
 */
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include "scx_test.h"
#include "init_enable_count.bpf.skel.h"

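/*
 * SCHED_EXT's scheduling policy number; defined here in case the system's
 * sched.h doesn't export it yet.
 */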
#define SCHED_EXT 7

static enum scx_test_status run_test(bool global)
{
	struct init_enable_count *skel;
	struct bpf_link *link;
	const u32 num_children = 5, num_pre_forks = 1024;
	int ret, i, status;
	struct sched_param param = {};
	pid_t pids[num_pre_forks];

	skel = init_enable_count__open();
	SCX_FAIL_IF(!skel, "Failed to open");
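	/* Initialize scx enum values in the skeleton before loading. */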
	SCX_ENUM_INIT(skel);

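	/*
	 * With SCX_OPS_SWITCH_PARTIAL, only tasks that explicitly set
	 * SCHED_EXT run on the BPF scheduler; all other tasks stay on their
	 * current sched class.
	 */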
	if (!global)
		skel->struct_ops.init_enable_count_ops->flags |= SCX_OPS_SWITCH_PARTIAL;

	SCX_FAIL_IF(init_enable_count__load(skel), "Failed to load skel");

	/*
	 * Fork a bunch of children before we attach the scheduler so that we
	 * ensure (at least in practical terms) that there are more tasks that
	 * transition from SCHED_OTHER -> SCHED_EXT than there are tasks that
	 * take the fork() path either below or in other processes.
	 */
	for (i = 0; i < num_pre_forks; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
		if (pids[i] == 0) {
			sleep(1);
			exit(0);
		}
	}

	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	for (i = 0; i < num_pre_forks; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for pre-forked child\n");

		SCX_FAIL_IF(status != 0, "Pre-forked child %d exited with status %d\n", i,
			    status);
	}

	bpf_link__destroy(link);
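	/*
	 * Every pre-forked child has already been reaped, and
	 * ops.init_task()/ops.exit_task() fire for every task regardless of
	 * its policy, so each child must have been counted.
	 */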
	SCX_GE(skel->bss->init_task_cnt, num_pre_forks);
	SCX_GE(skel->bss->exit_task_cnt, num_pre_forks);

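	/* Re-attach to exercise the fork() and sched_setscheduler() paths. */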
	link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
	SCX_FAIL_IF(!link, "Failed to attach struct_ops");

	/* SCHED_EXT children */
	for (i = 0; i < num_children; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");

		if (pids[i] == 0) {
			ret = sched_setscheduler(0, SCHED_EXT, &param);
			SCX_BUG_ON(ret, "Failed to set sched to sched_ext");

			/*
			 * Reset to SCHED_OTHER for half of them. Counts for
			 * everything should still be the same regardless, as
			 * ops.disable() is invoked even if a task is still on
			 * SCHED_EXT when it exits.
			 */
			if (i % 2 == 0) {
				ret = sched_setscheduler(0, SCHED_OTHER, &param);
				SCX_BUG_ON(ret, "Failed to reset sched to normal");
			}
			exit(0);
		}
	}
	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for SCX child\n");

		SCX_FAIL_IF(status != 0, "SCX child %d exited with status %d\n", i,
			    status);
	}

	/* SCHED_OTHER children */
	for (i = 0; i < num_children; i++) {
		pids[i] = fork();
		SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
		if (pids[i] == 0)
			exit(0);
	}

	for (i = 0; i < num_children; i++) {
		SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
			    "Failed to wait for normal child\n");

		SCX_FAIL_IF(status != 0, "Normal child %d exited with status %d\n", i,
			    status);
	}

	bpf_link__destroy(link);

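	/*
	 * Both batches of children were forked while the scheduler was
	 * attached, so each contributes one ops.init_task() and one
	 * ops.exit_task() invocation.
	 */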
	SCX_GE(skel->bss->init_task_cnt, 2 * num_children);
	SCX_GE(skel->bss->exit_task_cnt, 2 * num_children);

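	/*
	 * ops.enable()/ops.disable() fire only for tasks actually running on
	 * SCHED_EXT. In global mode all of the children are switched to
	 * SCHED_EXT, so both batches count; in partial mode only the children
	 * that called sched_setscheduler(SCHED_EXT) do.
	 */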
	if (global) {
		SCX_GE(skel->bss->enable_cnt, 2 * num_children);
		SCX_GE(skel->bss->disable_cnt, 2 * num_children);
	} else {
		SCX_EQ(skel->bss->enable_cnt, num_children);
		SCX_EQ(skel->bss->disable_cnt, num_children);
	}
	/*
	 * We forked a ton of tasks before we attached the scheduler above, so
	 * init_transition_cnt should comfortably exceed init_fork_cnt.
	 * Technically this could be flaky if a ton of forks are happening at
	 * the same time in other processes, but that should be exceedingly
	 * unlikely.
	 */
	SCX_GT(skel->bss->init_transition_cnt, skel->bss->init_fork_cnt);
	SCX_GE(skel->bss->init_fork_cnt, 2 * num_children);

	init_enable_count__destroy(skel);

	return SCX_TEST_PASS;
}

static enum scx_test_status run(void *ctx)
{
	enum scx_test_status status;

	status = run_test(true);
	if (status != SCX_TEST_PASS)
		return status;

	return run_test(false);
}

struct scx_test init_enable_count = {
	.name = "init_enable_count",
	.description = "Verify we correctly count the occurrences of init, "
		       "enable, etc. callbacks.",
	.run = run,
};
REGISTER_SCX_TEST(&init_enable_count)