xref: /linux/tools/testing/selftests/bpf/prog_tests/timer.c (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <sched.h>
#include <test_progs.h>
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include "timer.skel.h"
#include "timer_failure.skel.h"
#include "timer_interrupt.skel.h"

#define NUM_THR 8

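/*
 * Open a sampling perf event (period 10000) of the given type/config for
 * @pid on @cpu.  The NMI stress tests below attach BPF programs to such
 * events so that the programs are invoked from perf interrupt (NMI) context.
 */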
static int perf_event_open(__u32 type, __u64 config, int pid, int cpu)
{
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.size = sizeof(struct perf_event_attr),
		.sample_period = 10000,
	};

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}

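/*
 * Stress worker: repeatedly run the "race" program via
 * bpf_prog_test_run_opts() and return the argument pointer so the joiner
 * can verify the thread exited through this path.
 */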
static void *spin_lock_thread(void *arg)
{
	int i, err, prog_fd = *(int *)arg;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	for (i = 0; i < 10000; i++) {
		err = bpf_prog_test_run_opts(prog_fd, &topts);
		if (!ASSERT_OK(err, "test_run_opts err") ||
		    !ASSERT_OK(topts.retval, "test_run_opts retval"))
			break;
	}

	pthread_exit(arg);
}

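/*
 * Hammer the "race" program from NUM_THR threads so timer operations race
 * with each other.  @async_cancel is exported to the BPF program through
 * .bss and is expected to switch it to the asynchronous cancel path.
 */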
static int timer_stress_runner(struct timer *timer_skel, bool async_cancel)
{
	int i, err = 1, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	pthread_t thread_id[NUM_THR];
	void *ret;

	timer_skel->bss->async_cancel = async_cancel;
	prog_fd = bpf_program__fd(timer_skel->progs.race);
	for (i = 0; i < NUM_THR; i++) {
		err = pthread_create(&thread_id[i], NULL,
				     &spin_lock_thread, &prog_fd);
		if (!ASSERT_OK(err, "pthread_create"))
			break;
	}

	while (i) {
		err = pthread_join(thread_id[--i], &ret);
		if (ASSERT_OK(err, "pthread_join"))
			ASSERT_EQ(ret, (void *)&prog_fd, "pthread_join");
	}
	return err;
}

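/* Thin wrappers selecting the cancellation mode used by the race stress. */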
static int timer_stress(struct timer *timer_skel)
{
	return timer_stress_runner(timer_skel, false);
}

static int timer_stress_async_cancel(struct timer *timer_skel)
{
	return timer_stress_runner(timer_skel, true);
}

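/*
 * Busy loop that burns CPU cycles in the forked child so that the
 * CPU-cycles perf event keeps firing while the BPF program is attached.
 */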
static void *nmi_cpu_worker(void *arg)
{
	volatile __u64 num = 1;
	int i;

	for (i = 0; i < 500000000; ++i)
		num *= (i % 7) + 1;
	(void)num;

	return NULL;
}

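/*
 * Common harness for the NMI stress tests:
 *  - fork a child that spins on multiple CPUs,
 *  - attach @prog to a hardware CPU-cycles sampling event on that child,
 *    so the program runs from NMI context,
 *  - wait for the child and check that the program actually ran.
 * Returns EOPNOTSUPP (and marks the test skipped) if the hardware event
 * is not available.
 */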
static int run_nmi_test(struct timer *timer_skel, struct bpf_program *prog)
{
	struct bpf_link *link = NULL;
	int pe_fd = -1, pipefd[2] = {-1, -1}, pid = 0, status;
	char buf = 0;
	int ret = -1;

	if (!ASSERT_OK(pipe(pipefd), "pipe"))
		goto cleanup;

	pid = fork();
	if (pid == 0) {
		/* Child: spawn multiple threads to consume multiple CPUs */
		pthread_t threads[NUM_THR];
		int i;

		close(pipefd[1]);
		read(pipefd[0], &buf, 1);
		close(pipefd[0]);

		for (i = 0; i < NUM_THR; i++)
			pthread_create(&threads[i], NULL, nmi_cpu_worker, NULL);
		for (i = 0; i < NUM_THR; i++)
			pthread_join(threads[i], NULL);
		exit(0);
	}

	if (!ASSERT_GE(pid, 0, "fork"))
		goto cleanup;

	/* Open perf event for child process across all CPUs */
	pe_fd = perf_event_open(PERF_TYPE_HARDWARE,
				PERF_COUNT_HW_CPU_CYCLES,
				pid,  /* measure child process */
				-1);  /* on any CPU */
	if (pe_fd < 0) {
		if (errno == ENOENT || errno == EOPNOTSUPP) {
			printf("SKIP:no PERF_COUNT_HW_CPU_CYCLES\n");
			test__skip();
			ret = EOPNOTSUPP;
			goto cleanup;
		}
		ASSERT_GE(pe_fd, 0, "perf_event_open");
		goto cleanup;
	}

	link = bpf_program__attach_perf_event(prog, pe_fd);
	if (!ASSERT_OK_PTR(link, "attach_perf_event"))
		goto cleanup;
	pe_fd = -1;  /* Ownership transferred to link */

	/* Signal child to start CPU work */
	close(pipefd[0]);
	pipefd[0] = -1;
	write(pipefd[1], &buf, 1);
	close(pipefd[1]);
	pipefd[1] = -1;

	waitpid(pid, &status, 0);
	pid = 0;

	/* Verify NMI context was hit */
	ASSERT_GT(timer_skel->bss->test_hits, 0, "test_hits");
	ret = 0;

cleanup:
	bpf_link__destroy(link);
	if (pe_fd >= 0)
		close(pe_fd);
	if (pid > 0) {
		write(pipefd[1], &buf, 1);
		waitpid(pid, &status, 0);
	}
	if (pipefd[0] >= 0)
		close(pipefd[0]);
	if (pipefd[1] >= 0)
		close(pipefd[1]);
	return ret;
}

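/*
 * NMI stress variants: each attaches a different BPF program (nmi_race,
 * nmi_update, nmi_cancel) and checks the corresponding hit counter.
 * EOPNOTSUPP from run_nmi_test() means the test was skipped.
 */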
static int timer_stress_nmi_race(struct timer *timer_skel)
{
	int err;

	err = run_nmi_test(timer_skel, timer_skel->progs.nmi_race);
	if (err == EOPNOTSUPP)
		return 0;
	return err;
}

static int timer_stress_nmi_update(struct timer *timer_skel)
{
	int err;

	err = run_nmi_test(timer_skel, timer_skel->progs.nmi_update);
	if (err == EOPNOTSUPP)
		return 0;
	if (err)
		return err;
	ASSERT_GT(timer_skel->bss->update_hits, 0, "update_hits");
	return 0;
}

static int timer_stress_nmi_cancel(struct timer *timer_skel)
{
	int err;

	err = run_nmi_test(timer_skel, timer_skel->progs.nmi_cancel);
	if (err == EOPNOTSUPP)
		return 0;
	if (err)
		return err;
	ASSERT_GT(timer_skel->bss->cancel_hits, 0, "cancel_hits");
	return 0;
}

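/*
 * Basic functional test: attach the skeleton, run the "test1" program once
 * and verify that the timer callbacks fired and updated their counters to
 * the values expected by the timer BPF program.
 */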
static int timer(struct timer *timer_skel)
{
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	err = timer__attach(timer_skel);
	if (!ASSERT_OK(err, "timer_attach"))
		return err;

	ASSERT_EQ(timer_skel->data->callback_check, 52, "callback_check1");
	ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1");
	ASSERT_EQ(timer_skel->bss->pinned_callback_check, 0, "pinned_callback_check1");

	prog_fd = bpf_program__fd(timer_skel->progs.test1);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");
	timer__detach(timer_skel);

	usleep(50); /* 10 usecs should be enough, but give it extra */
	/* check that timer_cb1() was executed 10+10 times */
	ASSERT_EQ(timer_skel->data->callback_check, 42, "callback_check2");
	ASSERT_EQ(timer_skel->data->callback2_check, 42, "callback2_check2");

	/* check that timer_cb2() was executed twice */
	ASSERT_EQ(timer_skel->bss->bss_data, 10, "bss_data");

	/* check that timer_cb3() was executed twice */
	ASSERT_EQ(timer_skel->bss->abs_data, 12, "abs_data");

	/* check that timer_cb_pinned() was executed twice */
	ASSERT_EQ(timer_skel->bss->pinned_callback_check, 2, "pinned_callback_check");

	/* check that there were no errors in timer execution */
	ASSERT_EQ(timer_skel->bss->err, 0, "err");

	/* check that code paths completed */
	ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");

	return 0;
}

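/*
 * Run the async-cancel program once and, after a short delay, verify the
 * BPF side reported no errors and that all expected code paths completed.
 */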
static int timer_cancel_async(struct timer *timer_skel)
{
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	prog_fd = bpf_program__fd(timer_skel->progs.test_async_cancel_succeed);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	usleep(500);
	/* check that there were no errors in timer execution */
	ASSERT_EQ(timer_skel->bss->err, 0, "err");

	/* check that code paths completed */
	ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");

	return 0;
}

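/*
 * Common entry point: open and load the timer skeleton (skipping the test
 * if the kernel reports EOPNOTSUPP), run the given sub-test and clean up.
 */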
static void test_timer(int (*timer_test_fn)(struct timer *timer_skel))
{
	struct timer *timer_skel = NULL;
	int err;

	timer_skel = timer__open_and_load();
	if (!timer_skel && errno == EOPNOTSUPP) {
		test__skip();
		return;
	}
	if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
		return;

	err = timer_test_fn(timer_skel);
	ASSERT_OK(err, "timer");
	timer__destroy(timer_skel);
}

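/*
 * Entry points picked up by the test_progs runner.  The serial_ prefix
 * keeps these tests from running in parallel with others.
 */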
void serial_test_timer(void)
{
	test_timer(timer);

	RUN_TESTS(timer_failure);
}

void serial_test_timer_stress(void)
{
	test_timer(timer_stress);
}

void serial_test_timer_stress_async_cancel(void)
{
	test_timer(timer_stress_async_cancel);
}

void serial_test_timer_async_cancel(void)
{
	test_timer(timer_cancel_async);
}

void serial_test_timer_stress_nmi_race(void)
{
	test_timer(timer_stress_nmi_race);
}

void serial_test_timer_stress_nmi_update(void)
{
	test_timer(timer_stress_nmi_update);
}

void serial_test_timer_stress_nmi_cancel(void)
{
	test_timer(timer_stress_nmi_cancel);
}

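/*
 * Check that a timer callback sees the expected interrupt/preemption state:
 * run the test_timer_interrupt program, give the timer a moment to fire and
 * then inspect the flags recorded by the BPF side.
 */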
void test_timer_interrupt(void)
{
	struct timer_interrupt *skel = NULL;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, opts);

	skel = timer_interrupt__open_and_load();
	if (!skel && errno == EOPNOTSUPP) {
		test__skip();
		return;
	}
	if (!ASSERT_OK_PTR(skel, "timer_interrupt__open_and_load"))
		return;

	err = timer_interrupt__attach(skel);
	if (!ASSERT_OK(err, "timer_interrupt__attach"))
		goto out;

	prog_fd = bpf_program__fd(skel->progs.test_timer_interrupt);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
		goto out;

	usleep(50);

	ASSERT_EQ(skel->bss->in_interrupt, 0, "in_interrupt");
	if (skel->bss->preempt_count)
		ASSERT_NEQ(skel->bss->in_interrupt_cb, 0, "in_interrupt_cb");

out:
	timer_interrupt__destroy(skel);
}