1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3  */
4 #define _GNU_SOURCE
5 #include "test_progs.h"
6 #include "testing_helpers.h"
7 #include "cgroup_helpers.h"
8 #include <argp.h>
9 #include <pthread.h>
10 #include <sched.h>
11 #include <signal.h>
12 #include <string.h>
13 #include <execinfo.h> /* backtrace */
14 #include <linux/membarrier.h>
15 #include <sys/sysinfo.h> /* get_nprocs */
16 #include <netinet/in.h>
17 #include <sys/select.h>
18 #include <sys/socket.h>
19 #include <sys/un.h>
20 
21 static bool verbose(void)
22 {
23 	return env.verbosity > VERBOSE_NONE;
24 }
25 
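/* Unless we are running verbosely in the main process, redirect stdout and
 * stderr into an in-memory stream created with open_memstream(3), so that
 * each test's or subtest's output is captured in its log_buf/log_cnt pair
 * and can be dumped selectively later.
 */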
26 static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
27 {
28 #ifdef __GLIBC__
29 	if (verbose() && env.worker_id == -1) {
30 		/* nothing to do, output to stdout by default */
31 		return;
32 	}
33 
34 	fflush(stdout);
35 	fflush(stderr);
36 
37 	stdout = open_memstream(log_buf, log_cnt);
38 	if (!stdout) {
39 		stdout = env.stdout;
40 		perror("open_memstream");
41 		return;
42 	}
43 
44 	if (env.subtest_state)
45 		env.subtest_state->stdout = stdout;
46 	else
47 		env.test_state->stdout = stdout;
48 
49 	stderr = stdout;
50 #endif
51 }
52 
53 static void stdio_hijack(char **log_buf, size_t *log_cnt)
54 {
55 #ifdef __GLIBC__
56 	if (verbose() && env.worker_id == -1) {
57 		/* nothing to do, output to stdout by default */
58 		return;
59 	}
60 
61 	env.stdout = stdout;
62 	env.stderr = stderr;
63 
64 	stdio_hijack_init(log_buf, log_cnt);
65 #endif
66 }
67 
68 static void stdio_restore_cleanup(void)
69 {
70 #ifdef __GLIBC__
71 	if (verbose() && env.worker_id == -1) {
72 		/* nothing to do, output to stdout by default */
73 		return;
74 	}
75 
76 	fflush(stdout);
77 
78 	if (env.subtest_state) {
79 		fclose(env.subtest_state->stdout);
80 		env.subtest_state->stdout = NULL;
81 		stdout = env.test_state->stdout;
82 		stderr = env.test_state->stdout;
83 	} else {
84 		fclose(env.test_state->stdout);
85 		env.test_state->stdout = NULL;
86 	}
87 #endif
88 }
89 
90 static void stdio_restore(void)
91 {
92 #ifdef __GLIBC__
93 	if (verbose() && env.worker_id == -1) {
94 		/* nothing to do, output to stdout by default */
95 		return;
96 	}
97 
98 	if (stdout == env.stdout)
99 		return;
100 
101 	stdio_restore_cleanup();
102 
103 	stdout = env.stdout;
104 	stderr = env.stderr;
105 #endif
106 }
107 
108 /* Adapted from perf/util/string.c */
109 static bool glob_match(const char *str, const char *pat)
110 {
111 	while (*str && *pat && *pat != '*') {
112 		if (*str != *pat)
113 			return false;
114 		str++;
115 		pat++;
116 	}
117 	/* Check wild card */
118 	if (*pat == '*') {
119 		while (*pat == '*')
120 			pat++;
121 		if (!*pat) /* Tail wild card matches all */
122 			return true;
123 		while (*str)
124 			if (glob_match(str++, pat))
125 				return true;
126 	}
127 	return !*str && !*pat;
128 }
129 
130 #define EXIT_NO_TEST		2
131 #define EXIT_ERR_SETUP_INFRA	3
132 
133 /* defined in test_progs.h */
134 struct test_env env = {};
135 
136 struct prog_test_def {
137 	const char *test_name;
138 	int test_num;
139 	void (*run_test)(void);
140 	void (*run_serial_test)(void);
141 	bool should_run;
142 	bool need_cgroup_cleanup;
143 };
144 
145 /* Override the C runtime library's usleep() implementation to ensure that
146  * nanosleep() is always called. usleep() is frequently used in selftests as
147  * a way to trigger kprobes and tracepoints.
148  */
149 int usleep(useconds_t usec)
150 {
151 	struct timespec ts = {
152 		.tv_sec = usec / 1000000,
153 		.tv_nsec = (usec % 1000000) * 1000,
154 	};
155 
156 	return syscall(__NR_nanosleep, &ts, NULL);
157 }
158 
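/* Decide whether a top-level test should run: the denylist is consulted
 * first (a name match with no subtest patterns filters the test out), then
 * the allowlist. With no allowlist and no numeric selection every test runs;
 * otherwise the test number must be in the selected set.
 */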
159 static bool should_run(struct test_selector *sel, int num, const char *name)
160 {
161 	int i;
162 
163 	for (i = 0; i < sel->blacklist.cnt; i++) {
164 		if (glob_match(name, sel->blacklist.tests[i].name) &&
165 		    !sel->blacklist.tests[i].subtest_cnt)
166 			return false;
167 	}
168 
169 	for (i = 0; i < sel->whitelist.cnt; i++) {
170 		if (glob_match(name, sel->whitelist.tests[i].name))
171 			return true;
172 	}
173 
174 	if (!sel->whitelist.cnt && !sel->num_set)
175 		return true;
176 
177 	return num < sel->num_set_len && sel->num_set[num];
178 }
179 
180 static bool should_run_subtest(struct test_selector *sel,
181 			       struct test_selector *subtest_sel,
182 			       int subtest_num,
183 			       const char *test_name,
184 			       const char *subtest_name)
185 {
186 	int i, j;
187 
188 	for (i = 0; i < sel->blacklist.cnt; i++) {
189 		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
190 			if (!sel->blacklist.tests[i].subtest_cnt)
191 				return false;
192 
193 			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
194 				if (glob_match(subtest_name,
195 					       sel->blacklist.tests[i].subtests[j]))
196 					return false;
197 			}
198 		}
199 	}
200 
201 	for (i = 0; i < sel->whitelist.cnt; i++) {
202 		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
203 			if (!sel->whitelist.tests[i].subtest_cnt)
204 				return true;
205 
206 			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
207 				if (glob_match(subtest_name,
208 					       sel->whitelist.tests[i].subtests[j]))
209 					return true;
210 			}
211 		}
212 	}
213 
214 	if (!sel->whitelist.cnt && !subtest_sel->num_set)
215 		return true;
216 
217 	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
218 }
219 
220 static char *test_result(bool failed, bool skipped)
221 {
222 	return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
223 }
224 
225 #define TEST_NUM_WIDTH 7
226 
227 static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
228 {
229 	int skipped_cnt = test_state->skip_cnt;
230 	int subtests_cnt = test_state->subtest_num;
231 
232 	fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
233 	if (test_state->error_cnt)
234 		fprintf(env.stdout, "FAIL");
235 	else if (!skipped_cnt)
236 		fprintf(env.stdout, "OK");
237 	else if (skipped_cnt == subtests_cnt || !subtests_cnt)
238 		fprintf(env.stdout, "SKIP");
239 	else
240 		fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
241 
242 	fprintf(env.stdout, "\n");
243 }
244 
245 static void print_test_log(char *log_buf, size_t log_cnt)
246 {
247 	log_buf[log_cnt] = '\0';
248 	fprintf(env.stdout, "%s", log_buf);
249 	if (log_buf[log_cnt - 1] != '\n')
250 		fprintf(env.stdout, "\n");
251 }
252 
253 static void print_subtest_name(int test_num, int subtest_num,
254 			       const char *test_name, char *subtest_name,
255 			       char *result)
256 {
257 	char test_num_str[TEST_NUM_WIDTH + 1];
258 
259 	snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
260 
261 	fprintf(env.stdout, "#%-*s %s/%s",
262 		TEST_NUM_WIDTH, test_num_str,
263 		test_name, subtest_name);
264 
265 	if (result)
266 		fprintf(env.stdout, ":%s", result);
267 
268 	fprintf(env.stdout, "\n");
269 }
270 
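/* Print the buffered log and the result line for a finished test and each of
 * its subtests. Logs are only emitted for failures, forced logging, or
 * verbose mode, and never from worker processes; for parallel runs the
 * dispatcher prints the collected output instead.
 */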
271 static void dump_test_log(const struct prog_test_def *test,
272 			  const struct test_state *test_state,
273 			  bool skip_ok_subtests,
274 			  bool par_exec_result)
275 {
276 	bool test_failed = test_state->error_cnt > 0;
277 	bool force_log = test_state->force_log;
278 	bool print_test = verbose() || force_log || test_failed;
279 	int i;
280 	struct subtest_state *subtest_state;
281 	bool subtest_failed;
282 	bool subtest_filtered;
283 	bool print_subtest;
284 
285 	/* we do not print anything in worker processes */
286 	if (env.worker_id != -1)
287 		return;
288 
289 	/* in verbose, non-parallel mode output was never redirected, so there
290 	 * is nothing buffered to print here
291 	 */
292 	if (verbose() && !par_exec_result)
293 		return;
294 
295 	if (test_state->log_cnt && print_test)
296 		print_test_log(test_state->log_buf, test_state->log_cnt);
297 
298 	for (i = 0; i < test_state->subtest_num; i++) {
299 		subtest_state = &test_state->subtest_states[i];
300 		subtest_failed = subtest_state->error_cnt;
301 		subtest_filtered = subtest_state->filtered;
302 		print_subtest = verbose() || force_log || subtest_failed;
303 
304 		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
305 			continue;
306 
307 		if (subtest_state->log_cnt && print_subtest) {
308 			print_test_log(subtest_state->log_buf,
309 				       subtest_state->log_cnt);
310 		}
311 
312 		print_subtest_name(test->test_num, i + 1,
313 				   test->test_name, subtest_state->name,
314 				   test_result(subtest_state->error_cnt,
315 					       subtest_state->skipped));
316 	}
317 
318 	print_test_result(test, test_state);
319 }
320 
321 static void stdio_restore(void);
322 
323 /* A bunch of tests set custom affinity per-thread and/or per-process. Reset
324  * it after each test/sub-test.
325  */
326 static void reset_affinity(void)
327 {
328 	cpu_set_t cpuset;
329 	int i, err;
330 
331 	CPU_ZERO(&cpuset);
332 	for (i = 0; i < env.nr_cpus; i++)
333 		CPU_SET(i, &cpuset);
334 
335 	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
336 	if (err < 0) {
337 		stdio_restore();
338 		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
339 		exit(EXIT_ERR_SETUP_INFRA);
340 	}
341 	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
342 	if (err) {
343 		stdio_restore();
344 		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
345 		exit(EXIT_ERR_SETUP_INFRA);
346 	}
347 }
348 
349 static void save_netns(void)
350 {
351 	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
352 	if (env.saved_netns_fd == -1) {
353 		perror("open(/proc/self/ns/net)");
354 		exit(EXIT_ERR_SETUP_INFRA);
355 	}
356 }
357 
358 static void restore_netns(void)
359 {
360 	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
361 		stdio_restore();
362 		perror("setns(CLONE_NEWNET)");
363 		exit(EXIT_ERR_SETUP_INFRA);
364 	}
365 }
366 
367 void test__end_subtest(void)
368 {
369 	struct prog_test_def *test = env.test;
370 	struct test_state *test_state = env.test_state;
371 	struct subtest_state *subtest_state = env.subtest_state;
372 
373 	if (subtest_state->error_cnt) {
374 		test_state->error_cnt++;
375 	} else {
376 		if (!subtest_state->skipped)
377 			test_state->sub_succ_cnt++;
378 		else
379 			test_state->skip_cnt++;
380 	}
381 
382 	if (verbose() && !env.workers)
383 		print_subtest_name(test->test_num, test_state->subtest_num,
384 				   test->test_name, subtest_state->name,
385 				   test_result(subtest_state->error_cnt,
386 					       subtest_state->skipped));
387 
388 	stdio_restore_cleanup();
389 	env.subtest_state = NULL;
390 }
391 
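/* Begin a new subtest: finalize any previous one, grow the subtest_states
 * array, apply the test/subtest selectors, and hijack stdio into the
 * subtest's own log buffer. Returns true if the subtest should actually run.
 */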
392 bool test__start_subtest(const char *subtest_name)
393 {
394 	struct prog_test_def *test = env.test;
395 	struct test_state *state = env.test_state;
396 	struct subtest_state *subtest_state;
397 	size_t sub_state_size = sizeof(*subtest_state);
398 
399 	if (env.subtest_state)
400 		test__end_subtest();
401 
402 	state->subtest_num++;
403 	state->subtest_states =
404 		realloc(state->subtest_states,
405 			state->subtest_num * sub_state_size);
406 	if (!state->subtest_states) {
407 		fprintf(stderr, "Not enough memory to allocate subtest result\n");
408 		return false;
409 	}
410 
411 	subtest_state = &state->subtest_states[state->subtest_num - 1];
412 
413 	memset(subtest_state, 0, sub_state_size);
414 
415 	if (!subtest_name || !subtest_name[0]) {
416 		fprintf(env.stderr,
417 			"Subtest #%d didn't provide sub-test name!\n",
418 			state->subtest_num);
419 		return false;
420 	}
421 
422 	subtest_state->name = strdup(subtest_name);
423 	if (!subtest_state->name) {
424 		fprintf(env.stderr,
425 			"Subtest #%d: failed to copy subtest name!\n",
426 			state->subtest_num);
427 		return false;
428 	}
429 
430 	if (!should_run_subtest(&env.test_selector,
431 				&env.subtest_selector,
432 				state->subtest_num,
433 				test->test_name,
434 				subtest_name)) {
435 		subtest_state->filtered = true;
436 		return false;
437 	}
438 
439 	env.subtest_state = subtest_state;
440 	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
441 
442 	return true;
443 }
444 
445 void test__force_log(void)
446 {
447 	env.test_state->force_log = true;
448 }
449 
450 void test__skip(void)
451 {
452 	if (env.subtest_state)
453 		env.subtest_state->skipped = true;
454 	else
455 		env.test_state->skip_cnt++;
456 }
457 
458 void test__fail(void)
459 {
460 	if (env.subtest_state)
461 		env.subtest_state->error_cnt++;
462 	else
463 		env.test_state->error_cnt++;
464 }
465 
466 int test__join_cgroup(const char *path)
467 {
468 	int fd;
469 
470 	if (!env.test->need_cgroup_cleanup) {
471 		if (setup_cgroup_environment()) {
472 			fprintf(stderr,
473 				"#%d %s: Failed to setup cgroup environment\n",
474 				env.test->test_num, env.test->test_name);
475 			return -1;
476 		}
477 
478 		env.test->need_cgroup_cleanup = true;
479 	}
480 
481 	fd = create_and_get_cgroup(path);
482 	if (fd < 0) {
483 		fprintf(stderr,
484 			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
485 			env.test->test_num, env.test->test_name, path, errno);
486 		return fd;
487 	}
488 
489 	if (join_cgroup(path)) {
490 		fprintf(stderr,
491 			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
492 			env.test->test_num, env.test->test_name, path, errno);
493 		return -1;
494 	}
495 
496 	return fd;
497 }
498 
499 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
500 {
501 	struct bpf_map *map;
502 
503 	map = bpf_object__find_map_by_name(obj, name);
504 	if (!map) {
505 		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
506 		test__fail();
507 		return -1;
508 	}
509 	return bpf_map__fd(map);
510 }
511 
512 static bool is_jit_enabled(void)
513 {
514 	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
515 	bool enabled = false;
516 	int sysctl_fd;
517 
518 	sysctl_fd = open(jit_sysctl, O_RDONLY);
519 	if (sysctl_fd != -1) {
520 		char tmpc;
521 
522 		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
523 			enabled = (tmpc != '0');
524 		close(sysctl_fd);
525 	}
526 
527 	return enabled;
528 }
529 
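/* Verify that every key present in map1_fd can also be looked up in map2_fd.
 * Returns 0 on success, a non-zero error if a lookup fails or the iteration
 * over map1_fd ends unexpectedly.
 */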
530 int compare_map_keys(int map1_fd, int map2_fd)
531 {
532 	__u32 key, next_key;
533 	char val_buf[PERF_MAX_STACK_DEPTH *
534 		     sizeof(struct bpf_stack_build_id)];
535 	int err;
536 
537 	err = bpf_map_get_next_key(map1_fd, NULL, &key);
538 	if (err)
539 		return err;
540 	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
541 	if (err)
542 		return err;
543 
544 	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
545 		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
546 		if (err)
547 			return err;
548 
549 		key = next_key;
550 	}
551 	if (errno != ENOENT)
552 		return -1;
553 
554 	return 0;
555 }
556 
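/* For every key in smap_fd, byte-compare its value against the value stored
 * under the same key in amap_fd (stack_trace_len bytes each). Returns 0 if
 * all entries match, -1 otherwise.
 */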
557 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
558 {
559 	__u32 key, next_key, *cur_key_p, *next_key_p;
560 	char *val_buf1, *val_buf2;
561 	int i, err = 0;
562 
563 	val_buf1 = malloc(stack_trace_len);
564 	val_buf2 = malloc(stack_trace_len);
565 	cur_key_p = NULL;
566 	next_key_p = &key;
567 	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
568 		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
569 		if (err)
570 			goto out;
571 		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
572 		if (err)
573 			goto out;
574 		for (i = 0; i < stack_trace_len; i++) {
575 			if (val_buf1[i] != val_buf2[i]) {
576 				err = -1;
577 				goto out;
578 			}
579 		}
580 		key = *next_key_p;
581 		cur_key_p = &key;
582 		next_key_p = &next_key;
583 	}
584 	if (errno != ENOENT)
585 		err = -1;
586 
587 out:
588 	free(val_buf1);
589 	free(val_buf2);
590 	return err;
591 }
592 
593 int extract_build_id(char *build_id, size_t size)
594 {
595 	FILE *fp;
596 	char *line = NULL;
597 	size_t len = 0;
598 
599 	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
600 	if (fp == NULL)
601 		return -1;
602 
603 	if (getline(&line, &len, fp) == -1)
604 		goto err;
605 	pclose(fp);
606 
607 	if (len > size - 1)
608 		len = size - 1;
609 	memcpy(build_id, line, len);
610 	build_id[len] = '\0';
611 	free(line);
612 	return 0;
613 err:
614 	pclose(fp);
615 	return -1;
616 }
617 
618 static int finit_module(int fd, const char *param_values, int flags)
619 {
620 	return syscall(__NR_finit_module, fd, param_values, flags);
621 }
622 
623 static int delete_module(const char *name, int flags)
624 {
625 	return syscall(__NR_delete_module, name, flags);
626 }
627 
628 /*
629  * Trigger synchronize_rcu() in kernel.
630  */
631 int kern_sync_rcu(void)
632 {
633 	return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
634 }
635 
636 static void unload_bpf_testmod(void)
637 {
638 	if (kern_sync_rcu())
639 		fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
640 	if (delete_module("bpf_testmod", 0)) {
641 		if (errno == ENOENT) {
642 			if (verbose())
643 				fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
644 			return;
645 		}
646 		fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
647 		return;
648 	}
649 	if (verbose())
650 		fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
651 }
652 
653 static int load_bpf_testmod(void)
654 {
655 	int fd;
656 
657 	/* ensure previous instance of the module is unloaded */
658 	unload_bpf_testmod();
659 
660 	if (verbose())
661 		fprintf(stdout, "Loading bpf_testmod.ko...\n");
662 
663 	fd = open("bpf_testmod.ko", O_RDONLY);
664 	if (fd < 0) {
665 		fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
666 		return -ENOENT;
667 	}
668 	if (finit_module(fd, "", 0)) {
669 		fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
670 		close(fd);
671 		return -EINVAL;
672 	}
673 	close(fd);
674 
675 	if (verbose())
676 		fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
677 	return 0;
678 }
679 
680 /* extern declarations for test funcs */
681 #define DEFINE_TEST(name)				\
682 	extern void test_##name(void) __weak;		\
683 	extern void serial_test_##name(void) __weak;
684 #include <prog_tests/tests.h>
685 #undef DEFINE_TEST
686 
687 static struct prog_test_def prog_test_defs[] = {
688 #define DEFINE_TEST(name) {			\
689 	.test_name = #name,			\
690 	.run_test = &test_##name,		\
691 	.run_serial_test = &serial_test_##name,	\
692 },
693 #include <prog_tests/tests.h>
694 #undef DEFINE_TEST
695 };
696 
697 static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
698 
699 static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
700 
701 const char *argp_program_version = "test_progs 0.1";
702 const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
703 static const char argp_program_doc[] = "BPF selftests test runner";
704 
705 enum ARG_KEYS {
706 	ARG_TEST_NUM = 'n',
707 	ARG_TEST_NAME = 't',
708 	ARG_TEST_NAME_BLACKLIST = 'b',
709 	ARG_VERIFIER_STATS = 's',
710 	ARG_VERBOSE = 'v',
711 	ARG_GET_TEST_CNT = 'c',
712 	ARG_LIST_TEST_NAMES = 'l',
713 	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
714 	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
715 	ARG_NUM_WORKERS = 'j',
716 	ARG_DEBUG = -1,
717 };
718 
719 static const struct argp_option opts[] = {
720 	{ "num", ARG_TEST_NUM, "NUM", 0,
721 	  "Run test number NUM only" },
722 	{ "name", ARG_TEST_NAME, "NAMES", 0,
723 	  "Run tests with names containing any string from NAMES list" },
724 	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
725 	  "Don't run tests with names containing any string from NAMES list" },
726 	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
727 	  "Output verifier statistics", },
728 	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
729 	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
730 	{ "count", ARG_GET_TEST_CNT, NULL, 0,
731 	  "Get number of selected top-level tests" },
732 	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
733 	  "List test names that would run (without running them)" },
734 	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
735 	  "Run tests with name matching the pattern (supports '*' wildcard)." },
736 	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
737 	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
738 	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
739 	  "Number of workers to run in parallel, defaults to the number of CPUs." },
740 	{ "debug", ARG_DEBUG, NULL, 0,
741 	  "Print extra debug information for test_progs." },
742 	{},
743 };
744 
745 static int libbpf_print_fn(enum libbpf_print_level level,
746 			   const char *format, va_list args)
747 {
748 	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
749 		return 0;
750 	vfprintf(stdout, format, args);
751 	return 0;
752 }
753 
754 static void free_test_filter_set(const struct test_filter_set *set)
755 {
756 	int i, j;
757 
758 	if (!set)
759 		return;
760 
761 	for (i = 0; i < set->cnt; i++) {
762 		free((void *)set->tests[i].name);
763 		for (j = 0; j < set->tests[i].subtest_cnt; j++)
764 			free((void *)set->tests[i].subtests[j]);
765 
766 		free((void *)set->tests[i].subtests);
767 	}
768 
769 	free((void *)set->tests);
770 }
771 
772 static void free_test_selector(struct test_selector *test_selector)
773 {
774 	free_test_filter_set(&test_selector->blacklist);
775 	free_test_filter_set(&test_selector->whitelist);
776 	free(test_selector->num_set);
777 }
778 
779 extern int extra_prog_load_log_flags;
780 
781 static error_t parse_arg(int key, char *arg, struct argp_state *state)
782 {
783 	struct test_env *env = state->input;
784 
785 	switch (key) {
786 	case ARG_TEST_NUM: {
787 		char *subtest_str = strchr(arg, '/');
788 
789 		if (subtest_str) {
790 			*subtest_str = '\0';
791 			if (parse_num_list(subtest_str + 1,
792 					   &env->subtest_selector.num_set,
793 					   &env->subtest_selector.num_set_len)) {
794 				fprintf(stderr,
795 					"Failed to parse subtest numbers.\n");
796 				return -EINVAL;
797 			}
798 		}
799 		if (parse_num_list(arg, &env->test_selector.num_set,
800 				   &env->test_selector.num_set_len)) {
801 			fprintf(stderr, "Failed to parse test numbers.\n");
802 			return -EINVAL;
803 		}
804 		break;
805 	}
806 	case ARG_TEST_NAME_GLOB_ALLOWLIST:
807 	case ARG_TEST_NAME: {
808 		if (parse_test_list(arg,
809 				    &env->test_selector.whitelist,
810 				    key == ARG_TEST_NAME_GLOB_ALLOWLIST))
811 			return -ENOMEM;
812 		break;
813 	}
814 	case ARG_TEST_NAME_GLOB_DENYLIST:
815 	case ARG_TEST_NAME_BLACKLIST: {
816 		if (parse_test_list(arg,
817 				    &env->test_selector.blacklist,
818 				    key == ARG_TEST_NAME_GLOB_DENYLIST))
819 			return -ENOMEM;
820 		break;
821 	}
822 	case ARG_VERIFIER_STATS:
823 		env->verifier_stats = true;
824 		break;
825 	case ARG_VERBOSE:
826 		env->verbosity = VERBOSE_NORMAL;
827 		if (arg) {
828 			if (strcmp(arg, "v") == 0) {
829 				env->verbosity = VERBOSE_VERY;
830 				extra_prog_load_log_flags = 1;
831 			} else if (strcmp(arg, "vv") == 0) {
832 				env->verbosity = VERBOSE_SUPER;
833 				extra_prog_load_log_flags = 2;
834 			} else {
835 				fprintf(stderr,
836 					"Unrecognized verbosity setting ('%s'), only -v, -vv and -vvv are supported\n",
837 					arg);
838 				return -EINVAL;
839 			}
840 		}
841 
842 		if (verbose()) {
843 			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
844 				fprintf(stderr,
845 					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
846 					errno);
847 				return -EINVAL;
848 			}
849 		}
850 
851 		break;
852 	case ARG_GET_TEST_CNT:
853 		env->get_test_cnt = true;
854 		break;
855 	case ARG_LIST_TEST_NAMES:
856 		env->list_test_names = true;
857 		break;
858 	case ARG_NUM_WORKERS:
859 		if (arg) {
860 			env->workers = atoi(arg);
861 			if (!env->workers) {
862 				fprintf(stderr, "Invalid number of workers: %s.\n", arg);
863 				return -EINVAL;
864 			}
865 		} else {
866 			env->workers = get_nprocs();
867 		}
868 		break;
869 	case ARG_DEBUG:
870 		env->debug = true;
871 		break;
872 	case ARGP_KEY_ARG:
873 		argp_usage(state);
874 		break;
875 	case ARGP_KEY_END:
876 		break;
877 	default:
878 		return ARGP_ERR_UNKNOWN;
879 	}
880 	return 0;
881 }
882 
883 /*
884  * Determine if test_progs is running as a "flavored" test runner and switch
885  * into corresponding sub-directory to load correct BPF objects.
886  *
887  * This is done by looking at executable name. If it contains "-flavor"
888  * suffix, then we are running as a flavored test runner.
889  */
890 int cd_flavor_subdir(const char *exec_name)
891 {
892 	/* General form of argv[0] passed here is:
893 	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
894 	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
895 	 * part, if it's there.
896 	 */
897 	const char *flavor = strrchr(exec_name, '/');
898 
899 	if (!flavor)
900 		flavor = exec_name;
901 	else
902 		flavor++;
903 
904 	flavor = strrchr(flavor, '-');
905 	if (!flavor)
906 		return 0;
907 	flavor++;
908 	if (verbose())
909 		fprintf(stdout,	"Switching to flavor '%s' subdirectory...\n", flavor);
910 
911 	return chdir(flavor);
912 }
913 
914 int trigger_module_test_read(int read_sz)
915 {
916 	int fd, err;
917 
918 	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
919 	err = -errno;
920 	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
921 		return err;
922 
923 	read(fd, NULL, read_sz);
924 	close(fd);
925 
926 	return 0;
927 }
928 
929 int trigger_module_test_write(int write_sz)
930 {
931 	int fd, err;
932 	char *buf = malloc(write_sz);
933 
934 	if (!buf)
935 		return -ENOMEM;
936 
937 	memset(buf, 'a', write_sz);
938 	buf[write_sz-1] = '\0';
939 
940 	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
941 	err = -errno;
942 	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
943 		free(buf);
944 		return err;
945 	}
946 
947 	write(fd, buf, write_sz);
948 	close(fd);
949 	free(buf);
950 	return 0;
951 }
952 
953 int write_sysctl(const char *sysctl, const char *value)
954 {
955 	int fd, err, len;
956 
957 	fd = open(sysctl, O_WRONLY);
958 	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
959 		return -1;
960 
961 	len = strlen(value);
962 	err = write(fd, value, len);
963 	close(fd);
964 	if (!ASSERT_EQ(err, len, "write sysctl"))
965 		return -1;
966 
967 	return 0;
968 }
969 
970 #define MAX_BACKTRACE_SZ 128
971 void crash_handler(int signum)
972 {
973 	void *bt[MAX_BACKTRACE_SZ];
974 	size_t sz;
975 
976 	sz = backtrace(bt, ARRAY_SIZE(bt));
977 
978 	if (env.test) {
979 		env.test_state->error_cnt++;
980 		dump_test_log(env.test, env.test_state, true, false);
981 	}
982 	if (env.stdout)
983 		stdio_restore();
984 	if (env.worker_id != -1)
985 		fprintf(stderr, "[%d]: ", env.worker_id);
986 	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
987 	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
988 }
989 
990 static void sigint_handler(int signum)
991 {
992 	int i;
993 
994 	for (i = 0; i < env.workers; i++)
995 		if (env.worker_socks[i] > 0)
996 			close(env.worker_socks[i]);
997 }
998 
999 static int current_test_idx;
1000 static pthread_mutex_t current_test_lock;
1001 static pthread_mutex_t stdout_output_lock;
1002 
1003 static inline const char *str_msg(const struct msg *msg, char *buf)
1004 {
1005 	switch (msg->type) {
1006 	case MSG_DO_TEST:
1007 		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
1008 		break;
1009 	case MSG_TEST_DONE:
1010 		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
1011 			msg->test_done.num,
1012 			msg->test_done.have_log);
1013 		break;
1014 	case MSG_SUBTEST_DONE:
1015 		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
1016 			msg->subtest_done.num,
1017 			msg->subtest_done.have_log);
1018 		break;
1019 	case MSG_TEST_LOG:
1020 		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
1021 			strlen(msg->test_log.log_buf),
1022 			msg->test_log.is_last);
1023 		break;
1024 	case MSG_EXIT:
1025 		sprintf(buf, "MSG_EXIT");
1026 		break;
1027 	default:
1028 		sprintf(buf, "UNKNOWN");
1029 		break;
1030 	}
1031 
1032 	return buf;
1033 }
1034 
1035 static int send_message(int sock, const struct msg *msg)
1036 {
1037 	char buf[256];
1038 
1039 	if (env.debug)
1040 		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
1041 	return send(sock, msg, sizeof(*msg), 0);
1042 }
1043 
1044 static int recv_message(int sock, struct msg *msg)
1045 {
1046 	int ret;
1047 	char buf[256];
1048 
1049 	memset(msg, 0, sizeof(*msg));
1050 	ret = recv(sock, msg, sizeof(*msg), 0);
1051 	if (ret >= 0) {
1052 		if (env.debug)
1053 			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
1054 	}
1055 	return ret;
1056 }
1057 
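/* Run a single top-level test with stdio hijacked into its log buffer,
 * finalize any dangling subtest, then reset CPU affinity, restore the saved
 * network namespace and clean up the cgroup environment (if used) before
 * restoring stdio and dumping the captured log.
 */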
1058 static void run_one_test(int test_num)
1059 {
1060 	struct prog_test_def *test = &prog_test_defs[test_num];
1061 	struct test_state *state = &test_states[test_num];
1062 
1063 	env.test = test;
1064 	env.test_state = state;
1065 
1066 	stdio_hijack(&state->log_buf, &state->log_cnt);
1067 
1068 	if (test->run_test)
1069 		test->run_test();
1070 	else if (test->run_serial_test)
1071 		test->run_serial_test();
1072 
1073 	/* ensure last sub-test is finalized properly */
1074 	if (env.subtest_state)
1075 		test__end_subtest();
1076 
1077 	state->tested = true;
1078 
1079 	if (verbose() && env.worker_id == -1)
1080 		print_test_result(test, state);
1081 
1082 	reset_affinity();
1083 	restore_netns();
1084 	if (test->need_cgroup_cleanup)
1085 		cleanup_cgroup_environment();
1086 
1087 	stdio_restore();
1088 
1089 	dump_test_log(test, state, false, false);
1090 }
1091 
1092 struct dispatch_data {
1093 	int worker_id;
1094 	int sock_fd;
1095 };
1096 
1097 static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
1098 {
1099 	if (recv_message(sock_fd, msg) < 0)
1100 		return 1;
1101 
1102 	if (msg->type != type) {
1103 		printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
1104 		return 1;
1105 	}
1106 
1107 	return 0;
1108 }
1109 
1110 static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
1111 {
1112 	FILE *log_fp = NULL;
1113 	int result = 0;
1114 
1115 	log_fp = open_memstream(log_buf, log_cnt);
1116 	if (!log_fp)
1117 		return 1;
1118 
1119 	while (true) {
1120 		struct msg msg;
1121 
1122 		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
1123 			result = 1;
1124 			goto out;
1125 		}
1126 
1127 		fprintf(log_fp, "%s", msg.test_log.log_buf);
1128 		if (msg.test_log.is_last)
1129 			break;
1130 	}
1131 
1132 out:
1133 	fclose(log_fp);
1134 	log_fp = NULL;
1135 	return result;
1136 }
1137 
1138 static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
1139 {
1140 	struct msg msg;
1141 	struct subtest_state *subtest_state;
1142 	int subtest_num = state->subtest_num;
1143 
1144 	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
1145 
1146 	for (int i = 0; i < subtest_num; i++) {
1147 		subtest_state = &state->subtest_states[i];
1148 
1149 		memset(subtest_state, 0, sizeof(*subtest_state));
1150 
1151 		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
1152 			return 1;
1153 
1154 		subtest_state->name = strdup(msg.subtest_done.name);
1155 		subtest_state->error_cnt = msg.subtest_done.error_cnt;
1156 		subtest_state->skipped = msg.subtest_done.skipped;
1157 		subtest_state->filtered = msg.subtest_done.filtered;
1158 
1159 		/* collect all logs */
1160 		if (msg.subtest_done.have_log)
1161 			if (dispatch_thread_read_log(sock_fd,
1162 						     &subtest_state->log_buf,
1163 						     &subtest_state->log_cnt))
1164 				return 1;
1165 	}
1166 
1167 	return 0;
1168 }
1169 
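/* One dispatcher thread is created per worker process: it grabs the next
 * parallel (non-serial) test under current_test_lock, sends MSG_DO_TEST to
 * its worker, then collects MSG_TEST_DONE, any MSG_TEST_LOG chunks and
 * per-subtest results before printing them under stdout_output_lock.
 * MSG_EXIT is sent to the worker once the test list is exhausted or a
 * protocol/IO error occurs.
 */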
1170 static void *dispatch_thread(void *ctx)
1171 {
1172 	struct dispatch_data *data = ctx;
1173 	int sock_fd;
1174 
1175 	sock_fd = data->sock_fd;
1176 
1177 	while (true) {
1178 		int test_to_run = -1;
1179 		struct prog_test_def *test;
1180 		struct test_state *state;
1181 
1182 		/* grab a test */
1183 		{
1184 			pthread_mutex_lock(&current_test_lock);
1185 
1186 			if (current_test_idx >= prog_test_cnt) {
1187 				pthread_mutex_unlock(&current_test_lock);
1188 				goto done;
1189 			}
1190 
1191 			test = &prog_test_defs[current_test_idx];
1192 			test_to_run = current_test_idx;
1193 			current_test_idx++;
1194 
1195 			pthread_mutex_unlock(&current_test_lock);
1196 		}
1197 
1198 		if (!test->should_run || test->run_serial_test)
1199 			continue;
1200 
1201 		/* run test through worker */
1202 		{
1203 			struct msg msg_do_test;
1204 
1205 			memset(&msg_do_test, 0, sizeof(msg_do_test));
1206 			msg_do_test.type = MSG_DO_TEST;
1207 			msg_do_test.do_test.num = test_to_run;
1208 			if (send_message(sock_fd, &msg_do_test) < 0) {
1209 				perror("Failed to send command");
1210 				goto done;
1211 			}
1212 			env.worker_current_test[data->worker_id] = test_to_run;
1213 		}
1214 
1215 		/* wait for test done */
1216 		do {
1217 			struct msg msg;
1218 
1219 			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
1220 				goto error;
1221 			if (test_to_run != msg.test_done.num)
1222 				goto error;
1223 
1224 			state = &test_states[test_to_run];
1225 			state->tested = true;
1226 			state->error_cnt = msg.test_done.error_cnt;
1227 			state->skip_cnt = msg.test_done.skip_cnt;
1228 			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
1229 			state->subtest_num = msg.test_done.subtest_num;
1230 
1231 			/* collect all logs */
1232 			if (msg.test_done.have_log) {
1233 				if (dispatch_thread_read_log(sock_fd,
1234 							     &state->log_buf,
1235 							     &state->log_cnt))
1236 					goto error;
1237 			}
1238 
1239 			/* collect all subtests and subtest logs */
1240 			if (!state->subtest_num)
1241 				break;
1242 
1243 			if (dispatch_thread_send_subtests(sock_fd, state))
1244 				goto error;
1245 		} while (false);
1246 
1247 		pthread_mutex_lock(&stdout_output_lock);
1248 		dump_test_log(test, state, false, true);
1249 		pthread_mutex_unlock(&stdout_output_lock);
1250 	} /* while (true) */
1251 error:
1252 	if (env.debug)
1253 		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
1254 
1255 done:
1256 	{
1257 		struct msg msg_exit;
1258 
1259 		msg_exit.type = MSG_EXIT;
1260 		if (send_message(sock_fd, &msg_exit) < 0) {
1261 			if (env.debug)
1262 				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
1263 					data->worker_id, strerror(errno));
1264 		}
1265 	}
1266 	return NULL;
1267 }
1268 
1269 static void calculate_summary_and_print_errors(struct test_env *env)
1270 {
1271 	int i;
1272 	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
1273 
1274 	for (i = 0; i < prog_test_cnt; i++) {
1275 		struct test_state *state = &test_states[i];
1276 
1277 		if (!state->tested)
1278 			continue;
1279 
1280 		sub_succ_cnt += state->sub_succ_cnt;
1281 		skip_cnt += state->skip_cnt;
1282 
1283 		if (state->error_cnt)
1284 			fail_cnt++;
1285 		else
1286 			succ_cnt++;
1287 	}
1288 
1289 	/*
1290 	 * We only print the error log summary when there are failed tests and
1291 	 * verbose mode is not enabled. Otherwise, results may be inconsistent.
1292 	 *
1293 	 */
1294 	if (!verbose() && fail_cnt) {
1295 		printf("\nAll error logs:\n");
1296 
1297 		/* print error logs again */
1298 		for (i = 0; i < prog_test_cnt; i++) {
1299 			struct prog_test_def *test = &prog_test_defs[i];
1300 			struct test_state *state = &test_states[i];
1301 
1302 			if (!state->tested || !state->error_cnt)
1303 				continue;
1304 
1305 			dump_test_log(test, state, true, true);
1306 		}
1307 	}
1308 
1309 	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
1310 	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
1311 
1312 	env->succ_cnt = succ_cnt;
1313 	env->sub_succ_cnt = sub_succ_cnt;
1314 	env->fail_cnt = fail_cnt;
1315 	env->skip_cnt = skip_cnt;
1316 }
1317 
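/* Main-process side of parallel mode: spawn one dispatcher thread per
 * worker, wait for them to drain the parallel tests, run the serial tests
 * locally, print the summary and finally reap the worker processes.
 */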
1318 static void server_main(void)
1319 {
1320 	pthread_t *dispatcher_threads;
1321 	struct dispatch_data *data;
1322 	struct sigaction sigact_int = {
1323 		.sa_handler = sigint_handler,
1324 		.sa_flags = SA_RESETHAND,
1325 	};
1326 	int i;
1327 
1328 	sigaction(SIGINT, &sigact_int, NULL);
1329 
1330 	dispatcher_threads = calloc(env.workers, sizeof(pthread_t));
1331 	data = calloc(env.workers, sizeof(struct dispatch_data));
1332 
1333 	env.worker_current_test = calloc(env.workers, sizeof(int));
1334 	for (i = 0; i < env.workers; i++) {
1335 		int rc;
1336 
1337 		data[i].worker_id = i;
1338 		data[i].sock_fd = env.worker_socks[i];
1339 		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
1340 		if (rc) {
1341 			perror("Failed to launch dispatcher thread");
1342 			exit(EXIT_ERR_SETUP_INFRA);
1343 		}
1344 	}
1345 
1346 	/* wait for all dispatchers to finish */
1347 	for (i = 0; i < env.workers; i++) {
1348 		while (true) {
1349 			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);
1350 
1351 			if (!ret) {
1352 				break;
1353 			} else if (ret == EBUSY) {
1354 				if (env.debug)
1355 					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
1356 						i,  env.worker_current_test[i] + 1);
1357 				usleep(1000 * 1000);
1358 				continue;
1359 			} else {
1360 				fprintf(stderr, "Unexpected error joining dispatcher thread: %d\n", ret);
1361 				break;
1362 			}
1363 		}
1364 	}
1365 	free(dispatcher_threads);
1366 	free(env.worker_current_test);
1367 	free(data);
1368 
1369 	/* run serial tests */
1370 	save_netns();
1371 
1372 	for (int i = 0; i < prog_test_cnt; i++) {
1373 		struct prog_test_def *test = &prog_test_defs[i];
1374 
1375 		if (!test->should_run || !test->run_serial_test)
1376 			continue;
1377 
1378 		run_one_test(i);
1379 	}
1380 
1381 	/* generate summary */
1382 	fflush(stderr);
1383 	fflush(stdout);
1384 
1385 	calculate_summary_and_print_errors(&env);
1386 
1387 	/* reap all workers */
1388 	for (i = 0; i < env.workers; i++) {
1389 		int wstatus, pid;
1390 
1391 		pid = waitpid(env.worker_pids[i], &wstatus, 0);
1392 		if (pid != env.worker_pids[i])
1393 			perror("Unable to reap worker");
1394 	}
1395 }
1396 
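/* Stream a captured log back to the dispatcher in MAX_LOG_TRUNK_SIZE-sized
 * MSG_TEST_LOG chunks, marking the final chunk with is_last.
 */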
1397 static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
1398 {
1399 	char *src;
1400 	size_t slen;
1401 
1402 	src = log_buf;
1403 	slen = log_cnt;
1404 	while (slen) {
1405 		struct msg msg_log;
1406 		char *dest;
1407 		size_t len;
1408 
1409 		memset(&msg_log, 0, sizeof(msg_log));
1410 		msg_log.type = MSG_TEST_LOG;
1411 		dest = msg_log.test_log.log_buf;
1412 		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
1413 		memcpy(dest, src, len);
1414 
1415 		src += len;
1416 		slen -= len;
1417 		if (!slen)
1418 			msg_log.test_log.is_last = true;
1419 
1420 		assert(send_message(sock, &msg_log) >= 0);
1421 	}
1422 }
1423 
1424 static void free_subtest_state(struct subtest_state *state)
1425 {
1426 	if (state->log_buf) {
1427 		free(state->log_buf);
1428 		state->log_buf = NULL;
1429 		state->log_cnt = 0;
1430 	}
1431 	free(state->name);
1432 	state->name = NULL;
1433 }
1434 
1435 static int worker_main_send_subtests(int sock, struct test_state *state)
1436 {
1437 	int i, result = 0;
1438 	struct msg msg;
1439 	struct subtest_state *subtest_state;
1440 
1441 	memset(&msg, 0, sizeof(msg));
1442 	msg.type = MSG_SUBTEST_DONE;
1443 
1444 	for (i = 0; i < state->subtest_num; i++) {
1445 		subtest_state = &state->subtest_states[i];
1446 
1447 		msg.subtest_done.num = i;
1448 
1449 		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
1450 
1451 		msg.subtest_done.error_cnt = subtest_state->error_cnt;
1452 		msg.subtest_done.skipped = subtest_state->skipped;
1453 		msg.subtest_done.filtered = subtest_state->filtered;
1454 		msg.subtest_done.have_log = false;
1455 
1456 		if (verbose() || state->force_log || subtest_state->error_cnt) {
1457 			if (subtest_state->log_cnt)
1458 				msg.subtest_done.have_log = true;
1459 		}
1460 
1461 		if (send_message(sock, &msg) < 0) {
1462 			perror("Failed to send subtest done message");
1463 			result = 1;
1464 			goto out;
1465 		}
1466 
1467 		/* send logs */
1468 		if (msg.subtest_done.have_log)
1469 			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);
1470 
1471 		free_subtest_state(subtest_state);
1472 		free(subtest_state->name);
1473 	}
1474 
1475 out:
1476 	for (; i < state->subtest_num; i++)
1477 		free_subtest_state(&state->subtest_states[i]);
1478 	free(state->subtest_states);
1479 	return result;
1480 }
1481 
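/* Worker-process loop: wait for MSG_DO_TEST commands, run the requested
 * test, and report the outcome back as MSG_TEST_DONE followed by optional
 * log chunks and per-subtest results. A MSG_EXIT message ends the loop.
 */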
1482 static int worker_main(int sock)
1483 {
1484 	save_netns();
1485 
1486 	while (true) {
1487 		/* receive command */
1488 		struct msg msg;
1489 
1490 		if (recv_message(sock, &msg) < 0)
1491 			goto out;
1492 
1493 		switch (msg.type) {
1494 		case MSG_EXIT:
1495 			if (env.debug)
1496 				fprintf(stderr, "[%d]: worker exit.\n",
1497 					env.worker_id);
1498 			goto out;
1499 		case MSG_DO_TEST: {
1500 			int test_to_run = msg.do_test.num;
1501 			struct prog_test_def *test = &prog_test_defs[test_to_run];
1502 			struct test_state *state = &test_states[test_to_run];
1503 			struct msg msg;
1504 
1505 			if (env.debug)
1506 				fprintf(stderr, "[%d]: #%d:%s running.\n",
1507 					env.worker_id,
1508 					test_to_run + 1,
1509 					test->test_name);
1510 
1511 			run_one_test(test_to_run);
1512 
1513 			memset(&msg, 0, sizeof(msg));
1514 			msg.type = MSG_TEST_DONE;
1515 			msg.test_done.num = test_to_run;
1516 			msg.test_done.error_cnt = state->error_cnt;
1517 			msg.test_done.skip_cnt = state->skip_cnt;
1518 			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
1519 			msg.test_done.subtest_num = state->subtest_num;
1520 			msg.test_done.have_log = false;
1521 
1522 			if (verbose() || state->force_log || state->error_cnt) {
1523 				if (state->log_cnt)
1524 					msg.test_done.have_log = true;
1525 			}
1526 			if (send_message(sock, &msg) < 0) {
1527 				perror("Failed to send test done message");
1528 				goto out;
1529 			}
1530 
1531 			/* send logs */
1532 			if (msg.test_done.have_log)
1533 				worker_main_send_log(sock, state->log_buf, state->log_cnt);
1534 
1535 			if (state->log_buf) {
1536 				free(state->log_buf);
1537 				state->log_buf = NULL;
1538 				state->log_cnt = 0;
1539 			}
1540 
1541 			if (state->subtest_num)
1542 				if (worker_main_send_subtests(sock, state))
1543 					goto out;
1544 
1545 			if (env.debug)
1546 				fprintf(stderr, "[%d]: #%d:%s done.\n",
1547 					env.worker_id,
1548 					test_to_run + 1,
1549 					test->test_name);
1550 			break;
1551 		} /* case MSG_DO_TEST */
1552 		default:
1553 			if (env.debug)
1554 				fprintf(stderr, "[%d]: unknown message.\n",  env.worker_id);
1555 			return -1;
1556 		}
1557 	}
1558 out:
1559 	return 0;
1560 }
1561 
1562 static void free_test_states(void)
1563 {
1564 	int i, j;
1565 
1566 	for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
1567 		struct test_state *test_state = &test_states[i];
1568 
1569 		for (j = 0; j < test_state->subtest_num; j++)
1570 			free_subtest_state(&test_state->subtest_states[j]);
1571 
1572 		free(test_state->subtest_states);
1573 		free(test_state->log_buf);
1574 		test_state->subtest_states = NULL;
1575 		test_state->log_buf = NULL;
1576 	}
1577 }
1578 
1579 int main(int argc, char **argv)
1580 {
1581 	static const struct argp argp = {
1582 		.options = opts,
1583 		.parser = parse_arg,
1584 		.doc = argp_program_doc,
1585 	};
1586 	struct sigaction sigact = {
1587 		.sa_handler = crash_handler,
1588 		.sa_flags = SA_RESETHAND,
1589 		};
1590 	int err, i;
1591 
1592 	sigaction(SIGSEGV, &sigact, NULL);
1593 
1594 	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
1595 	if (err)
1596 		return err;
1597 
1598 	err = cd_flavor_subdir(argv[0]);
1599 	if (err)
1600 		return err;
1601 
1602 	/* Use libbpf 1.0 API mode */
1603 	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
1604 	libbpf_set_print(libbpf_print_fn);
1605 
1606 	srand(time(NULL));
1607 
1608 	env.jit_enabled = is_jit_enabled();
1609 	env.nr_cpus = libbpf_num_possible_cpus();
1610 	if (env.nr_cpus < 0) {
1611 		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
1612 			env.nr_cpus);
1613 		return -1;
1614 	}
1615 
1616 	env.stdout = stdout;
1617 	env.stderr = stderr;
1618 
1619 	env.has_testmod = true;
1620 	if (!env.list_test_names && load_bpf_testmod()) {
1621 		fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
1622 		env.has_testmod = false;
1623 	}
1624 
1625 	/* initializing tests */
1626 	for (i = 0; i < prog_test_cnt; i++) {
1627 		struct prog_test_def *test = &prog_test_defs[i];
1628 
1629 		test->test_num = i + 1;
1630 		test->should_run = should_run(&env.test_selector,
1631 					      test->test_num, test->test_name);
1632 
1633 		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
1634 		    (test->run_test != NULL && test->run_serial_test != NULL)) {
1635 			fprintf(stderr, "Test %d:%s must define exactly one of test_%s() or serial_test_%s().\n",
1636 				test->test_num, test->test_name, test->test_name, test->test_name);
1637 			exit(EXIT_ERR_SETUP_INFRA);
1638 		}
1639 	}
1640 
1641 	/* ignore workers if we are just listing */
1642 	if (env.get_test_cnt || env.list_test_names)
1643 		env.workers = 0;
1644 
1645 	/* launch workers if requested */
1646 	env.worker_id = -1; /* main process */
1647 	if (env.workers) {
1648 		env.worker_pids = calloc(env.workers, sizeof(__pid_t));
1649 		env.worker_socks = calloc(env.workers, sizeof(int));
1650 		if (env.debug)
1651 			fprintf(stdout, "Launching %d workers.\n", env.workers);
1652 		for (i = 0; i < env.workers; i++) {
1653 			int sv[2];
1654 			pid_t pid;
1655 
1656 			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
1657 				perror("Failed to create worker socket");
1658 				return -1;
1659 			}
1660 			pid = fork();
1661 			if (pid < 0) {
1662 				perror("Failed to fork worker");
1663 				return -1;
1664 			} else if (pid != 0) { /* main process */
1665 				close(sv[1]);
1666 				env.worker_pids[i] = pid;
1667 				env.worker_socks[i] = sv[0];
1668 			} else { /* inside each worker process */
1669 				close(sv[0]);
1670 				env.worker_id = i;
1671 				return worker_main(sv[1]);
1672 			}
1673 		}
1674 
1675 		if (env.worker_id == -1) {
1676 			server_main();
1677 			goto out;
1678 		}
1679 	}
1680 
1681 	/* The rest of the main process */
1682 
1683 	/* on single mode */
1684 	save_netns();
1685 
1686 	for (i = 0; i < prog_test_cnt; i++) {
1687 		struct prog_test_def *test = &prog_test_defs[i];
1688 
1689 		if (!test->should_run)
1690 			continue;
1691 
1692 		if (env.get_test_cnt) {
1693 			env.succ_cnt++;
1694 			continue;
1695 		}
1696 
1697 		if (env.list_test_names) {
1698 			fprintf(env.stdout, "%s\n", test->test_name);
1699 			env.succ_cnt++;
1700 			continue;
1701 		}
1702 
1703 		run_one_test(i);
1704 	}
1705 
1706 	if (env.get_test_cnt) {
1707 		printf("%d\n", env.succ_cnt);
1708 		goto out;
1709 	}
1710 
1711 	if (env.list_test_names)
1712 		goto out;
1713 
1714 	calculate_summary_and_print_errors(&env);
1715 
1716 	close(env.saved_netns_fd);
1717 out:
1718 	if (!env.list_test_names && env.has_testmod)
1719 		unload_bpf_testmod();
1720 
1721 	free_test_selector(&env.test_selector);
1722 	free_test_selector(&env.subtest_selector);
1723 	free_test_states();
1724 
1725 	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
1726 		return EXIT_NO_TEST;
1727 
1728 	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
1729 }
1730