xref: /linux/tools/perf/tests/builtin-test.c (revision 1672f3707a6ef4b386c30bb76df2f62e58a39430)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-test.c
4  *
5  * Builtin regression testing command: ever growing number of sanity tests
6  */
7 #include <ctype.h>
8 #include <fcntl.h>
9 #include <errno.h>
10 #ifdef HAVE_BACKTRACE_SUPPORT
11 #include <execinfo.h>
12 #endif
13 #include <poll.h>
14 #include <unistd.h>
15 #include <setjmp.h>
16 #include <string.h>
17 #include <stdlib.h>
18 #include <sys/types.h>
19 #include <dirent.h>
20 #include <sys/wait.h>
21 #include <sys/stat.h>
22 #include "builtin.h"
23 #include "config.h"
24 #include "hist.h"
25 #include "intlist.h"
26 #include "tests.h"
27 #include "debug.h"
28 #include "color.h"
29 #include <subcmd/parse-options.h>
30 #include <subcmd/run-command.h>
31 #include "string2.h"
32 #include "symbol.h"
33 #include "util/rlimit.h"
34 #include "util/strbuf.h"
35 #include <linux/kernel.h>
36 #include <linux/string.h>
37 #include <subcmd/exec-cmd.h>
38 #include <linux/zalloc.h>
39 
40 #include "tests-scripts.h"
41 
/*
 * Command line option to not fork the test running in the same process and
 * making them easier to debug.
 */
static bool dont_fork;
/* Fork the tests in parallel and wait for their completion. */
static bool sequential;
/* Number of times each test is run. */
static unsigned int runs_per_test = 1;
/* DSO used by tests that read symbols/data from a binary; set via --dso. */
const char *dso_to_test;
/* objdump used for disassembly tests; overridable via --objdump or config. */
const char *test_objdump_path = "objdump";
53 
/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
/* NULL-terminated array provided by the architecture's test code. */
extern struct test_suite *arch_tests[];
#else
/* No architecture specific tests: empty, NULL-terminated list. */
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif
66 
/*
 * Generic tests common to all architectures.  NULL terminated; build
 * configuration (#ifdef) decides which optional suites are compiled in.
 */
static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__demangle_rust,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	&suite__subcmd_help,
	&suite__kallsyms_split,
	NULL,
};
146 
/* Built-in workloads, runnable via "perf test -w <name>" for use by tests. */
static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
	&workload__traploop,
	&workload__inlineloop,
};
158 
/* Iterate the built-in workloads, setting 'workload' to each in turn. */
#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

/* Iterate a suite's test cases; leaves idx == number of cases when done. */
#define test_suite__for_each_test_case(suite, idx)			\
	for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)
164 
165 static void close_parent_fds(void)
166 {
167 	DIR *dir = opendir("/proc/self/fd");
168 	struct dirent *ent;
169 
170 	while ((ent = readdir(dir))) {
171 		char *end;
172 		long fd;
173 
174 		if (ent->d_type != DT_LNK)
175 			continue;
176 
177 		if (!isdigit(ent->d_name[0]))
178 			continue;
179 
180 		fd = strtol(ent->d_name, &end, 10);
181 		if (*end)
182 			continue;
183 
184 		if (fd <= 3 || fd == dirfd(dir))
185 			continue;
186 
187 		close(fd);
188 	}
189 	closedir(dir);
190 }
191 
192 static void check_leaks(void)
193 {
194 	DIR *dir = opendir("/proc/self/fd");
195 	struct dirent *ent;
196 	int leaks = 0;
197 
198 	while ((ent = readdir(dir))) {
199 		char path[PATH_MAX];
200 		char *end;
201 		long fd;
202 		ssize_t len;
203 
204 		if (ent->d_type != DT_LNK)
205 			continue;
206 
207 		if (!isdigit(ent->d_name[0]))
208 			continue;
209 
210 		fd = strtol(ent->d_name, &end, 10);
211 		if (*end)
212 			continue;
213 
214 		if (fd <= 3 || fd == dirfd(dir))
215 			continue;
216 
217 		leaks++;
218 		len = readlinkat(dirfd(dir), ent->d_name, path, sizeof(path));
219 		if (len > 0 && (size_t)len < sizeof(path))
220 			path[len] = '\0';
221 		else
222 			strncpy(path, ent->d_name, sizeof(path));
223 		pr_err("Leak of file descriptor %s that opened: '%s'\n", ent->d_name, path);
224 	}
225 	closedir(dir);
226 	if (leaks)
227 		abort();
228 }
229 
230 static int test_suite__num_test_cases(const struct test_suite *t)
231 {
232 	int num;
233 
234 	test_suite__for_each_test_case(t, num);
235 
236 	return num;
237 }
238 
239 static const char *skip_reason(const struct test_suite *t, int test_case)
240 {
241 	if (!t->test_cases)
242 		return NULL;
243 
244 	return t->test_cases[test_case >= 0 ? test_case : 0].skip_reason;
245 }
246 
247 static const char *test_description(const struct test_suite *t, int test_case)
248 {
249 	if (t->test_cases && test_case >= 0)
250 		return t->test_cases[test_case].desc;
251 
252 	return t->desc;
253 }
254 
255 static test_fnptr test_function(const struct test_suite *t, int test_case)
256 {
257 	if (test_case <= 0)
258 		return t->test_cases[0].run_case;
259 
260 	return t->test_cases[test_case].run_case;
261 }
262 
263 static bool test_exclusive(const struct test_suite *t, int test_case)
264 {
265 	if (test_case <= 0)
266 		return t->test_cases[0].exclusive;
267 
268 	return t->test_cases[test_case].exclusive;
269 }
270 
/*
 * Does a suite/test case description match a command line argument?  An
 * argument that parses entirely as a number selects by 1-based test
 * number; anything else is a case-insensitive substring match against
 * desc.  An empty argument list matches everything.
 */
static bool perf_test__matches(const char *desc, int suite_num, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		/* Fix: use strtol, the result was stored in a signed long. */
		long nr = strtol(argv[i], &end, 10);

		if (*end == '\0') {
			/* Purely numeric argument: match on 1-based index. */
			if (nr == suite_num + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}
294 
/* State for one forked test-case child process. */
struct child_test {
	struct child_process process;	/* libsubcmd child handle (pid, pipes). */
	struct test_suite *test;	/* Suite this test case belongs to. */
	int suite_num;			/* 0-based suite index (printed 1-based). */
	int test_case_num;		/* 0-based case index within the suite. */
};

/* Jump target for child_test_sig_handler to unwind back into run_test_child. */
static jmp_buf run_test_jmp_buf;
303 
/*
 * Signal handler installed around a test case in the child: dump a
 * backtrace when available, then jump back into run_test_child, which
 * turns the signal number into a failing return value.
 * NOTE(review): fprintf/backtrace are not async-signal-safe; presumably
 * acceptable here since the child is about to report failure and exit.
 */
static void child_test_sig_handler(int sig)
{
#ifdef HAVE_BACKTRACE_SUPPORT
	void *stackdump[32];
	size_t stackdump_size;
#endif

	fprintf(stderr, "\n---- unexpected signal (%d) ----\n", sig);
#ifdef HAVE_BACKTRACE_SUPPORT
	stackdump_size = backtrace(stackdump, ARRAY_SIZE(stackdump));
	__dump_stack(stderr, stackdump, stackdump_size);
#endif
	siglongjmp(run_test_jmp_buf, sig);
}
318 
/*
 * Body of the forked child (installed as child_process->no_exec_cmd): run
 * one test case with crash signals trapped, so a crashing test is
 * reported as a failure rather than taking out the harness.  The negated
 * return value becomes the child's exit code.
 */
static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	/* Drop inherited fds so check_leaks() only sees this test's fds. */
	close_parent_fds();

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		/* Received signal. */
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
	pr_debug("---- end(%d) ----\n", err);

	/* Aborts (test failure) if the test leaked file descriptors. */
	check_leaks();
err_out:
	fflush(NULL);
	/* Restore default handlers before reporting the result. */
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}
351 
352 #define TEST_RUNNING -3
353 
354 static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
355 			     int result, int width, int running)
356 {
357 	if (test_suite__num_test_cases(t) > 1) {
358 		int subw = width > 2 ? width - 2 : width;
359 
360 		pr_info("%3d.%1d: %-*s:", curr_suite + 1, curr_test_case + 1, subw,
361 			test_description(t, curr_test_case));
362 	} else
363 		pr_info("%3d: %-*s:", curr_suite + 1, width, test_description(t, curr_test_case));
364 
365 	switch (result) {
366 	case TEST_RUNNING:
367 		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
368 		break;
369 	case TEST_OK:
370 		pr_info(" Ok\n");
371 		break;
372 	case TEST_SKIP: {
373 		const char *reason = skip_reason(t, curr_test_case);
374 
375 		if (reason)
376 			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
377 		else
378 			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
379 	}
380 		break;
381 	case TEST_FAIL:
382 	default:
383 		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
384 		break;
385 	}
386 
387 	return 0;
388 }
389 
/*
 * Wait for the child in child_tests[running_test] to finish, draining its
 * stderr pipe while it runs, then print the result line and free the
 * slot.  child_test_num bounds the scan for still-running siblings used
 * by the transient "Running (.. active)" status line.  A NULL slot (test
 * never started) is a no-op.
 */
static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int curr_suite, curr_test_case, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	curr_suite = child_test->suite_num;
	curr_test_case = child_test->test_case_num;
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
		pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		/* Verbose: identify the test before streaming its output. */
		if (test_suite__num_test_cases(t) > 1)
			pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
				test_description(t, curr_test_case));
		else
			pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			/* Count children not yet finished, for the status line. */
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
						  width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					/* Got output: keep draining before declaring EOF. */
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	/* Replay the captured output for failures (or always when verbose). */
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}
496 
/*
 * Start (and in sequential/exclusive modes also run to completion) one
 * test case.  'pass' selects what runs: in parallel mode pass 1 runs the
 * non-exclusive tests and pass 2 the exclusive ones; in other modes
 * everything runs in pass 1.  On fork, *child is populated and - when
 * left running in the background - finished later by finish_test().
 * Returns 0 or a negative error.
 */
static int start_test(struct test_suite *test, int curr_suite, int curr_test_case,
		struct child_test **child, int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		/* Debug mode: run the test directly in this process. */
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, curr_test_case)(test, curr_test_case);
			pr_debug("---- end ----\n");
			print_test_result(test, curr_suite, curr_test_case, err, width,
					  /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, curr_test_case)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, curr_test_case))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->suite_num = curr_suite;
	(*child)->test_case_num = curr_test_case;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		/* Quiet: discard the child's output entirely. */
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		/* Capture stdout+stderr through one pipe for later replay. */
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		/* Exclusive/sequential: run to completion before returning. */
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}
548 
/* State outside of __cmd_test for the sake of the signal handler. */

/* Total number of test-case runs scheduled (cases x runs_per_test). */
static size_t num_tests;
/* One slot per scheduled run; NULL until (unless) the run is started. */
static struct child_test **child_tests;
/* Jump target letting SIGINT/SIGTERM unwind __cmd_test to kill children. */
static jmp_buf cmd_test_jmp_buf;

/* Forward SIGINT/SIGTERM into __cmd_test's cleanup path. */
static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}
559 
/*
 * Run every suite/test case matching argv (see perf_test__matches),
 * except suites whose 1-based number is in skiplist.  Tests run in
 * parallel unless sequential/dont_fork is set; exclusive tests always run
 * sequentially in a second pass.  Returns 0 or a negative error.
 */
static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
		      struct intlist *skiplist)
{
	static int width = 0;
	int err = 0;

	/* Size the description column and count the total scheduled runs. */
	for (struct test_suite **t = suites; *t; t++) {
		int i, len = strlen(test_description(*t, -1));

		if (width < len)
			width = len;

		test_suite__for_each_test_case(*t, i) {
			len = strlen(test_description(*t, i));
			if (width < len)
				width = len;
			num_tests += runs_per_test;
		}
	}
	child_tests = calloc(num_tests, sizeof(*child_tests));
	if (!child_tests)
		return -ENOMEM;

	/* On SIGINT/SIGTERM, forward the signal to all started children. */
	err = sigsetjmp(cmd_test_jmp_buf, 1);
	if (err) {
		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
		       err);
		for (size_t x = 0; x < num_tests; x++) {
			struct child_test *child_test = child_tests[x];

			if (!child_test || child_test->process.pid <= 0)
				continue;

			pr_debug3("Killing %d pid %d\n",
				  child_test->suite_num + 1,
				  child_test->process.pid);
			kill(child_test->process.pid, err);
		}
		goto err_out;
	}
	signal(SIGINT, cmd_test_sig_handler);
	signal(SIGTERM, cmd_test_sig_handler);

	/*
	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
	 * runs the exclusive tests sequentially. In other modes all tests are
	 * run in pass 1.
	 */
	for (int pass = 1; pass <= 2; pass++) {
		int child_test_num = 0;
		int curr_suite = 0;

		for (struct test_suite **t = suites; *t; t++, curr_suite++) {
			int curr_test_case;
			bool suite_matched = false;

			if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
				/*
				 * Test suite shouldn't be run based on
				 * description. See if any test case should.
				 */
				bool skip = true;

				test_suite__for_each_test_case(*t, curr_test_case) {
					if (perf_test__matches(test_description(*t, curr_test_case),
							       curr_suite, argc, argv)) {
						skip = false;
						break;
					}
				}
				if (skip)
					continue;
			} else {
				suite_matched = true;
			}

			if (intlist__find(skiplist, curr_suite + 1)) {
				pr_info("%3d: %-*s:", curr_suite + 1, width,
					test_description(*t, -1));
				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
				continue;
			}

			for (unsigned int run = 0; run < runs_per_test; run++) {
				test_suite__for_each_test_case(*t, curr_test_case) {
					/*
					 * When only some cases matched, filter
					 * the non-matching cases out here.
					 */
					if (!suite_matched &&
					    !perf_test__matches(test_description(*t, curr_test_case),
								curr_suite, argc, argv))
						continue;
					err = start_test(*t, curr_suite, curr_test_case,
							 &child_tests[child_test_num++],
							 width, pass);
					if (err)
						goto err_out;
				}
			}
		}
		if (!sequential) {
			/* Parallel mode starts tests but doesn't finish them. Do that now. */
			for (size_t x = 0; x < num_tests; x++)
				finish_test(child_tests, x, num_tests, width);
		}
	}
err_out:
	signal(SIGINT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	if (err) {
		pr_err("Internal test harness failure. Completing any started tests:\n:");
		for (size_t x = 0; x < num_tests; x++)
			finish_test(child_tests, x, num_tests, width);
	}
	free(child_tests);
	return err;
}
674 
675 static int perf_test__list(FILE *fp, struct test_suite **suites, int argc, const char **argv)
676 {
677 	int curr_suite = 0;
678 
679 	for (struct test_suite **t = suites; *t; t++, curr_suite++) {
680 		int curr_test_case;
681 
682 		if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv))
683 			continue;
684 
685 		fprintf(fp, "%3d: %s\n", curr_suite + 1, test_description(*t, -1));
686 
687 		if (test_suite__num_test_cases(*t) <= 1)
688 			continue;
689 
690 		test_suite__for_each_test_case(*t, curr_test_case) {
691 			fprintf(fp, "%3d.%1d: %s\n", curr_suite + 1, curr_test_case + 1,
692 				test_description(*t, curr_test_case));
693 		}
694 	}
695 	return 0;
696 }
697 
698 static int workloads__fprintf_list(FILE *fp)
699 {
700 	struct test_workload *twl;
701 	int printed = 0;
702 
703 	workloads__for_each(twl)
704 		printed += fprintf(fp, "%s\n", twl->name);
705 
706 	return printed;
707 }
708 
709 static int run_workload(const char *work, int argc, const char **argv)
710 {
711 	struct test_workload *twl;
712 
713 	workloads__for_each(twl) {
714 		if (!strcmp(twl->name, work))
715 			return twl->func(argc, argv);
716 	}
717 
718 	pr_info("No workload found: %s\n", work);
719 	return -1;
720 }
721 
722 static int perf_test__config(const char *var, const char *value,
723 			     void *data __maybe_unused)
724 {
725 	if (!strcmp(var, "annotate.objdump"))
726 		test_objdump_path = value;
727 
728 	return 0;
729 }
730 
731 static struct test_suite **build_suites(void)
732 {
733 	/*
734 	 * TODO: suites is static to avoid needing to clean up the scripts tests
735 	 * for leak sanitizer.
736 	 */
737 	static struct test_suite **suites[] = {
738 		generic_tests,
739 		arch_tests,
740 		NULL,
741 	};
742 	struct test_suite **result;
743 	struct test_suite *t;
744 	size_t n = 0, num_suites = 0;
745 
746 	if (suites[2] == NULL)
747 		suites[2] = create_script_test_suites();
748 
749 #define for_each_suite(suite)						\
750 	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0)	\
751 		while ((suite = suites[i][j++]) != NULL)
752 
753 	for_each_suite(t)
754 		num_suites++;
755 
756 	result = calloc(num_suites + 1, sizeof(struct test_suite *));
757 
758 	for (int pass = 1; pass <= 2; pass++) {
759 		for_each_suite(t) {
760 			bool exclusive = false;
761 			int curr_test_case;
762 
763 			test_suite__for_each_test_case(t, curr_test_case) {
764 				if (test_exclusive(t, curr_test_case)) {
765 					exclusive = true;
766 					break;
767 				}
768 			}
769 			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
770 				result[n++] = t;
771 		}
772 	}
773 	return result;
774 #undef for_each_suite
775 }
776 
777 int cmd_test(int argc, const char **argv)
778 {
779 	const char *test_usage[] = {
780 	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
781 	NULL,
782 	};
783 	const char *skip = NULL;
784 	const char *workload = NULL;
785 	bool list_workloads = false;
786 	const struct option test_options[] = {
787 	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
788 	OPT_INCR('v', "verbose", &verbose,
789 		    "be more verbose (show symbol address, etc)"),
790 	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
791 		    "Do not fork for testcase"),
792 	OPT_BOOLEAN('S', "sequential", &sequential,
793 		    "Run the tests one after another rather than in parallel"),
794 	OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
795 		     "Run each test the given number of times, default 1"),
796 	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
797 	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
798 	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
799 	OPT_STRING(0, "objdump", &test_objdump_path, "path",
800 		   "objdump binary to use for disassembly and annotations"),
801 	OPT_END()
802 	};
803 	const char * const test_subcommands[] = { "list", NULL };
804 	struct intlist *skiplist = NULL;
805         int ret = hists__init();
806 	struct test_suite **suites;
807 
808         if (ret < 0)
809                 return ret;
810 
811 	perf_config(perf_test__config, NULL);
812 
813 	/* Unbuffered output */
814 	setvbuf(stdout, NULL, _IONBF, 0);
815 
816 	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
817 	if (argc >= 1 && !strcmp(argv[0], "list")) {
818 		suites = build_suites();
819 		ret = perf_test__list(stdout, suites, argc - 1, argv + 1);
820 		free(suites);
821 		return ret;
822 	}
823 
824 	if (workload)
825 		return run_workload(workload, argc, argv);
826 
827 	if (list_workloads) {
828 		workloads__fprintf_list(stdout);
829 		return 0;
830 	}
831 
832 	if (dont_fork)
833 		sequential = true;
834 
835 	symbol_conf.priv_size = sizeof(int);
836 	symbol_conf.try_vmlinux_path = true;
837 
838 
839 	if (symbol__init(NULL) < 0)
840 		return -1;
841 
842 	if (skip != NULL)
843 		skiplist = intlist__new(skip);
844 	/*
845 	 * Tests that create BPF maps, for instance, need more than the 64K
846 	 * default:
847 	 */
848 	rlimit__bump_memlock();
849 
850 	suites = build_suites();
851 	ret = __cmd_test(suites, argc, argv, skiplist);
852 	free(suites);
853 	return ret;
854 }
855