xref: /linux/tools/perf/tests/builtin-test.c (revision 2f5d370dec3f800b44bbf7b68875d521e0af43cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-test.c
4  *
5  * Builtin regression testing command: ever growing number of sanity tests
6  */
7 #include <fcntl.h>
8 #include <errno.h>
9 #ifdef HAVE_BACKTRACE_SUPPORT
10 #include <execinfo.h>
11 #endif
12 #include <poll.h>
13 #include <unistd.h>
14 #include <setjmp.h>
15 #include <string.h>
16 #include <stdlib.h>
17 #include <sys/types.h>
18 #include <dirent.h>
19 #include <sys/wait.h>
20 #include <sys/stat.h>
21 #include "builtin.h"
22 #include "config.h"
23 #include "hist.h"
24 #include "intlist.h"
25 #include "tests.h"
26 #include "debug.h"
27 #include "color.h"
28 #include <subcmd/parse-options.h>
29 #include <subcmd/run-command.h>
30 #include "string2.h"
31 #include "symbol.h"
32 #include "util/rlimit.h"
33 #include "util/strbuf.h"
34 #include <linux/kernel.h>
35 #include <linux/string.h>
36 #include <subcmd/exec-cmd.h>
37 #include <linux/zalloc.h>
38 
39 #include "tests-scripts.h"
40 
/*
 * Command line option to not fork, running the tests in the same process
 * and making them easier to debug.
 */
static bool dont_fork;
/* Run the tests one after another rather than forking them in parallel. */
static bool sequential;
/* Number of times each test is run. */
static unsigned int runs_per_test = 1;
/* DSO named by the --dso option; read by tests defined elsewhere. */
const char *dso_to_test;
/* objdump binary used by disassembly tests, settable via --objdump. */
const char *test_objdump_path = "objdump";
52 
/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
/* No architecture specific tests: provide an empty, NULL terminated list. */
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif
65 
/* Architecture independent tests, NULL terminated like arch_tests. */
static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__python_use,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps__merge_in,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__demangle_rust,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	NULL,
};
144 
/* Builtin workloads used for testing, selected with -w/--workload. */
static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
};
154 
/* Iterate workload over every entry of the workloads array (not NULL terminated). */
#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

/* Iterate idx over suite's test cases; the array is NULL-name terminated and may be absent. */
#define test_suite__for_each_test_case(suite, idx)			\
	for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)
160 
161 static int test_suite__num_test_cases(const struct test_suite *t)
162 {
163 	int num;
164 
165 	test_suite__for_each_test_case(t, num);
166 
167 	return num;
168 }
169 
170 static const char *skip_reason(const struct test_suite *t, int test_case)
171 {
172 	if (!t->test_cases)
173 		return NULL;
174 
175 	return t->test_cases[test_case >= 0 ? test_case : 0].skip_reason;
176 }
177 
178 static const char *test_description(const struct test_suite *t, int test_case)
179 {
180 	if (t->test_cases && test_case >= 0)
181 		return t->test_cases[test_case].desc;
182 
183 	return t->desc;
184 }
185 
186 static test_fnptr test_function(const struct test_suite *t, int test_case)
187 {
188 	if (test_case <= 0)
189 		return t->test_cases[0].run_case;
190 
191 	return t->test_cases[test_case].run_case;
192 }
193 
194 static bool test_exclusive(const struct test_suite *t, int test_case)
195 {
196 	if (test_case <= 0)
197 		return t->test_cases[0].exclusive;
198 
199 	return t->test_cases[test_case].exclusive;
200 }
201 
/*
 * Does a test with the given description and 0 based number match any command
 * line argument? Numeric arguments select by 1 based test number, anything
 * else is a case insensitive substring match on the description. No
 * arguments matches everything.
 */
static bool perf_test__matches(const char *desc, int suite_num, int argc, const char *argv[])
{
	if (argc == 0)
		return true;

	for (int i = 0; i < argc; i++) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end != '\0') {
			/* Not a pure number: substring match on the description. */
			if (strcasestr(desc, argv[i]))
				return true;
		} else if (nr == suite_num + 1) {
			return true;
		}
	}

	return false;
}
225 
/* State for one test case running in a forked child process. */
struct child_test {
	struct child_process process;	/* libsubcmd run-command state. */
	struct test_suite *test;	/* Suite the test case belongs to. */
	int suite_num;			/* 0 based index of the suite. */
	int test_case_num;		/* 0 based index of the case within the suite. */
};
232 
/* Longjmp target so a fatal signal during a test aborts just that test run. */
static jmp_buf run_test_jmp_buf;

/*
 * Signal handler installed around a test case run: report the signal,
 * dump a backtrace when available, then jump back into run_test_child()
 * which turns the signal number into a failure result.
 */
static void child_test_sig_handler(int sig)
{
#ifdef HAVE_BACKTRACE_SUPPORT
	void *stackdump[32];
	size_t stackdump_size;
#endif

	fprintf(stderr, "\n---- unexpected signal (%d) ----\n", sig);
#ifdef HAVE_BACKTRACE_SUPPORT
	stackdump_size = backtrace(stackdump, ARRAY_SIZE(stackdump));
	__dump_stack(stderr, stackdump, stackdump_size);
#endif
	siglongjmp(run_test_jmp_buf, sig);
}
249 
/*
 * Entry point of the forked child (the no_exec_cmd callback): run one test
 * case with fatal signals diverted, via child_test_sig_handler(), into a
 * failure result. The negated result becomes the child's exit status.
 */
static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		/* Received signal. */
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
	pr_debug("---- end(%d) ----\n", err);

err_out:
	/* Flush all streams before exiting and restore default signal handling. */
	fflush(NULL);
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}
279 
280 #define TEST_RUNNING -3
281 
282 static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
283 			     int result, int width, int running)
284 {
285 	if (test_suite__num_test_cases(t) > 1) {
286 		int subw = width > 2 ? width - 2 : width;
287 
288 		pr_info("%3d.%1d: %-*s:", curr_suite + 1, curr_test_case + 1, subw,
289 			test_description(t, curr_test_case));
290 	} else
291 		pr_info("%3d: %-*s:", curr_suite + 1, width, test_description(t, curr_test_case));
292 
293 	switch (result) {
294 	case TEST_RUNNING:
295 		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
296 		break;
297 	case TEST_OK:
298 		pr_info(" Ok\n");
299 		break;
300 	case TEST_SKIP: {
301 		const char *reason = skip_reason(t, curr_test_case);
302 
303 		if (reason)
304 			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
305 		else
306 			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
307 	}
308 		break;
309 	case TEST_FAIL:
310 	default:
311 		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
312 		break;
313 	}
314 
315 	return 0;
316 }
317 
/*
 * Wait for child test running_test to complete, capturing its stderr output
 * and reporting the result. While waiting, keeps a colored
 * "Running (N active)" status line updated by scanning child_tests up to
 * child_test_num. Frees the child_test entry when done.
 */
static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int curr_suite, curr_test_case, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	curr_suite = child_test->suite_num;
	curr_test_case = child_test->test_case_num;
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
		pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		/* Very verbose: announce the test before its output streams in. */
		if (test_suite__num_test_cases(t) > 1)
			pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
				test_description(t, curr_test_case));
		else
			pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			/* Count started children that haven't finished yet. */
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
						  width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					/* Output was read: keep draining before declaring done. */
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	/* Show the captured output when very verbose, or on failure when verbose. */
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}
424 
/*
 * Start one test case, and in sequential/exclusive modes also wait for it.
 * On pass 1 sequential and non-exclusive tests run, on pass 2 the exclusive
 * ones (see __cmd_test()). *child is left NULL when no child process was
 * created. Returns 0 on success or a negative error code.
 */
static int start_test(struct test_suite *test, int curr_suite, int curr_test_case,
		struct child_test **child, int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		/* Run the test directly in this process for easier debugging. */
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, curr_test_case)(test, curr_test_case);
			pr_debug("---- end ----\n");
			print_test_result(test, curr_suite, curr_test_case, err, width,
					  /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, curr_test_case)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, curr_test_case))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->suite_num = curr_suite;
	(*child)->test_case_num = curr_test_case;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		/* Quiet: discard the child's stdout and stderr. */
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		/* Capture output through a pipe so finish_test() can buffer it. */
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		/* One child at a time: start it and wait for completion now. */
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}
476 
/* State outside of __cmd_test for the sake of the signal handler. */

/* Total number of test runs; sizes the child_tests array. */
static size_t num_tests;
/* Child tests that have been started, indexed in start order. */
static struct child_test **child_tests;
/* Longjmp target so SIGINT/SIGTERM interrupt __cmd_test and kill children. */
static jmp_buf cmd_test_jmp_buf;

/* SIGINT/SIGTERM handler: resume inside __cmd_test's cleanup path. */
static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}
487 
488 static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
489 		      struct intlist *skiplist)
490 {
491 	static int width = 0;
492 	int err = 0;
493 
494 	for (struct test_suite **t = suites; *t; t++) {
495 		int i, len = strlen(test_description(*t, -1));
496 
497 		if (width < len)
498 			width = len;
499 
500 		test_suite__for_each_test_case(*t, i) {
501 			len = strlen(test_description(*t, i));
502 			if (width < len)
503 				width = len;
504 			num_tests += runs_per_test;
505 		}
506 	}
507 	child_tests = calloc(num_tests, sizeof(*child_tests));
508 	if (!child_tests)
509 		return -ENOMEM;
510 
511 	err = sigsetjmp(cmd_test_jmp_buf, 1);
512 	if (err) {
513 		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
514 		       err);
515 		for (size_t x = 0; x < num_tests; x++) {
516 			struct child_test *child_test = child_tests[x];
517 
518 			if (!child_test || child_test->process.pid <= 0)
519 				continue;
520 
521 			pr_debug3("Killing %d pid %d\n",
522 				  child_test->suite_num + 1,
523 				  child_test->process.pid);
524 			kill(child_test->process.pid, err);
525 		}
526 		goto err_out;
527 	}
528 	signal(SIGINT, cmd_test_sig_handler);
529 	signal(SIGTERM, cmd_test_sig_handler);
530 
531 	/*
532 	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
533 	 * runs the exclusive tests sequentially. In other modes all tests are
534 	 * run in pass 1.
535 	 */
536 	for (int pass = 1; pass <= 2; pass++) {
537 		int child_test_num = 0;
538 		int curr_suite = 0;
539 
540 		for (struct test_suite **t = suites; *t; t++, curr_suite++) {
541 			int curr_test_case;
542 
543 			if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
544 				/*
545 				 * Test suite shouldn't be run based on
546 				 * description. See if any test case should.
547 				 */
548 				bool skip = true;
549 
550 				test_suite__for_each_test_case(*t, curr_test_case) {
551 					if (perf_test__matches(test_description(*t, curr_test_case),
552 							       curr_suite, argc, argv)) {
553 						skip = false;
554 						break;
555 					}
556 				}
557 				if (skip)
558 					continue;
559 			}
560 
561 			if (intlist__find(skiplist, curr_suite + 1)) {
562 				pr_info("%3d: %-*s:", curr_suite + 1, width,
563 					test_description(*t, -1));
564 				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
565 				continue;
566 			}
567 
568 			for (unsigned int run = 0; run < runs_per_test; run++) {
569 				test_suite__for_each_test_case(*t, curr_test_case) {
570 					if (!perf_test__matches(test_description(*t, curr_test_case),
571 								curr_suite, argc, argv))
572 						continue;
573 
574 					err = start_test(*t, curr_suite, curr_test_case,
575 							 &child_tests[child_test_num++],
576 							 width, pass);
577 					if (err)
578 						goto err_out;
579 				}
580 			}
581 		}
582 		if (!sequential) {
583 			/* Parallel mode starts tests but doesn't finish them. Do that now. */
584 			for (size_t x = 0; x < num_tests; x++)
585 				finish_test(child_tests, x, num_tests, width);
586 		}
587 	}
588 err_out:
589 	signal(SIGINT, SIG_DFL);
590 	signal(SIGTERM, SIG_DFL);
591 	if (err) {
592 		pr_err("Internal test harness failure. Completing any started tests:\n:");
593 		for (size_t x = 0; x < num_tests; x++)
594 			finish_test(child_tests, x, num_tests, width);
595 	}
596 	free(child_tests);
597 	return err;
598 }
599 
600 static int perf_test__list(FILE *fp, struct test_suite **suites, int argc, const char **argv)
601 {
602 	int curr_suite = 0;
603 
604 	for (struct test_suite **t = suites; *t; t++, curr_suite++) {
605 		int curr_test_case;
606 
607 		if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv))
608 			continue;
609 
610 		fprintf(fp, "%3d: %s\n", curr_suite + 1, test_description(*t, -1));
611 
612 		if (test_suite__num_test_cases(*t) <= 1)
613 			continue;
614 
615 		test_suite__for_each_test_case(*t, curr_test_case) {
616 			fprintf(fp, "%3d.%1d: %s\n", curr_suite + 1, curr_test_case + 1,
617 				test_description(*t, curr_test_case));
618 		}
619 	}
620 	return 0;
621 }
622 
623 static int workloads__fprintf_list(FILE *fp)
624 {
625 	struct test_workload *twl;
626 	int printed = 0;
627 
628 	workloads__for_each(twl)
629 		printed += fprintf(fp, "%s\n", twl->name);
630 
631 	return printed;
632 }
633 
634 static int run_workload(const char *work, int argc, const char **argv)
635 {
636 	struct test_workload *twl;
637 
638 	workloads__for_each(twl) {
639 		if (!strcmp(twl->name, work))
640 			return twl->func(argc, argv);
641 	}
642 
643 	pr_info("No workload found: %s\n", work);
644 	return -1;
645 }
646 
647 static int perf_test__config(const char *var, const char *value,
648 			     void *data __maybe_unused)
649 {
650 	if (!strcmp(var, "annotate.objdump"))
651 		test_objdump_path = value;
652 
653 	return 0;
654 }
655 
656 static struct test_suite **build_suites(void)
657 {
658 	/*
659 	 * TODO: suites is static to avoid needing to clean up the scripts tests
660 	 * for leak sanitizer.
661 	 */
662 	static struct test_suite **suites[] = {
663 		generic_tests,
664 		arch_tests,
665 		NULL,
666 	};
667 	struct test_suite **result;
668 	struct test_suite *t;
669 	size_t n = 0, num_suites = 0;
670 
671 	if (suites[2] == NULL)
672 		suites[2] = create_script_test_suites();
673 
674 #define for_each_suite(suite)						\
675 	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0)	\
676 		while ((suite = suites[i][j++]) != NULL)
677 
678 	for_each_suite(t)
679 		num_suites++;
680 
681 	result = calloc(num_suites + 1, sizeof(struct test_suite *));
682 
683 	for (int pass = 1; pass <= 2; pass++) {
684 		for_each_suite(t) {
685 			bool exclusive = false;
686 			int curr_test_case;
687 
688 			test_suite__for_each_test_case(t, curr_test_case) {
689 				if (test_exclusive(t, curr_test_case)) {
690 					exclusive = true;
691 					break;
692 				}
693 			}
694 			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
695 				result[n++] = t;
696 		}
697 	}
698 	return result;
699 #undef for_each_suite
700 }
701 
/*
 * The 'perf test' entry point: parse options, then either list the tests,
 * run a named builtin workload, or build the suite list and run the tests.
 */
int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
		    "Do not fork for testcase"),
	OPT_BOOLEAN('S', "sequential", &sequential,
		    "Run the tests one after another rather than in parallel"),
	OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
		     "Run each test the given number of times, default 1"),
	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
	OPT_STRING(0, "objdump", &test_objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
        int ret = hists__init();
	struct test_suite **suites;

        if (ret < 0)
                return ret;

	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list")) {
		/* "perf test list ..." only prints the matching tests. */
		suites = build_suites();
		ret = perf_test__list(stdout, suites, argc - 1, argv + 1);
		free(suites);
		return ret;
	}

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	/* Running in-process implies running one test at a time. */
	if (dont_fork)
		sequential = true;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;


	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	suites = build_suites();
	ret = __cmd_test(suites, argc, argv, skiplist);
	free(suites);
	return ret;
}
780