// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <setjmp.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "config.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
#include "util/strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>

#include "tests-scripts.h"

/*
 * Command line option to not fork, running the tests in the same process and
 * making them easier to debug.
 */
static bool dont_fork;
/* Run the tests one after another rather than in parallel. */
static bool sequential;
/* Number of times each test is run. */
static unsigned int runs_per_test = 1;
const char *dso_to_test;
const char *test_objdump_path = "objdump";

/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif

static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__python_use,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps__merge_in,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	NULL,
};

static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
};

#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

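/* Iterate over a suite's test cases; the array is terminated by a NULL name. */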
#define test_suite__for_each_test_case(suite, idx)			\
	for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)

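/* Count the test cases in a suite by walking to the NULL-name terminator. */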
static int test_suite__num_test_cases(const struct test_suite *t)
{
	int num;

	test_suite__for_each_test_case(t, num);

	return num;
}

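/*
 * Skip reason recorded for a test case, or for the first case when no
 * specific case is requested. NULL if the suite has no test cases.
 */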
static const char *skip_reason(const struct test_suite *t, int test_case)
{
	if (!t->test_cases)
		return NULL;

	return t->test_cases[test_case >= 0 ? test_case : 0].skip_reason;
}

static const char *test_description(const struct test_suite *t, int test_case)
{
	if (t->test_cases && test_case >= 0)
		return t->test_cases[test_case].desc;

	return t->desc;
}

static test_fnptr test_function(const struct test_suite *t, int test_case)
{
	if (test_case <= 0)
		return t->test_cases[0].run_case;

	return t->test_cases[test_case].run_case;
}

static bool test_exclusive(const struct test_suite *t, int test_case)
{
	if (test_case <= 0)
		return t->test_cases[0].exclusive;

	return t->test_cases[test_case].exclusive;
}

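/*
 * Does the suite/test match any command line argument? Arguments are either
 * 1-based test numbers or case-insensitive substrings of the description.
 * With no arguments every test matches.
 */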
static bool perf_test__matches(const char *desc, int suite_num, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == suite_num + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}

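/* Per-child state for a test case run in a forked child process. */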
struct child_test {
	struct child_process process;
	struct test_suite *test;
	int suite_num;
	int test_case_num;
};

static jmp_buf run_test_jmp_buf;

static void child_test_sig_handler(int sig)
{
	siglongjmp(run_test_jmp_buf, sig);
}

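/*
 * Body of the forked child. Fatal signals are converted into a siglongjmp
 * back here so the signal is reported as a test failure rather than the
 * child dying silently. The negated test result becomes the exit code.
 */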
static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		fprintf(stderr, "\n---- unexpected signal (%d) ----\n", err);
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
	pr_debug("---- end(%d) ----\n", err);

err_out:
	fflush(NULL);
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}

#define TEST_RUNNING -3

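/*
 * Print one result line, numbered "suite" or "suite.case" when the suite has
 * more than one test case, followed by Ok/Skip/FAILED or a transient
 * "Running (N active)" status.
 */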
static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
			     int result, int width, int running)
{
	if (test_suite__num_test_cases(t) > 1) {
		int subw = width > 2 ? width - 2 : width;

		pr_info("%3d.%1d: %-*s:", curr_suite + 1, curr_test_case + 1, subw,
			test_description(t, curr_test_case));
	} else
		pr_info("%3d: %-*s:", curr_suite + 1, width, test_description(t, curr_test_case));

	switch (result) {
	case TEST_RUNNING:
		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
		break;
	case TEST_OK:
		pr_info(" Ok\n");
		break;
	case TEST_SKIP: {
		const char *reason = skip_reason(t, curr_test_case);

		if (reason)
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
		else
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
	}
		break;
	case TEST_FAIL:
	default:
		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		break;
	}

	return 0;
}

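/*
 * Wait for a started child test to complete, draining its stderr into a
 * buffer as it runs. While waiting, and when colors are enabled, a transient
 * "Running (N active)" line is shown and later erased. The captured output is
 * echoed only at higher verbosity or for failures, then the result line is
 * printed and the child is reaped.
 */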
static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int curr_suite, curr_test_case, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	curr_suite = child_test->suite_num;
	curr_test_case = child_test->test_case_num;
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
		pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (test_suite__num_test_cases(t) > 1)
			pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
				test_description(t, curr_test_case));
		else
			pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
						  width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}

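/*
 * Start a single test case. With --dont-fork the test function is called
 * directly. Otherwise a child process is created; in pass 1 non-exclusive
 * tests are started (and, unless --sequential, left running in parallel),
 * while exclusive tests are deferred to pass 2 and run one at a time.
 */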
static int start_test(struct test_suite *test, int curr_suite, int curr_test_case,
		struct child_test **child, int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, curr_test_case)(test, curr_test_case);
			pr_debug("---- end ----\n");
			print_test_result(test, curr_suite, curr_test_case, err, width,
					  /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, curr_test_case)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, curr_test_case))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->suite_num = curr_suite;
	(*child)->test_case_num = curr_test_case;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}

/* State outside of __cmd_test for the sake of the signal handler. */

static size_t num_tests;
static struct child_test **child_tests;
static jmp_buf cmd_test_jmp_buf;

static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}

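/*
 * Run every suite/test case that matches the command line, honoring the skip
 * list. SIGINT/SIGTERM terminate any forked children with the same signal.
 */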
static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
		      struct intlist *skiplist)
{
	static int width = 0;
	int err = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int i, len = strlen(test_description(*t, -1));

		if (width < len)
			width = len;

		test_suite__for_each_test_case(*t, i) {
			len = strlen(test_description(*t, i));
			if (width < len)
				width = len;
			num_tests += runs_per_test;
		}
	}
	child_tests = calloc(num_tests, sizeof(*child_tests));
	if (!child_tests)
		return -ENOMEM;

	err = sigsetjmp(cmd_test_jmp_buf, 1);
	if (err) {
		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
		       err);
		for (size_t x = 0; x < num_tests; x++) {
			struct child_test *child_test = child_tests[x];

			if (!child_test || child_test->process.pid <= 0)
				continue;

			pr_debug3("Killing %d pid %d\n",
				  child_test->suite_num + 1,
				  child_test->process.pid);
			kill(child_test->process.pid, err);
		}
		goto err_out;
	}
	signal(SIGINT, cmd_test_sig_handler);
	signal(SIGTERM, cmd_test_sig_handler);

	/*
	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
	 * runs the exclusive tests sequentially. In other modes all tests are
	 * run in pass 1.
	 */
	for (int pass = 1; pass <= 2; pass++) {
		int child_test_num = 0;
		int curr_suite = 0;

		for (struct test_suite **t = suites; *t; t++, curr_suite++) {
			int curr_test_case;

			if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
				/*
				 * Test suite shouldn't be run based on
				 * description. See if any test case should.
				 */
				bool skip = true;

				test_suite__for_each_test_case(*t, curr_test_case) {
					if (perf_test__matches(test_description(*t, curr_test_case),
							       curr_suite, argc, argv)) {
						skip = false;
						break;
					}
				}
				if (skip)
					continue;
			}

			if (intlist__find(skiplist, curr_suite + 1)) {
				pr_info("%3d: %-*s:", curr_suite + 1, width,
					test_description(*t, -1));
				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
				continue;
			}

			for (unsigned int run = 0; run < runs_per_test; run++) {
				test_suite__for_each_test_case(*t, curr_test_case) {
					if (!perf_test__matches(test_description(*t, curr_test_case),
								curr_suite, argc, argv))
						continue;

					err = start_test(*t, curr_suite, curr_test_case,
							 &child_tests[child_test_num++],
							 width, pass);
					if (err)
						goto err_out;
				}
			}
		}
		if (!sequential) {
			/* Parallel mode starts tests but doesn't finish them. Do that now. */
			for (size_t x = 0; x < num_tests; x++)
				finish_test(child_tests, x, num_tests, width);
		}
	}
err_out:
	signal(SIGINT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	if (err) {
		pr_err("Internal test harness failure. Completing any started tests:\n");
		for (size_t x = 0; x < num_tests; x++)
			finish_test(child_tests, x, num_tests, width);
	}
	free(child_tests);
	return err;
}

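/* List matching suites and their test cases for "perf test list". */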
static int perf_test__list(FILE *fp, struct test_suite **suites, int argc, const char **argv)
{
	int curr_suite = 0;

	for (struct test_suite **t = suites; *t; t++, curr_suite++) {
		int curr_test_case;

		if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv))
			continue;

		fprintf(fp, "%3d: %s\n", curr_suite + 1, test_description(*t, -1));

		if (test_suite__num_test_cases(*t) <= 1)
			continue;

		test_suite__for_each_test_case(*t, curr_test_case) {
			fprintf(fp, "%3d.%1d: %s\n", curr_suite + 1, curr_test_case + 1,
				test_description(*t, curr_test_case));
		}
	}
	return 0;
}

static int workloads__fprintf_list(FILE *fp)
{
	struct test_workload *twl;
	int printed = 0;

	workloads__for_each(twl)
		printed += fprintf(fp, "%s\n", twl->name);

	return printed;
}

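/* Run the builtin workload named by -w/--workload, e.g. "noploop". */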
static int run_workload(const char *work, int argc, const char **argv)
{
	struct test_workload *twl;

	workloads__for_each(twl) {
		if (!strcmp(twl->name, work))
			return twl->func(argc, argv);
	}

	pr_info("No workload found: %s\n", work);
	return -1;
}

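/* perf_config() callback: let annotate.objdump override the objdump path. */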
static int perf_test__config(const char *var, const char *value,
			     void *data __maybe_unused)
{
	if (!strcmp(var, "annotate.objdump"))
		test_objdump_path = value;

	return 0;
}

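/*
 * Build a NULL terminated array of the generic, architecture and shell script
 * test suites, ordering suites with exclusive test cases after the others so
 * they are run last.
 */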
static struct test_suite **build_suites(void)
{
	/*
	 * TODO: suites is static to avoid needing to clean up the scripts tests
	 * for leak sanitizer.
	 */
	static struct test_suite **suites[] = {
		generic_tests,
		arch_tests,
		NULL,
	};
	struct test_suite **result;
	struct test_suite *t;
	size_t n = 0, num_suites = 0;

	if (suites[2] == NULL)
		suites[2] = create_script_test_suites();

#define for_each_suite(suite)						\
	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0)	\
		while ((suite = suites[i][j++]) != NULL)

	for_each_suite(t)
		num_suites++;

	result = calloc(num_suites + 1, sizeof(struct test_suite *));

	for (int pass = 1; pass <= 2; pass++) {
		for_each_suite(t) {
			bool exclusive = false;
			int curr_test_case;

			test_suite__for_each_test_case(t, curr_test_case) {
				if (test_exclusive(t, curr_test_case)) {
					exclusive = true;
					break;
				}
			}
			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
				result[n++] = t;
		}
	}
	return result;
#undef for_each_suite
}

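/* perf test entry point: parse options, then list or run the test suites. */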
int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
		    "Do not fork for testcase"),
	OPT_BOOLEAN('S', "sequential", &sequential,
		    "Run the tests one after another rather than in parallel"),
	OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
		     "Run each test the given number of times, default 1"),
	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
	OPT_STRING(0, "objdump", &test_objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
	int ret = hists__init();
	struct test_suite **suites;

	if (ret < 0)
		return ret;

	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list")) {
		suites = build_suites();
		ret = perf_test__list(stdout, suites, argc - 1, argv + 1);
		free(suites);
		return ret;
	}

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	if (dont_fork)
		sequential = true;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	suites = build_suites();
	ret = __cmd_test(suites, argc, argv, skiplist);
	free(suites);
	return ret;
}