xref: /linux/tools/perf/tests/builtin-test.c (revision e3b2949e3fa2fd8c19cd5fbb0424d38f70a70e9c)
// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <setjmp.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "config.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
#include "util/strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>

#include "tests-scripts.h"

/*
 * Command line option to not fork the tests, instead running them in the
 * same process, which makes them easier to debug.
 */
static bool dont_fork;
/* Don't fork the tests in parallel; run and wait for each to complete. */
static bool sequential;
const char *dso_to_test;
const char *test_objdump_path = "objdump";

/*
 * List of architecture specific tests. Not a weak symbol as the array length
 * depends on the initialization, so GCC with LTO complains of conflicting
 * definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif

static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
#ifdef HAVE_LIBTRACEEVENT
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
#endif
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__python_use,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps__merge_in,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	NULL,
};

static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
};

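/*
 * Iterate over the workloads array, assigning each element to 'workload' in
 * turn. The statement expression always evaluates to 1, so iteration only
 * stops once the index reaches ARRAY_SIZE(workloads).
 */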
#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

static int num_subtests(const struct test_suite *t)
{
	int num;

	if (!t->test_cases)
		return 0;

	num = 0;
	while (t->test_cases[num].name)
		num++;

	return num;
}

static bool has_subtests(const struct test_suite *t)
{
	return num_subtests(t) > 1;
}

static const char *skip_reason(const struct test_suite *t, int subtest)
{
	if (!t->test_cases)
		return NULL;

	return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
}

static const char *test_description(const struct test_suite *t, int subtest)
{
	if (t->test_cases && subtest >= 0)
		return t->test_cases[subtest].desc;

	return t->desc;
}

static test_fnptr test_function(const struct test_suite *t, int subtest)
{
	if (subtest <= 0)
		return t->test_cases[0].run_case;

	return t->test_cases[subtest].run_case;
}

static bool test_exclusive(const struct test_suite *t, int subtest)
{
	if (subtest <= 0)
		return t->test_cases[0].exclusive;

	return t->test_cases[subtest].exclusive;
}

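/*
 * Return true if the test at 0-based index 'curr' matches any command line
 * argument, either by its 1-based test number or by a case-insensitive
 * substring of 'desc'. With no arguments every test matches.
 */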
static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}

struct child_test {
	struct child_process process;
	struct test_suite *test;
	int test_num;
	int subtest;
};

static jmp_buf run_test_jmp_buf;

static void child_test_sig_handler(int sig)
{
	siglongjmp(run_test_jmp_buf, sig);
}

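/*
 * Entry point of the forked child: install signal handlers that jump back
 * here so a fatal signal is reported as a failure rather than silently
 * killing the child, run the (sub)test function and hand its result
 * (negated) back to the parent.
 */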
static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		fprintf(stderr, "\n---- unexpected signal (%d) ----\n", err);
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->subtest)(child->test, child->subtest);
	pr_debug("---- end(%d) ----\n", err);

err_out:
	fflush(NULL);
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}

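/*
 * Display-only result used by print_test_result() while a forked test is
 * still executing; never returned by a test itself.
 */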
#define TEST_RUNNING -3

static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width,
			     int running)
{
	if (has_subtests(t)) {
		int subw = width > 2 ? width - 2 : width;

		pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest));
	} else
		pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));

	switch (result) {
	case TEST_RUNNING:
		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
		break;
	case TEST_OK:
		pr_info(" Ok\n");
		break;
	case TEST_SKIP: {
		const char *reason = skip_reason(t, subtest);

		if (reason)
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
		else
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
	}
		break;
	case TEST_FAIL:
	default:
		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		break;
	}

	return 0;
}

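/*
 * Wait for the child test at index 'running_test' to finish, capturing its
 * stderr so it can be shown on failure or at higher verbosity, then print
 * the result. While waiting, report how many forked tests are still active
 * when colored output is enabled.
 */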
static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int i, subi, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	i = child_test->test_num;
	subi = child_test->subtest;
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (has_subtests(t) && subi == 0)
		pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr (set to
	 * non-blocking) until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (has_subtests(t))
			pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
		else
			pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, i, subi, TEST_RUNNING, width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, i, subi, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}

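/*
 * Start test 'i' (subtest 'subi'), or run it directly with --dont-fork. In
 * parallel mode exclusive tests are deferred to pass 2, where they (like all
 * tests in sequential mode) are forked and waited for one at a time;
 * non-exclusive tests are forked on pass 1 and left running, to be collected
 * later by finish_test().
 */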
static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
		int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, subi)(test, subi);
			pr_debug("---- end ----\n");
			print_test_result(test, i, subi, err, width, /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, subi)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, subi))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->test_num = i;
	(*child)->subtest = subi;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}

/* State outside of __cmd_test for the sake of the signal handler. */

static size_t num_tests;
static struct child_test **child_tests;
static jmp_buf cmd_test_jmp_buf;

static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}

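/*
 * Run every suite that matches the command line arguments and isn't on the
 * skip list. The initial loop over the suites only computes the display
 * width for aligned output and counts the total number of (sub)tests.
 */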
static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
		      struct intlist *skiplist)
{
	static int width = 0;
	int err = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int len = strlen(test_description(*t, -1));

		if (width < len)
			width = len;

		if (has_subtests(*t)) {
			for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
				len = strlen(test_description(*t, subi));
				if (width < len)
					width = len;
				num_tests++;
			}
		} else {
			num_tests++;
		}
	}
	child_tests = calloc(num_tests, sizeof(*child_tests));
	if (!child_tests)
		return -ENOMEM;

	err = sigsetjmp(cmd_test_jmp_buf, 1);
	if (err) {
		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
		       err);
		for (size_t x = 0; x < num_tests; x++) {
			struct child_test *child_test = child_tests[x];

			if (!child_test)
				continue;

			pr_debug3("Killing %d pid %d\n",
				  child_test->test_num + 1,
				  child_test->process.pid);
			kill(child_test->process.pid, err);
		}
		goto err_out;
	}
	signal(SIGINT, cmd_test_sig_handler);
	signal(SIGTERM, cmd_test_sig_handler);

	/*
	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
	 * runs the exclusive tests sequentially. In other modes all tests are
	 * run in pass 1.
	 */
	for (int pass = 1; pass <= 2; pass++) {
		int child_test_num = 0;
		int i = 0;

		for (struct test_suite **t = suites; *t; t++) {
			int curr = i++;

			if (!perf_test__matches(test_description(*t, -1), curr, argc, argv)) {
				/*
				 * Test suite shouldn't be run based on
				 * description. See if subtest should.
				 */
				bool skip = true;

				for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
					if (perf_test__matches(test_description(*t, subi),
								curr, argc, argv))
						skip = false;
				}

				if (skip)
					continue;
			}

			if (intlist__find(skiplist, i)) {
				pr_info("%3d: %-*s:", curr + 1, width, test_description(*t, -1));
				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
				continue;
			}

			if (!has_subtests(*t)) {
				err = start_test(*t, curr, -1, &child_tests[child_test_num++],
						 width, pass);
				if (err)
					goto err_out;
				continue;
			}
			for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
				if (!perf_test__matches(test_description(*t, subi),
							curr, argc, argv))
					continue;

				err = start_test(*t, curr, subi, &child_tests[child_test_num++],
						 width, pass);
				if (err)
					goto err_out;
			}
		}
		if (!sequential) {
			/* Parallel mode starts tests but doesn't finish them. Do that now. */
			for (size_t x = 0; x < num_tests; x++)
				finish_test(child_tests, x, num_tests, width);
		}
	}
err_out:
	signal(SIGINT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	if (err) {
		pr_err("Internal test harness failure. Completing any started tests:\n");
		for (size_t x = 0; x < num_tests; x++)
			finish_test(child_tests, x, num_tests, width);
	}
	free(child_tests);
	return err;
}

static int perf_test__list(struct test_suite **suites, int argc, const char **argv)
{
	int i = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int curr = i++;

		if (!perf_test__matches(test_description(*t, -1), curr, argc, argv))
			continue;

		pr_info("%3d: %s\n", i, test_description(*t, -1));

		if (has_subtests(*t)) {
			int subn = num_subtests(*t);
			int subi;

			for (subi = 0; subi < subn; subi++)
				pr_info("%3d:%1d: %s\n", i, subi + 1,
					test_description(*t, subi));
		}
	}
	return 0;
}

static int workloads__fprintf_list(FILE *fp)
{
	struct test_workload *twl;
	int printed = 0;

	workloads__for_each(twl)
		printed += fprintf(fp, "%s\n", twl->name);

	return printed;
}

static int run_workload(const char *work, int argc, const char **argv)
{
	struct test_workload *twl;

	workloads__for_each(twl) {
		if (!strcmp(twl->name, work))
			return twl->func(argc, argv);
	}

	pr_info("No workload found: %s\n", work);
	return -1;
}

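/* Let the annotate.objdump perfconfig variable override the objdump default. */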
static int perf_test__config(const char *var, const char *value,
			     void *data __maybe_unused)
{
	if (!strcmp(var, "annotate.objdump"))
		test_objdump_path = value;

	return 0;
}

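/*
 * Build a single NULL terminated array from the generic, architecture and
 * shell script test suites. Suites containing no exclusive tests are placed
 * first so that, when running in parallel, they are started before the
 * exclusive ones.
 */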
static struct test_suite **build_suites(void)
{
	/*
	 * TODO: suites is static to avoid needing to clean up the scripts tests
	 * for leak sanitizer.
	 */
	static struct test_suite **suites[] = {
		generic_tests,
		arch_tests,
		NULL,
	};
	struct test_suite **result;
	struct test_suite *t;
	size_t n = 0, num_suites = 0;

	if (suites[2] == NULL)
		suites[2] = create_script_test_suites();

#define for_each_test(t)						\
	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0)	\
		while ((t = suites[i][j++]) != NULL)

	for_each_test(t)
		num_suites++;

	result = calloc(num_suites + 1, sizeof(struct test_suite *));

	for (int pass = 1; pass <= 2; pass++) {
		for_each_test(t) {
			bool exclusive = false;

			if (!has_subtests(t)) {
				exclusive = test_exclusive(t, -1);
			} else {
				for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
					if (test_exclusive(t, subi)) {
						exclusive = true;
						break;
					}
				}
			}
			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
				result[n++] = t;
		}
	}
	return result;
#undef for_each_test
}

int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
		    "Do not fork for testcase"),
	OPT_BOOLEAN('S', "sequential", &sequential,
		    "Run the tests one after another rather than in parallel"),
	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
	OPT_STRING(0, "objdump", &test_objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
	int ret = hists__init();
	struct test_suite **suites;

	if (ret < 0)
		return ret;

	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list")) {
		suites = build_suites();
		ret = perf_test__list(suites, argc - 1, argv + 1);
		free(suites);
		return ret;
	}

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	if (dont_fork)
		sequential = true;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	suites = build_suites();
	ret = __cmd_test(suites, argc, argv, skiplist);
	free(suites);
	return ret;
}