// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
#include <poll.h>
#include <unistd.h>
#include <setjmp.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "config.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
#include "util/strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>

#include "tests-scripts.h"

/*
 * Command line option to not fork, running the test in the same process and
 * making it easier to debug.
 */
static bool dont_fork;
/* Run the tests one after another rather than forking them in parallel. */
static bool sequential;
/* Number of times each test is run. */
static unsigned int runs_per_test = 1;
const char *dso_to_test;
const char *test_objdump_path = "objdump";

/*
 * List of architecture specific tests. Not a weak symbol: the array length
 * depends on the initialization, so GCC with LTO would complain of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif

static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__demangle_rust,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	&suite__subcmd_help,
	&suite__kallsyms_split,
	NULL,
};

static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
	&workload__traploop,
};

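/*
 * Iteration helpers: workloads__for_each() walks the fixed workloads[] array
 * above, while test_suite__for_each_test_case() walks a suite's test_cases
 * array, which is terminated by an entry with a NULL name. For example, as
 * done by test_suite__num_test_cases() below, the number of cases in a suite
 * can be counted with an empty loop body:
 *
 *	int i;
 *
 *	test_suite__for_each_test_case(suite, i)
 *		;
 *	// 'i' now holds the number of test cases in 'suite'.
 */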
#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

#define test_suite__for_each_test_case(suite, idx) \
	for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)

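/*
 * Close every file descriptor inherited from the parent, other than the
 * standard/debug fds 0-3 and the fd used to scan /proc/self/fd itself, so
 * that the fd-leak check after the test only sees descriptors opened by the
 * test itself.
 */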
static void close_parent_fds(void)
{
	DIR *dir = opendir("/proc/self/fd");
	struct dirent *ent;

	while ((ent = readdir(dir))) {
		char *end;
		long fd;

		if (ent->d_type != DT_LNK)
			continue;

		if (!isdigit(ent->d_name[0]))
			continue;

		fd = strtol(ent->d_name, &end, 10);
		if (*end)
			continue;

		if (fd <= 3 || fd == dirfd(dir))
			continue;

		close(fd);
	}
	closedir(dir);
}

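/*
 * Scan /proc/self/fd after a test case has run and report any descriptor
 * (other than fds 0-3 and the scan's own directory fd) that was left open.
 * Aborting makes the leak show up as a failure of the offending test case.
 */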
static void check_leaks(void)
{
	DIR *dir = opendir("/proc/self/fd");
	struct dirent *ent;
	int leaks = 0;

	while ((ent = readdir(dir))) {
		char path[PATH_MAX];
		char *end;
		long fd;
		ssize_t len;

		if (ent->d_type != DT_LNK)
			continue;

		if (!isdigit(ent->d_name[0]))
			continue;

		fd = strtol(ent->d_name, &end, 10);
		if (*end)
			continue;

		if (fd <= 3 || fd == dirfd(dir))
			continue;

		leaks++;
		len = readlinkat(dirfd(dir), ent->d_name, path, sizeof(path));
		if (len > 0 && (size_t)len < sizeof(path))
			path[len] = '\0';
		else
			strncpy(path, ent->d_name, sizeof(path));
		pr_err("Leak of file descriptor %s that opened: '%s'\n", ent->d_name, path);
	}
	closedir(dir);
	if (leaks)
		abort();
}

static int test_suite__num_test_cases(const struct test_suite *t)
{
	int num;

	test_suite__for_each_test_case(t, num);

	return num;
}

static const char *skip_reason(const struct test_suite *t, int test_case)
{
	if (!t->test_cases)
		return NULL;

	return t->test_cases[test_case >= 0 ? test_case : 0].skip_reason;
}

static const char *test_description(const struct test_suite *t, int test_case)
{
	if (t->test_cases && test_case >= 0)
		return t->test_cases[test_case].desc;

	return t->desc;
}

static test_fnptr test_function(const struct test_suite *t, int test_case)
{
	if (test_case <= 0)
		return t->test_cases[0].run_case;

	return t->test_cases[test_case].run_case;
}

static bool test_exclusive(const struct test_suite *t, int test_case)
{
	if (test_case <= 0)
		return t->test_cases[0].exclusive;

	return t->test_cases[test_case].exclusive;
}

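/*
 * Does the suite numbered 'suite_num' (0-based) with the given description
 * match any of the command line arguments? Arguments that parse fully as a
 * number select by 1-based suite number, anything else is matched as a
 * case-insensitive substring of the description. No arguments matches
 * everything.
 */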
static bool perf_test__matches(const char *desc, int suite_num, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == suite_num + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}

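/*
 * Per-child state: the forked child process plus the suite and test case it
 * is running.
 */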
struct child_test {
	struct child_process process;
	struct test_suite *test;
	int suite_num;
	int test_case_num;
};

static jmp_buf run_test_jmp_buf;

static void child_test_sig_handler(int sig)
{
#ifdef HAVE_BACKTRACE_SUPPORT
	void *stackdump[32];
	size_t stackdump_size;
#endif

	fprintf(stderr, "\n---- unexpected signal (%d) ----\n", sig);
#ifdef HAVE_BACKTRACE_SUPPORT
	stackdump_size = backtrace(stackdump, ARRAY_SIZE(stackdump));
	__dump_stack(stderr, stackdump, stackdump_size);
#endif
	siglongjmp(run_test_jmp_buf, sig);
}

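/*
 * Entry point of the forked child (installed as process.no_exec_cmd in
 * start_test()): close inherited fds, install signal handlers that longjmp
 * back here so a crashing test case is reported as a failure rather than
 * silently killing the child, run the test case and finally check for fd
 * leaks.
 */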
static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	close_parent_fds();

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		/* Received signal. */
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
	pr_debug("---- end(%d) ----\n", err);

	check_leaks();
err_out:
	fflush(NULL);
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}

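/*
 * Pseudo result, distinct from TEST_OK/TEST_SKIP/TEST_FAIL, used only by
 * print_test_result() to display a "Running" status line for still-active
 * children.
 */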
#define TEST_RUNNING -3

static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
			     int result, int width, int running)
{
	if (test_suite__num_test_cases(t) > 1) {
		int subw = width > 2 ? width - 2 : width;

		pr_info("%3d.%1d: %-*s:", curr_suite + 1, curr_test_case + 1, subw,
			test_description(t, curr_test_case));
	} else
		pr_info("%3d: %-*s:", curr_suite + 1, width, test_description(t, curr_test_case));

	switch (result) {
	case TEST_RUNNING:
		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
		break;
	case TEST_OK:
		pr_info(" Ok\n");
		break;
	case TEST_SKIP: {
		const char *reason = skip_reason(t, curr_test_case);

		if (reason)
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
		else
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
	}
		break;
	case TEST_FAIL:
	default:
		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		break;
	}

	return 0;
}

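/*
 * Wait for the child at child_tests[running_test] to complete: drain its
 * (non-blocking) stderr into a buffer, optionally updating a coloured
 * "Running (N active)" status line while other children are still alive,
 * then reap the child, print its result and dump the captured output if
 * sufficiently verbose or the test failed.
 */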
static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
			int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int curr_suite, curr_test_case, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	curr_suite = child_test->suite_num;
	curr_test_case = child_test->test_case_num;
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
		pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (test_suite__num_test_cases(t) > 1)
			pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
				test_description(t, curr_test_case));
		else
			pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
						  width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}

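/*
 * Start the given test case. With --dont-fork it runs inline during pass 1;
 * otherwise it is forked via the run-command API with run_test_child() as
 * the entry point. In parallel mode, pass 1 forks non-exclusive tests without
 * waiting for them, while exclusive tests are deferred to pass 2 where they,
 * like everything in sequential mode, are started and reaped one at a time.
 */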
static int start_test(struct test_suite *test, int curr_suite, int curr_test_case,
		      struct child_test **child, int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, curr_test_case)(test, curr_test_case);
			pr_debug("---- end ----\n");
			print_test_result(test, curr_suite, curr_test_case, err, width,
					  /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, curr_test_case)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, curr_test_case))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->suite_num = curr_suite;
	(*child)->test_case_num = curr_test_case;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}

/* State outside of __cmd_test for the sake of the signal handler. */

static size_t num_tests;
static struct child_test **child_tests;
static jmp_buf cmd_test_jmp_buf;

static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}

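/*
 * Run all matching test suites/cases: size the output column, start the
 * tests (in up to two passes, see the comment below) and, on SIGINT/SIGTERM,
 * forward the signal to any children still running before cleaning up.
 */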
static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
		      struct intlist *skiplist)
{
	static int width = 0;
	int err = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int i, len = strlen(test_description(*t, -1));

		if (width < len)
			width = len;

		test_suite__for_each_test_case(*t, i) {
			len = strlen(test_description(*t, i));
			if (width < len)
				width = len;
			num_tests += runs_per_test;
		}
	}
	child_tests = calloc(num_tests, sizeof(*child_tests));
	if (!child_tests)
		return -ENOMEM;

	err = sigsetjmp(cmd_test_jmp_buf, 1);
	if (err) {
		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
		       err);
		for (size_t x = 0; x < num_tests; x++) {
			struct child_test *child_test = child_tests[x];

			if (!child_test || child_test->process.pid <= 0)
				continue;

			pr_debug3("Killing %d pid %d\n",
				  child_test->suite_num + 1,
				  child_test->process.pid);
			kill(child_test->process.pid, err);
		}
		goto err_out;
	}
	signal(SIGINT, cmd_test_sig_handler);
	signal(SIGTERM, cmd_test_sig_handler);

	/*
	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
	 * runs the exclusive tests sequentially. In other modes all tests are
	 * run in pass 1.
	 */
	for (int pass = 1; pass <= 2; pass++) {
		int child_test_num = 0;
		int curr_suite = 0;

		for (struct test_suite **t = suites; *t; t++, curr_suite++) {
			int curr_test_case;
			bool suite_matched = false;

			if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
				/*
				 * Test suite shouldn't be run based on
				 * description. See if any test case should.
				 */
				bool skip = true;

				test_suite__for_each_test_case(*t, curr_test_case) {
					if (perf_test__matches(test_description(*t, curr_test_case),
							       curr_suite, argc, argv)) {
						skip = false;
						break;
					}
				}
				if (skip)
					continue;
			} else {
				suite_matched = true;
			}

			if (intlist__find(skiplist, curr_suite + 1)) {
				pr_info("%3d: %-*s:", curr_suite + 1, width,
					test_description(*t, -1));
				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
				continue;
			}

			for (unsigned int run = 0; run < runs_per_test; run++) {
				test_suite__for_each_test_case(*t, curr_test_case) {
					if (!suite_matched &&
					    !perf_test__matches(test_description(*t, curr_test_case),
								curr_suite, argc, argv))
						continue;
					err = start_test(*t, curr_suite, curr_test_case,
							 &child_tests[child_test_num++],
							 width, pass);
					if (err)
						goto err_out;
				}
			}
		}
		if (!sequential) {
			/* Parallel mode starts tests but doesn't finish them. Do that now. */
			for (size_t x = 0; x < num_tests; x++)
				finish_test(child_tests, x, num_tests, width);
		}
	}
err_out:
	signal(SIGINT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	if (err) {
		pr_err("Internal test harness failure. Completing any started tests:\n");
		for (size_t x = 0; x < num_tests; x++)
			finish_test(child_tests, x, num_tests, width);
	}
	free(child_tests);
	return err;
}

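/*
 * Implementation of "perf test list": print the number and description of
 * every matching suite and, for suites with more than one test case, each
 * test case as "N.M: description".
 */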
static int perf_test__list(FILE *fp, struct test_suite **suites, int argc, const char **argv)
{
	int curr_suite = 0;

	for (struct test_suite **t = suites; *t; t++, curr_suite++) {
		int curr_test_case;

		if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv))
			continue;

		fprintf(fp, "%3d: %s\n", curr_suite + 1, test_description(*t, -1));

		if (test_suite__num_test_cases(*t) <= 1)
			continue;

		test_suite__for_each_test_case(*t, curr_test_case) {
			fprintf(fp, "%3d.%1d: %s\n", curr_suite + 1, curr_test_case + 1,
				test_description(*t, curr_test_case));
		}
	}
	return 0;
}

static int workloads__fprintf_list(FILE *fp)
{
	struct test_workload *twl;
	int printed = 0;

	workloads__for_each(twl)
		printed += fprintf(fp, "%s\n", twl->name);

	return printed;
}

static int run_workload(const char *work, int argc, const char **argv)
{
	struct test_workload *twl;

	workloads__for_each(twl) {
		if (!strcmp(twl->name, work))
			return twl->func(argc, argv);
	}

	pr_info("No workload found: %s\n", work);
	return -1;
}

static int perf_test__config(const char *var, const char *value,
			     void *data __maybe_unused)
{
	if (!strcmp(var, "annotate.objdump"))
		test_objdump_path = value;

	return 0;
}

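/*
 * Build the NULL-terminated array of all test suites: the generic tests, the
 * architecture specific tests and the shell script tests. Suites containing
 * an exclusive test case are appended after all the others, matching the
 * two-pass ordering used by __cmd_test().
 */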
static struct test_suite **build_suites(void)
{
	/*
	 * TODO: suites is static to avoid needing to clean up the scripts tests
	 * for leak sanitizer.
	 */
	static struct test_suite **suites[] = {
		generic_tests,
		arch_tests,
		NULL,
	};
	struct test_suite **result;
	struct test_suite *t;
	size_t n = 0, num_suites = 0;

	if (suites[2] == NULL)
		suites[2] = create_script_test_suites();

#define for_each_suite(suite) \
	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0) \
		while ((suite = suites[i][j++]) != NULL)

	for_each_suite(t)
		num_suites++;

	result = calloc(num_suites + 1, sizeof(struct test_suite *));

	for (int pass = 1; pass <= 2; pass++) {
		for_each_suite(t) {
			bool exclusive = false;
			int curr_test_case;

			test_suite__for_each_test_case(t, curr_test_case) {
				if (test_exclusive(t, curr_test_case)) {
					exclusive = true;
					break;
				}
			}
			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
				result[n++] = t;
		}
	}
	return result;
#undef for_each_suite
}

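/*
 * Entry point for "perf test". Test selection accepts 1-based test numbers
 * and case-insensitive name fragments (see perf_test__matches()), for
 * example:
 *
 *	perf test list			# list the available tests
 *	perf test 1 3			# run tests 1 and 3
 *	perf test pmu			# run tests whose description contains "pmu"
 *	perf test --list-workloads	# list built-in workloads usable with -w
 */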
int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
		"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
		NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
		OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_BOOLEAN('F', "dont-fork", &dont_fork,
			    "Do not fork for testcase"),
		OPT_BOOLEAN('S', "sequential", &sequential,
			    "Run the tests one after another rather than in parallel"),
		OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
			     "Run each test the given number of times, default 1"),
		OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
		OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
		OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
		OPT_STRING(0, "objdump", &test_objdump_path, "path",
			   "objdump binary to use for disassembly and annotations"),
		OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
	int ret = hists__init();
	struct test_suite **suites;

	if (ret < 0)
		return ret;

	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list")) {
		suites = build_suites();
		ret = perf_test__list(stdout, suites, argc - 1, argv + 1);
		free(suites);
		return ret;
	}

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	if (dont_fork)
		sequential = true;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	suites = build_suites();
	ret = __cmd_test(suites, argc, argv, skiplist);
	free(suites);
	return ret;
}