1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * builtin-test.c
4 *
5 * Builtin regression testing command: ever growing number of sanity tests
6 */
7 #include <ctype.h>
8 #include <fcntl.h>
9 #include <errno.h>
10 #ifdef HAVE_BACKTRACE_SUPPORT
11 #include <execinfo.h>
12 #endif
13 #include <poll.h>
14 #include <unistd.h>
15 #include <setjmp.h>
16 #include <string.h>
17 #include <stdlib.h>
18 #include <sys/types.h>
19 #include <dirent.h>
20 #include <sys/wait.h>
21 #include <sys/stat.h>
22 #include "builtin.h"
23 #include "config.h"
24 #include "hist.h"
25 #include "intlist.h"
26 #include "tests.h"
27 #include "debug.h"
28 #include "color.h"
29 #include <subcmd/parse-options.h>
30 #include <subcmd/run-command.h>
31 #include "string2.h"
32 #include "symbol.h"
33 #include "util/rlimit.h"
34 #include "util/strbuf.h"
35 #include <linux/kernel.h>
36 #include <linux/string.h>
37 #include <subcmd/exec-cmd.h>
38 #include <linux/zalloc.h>
39
40 #include "tests-scripts.h"
41
/*
 * Command line option to run the tests without forking, in the same process,
 * which makes them easier to debug under e.g. gdb.
 */
static bool dont_fork;
/* Run the tests one after another rather than starting them in parallel. */
static bool sequential;
/* Number of times each test is run. */
static unsigned int runs_per_test = 1;
/* Optional DSO for tests to operate on, settable via --dso. */
const char *dso_to_test;
/* objdump binary used by disassembly tests; set via --objdump or perf config. */
const char *test_objdump_path = "objdump";
53
/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
/* Architectures with no dedicated tests get an empty, NULL terminated list. */
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif
66
/* Tests common to all architectures; NULL terminated. */
static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__demangle_rust,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	&suite__subcmd_help,
	&suite__kallsyms_split,
	NULL,
};
146
/* Built-in workloads, selectable via -w/--workload, used as test targets. */
static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
	&workload__traploop,
	&workload__inlineloop,

#ifdef HAVE_RUST_SUPPORT
	&workload__code_with_type,
#endif
};
162
/* Iterate 'workload' over every entry of the workloads[] array. */
#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

/* Iterate 'idx' over the NULL-name terminated test_cases of 'suite' (if any). */
#define test_suite__for_each_test_case(suite, idx)			\
	for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)
168
close_parent_fds(void)169 static void close_parent_fds(void)
170 {
171 DIR *dir = opendir("/proc/self/fd");
172 struct dirent *ent;
173
174 while ((ent = readdir(dir))) {
175 char *end;
176 long fd;
177
178 if (ent->d_type != DT_LNK)
179 continue;
180
181 if (!isdigit(ent->d_name[0]))
182 continue;
183
184 fd = strtol(ent->d_name, &end, 10);
185 if (*end)
186 continue;
187
188 if (fd <= 3 || fd == dirfd(dir))
189 continue;
190
191 close(fd);
192 }
193 closedir(dir);
194 }
195
check_leaks(void)196 static void check_leaks(void)
197 {
198 DIR *dir = opendir("/proc/self/fd");
199 struct dirent *ent;
200 int leaks = 0;
201
202 while ((ent = readdir(dir))) {
203 char path[PATH_MAX];
204 char *end;
205 long fd;
206 ssize_t len;
207
208 if (ent->d_type != DT_LNK)
209 continue;
210
211 if (!isdigit(ent->d_name[0]))
212 continue;
213
214 fd = strtol(ent->d_name, &end, 10);
215 if (*end)
216 continue;
217
218 if (fd <= 3 || fd == dirfd(dir))
219 continue;
220
221 leaks++;
222 len = readlinkat(dirfd(dir), ent->d_name, path, sizeof(path));
223 if (len > 0 && (size_t)len < sizeof(path))
224 path[len] = '\0';
225 else
226 strncpy(path, ent->d_name, sizeof(path));
227 pr_err("Leak of file descriptor %s that opened: '%s'\n", ent->d_name, path);
228 }
229 closedir(dir);
230 if (leaks)
231 abort();
232 }
233
test_suite__num_test_cases(const struct test_suite * t)234 static int test_suite__num_test_cases(const struct test_suite *t)
235 {
236 int num;
237
238 test_suite__for_each_test_case(t, num);
239
240 return num;
241 }
242
skip_reason(const struct test_suite * t,int test_case)243 static const char *skip_reason(const struct test_suite *t, int test_case)
244 {
245 if (!t->test_cases)
246 return NULL;
247
248 return t->test_cases[test_case >= 0 ? test_case : 0].skip_reason;
249 }
250
test_description(const struct test_suite * t,int test_case)251 static const char *test_description(const struct test_suite *t, int test_case)
252 {
253 if (t->test_cases && test_case >= 0)
254 return t->test_cases[test_case].desc;
255
256 return t->desc;
257 }
258
test_function(const struct test_suite * t,int test_case)259 static test_fnptr test_function(const struct test_suite *t, int test_case)
260 {
261 if (test_case <= 0)
262 return t->test_cases[0].run_case;
263
264 return t->test_cases[test_case].run_case;
265 }
266
test_exclusive(const struct test_suite * t,int test_case)267 static bool test_exclusive(const struct test_suite *t, int test_case)
268 {
269 if (test_case <= 0)
270 return t->test_cases[0].exclusive;
271
272 return t->test_cases[test_case].exclusive;
273 }
274
/*
 * Does a test match the command line selection in argv? Each argv entry is
 * either a 1-based test number (compared against suite_num + 1) or a
 * case-insensitive substring of the test description. An empty selection
 * matches everything.
 */
static bool perf_test__matches(const char *desc, int suite_num, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		/* strtol, not strtoul: the result is stored in a signed long. */
		long nr = strtol(argv[i], &end, 10);

		if (*end == '\0') {
			/* Purely numeric argument: match by 1-based index. */
			if (nr == suite_num + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}
298
/* Per-child state for one forked (suite, test case) run. */
struct child_test {
	struct child_process process;	/* subcmd run-command state for the child. */
	struct test_suite *test;	/* Suite the test case belongs to. */
	int suite_num;			/* 0-based suite index. */
	int test_case_num;		/* 0-based case index within the suite. */
};

/* Jump target used by child_test_sig_handler() to bail out of a crashing test. */
static jmp_buf run_test_jmp_buf;
307
/*
 * Signal handler installed by run_test_child(): report the unexpected signal,
 * dump a backtrace when available, then longjmp back to run_test_child() with
 * the signal number so the child can exit with an error status.
 */
static void child_test_sig_handler(int sig)
{
#ifdef HAVE_BACKTRACE_SUPPORT
	void *stackdump[32];
	size_t stackdump_size;
#endif

	fprintf(stderr, "\n---- unexpected signal (%d) ----\n", sig);
#ifdef HAVE_BACKTRACE_SUPPORT
	stackdump_size = backtrace(stackdump, ARRAY_SIZE(stackdump));
	__dump_stack(stderr, stackdump, stackdump_size);
#endif
	siglongjmp(run_test_jmp_buf, sig);
}
322
/*
 * Entry point of a forked test child (installed as process.no_exec_cmd).
 * Runs the selected test case with fatal signals diverted to
 * child_test_sig_handler() so a crashing test reports the signal rather than
 * dying silently. Returns the negated test result so the child's exit status
 * is non-negative (TEST_FAIL -> 1, TEST_SKIP -> 2, signal N -> N).
 */
static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	/* Drop inherited fds so check_leaks() only sees what the test opens. */
	close_parent_fds();

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		/* Received signal. */
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
	pr_debug("---- end(%d) ----\n", err);

	/* Aborts the child if the test leaked any file descriptor. */
	check_leaks();
err_out:
	fflush(NULL);
	/* Restore default handlers so exiting isn't intercepted. */
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}
355
356 #define TEST_RUNNING -3
357
print_test_result(struct test_suite * t,int curr_suite,int curr_test_case,int result,int width,int running)358 static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
359 int result, int width, int running)
360 {
361 if (test_suite__num_test_cases(t) > 1) {
362 int subw = width > 2 ? width - 2 : width;
363
364 pr_info("%3d.%1d: %-*s:", curr_suite + 1, curr_test_case + 1, subw,
365 test_description(t, curr_test_case));
366 } else
367 pr_info("%3d: %-*s:", curr_suite + 1, width, test_description(t, curr_test_case));
368
369 switch (result) {
370 case TEST_RUNNING:
371 color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
372 break;
373 case TEST_OK:
374 pr_info(" Ok\n");
375 break;
376 case TEST_SKIP: {
377 const char *reason = skip_reason(t, curr_test_case);
378
379 if (reason)
380 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
381 else
382 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
383 }
384 break;
385 case TEST_FAIL:
386 default:
387 color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
388 break;
389 }
390
391 return 0;
392 }
393
/*
 * Reap one child test: drain its captured stderr, maintain an optional
 * "Running (N active)" status line on color terminals, print the final result
 * and free the child_tests[] entry.
 * @child_tests: array of all started children (scanned to count active ones).
 * @running_test: index of the child to finish.
 * @child_test_num: number of entries in child_tests.
 * @width: column width for aligning test descriptions.
 */
static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int curr_suite, curr_test_case, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	curr_suite = child_test->suite_num;
	curr_test_case = child_test->test_case_num;
	err = child_test->process.err;	/* fd of the child's stderr pipe, if any. */
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
		pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (test_suite__num_test_cases(t) > 1)
			pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
				test_description(t, curr_test_case));
		else
			pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			int running = 0;

			/* Count children from this one onward still executing. */
			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			/* Refresh the status line only when the count changed. */
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
						  width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					/* Output read; keep draining the pipe. */
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		/* Pipe at EOF: done once the child has actually exited. */
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	/* Replay the captured output for failures with -v, or always with -vv. */
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}
500
/*
 * Run or launch one test case depending on the forking mode and pass.
 * With --dont-fork the test runs inline during pass 1 only. Otherwise, in
 * parallel mode, pass 1 starts non-exclusive tests without waiting and
 * exclusive tests are deferred to pass 2 where (like sequential mode) they
 * are run to completion immediately.
 * @child: output slot in the caller's child_tests array; owns the allocation
 *         until finish_test() frees it.
 * Returns 0 on success or when the test isn't run in this pass, -ENOMEM or a
 * start_command() error otherwise.
 */
static int start_test(struct test_suite *test, int curr_suite, int curr_test_case,
		struct child_test **child, int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, curr_test_case)(test, curr_test_case);
			pr_debug("---- end ----\n");
			print_test_result(test, curr_suite, curr_test_case, err, width,
					  /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, curr_test_case)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, curr_test_case))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->suite_num = curr_suite;
	(*child)->test_case_num = curr_test_case;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		/* Quiet: discard the child's stdout and stderr. */
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		/* Verbose: capture output via a pipe, stdout folded into stderr. */
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		/* Run this test to completion before starting the next one. */
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}
552
/* State outside of __cmd_test for the sake of the signal handler. */

/* Total (suite, test case, run) combinations == size of child_tests. */
static size_t num_tests;
/* In-flight child tests; entries are freed by finish_test(). */
static struct child_test **child_tests;
/* Jump target for cmd_test_sig_handler() on SIGINT/SIGTERM. */
static jmp_buf cmd_test_jmp_buf;

/* Divert fatal signals back into __cmd_test() so it can kill the children. */
static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}
563
__cmd_test(struct test_suite ** suites,int argc,const char * argv[],struct intlist * skiplist)564 static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
565 struct intlist *skiplist)
566 {
567 static int width = 0;
568 int err = 0;
569
570 for (struct test_suite **t = suites; *t; t++) {
571 int i, len = strlen(test_description(*t, -1));
572
573 if (width < len)
574 width = len;
575
576 test_suite__for_each_test_case(*t, i) {
577 len = strlen(test_description(*t, i));
578 if (width < len)
579 width = len;
580 num_tests += runs_per_test;
581 }
582 }
583 child_tests = calloc(num_tests, sizeof(*child_tests));
584 if (!child_tests)
585 return -ENOMEM;
586
587 err = sigsetjmp(cmd_test_jmp_buf, 1);
588 if (err) {
589 pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
590 err);
591 for (size_t x = 0; x < num_tests; x++) {
592 struct child_test *child_test = child_tests[x];
593
594 if (!child_test || child_test->process.pid <= 0)
595 continue;
596
597 pr_debug3("Killing %d pid %d\n",
598 child_test->suite_num + 1,
599 child_test->process.pid);
600 kill(child_test->process.pid, err);
601 }
602 goto err_out;
603 }
604 signal(SIGINT, cmd_test_sig_handler);
605 signal(SIGTERM, cmd_test_sig_handler);
606
607 /*
608 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
609 * runs the exclusive tests sequentially. In other modes all tests are
610 * run in pass 1.
611 */
612 for (int pass = 1; pass <= 2; pass++) {
613 int child_test_num = 0;
614 int curr_suite = 0;
615
616 for (struct test_suite **t = suites; *t; t++, curr_suite++) {
617 int curr_test_case;
618 bool suite_matched = false;
619
620 if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
621 /*
622 * Test suite shouldn't be run based on
623 * description. See if any test case should.
624 */
625 bool skip = true;
626
627 test_suite__for_each_test_case(*t, curr_test_case) {
628 if (perf_test__matches(test_description(*t, curr_test_case),
629 curr_suite, argc, argv)) {
630 skip = false;
631 break;
632 }
633 }
634 if (skip)
635 continue;
636 } else {
637 suite_matched = true;
638 }
639
640 if (intlist__find(skiplist, curr_suite + 1)) {
641 pr_info("%3d: %-*s:", curr_suite + 1, width,
642 test_description(*t, -1));
643 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
644 continue;
645 }
646
647 for (unsigned int run = 0; run < runs_per_test; run++) {
648 test_suite__for_each_test_case(*t, curr_test_case) {
649 if (!suite_matched &&
650 !perf_test__matches(test_description(*t, curr_test_case),
651 curr_suite, argc, argv))
652 continue;
653 err = start_test(*t, curr_suite, curr_test_case,
654 &child_tests[child_test_num++],
655 width, pass);
656 if (err)
657 goto err_out;
658 }
659 }
660 }
661 if (!sequential) {
662 /* Parallel mode starts tests but doesn't finish them. Do that now. */
663 for (size_t x = 0; x < num_tests; x++)
664 finish_test(child_tests, x, num_tests, width);
665 }
666 }
667 err_out:
668 signal(SIGINT, SIG_DFL);
669 signal(SIGTERM, SIG_DFL);
670 if (err) {
671 pr_err("Internal test harness failure. Completing any started tests:\n:");
672 for (size_t x = 0; x < num_tests; x++)
673 finish_test(child_tests, x, num_tests, width);
674 }
675 free(child_tests);
676 return err;
677 }
678
perf_test__list(FILE * fp,struct test_suite ** suites,int argc,const char ** argv)679 static int perf_test__list(FILE *fp, struct test_suite **suites, int argc, const char **argv)
680 {
681 int curr_suite = 0;
682
683 for (struct test_suite **t = suites; *t; t++, curr_suite++) {
684 int curr_test_case;
685
686 if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv))
687 continue;
688
689 fprintf(fp, "%3d: %s\n", curr_suite + 1, test_description(*t, -1));
690
691 if (test_suite__num_test_cases(*t) <= 1)
692 continue;
693
694 test_suite__for_each_test_case(*t, curr_test_case) {
695 fprintf(fp, "%3d.%1d: %s\n", curr_suite + 1, curr_test_case + 1,
696 test_description(*t, curr_test_case));
697 }
698 }
699 return 0;
700 }
701
workloads__fprintf_list(FILE * fp)702 static int workloads__fprintf_list(FILE *fp)
703 {
704 struct test_workload *twl;
705 int printed = 0;
706
707 workloads__for_each(twl)
708 printed += fprintf(fp, "%s\n", twl->name);
709
710 return printed;
711 }
712
run_workload(const char * work,int argc,const char ** argv)713 static int run_workload(const char *work, int argc, const char **argv)
714 {
715 struct test_workload *twl;
716
717 workloads__for_each(twl) {
718 if (!strcmp(twl->name, work))
719 return twl->func(argc, argv);
720 }
721
722 pr_info("No workload found: %s\n", work);
723 return -1;
724 }
725
/* perf_config() callback: honor the annotate.objdump configuration variable. */
static int perf_test__config(const char *var, const char *value,
			     void *data __maybe_unused)
{
	if (!strcmp(var, "annotate.objdump"))
		test_objdump_path = value;

	return 0;
}
734
build_suites(void)735 static struct test_suite **build_suites(void)
736 {
737 /*
738 * TODO: suites is static to avoid needing to clean up the scripts tests
739 * for leak sanitizer.
740 */
741 static struct test_suite **suites[] = {
742 generic_tests,
743 arch_tests,
744 NULL,
745 };
746 struct test_suite **result;
747 struct test_suite *t;
748 size_t n = 0, num_suites = 0;
749
750 if (suites[2] == NULL)
751 suites[2] = create_script_test_suites();
752
753 #define for_each_suite(suite) \
754 for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0) \
755 while ((suite = suites[i][j++]) != NULL)
756
757 for_each_suite(t)
758 num_suites++;
759
760 result = calloc(num_suites + 1, sizeof(struct test_suite *));
761
762 for (int pass = 1; pass <= 2; pass++) {
763 for_each_suite(t) {
764 bool exclusive = false;
765 int curr_test_case;
766
767 test_suite__for_each_test_case(t, curr_test_case) {
768 if (test_exclusive(t, curr_test_case)) {
769 exclusive = true;
770 break;
771 }
772 }
773 if ((!exclusive && pass == 1) || (exclusive && pass == 2))
774 result[n++] = t;
775 }
776 }
777 return result;
778 #undef for_each_suite
779 }
780
/*
 * 'perf test' entry point: parse options, then either list the tests
 * ("perf test list"), run a named workload (-w), list the built-in workloads
 * (--list-workloads), or run the matching test suites via __cmd_test().
 */
int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
		    "Do not fork for testcase"),
	OPT_BOOLEAN('S', "sequential", &sequential,
		    "Run the tests one after another rather than in parallel"),
	OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
		     "Run each test the given number of times, default 1"),
	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
	OPT_STRING(0, "objdump", &test_objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
	int ret = hists__init();
	struct test_suite **suites;

	if (ret < 0)
		return ret;

	/* Pick up e.g. annotate.objdump from the user's perf config. */
	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list")) {
		suites = build_suites();
		ret = perf_test__list(stdout, suites, argc - 1, argv + 1);
		free(suites);
		return ret;
	}

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	/* Running in-process implies one test at a time. */
	if (dont_fork)
		sequential = true;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;


	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	suites = build_suites();
	ret = __cmd_test(suites, argc, argv, skiplist);
	free(suites);
	return ret;
}
859