// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <setjmp.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "config.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
#include "util/strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>

#include "tests-scripts.h"

/*
 * Command line option to not fork; the tests run in the same process,
 * making them easier to debug.
 */
static bool dont_fork;
/* Run the tests sequentially, one after another, rather than forking them in parallel. */
static bool sequential;
const char *dso_to_test;
const char *test_objdump_path = "objdump";

/*
 * List of architecture specific tests. Not a weak symbol: the array length
 * depends on the initialization, so GCC with LTO complains about conflicting
 * definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif

static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
#ifdef HAVE_LIBTRACEEVENT
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
#endif
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__python_use,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps__merge_in,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	NULL,
};

static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
};

#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

static int num_subtests(const struct test_suite *t)
{
	int num;

	if (!t->test_cases)
		return 0;

	num = 0;
	while (t->test_cases[num].name)
		num++;

	return num;
}

static bool has_subtests(const struct test_suite *t)
{
	return num_subtests(t) > 1;
}

static const char *skip_reason(const struct test_suite *t, int subtest)
{
	if (!t->test_cases)
		return NULL;

	return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
}

static const char *test_description(const struct test_suite *t, int subtest)
{
	if (t->test_cases && subtest >= 0)
		return t->test_cases[subtest].desc;

	return t->desc;
}

static test_fnptr test_function(const struct test_suite *t, int subtest)
{
	if (subtest <= 0)
		return t->test_cases[0].run_case;

	return t->test_cases[subtest].run_case;
}

static bool test_exclusive(const struct test_suite *t, int subtest)
{
	if (subtest <= 0)
		return t->test_cases[0].exclusive;

	return t->test_cases[subtest].exclusive;
}

static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}

struct child_test {
	struct child_process process;
	struct test_suite *test;
	int test_num;
	int subtest;
};

static jmp_buf run_test_jmp_buf;

static void child_test_sig_handler(int sig)
{
	siglongjmp(run_test_jmp_buf, sig);
}

static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		fprintf(stderr, "\n---- unexpected signal (%d) ----\n", err);
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->subtest)(child->test, child->subtest);
	pr_debug("---- end(%d) ----\n", err);

err_out:
	fflush(NULL);
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}

#define TEST_RUNNING -3

static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width,
			     int running)
{
	if (has_subtests(t)) {
		int subw = width > 2 ? width - 2 : width;

		pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest));
	} else
		pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));

	switch (result) {
	case TEST_RUNNING:
		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
		break;
	case TEST_OK:
		pr_info(" Ok\n");
		break;
	case TEST_SKIP: {
		const char *reason = skip_reason(t, subtest);

		if (reason)
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
		else
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
	}
		break;
	case TEST_FAIL:
	default:
		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		break;
	}

	return 0;
}

static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
			int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int i, subi, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	i = child_test->test_num;
	subi = child_test->subtest;
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (has_subtests(t) && subi == 0)
		pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr, which are set to
	 * be non-blocking, until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (has_subtests(t))
			pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
		else
			pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, i, subi, TEST_RUNNING, width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, i, subi, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}

static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
		      int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, subi)(test, subi);
			pr_debug("---- end ----\n");
			print_test_result(test, i, subi, err, width, /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, subi)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, subi))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->test_num = i;
	(*child)->subtest = subi;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}

/* State outside of __cmd_test for the sake of the signal handler. */

static size_t num_tests;
static struct child_test **child_tests;
static jmp_buf cmd_test_jmp_buf;

static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}

static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
		      struct intlist *skiplist)
{
	static int width = 0;
	int err = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int len = strlen(test_description(*t, -1));

		if (width < len)
			width = len;

		if (has_subtests(*t)) {
			for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
				len = strlen(test_description(*t, subi));
				if (width < len)
					width = len;
				num_tests++;
			}
		} else {
			num_tests++;
		}
	}
	child_tests = calloc(num_tests, sizeof(*child_tests));
	if (!child_tests)
		return -ENOMEM;

	err = sigsetjmp(cmd_test_jmp_buf, 1);
	if (err) {
		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
		       err);
		for (size_t x = 0; x < num_tests; x++) {
			struct child_test *child_test = child_tests[x];

			if (!child_test || child_test->process.pid <= 0)
				continue;

			pr_debug3("Killing %d pid %d\n",
				  child_test->test_num + 1,
				  child_test->process.pid);
			kill(child_test->process.pid, err);
		}
		goto err_out;
	}
	signal(SIGINT, cmd_test_sig_handler);
	signal(SIGTERM, cmd_test_sig_handler);

	/*
	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
	 * runs the exclusive tests sequentially. In other modes all tests are
	 * run in pass 1.
	 */
	for (int pass = 1; pass <= 2; pass++) {
		int child_test_num = 0;
		int i = 0;

		for (struct test_suite **t = suites; *t; t++) {
			int curr = i++;

			if (!perf_test__matches(test_description(*t, -1), curr, argc, argv)) {
				/*
				 * Test suite shouldn't be run based on
				 * description. See if subtest should.
				 */
				bool skip = true;

				for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
					if (perf_test__matches(test_description(*t, subi),
							       curr, argc, argv))
						skip = false;
				}

				if (skip)
					continue;
			}

			if (intlist__find(skiplist, i)) {
				pr_info("%3d: %-*s:", curr + 1, width, test_description(*t, -1));
				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
				continue;
			}

			if (!has_subtests(*t)) {
				err = start_test(*t, curr, -1, &child_tests[child_test_num++],
						 width, pass);
				if (err)
					goto err_out;
				continue;
			}
			for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
				if (!perf_test__matches(test_description(*t, subi),
							curr, argc, argv))
					continue;

				err = start_test(*t, curr, subi, &child_tests[child_test_num++],
						 width, pass);
				if (err)
					goto err_out;
			}
		}
		if (!sequential) {
			/* Parallel mode starts tests but doesn't finish them. Do that now. */
			for (size_t x = 0; x < num_tests; x++)
				finish_test(child_tests, x, num_tests, width);
		}
	}
err_out:
	signal(SIGINT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	if (err) {
		pr_err("Internal test harness failure. Completing any started tests:\n");
		for (size_t x = 0; x < num_tests; x++)
			finish_test(child_tests, x, num_tests, width);
	}
	free(child_tests);
	return err;
}

static int perf_test__list(struct test_suite **suites, int argc, const char **argv)
{
	int i = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int curr = i++;

		if (!perf_test__matches(test_description(*t, -1), curr, argc, argv))
			continue;

		pr_info("%3d: %s\n", i, test_description(*t, -1));

		if (has_subtests(*t)) {
			int subn = num_subtests(*t);
			int subi;

			for (subi = 0; subi < subn; subi++)
				pr_info("%3d:%1d: %s\n", i, subi + 1,
					test_description(*t, subi));
		}
	}
	return 0;
}

static int workloads__fprintf_list(FILE *fp)
{
	struct test_workload *twl;
	int printed = 0;

	workloads__for_each(twl)
		printed += fprintf(fp, "%s\n", twl->name);

	return printed;
}

static int run_workload(const char *work, int argc, const char **argv)
{
	struct test_workload *twl;

	workloads__for_each(twl) {
		if (!strcmp(twl->name, work))
			return twl->func(argc, argv);
	}

	pr_info("No workload found: %s\n", work);
	return -1;
}

static int perf_test__config(const char *var, const char *value,
			     void *data __maybe_unused)
{
	if (!strcmp(var, "annotate.objdump"))
		test_objdump_path = value;

	return 0;
}

static struct test_suite **build_suites(void)
{
	/*
	 * TODO: suites is static to avoid needing to clean up the scripts tests
	 * for leak sanitizer.
	 */
	static struct test_suite **suites[] = {
		generic_tests,
		arch_tests,
		NULL,
	};
	struct test_suite **result;
	struct test_suite *t;
	size_t n = 0, num_suites = 0;

	if (suites[2] == NULL)
		suites[2] = create_script_test_suites();

#define for_each_test(t)						\
	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0)	\
		while ((t = suites[i][j++]) != NULL)

	for_each_test(t)
		num_suites++;

	result = calloc(num_suites + 1, sizeof(struct test_suite *));

	for (int pass = 1; pass <= 2; pass++) {
		for_each_test(t) {
			bool exclusive = false;

			if (!has_subtests(t)) {
				exclusive = test_exclusive(t, -1);
			} else {
				for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
					if (test_exclusive(t, subi)) {
						exclusive = true;
						break;
					}
				}
			}
			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
				result[n++] = t;
		}
	}
	return result;
#undef for_each_test
}

int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
		"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
		NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
		OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_BOOLEAN('F', "dont-fork", &dont_fork,
			    "Do not fork for testcase"),
		OPT_BOOLEAN('S', "sequential", &sequential,
			    "Run the tests one after another rather than in parallel"),
		OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
		OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
		OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
		OPT_STRING(0, "objdump", &test_objdump_path, "path",
			   "objdump binary to use for disassembly and annotations"),
		OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
	int ret = hists__init();
	struct test_suite **suites;

	if (ret < 0)
		return ret;

	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list")) {
		suites = build_suites();
		ret = perf_test__list(suites, argc - 1, argv + 1);
		free(suites);
		return ret;
	}

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	if (dont_fork)
		sequential = true;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	suites = build_suites();
	ret = __cmd_test(suites, argc, argv, skiplist);
	free(suites);
	return ret;
}