// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#define _GNU_SOURCE
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <execinfo.h> /* backtrace */
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <bpf/btf.h>
#include "json_writer.h"

static bool verbose(void)
{
	return env.verbosity > VERBOSE_NONE;
}

static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);
	fflush(stderr);

	stdout = open_memstream(log_buf, log_cnt);
	if (!stdout) {
		stdout = env.stdout;
		perror("open_memstream");
		return;
	}

	if (env.subtest_state)
		env.subtest_state->stdout = stdout;
	else
		env.test_state->stdout = stdout;

	stderr = stdout;
#endif
}

static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	stdio_hijack_init(log_buf, log_cnt);
#endif
}

static void stdio_restore_cleanup(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);

	if (env.subtest_state) {
		fclose(env.subtest_state->stdout);
		env.subtest_state->stdout = NULL;
		stdout = env.test_state->stdout;
		stderr = env.test_state->stdout;
	} else {
		fclose(env.test_state->stdout);
		env.test_state->stdout = NULL;
	}
#endif
}

static void stdio_restore(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	if (stdout == env.stdout)
		return;

	stdio_restore_cleanup();

	stdout = env.stdout;
	stderr = env.stderr;
#endif
}

/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}
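
/* A few illustrative cases (not exercised by the runner itself):
 * glob_match("xdp_bonding", "xdp*") and glob_match("xdp", "xdp") are true,
 * glob_match("xdp_bonding", "*bond*") is true (a leading '*' scans forward),
 * while glob_match("xdp", "xdp?") is false -- '*' is the only wildcard.
 */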

#define EXIT_NO_TEST		2
#define EXIT_ERR_SETUP_INFRA	3

/* defined in test_progs.h */
struct test_env env = {};

struct prog_test_def {
	const char *test_name;
	int test_num;
	void (*run_test)(void);
	void (*run_serial_test)(void);
	bool should_run;
	bool need_cgroup_cleanup;
};

/* Override C runtime library's usleep() implementation to ensure nanosleep()
 * is always called. Usleep is frequently used in selftests as a way to
 * trigger kprobes and tracepoints.
 */
int usleep(useconds_t usec)
{
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};

	return syscall(__NR_nanosleep, &ts, NULL);
}

static bool should_run(struct test_selector *sel, int num, const char *name)
{
	int i;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(name, sel->blacklist.tests[i].name) &&
		    !sel->blacklist.tests[i].subtest_cnt)
			return false;
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name))
			return true;
	}

	if (!sel->whitelist.cnt && !sel->num_set)
		return true;

	return num < sel->num_set_len && sel->num_set[num];
}

static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       int subtest_num,
			       const char *test_name,
			       const char *subtest_name)
{
	int i, j;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
			if (!sel->blacklist.tests[i].subtest_cnt)
				return false;

			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->blacklist.tests[i].subtests[j]))
					return false;
			}
		}
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
			if (!sel->whitelist.tests[i].subtest_cnt)
				return true;

			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->whitelist.tests[i].subtests[j]))
					return true;
			}
		}
	}

	if (!sel->whitelist.cnt && !subtest_sel->num_set)
		return true;

	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}

static char *test_result(bool failed, bool skipped)
{
	return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
}

#define TEST_NUM_WIDTH 7

static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
{
	int skipped_cnt = test_state->skip_cnt;
	int subtests_cnt = test_state->subtest_num;

	fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
	if (test_state->error_cnt)
		fprintf(env.stdout, "FAIL");
	else if (!skipped_cnt)
		fprintf(env.stdout, "OK");
	else if (skipped_cnt == subtests_cnt || !subtests_cnt)
		fprintf(env.stdout, "SKIP");
	else
		fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);

	fprintf(env.stdout, "\n");
}

static void print_test_log(char *log_buf, size_t log_cnt)
{
	log_buf[log_cnt] = '\0';
	fprintf(env.stdout, "%s", log_buf);
	if (log_buf[log_cnt - 1] != '\n')
		fprintf(env.stdout, "\n");
}

static void print_subtest_name(int test_num, int subtest_num,
			       const char *test_name, char *subtest_name,
			       char *result)
{
	char test_num_str[32];

	snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);

	fprintf(env.stdout, "#%-*s %s/%s",
		TEST_NUM_WIDTH, test_num_str,
		test_name, subtest_name);

	if (result)
		fprintf(env.stdout, ":%s", result);

	fprintf(env.stdout, "\n");
}
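
/* How test output gets here: stdio_hijack()/stdio_hijack_init() point
 * stdout/stderr at an open_memstream() buffer per test or subtest, and
 * dump_test_log() below decides, based on verbosity, force_log and failures,
 * which captured buffers are replayed to the real stdout; when -J is used,
 * the same data also feeds the JSON summary.
 */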

static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
{
	/* open_memstream (from stdio_hijack_init) ensures that log_buf is terminated by a
	 * null byte. Yet in parallel mode, log_buf will be NULL if there is no message.
	 */
	if (log_cnt) {
		jsonw_string_field(w, "message", log_buf);
	} else {
		jsonw_string_field(w, "message", "");
	}
}

static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool skip_ok_subtests,
			  bool par_exec_result,
			  json_writer_t *w)
{
	bool test_failed = test_state->error_cnt > 0;
	bool force_log = test_state->force_log;
	bool print_test = verbose() || force_log || test_failed;
	int i;
	struct subtest_state *subtest_state;
	bool subtest_failed;
	bool subtest_filtered;
	bool print_subtest;

	/* we do not print anything in the worker thread */
	if (env.worker_id != -1)
		return;

	/* there is nothing to print when verbose log is used and execution
	 * is not in parallel mode
	 */
	if (verbose() && !par_exec_result)
		return;

	if (test_state->log_cnt && print_test)
		print_test_log(test_state->log_buf, test_state->log_cnt);

	if (w && print_test) {
		jsonw_start_object(w);
		jsonw_string_field(w, "name", test->test_name);
		jsonw_uint_field(w, "number", test->test_num);
		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
		jsonw_bool_field(w, "failed", test_failed);
		jsonw_name(w, "subtests");
		jsonw_start_array(w);
	}

	for (i = 0; i < test_state->subtest_num; i++) {
		subtest_state = &test_state->subtest_states[i];
		subtest_failed = subtest_state->error_cnt;
		subtest_filtered = subtest_state->filtered;
		print_subtest = verbose() || force_log || subtest_failed;

		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
			continue;

		if (subtest_state->log_cnt && print_subtest) {
			print_test_log(subtest_state->log_buf,
				       subtest_state->log_cnt);
		}

		print_subtest_name(test->test_num, i + 1,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

		if (w && print_subtest) {
			jsonw_start_object(w);
			jsonw_string_field(w, "name", subtest_state->name);
			jsonw_uint_field(w, "number", i + 1);
			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
			jsonw_bool_field(w, "failed", subtest_failed);
			jsonw_end_object(w);
		}
	}

	if (w && print_test) {
		jsonw_end_array(w);
		jsonw_end_object(w);
	}

	print_test_result(test, test_state);
}
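
/* The helpers below put global state back the way run_one_test() expects it:
 * tests are free to change CPU affinity and to switch network namespaces, so
 * both are restored after every test (cgroups are cleaned up separately via
 * need_cgroup_cleanup).
 */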

static void stdio_restore(void);

/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
 * it after each test/sub-test.
 */
static void reset_affinity(void)
{
	cpu_set_t cpuset;
	int i, err;

	CPU_ZERO(&cpuset);
	for (i = 0; i < env.nr_cpus; i++)
		CPU_SET(i, &cpuset);

	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void save_netns(void)
{
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void restore_netns(void)
{
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

void test__end_subtest(void)
{
	struct prog_test_def *test = env.test;
	struct test_state *test_state = env.test_state;
	struct subtest_state *subtest_state = env.subtest_state;

	if (subtest_state->error_cnt) {
		test_state->error_cnt++;
	} else {
		if (!subtest_state->skipped)
			test_state->sub_succ_cnt++;
		else
			test_state->skip_cnt++;
	}

	if (verbose() && !env.workers)
		print_subtest_name(test->test_num, test_state->subtest_num,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

	stdio_restore_cleanup();
	env.subtest_state = NULL;
}

bool test__start_subtest(const char *subtest_name)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	struct subtest_state *subtest_state;
	size_t sub_state_size = sizeof(*subtest_state);

	if (env.subtest_state)
		test__end_subtest();

	state->subtest_num++;
	state->subtest_states =
		realloc(state->subtest_states,
			state->subtest_num * sub_state_size);
	if (!state->subtest_states) {
		fprintf(stderr, "Not enough memory to allocate subtest result\n");
		return false;
	}

	subtest_state = &state->subtest_states[state->subtest_num - 1];

	memset(subtest_state, 0, sub_state_size);

	if (!subtest_name || !subtest_name[0]) {
		fprintf(env.stderr,
			"Subtest #%d didn't provide sub-test name!\n",
			state->subtest_num);
		return false;
	}

	subtest_state->name = strdup(subtest_name);
	if (!subtest_state->name) {
		fprintf(env.stderr,
			"Subtest #%d: failed to copy subtest name!\n",
			state->subtest_num);
		return false;
	}

	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
				state->subtest_num,
				test->test_name,
				subtest_name)) {
		subtest_state->filtered = true;
		return false;
	}

	env.subtest_state = subtest_state;
	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);

	return true;
}

void test__force_log(void)
{
	env.test_state->force_log = true;
}

void test__skip(void)
{
	if (env.subtest_state)
		env.subtest_state->skipped = true;
	else
		env.test_state->skip_cnt++;
}

void test__fail(void)
{
	if (env.subtest_state)
		env.subtest_state->error_cnt++;
	else
		env.test_state->error_cnt++;
}
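
/* Typical use of the subtest API from a test function (illustrative sketch,
 * not an actual test in this tree):
 *
 *	void test_example(void)
 *	{
 *		if (test__start_subtest("first"))
 *			ASSERT_OK(do_first_thing(), "first_thing");
 *		if (test__start_subtest("second")) {
 *			if (!feature_available())
 *				test__skip();
 *		}
 *	}
 *
 * test__start_subtest() returns false for filtered-out subtests, and the
 * runner finalizes the last subtest automatically in run_one_test().
 */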

int test__join_cgroup(const char *path)
{
	int fd;

	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
			fprintf(stderr,
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
			return -1;
		}

		env.test->need_cgroup_cleanup = true;
	}

	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr,
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return fd;
	}

	if (join_cgroup(path)) {
		fprintf(stderr,
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return -1;
	}

	return fd;
}

int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
		test__fail();
		return -1;
	}
	return bpf_map__fd(map);
}

int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

/* extern declarations for test funcs */
#define DEFINE_TEST(name)				\
	extern void test_##name(void) __weak;		\
	extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>
#undef DEFINE_TEST

static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) {			\
	.test_name = #name,			\
	.run_test = &test_##name,		\
	.run_serial_test = &serial_test_##name,	\
},
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};

static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);

static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
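
/* Test discovery: prog_tests/tests.h is a (Makefile-generated) list of
 * DEFINE_TEST(name) entries. The weak extern declarations above let a test
 * provide either test_<name>() or serial_test_<name>(); whichever is absent
 * resolves to NULL, and main() rejects tests that define both or neither.
 */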

const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] =
"BPF selftests test runner\v"
"Options accepting the NAMES parameter take either a comma-separated list\n"
"of test names, or a filename prefixed with @. The file contains one name\n"
"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
"\n"
"These options can be passed repeatedly to read multiple files.\n";

enum ARG_KEYS {
	ARG_TEST_NUM = 'n',
	ARG_TEST_NAME = 't',
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_VERBOSE = 'v',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_DEBUG = -1,
	ARG_JSON_SUMMARY = 'J'
};
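
/* Example invocations (test names and file names are illustrative):
 *
 *	./test_progs -t attach_probe		substring name match
 *	./test_progs -a 'fentry*' -d fentry_test	glob allow/deny lists
 *	./test_progs -n 5/2			subtest 2 of test number 5
 *	./test_progs -a @allowlist.txt -j	names from a file, parallel workers
 *
 * A file passed with '@' holds one name or glob per line; lines starting
 * with '#' are comments, as described in argp_program_doc above.
 */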

static const struct argp_option opts[] = {
	{ "num", ARG_TEST_NUM, "NUM", 0,
	  "Run test number NUM only " },
	{ "name", ARG_TEST_NAME, "NAMES", 0,
	  "Run tests with names containing any string from NAMES list" },
	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
	  "Don't run tests with names containing any string from NAMES list" },
	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
	  "Output verifier statistics", },
	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
	{ "count", ARG_GET_TEST_CNT, NULL, 0,
	  "Get number of selected top-level tests " },
	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
	  "List test names that would run (without running them) " },
	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
	  "Run tests with name matching the pattern (supports '*' wildcard)." },
	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
	  "Number of workers to run in parallel, default to number of cpus." },
	{ "debug", ARG_DEBUG, NULL, 0,
	  "print extra debug information for test_progs." },
	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0,
	  "Write report in json format to this file." },
	{},
};

static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
		return 0;
	vfprintf(stdout, format, args);
	return 0;
}

static void free_test_filter_set(const struct test_filter_set *set)
{
	int i, j;

	if (!set)
		return;

	for (i = 0; i < set->cnt; i++) {
		free((void *)set->tests[i].name);
		for (j = 0; j < set->tests[i].subtest_cnt; j++)
			free((void *)set->tests[i].subtests[j]);

		free((void *)set->tests[i].subtests);
	}

	free((void *)set->tests);
}

static void free_test_selector(struct test_selector *test_selector)
{
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
}

extern int extra_prog_load_log_flags;

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	struct test_env *env = state->input;
	int err = 0;

	switch (key) {
	case ARG_TEST_NUM: {
		char *subtest_str = strchr(arg, '/');

		if (subtest_str) {
			*subtest_str = '\0';
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
				fprintf(stderr,
					"Failed to parse subtest numbers.\n");
				return -EINVAL;
			}
		}
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
			return -EINVAL;
		}
		break;
	}
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.whitelist,
						   key == ARG_TEST_NAME_GLOB_ALLOWLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.whitelist,
					      key == ARG_TEST_NAME_GLOB_ALLOWLIST);

		break;
	}
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.blacklist,
						   key == ARG_TEST_NAME_GLOB_DENYLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.blacklist,
					      key == ARG_TEST_NAME_GLOB_DENYLIST);

		break;
	}
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		break;
	case ARG_VERBOSE:
		env->verbosity = VERBOSE_NORMAL;
		if (arg) {
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
			} else {
				fprintf(stderr,
					"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
					arg);
				return -EINVAL;
			}
		}

		if (verbose()) {
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
				fprintf(stderr,
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
					errno);
				return -EINVAL;
			}
		}

		break;
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
		break;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
		break;
	case ARG_NUM_WORKERS:
		if (arg) {
			env->workers = atoi(arg);
			if (!env->workers) {
				fprintf(stderr, "Invalid number of workers: %s.", arg);
				return -EINVAL;
			}
		} else {
			env->workers = get_nprocs();
		}
		break;
	case ARG_DEBUG:
		env->debug = true;
		break;
	case ARG_JSON_SUMMARY:
		env->json = fopen(arg, "w");
		if (env->json == NULL) {
			perror("Failed to open json summary file");
			return -errno;
		}
		break;
	case ARGP_KEY_ARG:
		argp_usage(state);
		break;
	case ARGP_KEY_END:
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return err;
}

/*
 * Determine if test_progs is running as a "flavored" test runner and switch
 * into corresponding sub-directory to load correct BPF objects.
 *
 * This is done by looking at executable name. If it contains "-flavor"
 * suffix, then we are running as a flavored test runner.
 */
int cd_flavor_subdir(const char *exec_name)
{
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');

	if (!flavor)
		flavor = exec_name;
	else
		flavor++;

	flavor = strrchr(flavor, '-');
	if (!flavor)
		return 0;
	flavor++;
	if (verbose())
		fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

	return chdir(flavor);
}

int trigger_module_test_read(int read_sz)
{
	int fd, err;

	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
		return err;

	read(fd, NULL, read_sz);
	close(fd);

	return 0;
}

int trigger_module_test_write(int write_sz)
{
	int fd, err;
	char *buf = malloc(write_sz);

	if (!buf)
		return -ENOMEM;

	memset(buf, 'a', write_sz);
	buf[write_sz - 1] = '\0';

	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
		free(buf);
		return err;
	}

	write(fd, buf, write_sz);
	close(fd);
	free(buf);
	return 0;
}

int write_sysctl(const char *sysctl, const char *value)
{
	int fd, err, len;

	fd = open(sysctl, O_WRONLY);
	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
		return -1;

	len = strlen(value);
	err = write(fd, value, len);
	close(fd);
	if (!ASSERT_EQ(err, len, "write sysctl"))
		return -1;

	return 0;
}

int get_bpf_max_tramp_links_from(struct btf *btf)
{
	const struct btf_enum *e;
	const struct btf_type *t;
	__u32 i, type_cnt;
	const char *name;
	__u16 j, vlen;

	for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
		t = btf__type_by_id(btf, i);
		if (!t || !btf_is_enum(t) || t->name_off)
			continue;
		e = btf_enum(t);
		for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
			name = btf__str_by_offset(btf, e->name_off);
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
				return e->val;
		}
	}

	return -1;
}

int get_bpf_max_tramp_links(void)
{
	struct btf *vmlinux_btf;
	int ret;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
		return -1;
	ret = get_bpf_max_tramp_links_from(vmlinux_btf);
	btf__free(vmlinux_btf);

	return ret;
}
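
/* Example (illustrative): a test that attaches to many kernel functions can
 * bound its attach list by the trampoline limit discovered above:
 *
 *	int max = get_bpf_max_tramp_links();
 *
 *	if (!ASSERT_GT(max, 0, "bpf_max_tramp_links"))
 *		return;
 */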

#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	size_t sz;

	sz = backtrace(bt, ARRAY_SIZE(bt));

	if (env.stdout)
		stdio_restore();
	if (env.test) {
		env.test_state->error_cnt++;
		dump_test_log(env.test, env.test_state, true, false, NULL);
	}
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}

static void sigint_handler(int signum)
{
	int i;

	for (i = 0; i < env.workers; i++)
		if (env.worker_socks[i] > 0)
			close(env.worker_socks[i]);
}

static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;

static inline const char *str_msg(const struct msg *msg, char *buf)
{
	switch (msg->type) {
	case MSG_DO_TEST:
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
		break;
	case MSG_TEST_DONE:
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.num,
			msg->test_done.have_log);
		break;
	case MSG_SUBTEST_DONE:
		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
			msg->subtest_done.num,
			msg->subtest_done.have_log);
		break;
	case MSG_TEST_LOG:
		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		break;
	case MSG_EXIT:
		sprintf(buf, "MSG_EXIT");
		break;
	default:
		sprintf(buf, "UNKNOWN");
		break;
	}

	return buf;
}

static int send_message(int sock, const struct msg *msg)
{
	char buf[256];

	if (env.debug)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);
}

static int recv_message(int sock, struct msg *msg)
{
	int ret;
	char buf[256];

	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
	if (ret >= 0) {
		if (env.debug)
			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
	}
	return ret;
}

static void run_one_test(int test_num)
{
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];

	env.test = test;
	env.test_state = state;

	stdio_hijack(&state->log_buf, &state->log_cnt);

	if (test->run_test)
		test->run_test();
	else if (test->run_serial_test)
		test->run_serial_test();

	/* ensure last sub-test is finalized properly */
	if (env.subtest_state)
		test__end_subtest();

	state->tested = true;

	if (verbose() && env.worker_id == -1)
		print_test_result(test, state);

	reset_affinity();
	restore_netns();
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();

	stdio_restore();

	dump_test_log(test, state, false, false, NULL);
}
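
/* Parallel mode protocol: the server's dispatch_thread() sends MSG_DO_TEST
 * over a per-worker socketpair; the worker runs the test and answers with
 * MSG_TEST_DONE, then streams any captured log as MSG_TEST_LOG chunks
 * (is_last terminates the stream) and one MSG_SUBTEST_DONE per subtest, each
 * optionally followed by its own log chunks. MSG_EXIT tells the worker to
 * shut down.
 */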

struct dispatch_data {
	int worker_id;
	int sock_fd;
};

static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
{
	if (recv_message(sock_fd, msg) < 0)
		return 1;

	if (msg->type != type) {
		printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
		return 1;
	}

	return 0;
}

static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
{
	FILE *log_fp = NULL;
	int result = 0;

	log_fp = open_memstream(log_buf, log_cnt);
	if (!log_fp)
		return 1;

	while (true) {
		struct msg msg;

		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
			result = 1;
			goto out;
		}

		fprintf(log_fp, "%s", msg.test_log.log_buf);
		if (msg.test_log.is_last)
			break;
	}

out:
	fclose(log_fp);
	log_fp = NULL;
	return result;
}

static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
{
	struct msg msg;
	struct subtest_state *subtest_state;
	int subtest_num = state->subtest_num;

	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));

	for (int i = 0; i < subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		memset(subtest_state, 0, sizeof(*subtest_state));

		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
			return 1;

		subtest_state->name = strdup(msg.subtest_done.name);
		subtest_state->error_cnt = msg.subtest_done.error_cnt;
		subtest_state->skipped = msg.subtest_done.skipped;
		subtest_state->filtered = msg.subtest_done.filtered;

		/* collect all logs */
		if (msg.subtest_done.have_log)
			if (dispatch_thread_read_log(sock_fd,
						     &subtest_state->log_buf,
						     &subtest_state->log_cnt))
				return 1;
	}

	return 0;
}

static void *dispatch_thread(void *ctx)
{
	struct dispatch_data *data = ctx;
	int sock_fd;

	sock_fd = data->sock_fd;

	while (true) {
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;

		/* grab a test */
		{
			pthread_mutex_lock(&current_test_lock);

			if (current_test_idx >= prog_test_cnt) {
				pthread_mutex_unlock(&current_test_lock);
				goto done;
			}

			test = &prog_test_defs[current_test_idx];
			test_to_run = current_test_idx;
			current_test_idx++;

			pthread_mutex_unlock(&current_test_lock);
		}

		if (!test->should_run || test->run_serial_test)
			continue;

		/* run test through worker */
		{
			struct msg msg_do_test;

			memset(&msg_do_test, 0, sizeof(msg_do_test));
			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Fail to send command");
				goto done;
			}
			env.worker_current_test[data->worker_id] = test_to_run;
		}

		/* wait for test done */
		do {
			struct msg msg;

			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
				goto error;
			if (test_to_run != msg.test_done.num)
				goto error;

			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg.test_done.error_cnt;
			state->skip_cnt = msg.test_done.skip_cnt;
			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
			state->subtest_num = msg.test_done.subtest_num;

			/* collect all logs */
			if (msg.test_done.have_log) {
				if (dispatch_thread_read_log(sock_fd,
							     &state->log_buf,
							     &state->log_cnt))
					goto error;
			}

			/* collect all subtests and subtest logs */
			if (!state->subtest_num)
				break;

			if (dispatch_thread_send_subtests(sock_fd, state))
				goto error;
		} while (false);

		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false, true, NULL);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
error:
	if (env.debug)
		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));

done:
	{
		struct msg msg_exit;

		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			if (env.debug)
				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
					data->worker_id, strerror(errno));
		}
	}
	return NULL;
}

static void calculate_summary_and_print_errors(struct test_env *env)
{
	int i;
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
	json_writer_t *w = NULL;

	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];

		if (!state->tested)
			continue;

		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;

		if (state->error_cnt)
			fail_cnt++;
		else
			succ_cnt++;
	}

	if (env->json) {
		w = jsonw_new(env->json);
		if (!w)
			fprintf(env->stderr, "Failed to create new JSON stream.");
	}

	if (w) {
		jsonw_start_object(w);
		jsonw_uint_field(w, "success", succ_cnt);
		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
		jsonw_uint_field(w, "skipped", skip_cnt);
		jsonw_uint_field(w, "failed", fail_cnt);
		jsonw_name(w, "results");
		jsonw_start_array(w);
	}

	/*
	 * We only print the error logs summary when there are failed tests
	 * and verbose mode is not enabled. Otherwise, results may be
	 * inconsistent.
	 */
	if (!verbose() && fail_cnt) {
		printf("\nAll error logs:\n");

		/* print error logs again */
		for (i = 0; i < prog_test_cnt; i++) {
			struct prog_test_def *test = &prog_test_defs[i];
			struct test_state *state = &test_states[i];

			if (!state->tested || !state->error_cnt)
				continue;

			dump_test_log(test, state, true, true, w);
		}
	}

	if (w) {
		jsonw_end_array(w);
		jsonw_end_object(w);
		jsonw_destroy(&w);
	}

	if (env->json)
		fclose(env->json);

	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);

	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
}
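
/* When -J/--json-summary is given, the counters above are written as a single
 * JSON object, and (outside of verbose mode) every failed test is appended to
 * its "results" array by dump_test_log(), roughly (illustrative):
 *
 *	{"success":12,"success_subtest":34,"skipped":1,"failed":1,
 *	 "results":[{"name":"foo","number":7,"message":"...","failed":true,
 *	             "subtests":[...]}]}
 */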

static void server_main(void)
{
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	};
	int i;

	sigaction(SIGINT, &sigact_int, NULL);

	dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
	data = calloc(sizeof(struct dispatch_data), env.workers);

	env.worker_current_test = calloc(sizeof(int), env.workers);
	for (i = 0; i < env.workers; i++) {
		int rc;

		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
		if (rc) {
			perror("Failed to launch dispatcher thread");
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* wait for all dispatchers to finish */
	for (i = 0; i < env.workers; i++) {
		while (true) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);

			if (!ret) {
				break;
			} else if (ret == EBUSY) {
				if (env.debug)
					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
						i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				continue;
			} else {
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
				break;
			}
		}
	}
	free(dispatcher_threads);
	free(env.worker_current_test);
	free(data);

	/* run serial tests */
	save_netns();

	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run || !test->run_serial_test)
			continue;

		run_one_test(i);
	}

	/* generate summary */
	fflush(stderr);
	fflush(stdout);

	calculate_summary_and_print_errors(&env);

	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		int wstatus, pid;

		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
	}
}

static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
{
	char *src;
	size_t slen;

	src = log_buf;
	slen = log_cnt;
	while (slen) {
		struct msg msg_log;
		char *dest;
		size_t len;

		memset(&msg_log, 0, sizeof(msg_log));
		msg_log.type = MSG_TEST_LOG;
		dest = msg_log.test_log.log_buf;
		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
		memcpy(dest, src, len);

		src += len;
		slen -= len;
		if (!slen)
			msg_log.test_log.is_last = true;

		assert(send_message(sock, &msg_log) >= 0);
	}
}

static void free_subtest_state(struct subtest_state *state)
{
	if (state->log_buf) {
		free(state->log_buf);
		state->log_buf = NULL;
		state->log_cnt = 0;
	}
	free(state->name);
	state->name = NULL;
}

static int worker_main_send_subtests(int sock, struct test_state *state)
{
	int i, result = 0;
	struct msg msg;
	struct subtest_state *subtest_state;

	memset(&msg, 0, sizeof(msg));
	msg.type = MSG_SUBTEST_DONE;

	for (i = 0; i < state->subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		msg.subtest_done.num = i;

		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);

		msg.subtest_done.error_cnt = subtest_state->error_cnt;
		msg.subtest_done.skipped = subtest_state->skipped;
		msg.subtest_done.filtered = subtest_state->filtered;
		msg.subtest_done.have_log = false;

		if (verbose() || state->force_log || subtest_state->error_cnt) {
			if (subtest_state->log_cnt)
				msg.subtest_done.have_log = true;
		}

		if (send_message(sock, &msg) < 0) {
			perror("Fail to send message done");
			result = 1;
			goto out;
		}

		/* send logs */
		if (msg.subtest_done.have_log)
			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);

		free_subtest_state(subtest_state);
		free(subtest_state->name);
	}

out:
	for (; i < state->subtest_num; i++)
		free_subtest_state(&state->subtest_states[i]);
	free(state->subtest_states);
	return result;
}

static int worker_main(int sock)
{
	save_netns();

	while (true) {
		/* receive command */
		struct msg msg;

		if (recv_message(sock, &msg) < 0)
			goto out;

		switch (msg.type) {
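		/* Only two commands arrive here: MSG_EXIT tells this worker to
		 * shut down, and MSG_DO_TEST runs one test and streams its
		 * results back to the dispatcher (see worker_main_send_log()
		 * and worker_main_send_subtests() above).
		 */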
		case MSG_EXIT:
			if (env.debug)
				fprintf(stderr, "[%d]: worker exit.\n",
					env.worker_id);
			goto out;
		case MSG_DO_TEST: {
			int test_to_run = msg.do_test.num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
			struct msg msg;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s running.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);

			run_one_test(test_to_run);

			memset(&msg, 0, sizeof(msg));
			msg.type = MSG_TEST_DONE;
			msg.test_done.num = test_to_run;
			msg.test_done.error_cnt = state->error_cnt;
			msg.test_done.skip_cnt = state->skip_cnt;
			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg.test_done.subtest_num = state->subtest_num;
			msg.test_done.have_log = false;

			if (verbose() || state->force_log || state->error_cnt) {
				if (state->log_cnt)
					msg.test_done.have_log = true;
			}
			if (send_message(sock, &msg) < 0) {
				perror("Fail to send message done");
				goto out;
			}

			/* send logs */
			if (msg.test_done.have_log)
				worker_main_send_log(sock, state->log_buf, state->log_cnt);

			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
				state->log_cnt = 0;
			}

			if (state->subtest_num)
				if (worker_main_send_subtests(sock, state))
					goto out;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s done.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			break;
		} /* case MSG_DO_TEST */
		default:
			if (env.debug)
				fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
			return -1;
		}
	}
out:
	return 0;
}

static void free_test_states(void)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
		struct test_state *test_state = &test_states[i];

		for (j = 0; j < test_state->subtest_num; j++)
			free_subtest_state(&test_state->subtest_states[j]);

		free(test_state->subtest_states);
		free(test_state->log_buf);
		test_state->subtest_states = NULL;
		test_state->log_buf = NULL;
	}
}
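
/* Overall flow of main(): parse arguments, optionally switch to a flavor
 * subdirectory and (re)load bpf_testmod, number the tests and apply the
 * selectors, then either fork the requested workers and run server_main()
 * (non-serial tests in parallel, serial ones afterwards in the server) or run
 * everything sequentially in this process, and finally print/emit the summary
 * and clean up.
 */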

int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,
	};
	int err, i;

	sigaction(SIGSEGV, &sigact, NULL);

	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
	if (err)
		return err;

	err = cd_flavor_subdir(argv[0]);
	if (err)
		return err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	srand(time(NULL));

	env.jit_enabled = is_jit_enabled();
	env.nr_cpus = libbpf_num_possible_cpus();
	if (env.nr_cpus < 0) {
		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
			env.nr_cpus);
		return -1;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	env.has_testmod = true;
	if (!env.list_test_names) {
		/* ensure previous instance of the module is unloaded */
		unload_bpf_testmod(verbose());

		if (load_bpf_testmod(verbose())) {
			fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
			env.has_testmod = false;
		}
	}

	/* initializing tests */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		test->test_num = i + 1;
		test->should_run = should_run(&env.test_selector,
					      test->test_num, test->test_name);

		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
		    (test->run_test != NULL && test->run_serial_test != NULL)) {
			fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
				test->test_num, test->test_name, test->test_name, test->test_name);
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* ignore workers if we are just listing */
	if (env.get_test_cnt || env.list_test_names)
		env.workers = 0;

	/* launch workers if requested */
	env.worker_id = -1; /* main process */
	if (env.workers) {
		env.worker_pids = calloc(sizeof(__pid_t), env.workers);
		env.worker_socks = calloc(sizeof(int), env.workers);
		if (env.debug)
			fprintf(stdout, "Launching %d workers.\n", env.workers);
		for (i = 0; i < env.workers; i++) {
			int sv[2];
			pid_t pid;

			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
				perror("Fail to create worker socket");
				return -1;
			}
			pid = fork();
			if (pid < 0) {
				perror("Failed to fork worker");
				return -1;
			} else if (pid != 0) { /* main process */
				close(sv[1]);
				env.worker_pids[i] = pid;
				env.worker_socks[i] = sv[0];
			} else { /* inside each worker process */
				close(sv[0]);
				env.worker_id = i;
				return worker_main(sv[1]);
			}
		}

		if (env.worker_id == -1) {
			server_main();
			goto out;
		}
	}

	/* The rest of the main process */

	/* on single mode */
	save_netns();

	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run)
			continue;

		if (env.get_test_cnt) {
			env.succ_cnt++;
			continue;
		}

		if (env.list_test_names) {
			fprintf(env.stdout, "%s\n", test->test_name);
			env.succ_cnt++;
			continue;
		}

		run_one_test(i);
	}

	if (env.get_test_cnt) {
		printf("%d\n", env.succ_cnt);
		goto out;
	}

	if (env.list_test_names)
		goto out;

	calculate_summary_and_print_errors(&env);

	close(env.saved_netns_fd);
out:
	if (!env.list_test_names && env.has_testmod)
		unload_bpf_testmod(verbose());

	free_test_selector(&env.test_selector);
	free_test_selector(&env.subtest_selector);
	free_test_states();

	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_NO_TEST;

	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}