// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "autoconf_helper.h"
#include "disasm_helpers.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"
#include "jit_disasm_helpers.h"

#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

#define TEST_LOADER_LOG_BUF_SZ 2097152

#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_NOT_MSG_PFX "comment:test_expect_not_msg="
#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV "comment:test_expect_not_msg_unpriv="
#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
#define TEST_BTF_PATH "comment:test_btf_path="
#define TEST_TAG_ARCH "comment:test_arch="
#define TEST_TAG_JITED_PFX "comment:test_jited="
#define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv="
#define TEST_TAG_CAPS_UNPRIV "comment:test_caps_unpriv="
#define TEST_TAG_LOAD_MODE_PFX "comment:load_mode="
#define TEST_TAG_EXPECT_STDERR_PFX "comment:test_expect_stderr="
#define TEST_TAG_EXPECT_STDERR_PFX_UNPRIV "comment:test_expect_stderr_unpriv="
#define TEST_TAG_EXPECT_STDOUT_PFX "comment:test_expect_stdout="
#define TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV "comment:test_expect_stdout_unpriv="
#define TEST_TAG_LINEAR_SIZE "comment:test_linear_size="

/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xbadcafe
#define TEST_DATA_LEN 64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

static int sysctl_unpriv_disabled = -1;

enum mode {
	PRIV = 1,
	UNPRIV = 2
};

enum load_mode {
	JITED = 1 << 0,
	NO_JITED = 1 << 1,
};

struct test_subspec {
	char *name;
	bool expect_failure;
	struct expected_msgs expect_msgs;
	struct expected_msgs expect_xlated;
	struct expected_msgs jited;
	struct expected_msgs stderr;
	struct expected_msgs stdout;
	int retval;
	bool execute;
	__u64 caps;
};

struct test_spec {
	const char *prog_name;
	struct test_subspec priv;
	struct test_subspec unpriv;
	const char *btf_custom_path;
	int log_level;
	int prog_flags;
	int mode_mask;
	int arch_mask;
	int load_mask;
	int linear_sz;
	bool auxiliary;
	bool valid;
};

static int tester_init(struct test_loader *tester)
{
	if (!tester->log_buf) {
		tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
		tester->log_buf = calloc(tester->log_buf_sz, 1);
		if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
			return -ENOMEM;
	}

	return 0;
}

void test_loader_fini(struct test_loader *tester)
{
	if (!tester)
		return;

	free(tester->log_buf);
}

static void free_msgs(struct expected_msgs *msgs)
{
	int i;

	for (i = 0; i < msgs->cnt; i++)
		if (msgs->patterns[i].is_regex)
			regfree(&msgs->patterns[i].regex);
	free(msgs->patterns);
	msgs->patterns = NULL;
	msgs->cnt = 0;
}

static void free_test_spec(struct test_spec *spec)
{
	/* Deallocate expect_msgs arrays. */
	free_msgs(&spec->priv.expect_msgs);
	free_msgs(&spec->unpriv.expect_msgs);
	free_msgs(&spec->priv.expect_xlated);
	free_msgs(&spec->unpriv.expect_xlated);
	free_msgs(&spec->priv.jited);
	free_msgs(&spec->unpriv.jited);
	free_msgs(&spec->unpriv.stderr);
	free_msgs(&spec->priv.stderr);
	free_msgs(&spec->unpriv.stdout);
	free_msgs(&spec->priv.stdout);

	free(spec->priv.name);
	free(spec->unpriv.name);
	spec->priv.name = NULL;
	spec->unpriv.name = NULL;
}

/* Compiles regular expression matching pattern.
 * Pattern has a special syntax:
 *
 *   pattern := (<verbatim text> | regex)*
 *   regex := "{{" <posix extended regular expression> "}}"
 *
 * In other words, pattern is a verbatim text with inclusion
 * of regular expressions enclosed in "{{" "}}" pairs.
 * For example, pattern "foo{{[0-9]+}}" matches strings like
 * "foo0", "foo007", etc.
 */
static int compile_regex(const char *pattern, regex_t *regex)
{
	char err_buf[256], buf[256] = {}, *ptr, *buf_end;
	const char *original_pattern = pattern;
	bool in_regex = false;
	int err;

	buf_end = buf + sizeof(buf);
	ptr = buf;
	while (*pattern && ptr < buf_end - 2) {
		if (!in_regex && str_has_pfx(pattern, "{{")) {
			in_regex = true;
			pattern += 2;
			continue;
		}
		if (in_regex && str_has_pfx(pattern, "}}")) {
			in_regex = false;
			pattern += 2;
			continue;
		}
		if (in_regex) {
			*ptr++ = *pattern++;
			continue;
		}
		/* list of characters that need escaping for extended posix regex */
		if (strchr(".[]\\()*+?{}|^$", *pattern)) {
			*ptr++ = '\\';
			*ptr++ = *pattern++;
			continue;
		}
		*ptr++ = *pattern++;
	}
	if (*pattern) {
		PRINT_FAIL("Regexp too long: '%s'\n", original_pattern);
		return -EINVAL;
	}
	if (in_regex) {
		PRINT_FAIL("Regexp has open '{{' but no closing '}}': '%s'\n", original_pattern);
		return -EINVAL;
	}
	err = regcomp(regex, buf, REG_EXTENDED | REG_NEWLINE);
	if (err != 0) {
		regerror(err, regex, err_buf, sizeof(err_buf));
		PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", buf, err_buf);
		return -EINVAL;
	}
	return 0;
}

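/* Illustrative example of the translation performed by compile_regex()
 * (the pattern below is hypothetical, not taken from an actual test):
 * the pattern
 *
 *   "*(u64 *)(r10 -8) = {{r[0-9]+}}"
 *
 * is turned into the POSIX ERE
 *
 *   "\*\(u64 \*\)\(r10 -8\) = r[0-9]+"
 *
 * i.e. verbatim text is escaped while the "{{...}}" body is copied as-is.
 * REG_NEWLINE keeps '^' and '$' anchored to individual log lines.
 */
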
static int __push_msg(const char *pattern, bool on_next_line, bool negative,
		      struct expected_msgs *msgs)
{
	struct expect_msg *msg;
	void *tmp;
	int err;

	tmp = realloc(msgs->patterns,
		      (1 + msgs->cnt) * sizeof(struct expect_msg));
	if (!tmp) {
		ASSERT_FAIL("failed to realloc memory for messages\n");
		return -ENOMEM;
	}
	msgs->patterns = tmp;
	msg = &msgs->patterns[msgs->cnt];
	msg->on_next_line = on_next_line;
	msg->substr = pattern;
	msg->negative = negative;
	msg->is_regex = false;
	if (strstr(pattern, "{{")) {
		err = compile_regex(pattern, &msg->regex);
		if (err)
			return err;
		msg->is_regex = true;
	}
	msgs->cnt += 1;
	return 0;
}

static int clone_msgs(struct expected_msgs *from, struct expected_msgs *to)
{
	struct expect_msg *msg;
	int i, err;

	for (i = 0; i < from->cnt; i++) {
		msg = &from->patterns[i];
		err = __push_msg(msg->substr, msg->on_next_line, msg->negative, to);
		if (err)
			return err;
	}
	return 0;
}

static int push_msg(const char *substr, bool negative, struct expected_msgs *msgs)
{
	return __push_msg(substr, false, negative, msgs);
}

static int push_disasm_msg(const char *regex_str, bool *on_next_line, struct expected_msgs *msgs)
{
	int err;

	if (strcmp(regex_str, "...") == 0) {
		*on_next_line = false;
		return 0;
	}
	err = __push_msg(regex_str, *on_next_line, false, msgs);
	if (err)
		return err;
	*on_next_line = true;
	return 0;
}

static int parse_int(const char *str, int *val, const char *name)
{
	char *end;
	long tmp;

	errno = 0;
	if (str_has_pfx(str, "0x"))
		tmp = strtol(str + 2, &end, 16);
	else
		tmp = strtol(str, &end, 10);
	if (errno || end[0] != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}

static int parse_caps(const char *str, __u64 *val, const char *name)
{
	int cap_flag = 0;
	char *token = NULL, *saveptr = NULL;

	char *str_cpy = strdup(str);
	if (str_cpy == NULL) {
		PRINT_FAIL("Memory allocation failed\n");
		return -EINVAL;
	}

	token = strtok_r(str_cpy, "|", &saveptr);
	while (token != NULL) {
		errno = 0;
		if (!strncmp("CAP_", token, sizeof("CAP_") - 1)) {
			PRINT_FAIL("define %s constant in bpf_misc.h, failed to parse caps\n", token);
			free(str_cpy);
			return -EINVAL;
		}
		cap_flag = strtol(token, NULL, 10);
		if (!cap_flag || errno) {
			PRINT_FAIL("failed to parse caps %s\n", name);
			free(str_cpy);
			return -EINVAL;
		}
		*val |= (1ULL << cap_flag);
		token = strtok_r(NULL, "|", &saveptr);
	}

	free(str_cpy);
	return 0;
}

static int parse_retval(const char *str, int *val, const char *name)
{
	/*
	 * INT_MIN is defined as (-INT_MAX -1), i.e. it doesn't expand to a
	 * single int and cannot be parsed with strtol, so we handle it
	 * separately here. In addition, it expands to different expressions in
	 * different compilers so we use a prefixed _INT_MIN instead.
	 */
	if (strcmp(str, "_INT_MIN") == 0) {
		*val = INT_MIN;
		return 0;
	}

	return parse_int(str, val, name);
}

static void update_flags(int *flags, int flag, bool clear)
{
	if (clear)
		*flags &= ~flag;
	else
		*flags |= flag;
}

/* Matches a string of the form '<pfx>[^=]=.*' and returns its suffix.
 * Used to parse btf_decl_tag values.
 * Such values require a unique prefix because the compiler does not add
 * the same __attribute__((btf_decl_tag(...))) twice.
 * Test suite uses two-component tags for such cases:
 *
 *   <pfx> __COUNTER__ '='
 *
 * For example, two consecutive __msg tags '__msg("foo") __msg("foo")'
 * would be encoded as:
 *
 *   [18] DECL_TAG 'comment:test_expect_msg=0=foo' type_id=15 component_idx=-1
 *   [19] DECL_TAG 'comment:test_expect_msg=1=foo' type_id=15 component_idx=-1
 *
 * And the purpose of this function is to extract 'foo' from the above.
 */
static const char *skip_dynamic_pfx(const char *s, const char *pfx)
{
	const char *msg;

	if (strncmp(s, pfx, strlen(pfx)) != 0)
		return NULL;
	msg = s + strlen(pfx);
	msg = strchr(msg, '=');
	if (!msg)
		return NULL;
	return msg + 1;
}

enum arch {
	ARCH_UNKNOWN = 0x1,
	ARCH_X86_64 = 0x2,
	ARCH_ARM64 = 0x4,
	ARCH_RISCV64 = 0x8,
	ARCH_S390X = 0x10,
};

static int get_current_arch(void)
{
#if defined(__x86_64__)
	return ARCH_X86_64;
#elif defined(__aarch64__)
	return ARCH_ARM64;
#elif defined(__riscv) && __riscv_xlen == 64
	return ARCH_RISCV64;
#elif defined(__s390x__)
	return ARCH_S390X;
#endif
	return ARCH_UNKNOWN;
}

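/* For reference, test programs produce the decl tags parsed below via the
 * convenience macros from progs/bpf_misc.h (__description, __failure, __msg,
 * __retval, __arch_* and friends). A minimal sketch of an annotated program
 * (the program body, section and messages here are hypothetical and only
 * illustrate how the tags come to exist):
 *
 *	SEC("socket")
 *	__description("example: reject out-of-bounds stack write")
 *	__failure __msg("invalid write to stack")
 *	__naked void stack_oob_example(void)
 *	{
 *		asm volatile ("r1 = 0; *(u64 *)(r10 +8) = r1; exit;" ::: __clobber_all);
 *	}
 *
 * Each macro expands to __attribute__((btf_decl_tag("comment:..."))) applied
 * to the function, which is exactly what parse_test_spec() walks over.
 */
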
/* Uses btf_decl_tag attributes to describe the expected test
 * behavior, see bpf_misc.h for detailed description of each attribute
 * and attribute combinations.
 */
static int parse_test_spec(struct test_loader *tester,
			   struct bpf_object *obj,
			   struct bpf_program *prog,
			   struct test_spec *spec)
{
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
	bool unpriv_xlated_on_next_line = true;
	bool xlated_on_next_line = true;
	bool unpriv_jit_on_next_line;
	bool jit_on_next_line;
	bool stderr_on_next_line = true;
	bool unpriv_stderr_on_next_line = true;
	bool stdout_on_next_line = true;
	bool unpriv_stdout_on_next_line = true;
	bool collect_jit = false;
	int func_id, i, err = 0;
	u32 arch_mask = 0;
	u32 load_mask = 0;
	struct btf *btf;
	enum arch arch;

	memset(spec, 0, sizeof(*spec));

	spec->prog_name = bpf_program__name(prog);
	spec->prog_flags = testing_prog_flags();

	btf = bpf_object__btf(obj);
	if (!btf) {
		ASSERT_FAIL("BPF object has no BTF");
		return -EINVAL;
	}

	func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
	if (func_id < 0) {
		ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
		return -EINVAL;
	}

	for (i = 1; i < btf__type_cnt(btf); i++) {
		const char *s, *val, *msg;
		const struct btf_type *t;
		bool clear;
		int flags;

		t = btf__type_by_id(btf, i);
		if (!btf_is_decl_tag(t))
			continue;

		if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
			continue;

		s = btf__str_by_offset(btf, t->name_off);
		if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
			description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
			spec->priv.expect_failure = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
			spec->priv.expect_failure = false;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
			spec->unpriv.expect_failure = true;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
			spec->unpriv.expect_failure = false;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= UNPRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX))) {
			err = push_msg(msg, false, &spec->priv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX))) {
			err = push_msg(msg, true, &spec->priv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV))) {
			err = push_msg(msg, false, &spec->unpriv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV))) {
			err = push_msg(msg, true, &spec->unpriv.expect_msgs);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX))) {
			if (arch_mask == 0) {
				PRINT_FAIL("__jited used before __arch_*");
				goto cleanup;
			}
			if (collect_jit) {
				err = push_disasm_msg(msg, &jit_on_next_line,
						      &spec->priv.jited);
				if (err)
					goto cleanup;
				spec->mode_mask |= PRIV;
			}
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX_UNPRIV))) {
			if (arch_mask == 0) {
				PRINT_FAIL("__unpriv_jited used before __arch_*");
				goto cleanup;
			}
			if (collect_jit) {
				err = push_disasm_msg(msg, &unpriv_jit_on_next_line,
						      &spec->unpriv.jited);
				if (err)
					goto cleanup;
				spec->mode_mask |= UNPRIV;
			}
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) {
			err = push_disasm_msg(msg, &xlated_on_next_line,
					      &spec->priv.expect_xlated);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) {
			err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
					      &spec->unpriv.expect_xlated);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
			err = parse_retval(val, &spec->priv.retval, "__retval");
			if (err)
				goto cleanup;
			spec->priv.execute = true;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
			err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
			spec->unpriv.execute = true;
			has_unpriv_retval = true;
		} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
			val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
			err = parse_int(val, &spec->log_level, "test log level");
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
			val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;

			clear = val[0] == '!';
			if (clear)
				val++;

			if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_RND_HI32, clear);
			} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear);
			} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
				update_flags(&spec->prog_flags, BPF_F_SLEEPABLE, clear);
			} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear);
			} else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear);
			} else /* assume numeric value */ {
				err = parse_int(val, &flags, "test prog flags");
				if (err)
					goto cleanup;
				update_flags(&spec->prog_flags, flags, clear);
			}
		} else if (str_has_pfx(s, TEST_TAG_ARCH)) {
			val = s + sizeof(TEST_TAG_ARCH) - 1;
			if (strcmp(val, "X86_64") == 0) {
				arch = ARCH_X86_64;
			} else if (strcmp(val, "ARM64") == 0) {
				arch = ARCH_ARM64;
			} else if (strcmp(val, "RISCV64") == 0) {
				arch = ARCH_RISCV64;
			} else if (strcmp(val, "s390x") == 0) {
				arch = ARCH_S390X;
			} else {
				PRINT_FAIL("bad arch spec: '%s'\n", val);
				err = -EINVAL;
				goto cleanup;
			}
			arch_mask |= arch;
			collect_jit = get_current_arch() == arch;
			unpriv_jit_on_next_line = true;
			jit_on_next_line = true;
		} else if (str_has_pfx(s, TEST_BTF_PATH)) {
			spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1;
		} else if (str_has_pfx(s, TEST_TAG_CAPS_UNPRIV)) {
			val = s + sizeof(TEST_TAG_CAPS_UNPRIV) - 1;
			err = parse_caps(val, &spec->unpriv.caps, "test caps");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_LOAD_MODE_PFX)) {
			val = s + sizeof(TEST_TAG_LOAD_MODE_PFX) - 1;
			if (strcmp(val, "jited") == 0) {
				load_mask = JITED;
			} else if (strcmp(val, "no_jited") == 0) {
				load_mask = NO_JITED;
			} else {
				PRINT_FAIL("bad load spec: '%s'", val);
				err = -EINVAL;
				goto cleanup;
			}
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX))) {
			err = push_disasm_msg(msg, &stderr_on_next_line,
					      &spec->priv.stderr);
			if (err)
				goto cleanup;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX_UNPRIV))) {
			err = push_disasm_msg(msg, &unpriv_stderr_on_next_line,
					      &spec->unpriv.stderr);
			if (err)
				goto cleanup;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX))) {
			err = push_disasm_msg(msg, &stdout_on_next_line,
					      &spec->priv.stdout);
			if (err)
				goto cleanup;
		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV))) {
			err = push_disasm_msg(msg, &unpriv_stdout_on_next_line,
					      &spec->unpriv.stdout);
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_LINEAR_SIZE)) {
			switch (bpf_program__type(prog)) {
			case BPF_PROG_TYPE_SCHED_ACT:
			case BPF_PROG_TYPE_SCHED_CLS:
			case BPF_PROG_TYPE_CGROUP_SKB:
				val = s + sizeof(TEST_TAG_LINEAR_SIZE) - 1;
				err = parse_int(val, &spec->linear_sz, "test linear size");
				if (err)
					goto cleanup;
				break;
			default:
				PRINT_FAIL("__linear_size for unsupported program type");
				err = -EINVAL;
				goto cleanup;
			}
		}
	}

	spec->arch_mask = arch_mask ?: -1;
	spec->load_mask = load_mask ?: (JITED | NO_JITED);

	if (spec->mode_mask == 0)
		spec->mode_mask = PRIV;

	if (!description)
		description = spec->prog_name;

	if (spec->mode_mask & PRIV) {
		spec->priv.name = strdup(description);
		if (!spec->priv.name) {
			PRINT_FAIL("failed to allocate memory for priv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
	}

	if (spec->mode_mask & UNPRIV) {
		int descr_len = strlen(description);
		const char *suffix = " @unpriv";
		char *name;

		name = malloc(descr_len + strlen(suffix) + 1);
		if (!name) {
			PRINT_FAIL("failed to allocate memory for unpriv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}

		strcpy(name, description);
		strcpy(&name[descr_len], suffix);
		spec->unpriv.name = name;
	}

	if (spec->mode_mask & (PRIV | UNPRIV)) {
		if (!has_unpriv_result)
			spec->unpriv.expect_failure = spec->priv.expect_failure;

		if (!has_unpriv_retval) {
			spec->unpriv.retval = spec->priv.retval;
			spec->unpriv.execute = spec->priv.execute;
		}

		if (spec->unpriv.expect_msgs.cnt == 0)
			clone_msgs(&spec->priv.expect_msgs, &spec->unpriv.expect_msgs);
		if (spec->unpriv.expect_xlated.cnt == 0)
			clone_msgs(&spec->priv.expect_xlated, &spec->unpriv.expect_xlated);
		if (spec->unpriv.jited.cnt == 0)
			clone_msgs(&spec->priv.jited, &spec->unpriv.jited);
		if (spec->unpriv.stderr.cnt == 0)
			clone_msgs(&spec->priv.stderr, &spec->unpriv.stderr);
		if (spec->unpriv.stdout.cnt == 0)
			clone_msgs(&spec->priv.stdout, &spec->unpriv.stdout);
	}

	spec->valid = true;

	return 0;

cleanup:
	free_test_spec(spec);
	return err;
}

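/* Illustrative example of the resulting spec (the annotations are hypothetical):
 * a program tagged with
 *
 *	__description("foo") __failure __msg("bad access") __failure_unpriv
 *
 * produces two subtests, "foo" (priv) and "foo @unpriv" (unpriv). The unpriv
 * half inherits whatever was not overridden explicitly: here it receives a
 * copy of the "bad access" expectation via clone_msgs(), while its
 * expect_failure flag comes from __failure_unpriv itself.
 */
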
static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;

	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;

	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

	/* Make sure we set at least minimal log level, unless test requires
	 * even higher level already. Make sure to preserve independent log
	 * level 4 (verifier stats), though.
	 */
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);

	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

	tester->log_buf[0] = '\0';
}

static void emit_verifier_log(const char *log_buf, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}

static void emit_xlated(const char *xlated, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "XLATED:\n=============\n%s=============\n", xlated);
}

static void emit_jited(const char *jited, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "JITED:\n=============\n%s=============\n", jited);
}

static void emit_stderr(const char *stderr, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "STDERR:\n=============\n%s=============\n", stderr);
}

static void emit_stdout(const char *bpf_stdout, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "STDOUT:\n=============\n%s=============\n", bpf_stdout);
}

static const char *match_msg(struct expect_msg *msg, const char **log)
{
	const char *match = NULL;
	regmatch_t reg_match[1];
	int err;

	if (!msg->is_regex) {
		match = strstr(*log, msg->substr);
		if (match)
			*log = match + strlen(msg->substr);
	} else {
		err = regexec(&msg->regex, *log, 1, reg_match, 0);
		if (err == 0) {
			match = *log + reg_match[0].rm_so;
			*log += reg_match[0].rm_eo;
		}
	}
	return match;
}

static int count_lines(const char *start, const char *end)
{
	const char *tmp;
	int n = 0;

	for (tmp = start; tmp < end; ++tmp)
		if (*tmp == '\n')
			n++;
	return n;
}

struct match {
	const char *start;
	const char *end;
	int line;
};

/*
 * Positive messages are matched sequentially, each next message
 * is looked for starting from the end of a previous matched one.
 */
static void match_positive_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
{
	const char *prev_match;
	int i, line;

	prev_match = log;
	line = 0;
	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];
		const char *match = NULL;

		if (msg->negative)
			continue;

		match = match_msg(msg, &log);
		if (match) {
			line += count_lines(prev_match, match);
			matches[i].start = match;
			matches[i].end = log;
			matches[i].line = line;
			prev_match = match;
		}
	}
}

/*
 * Each negative message N located between positive messages P1 and P2
 * is matched in the span P1.end .. P2.start. Consequently, negative messages
 * are unordered within the span.
 */
static void match_negative_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
{
	const char *start = log, *end, *next, *match;
	const char *log_end = log + strlen(log);
	int i, j, next_positive;

	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];

		/* positive message bumps span start */
		if (!msg->negative) {
			start = matches[i].end ?: start;
			continue;
		}

		/* count stride of negative patterns and adjust span end */
		end = log_end;
		for (next_positive = i + 1; next_positive < msgs->cnt; next_positive++) {
			if (!msgs->patterns[next_positive].negative) {
				end = matches[next_positive].start;
				break;
			}
		}

		/* try matching negative messages within identified span */
		for (j = i; j < next_positive; j++) {
			msg = &msgs->patterns[j];
			next = start;
			match = match_msg(msg, &next);
			if (match && next <= end) {
				matches[j].start = match;
				matches[j].end = next;
			}
		}

		/* -1 to account for i++ */
		i = next_positive - 1;
	}
}

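/* Worked example of the matching rules above (the expectations are hypothetical):
 * for the expected list [ "P1", !"N1", "P2" ] and a log that contains
 * "... P1 ... N1 ... P2 ...", match_positive_msgs() matches P1 and P2 in
 * order, and N1 is then searched only in the span between P1's match end and
 * P2's match start. Since it is found there, validate_msgs() reports N1 as
 * an UNEXPECTED message for this log.
 */
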
void validate_msgs(const char *log_buf, struct expected_msgs *msgs,
		   void (*emit_fn)(const char *buf, bool force))
{
	struct match matches[msgs->cnt];
	struct match *prev_match = NULL;
	int i, j;

	memset(matches, 0, sizeof(*matches) * msgs->cnt);
	match_positive_msgs(log_buf, msgs, matches);
	match_negative_msgs(log_buf, msgs, matches);

	for (i = 0; i < msgs->cnt; i++) {
		struct expect_msg *msg = &msgs->patterns[i];
		struct match *match = &matches[i];
		const char *pat_status;
		bool unexpected;
		bool wrong_line;
		bool no_match;

		no_match = !msg->negative && !match->start;
		wrong_line = !msg->negative &&
			     msg->on_next_line &&
			     prev_match && prev_match->line + 1 != match->line;
		unexpected = msg->negative && match->start;
		if (no_match || wrong_line || unexpected) {
			PRINT_FAIL("expect_msg\n");
			if (env.verbosity == VERBOSE_NONE)
				emit_fn(log_buf, true /*force*/);
			for (j = 0; j <= i; j++) {
				msg = &msgs->patterns[j];
				if (j < i)
					pat_status = "MATCHED ";
				else if (wrong_line)
					pat_status = "WRONG LINE";
				else if (no_match)
					pat_status = "EXPECTED ";
				else
					pat_status = "UNEXPECTED";
				fprintf(stderr, "%s %s: '%s'\n",
					pat_status,
					msg->is_regex ? " REGEX" : "SUBSTR",
					msg->substr);
			}
			if (wrong_line) {
				fprintf(stderr,
					"expecting match at line %d, actual match is at line %d\n",
					prev_match->line + 1, match->line);
			}
			break;
		}

		if (!msg->negative)
			prev_match = match;
	}
}

struct cap_state {
	__u64 old_caps;
	bool initialized;
};

static int drop_capabilities(struct cap_state *caps)
{
	const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
				    1ULL << CAP_PERFMON | 1ULL << CAP_BPF);
	int err;

	err = cap_disable_effective(caps_to_drop, &caps->old_caps);
	if (err) {
		PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(-err));
		return err;
	}

	caps->initialized = true;
	return 0;
}

static int restore_capabilities(struct cap_state *caps)
{
	int err;

	if (!caps->initialized)
		return 0;

	err = cap_enable_effective(caps->old_caps, NULL);
	if (err)
		PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(-err));
	caps->initialized = false;
	return err;
}

static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
	if (sysctl_unpriv_disabled < 0)
		sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
	if (sysctl_unpriv_disabled)
		return false;
	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
		return false;
	return true;
}

static bool is_unpriv_capable_map(struct bpf_map *map)
{
	enum bpf_map_type type;
	__u32 flags;

	type = bpf_map__type(map);

	switch (type) {
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		flags = bpf_map__map_flags(map);
		return !(flags & BPF_F_ZERO_SEED);
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		return true;
	default:
		return false;
	}
}

static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts, int linear_sz)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	struct __sk_buff ctx = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = tmp_in,
		    .data_size_in = sizeof(tmp_in),
		    .data_out = tmp_out,
		    .data_size_out = sizeof(tmp_out),
		    .repeat = 1,
	);

	if (linear_sz) {
		ctx.data_end = linear_sz;
		topts.ctx_in = &ctx;
		topts.ctx_size_in = sizeof(ctx);
	}

	if (empty_opts) {
		memset(&topts, 0, sizeof(struct bpf_test_run_opts));
		topts.sz = sizeof(struct bpf_test_run_opts);
	}
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}

static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
	if (!subspec->execute)
		return false;

	if (subspec->expect_failure)
		return false;

	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
		if (env.verbosity != VERBOSE_NONE)
			printf("alignment prevents execution\n");
		return false;
	}

	return true;
}

/* Get a disassembly of BPF program after verifier applies all rewrites */
static int get_xlated_program_text(int prog_fd, char *text, size_t text_sz)
{
	struct bpf_insn *insn_start = NULL, *insn, *insn_end;
	__u32 insns_cnt = 0, i;
	char buf[64];
	FILE *out = NULL;
	int err;

	err = get_xlated_program(prog_fd, &insn_start, &insns_cnt);
	if (!ASSERT_OK(err, "get_xlated_program"))
		goto out;
	out = fmemopen(text, text_sz, "w");
	if (!ASSERT_OK_PTR(out, "fmemopen"))
		goto out;
	insn_end = insn_start + insns_cnt;
	insn = insn_start;
	while (insn < insn_end) {
		i = insn - insn_start;
		insn = disasm_insn(insn, buf, sizeof(buf));
		fprintf(out, "%d: %s\n", i, buf);
	}
	fflush(out);

out:
	free(insn_start);
	if (out)
		fclose(out);
	return err;
}

/* Read the bpf stream corresponding to the stream_id */
static int get_stream(int stream_id, int prog_fd, char *text, size_t text_sz)
{
	LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
	int ret;

	ret = bpf_prog_stream_read(prog_fd, stream_id, text, text_sz, &ropts);
	ASSERT_GT(ret, 0, "stream read");
	text[ret] = '\0';

	return ret;
}

/* this function is forced noinline and has short generic name to look better
 * in test_progs output (in case of a failure)
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	int current_runtime = is_jit_enabled() ? JITED : NO_JITED;
	struct bpf_program *tprog = NULL, *tprog_iter;
	struct bpf_link *link, *links[32] = {};
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	int links_cnt = 0;
	bool should_load;

	if (!test__start_subtest(subspec->name))
		return;

	if ((get_current_arch() & spec->arch_mask) == 0) {
		test__skip();
		return;
	}

	if ((current_runtime & spec->load_mask) == 0) {
		test__skip();
		return;
	}

	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
		if (subspec->caps) {
			err = cap_enable_effective(subspec->caps, NULL);
			if (err) {
				PRINT_FAIL("failed to set capabilities: %i, %s\n", err, strerror(-err));
				goto subtest_cleanup;
			}
		}
	}

	/* Implicitly reset to NULL if next test case doesn't specify */
	open_opts->btf_custom_path = spec->btf_custom_path;

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;

		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}

			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}

		bpf_program__set_autoload(tprog_iter, should_load);
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable auto-creation for such maps
	 * in unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}
	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log);

	/* Restore capabilities because the kernel will silently ignore requests
	 * for program info (such as xlated program text) if we are not
	 * bpf-capable. Also, for some reason test_verifier executes programs
	 * with all capabilities restored. Do the same here.
	 */
	if (restore_capabilities(&caps))
		goto tobj_cleanup;

	if (subspec->expect_xlated.cnt) {
		err = get_xlated_program_text(bpf_program__fd(tprog),
					      tester->log_buf, tester->log_buf_sz);
		if (err)
			goto tobj_cleanup;
		emit_xlated(tester->log_buf, false /*force*/);
		validate_msgs(tester->log_buf, &subspec->expect_xlated, emit_xlated);
	}

	if (subspec->jited.cnt) {
		err = get_jited_program_text(bpf_program__fd(tprog),
					     tester->log_buf, tester->log_buf_sz);
		if (err == -EOPNOTSUPP) {
			printf("%s:SKIP: jited programs disassembly is not supported,\n", __func__);
			printf("%s:SKIP: tests are built w/o LLVM development libs\n", __func__);
			test__skip();
			goto tobj_cleanup;
		}
		if (!ASSERT_EQ(err, 0, "get_jited_program_text"))
			goto tobj_cleanup;
		emit_jited(tester->log_buf, false /*force*/);
		validate_msgs(tester->log_buf, &subspec->jited, emit_jited);
	}

	if (should_do_test_run(spec, subspec)) {
		/* Do bpf_map__attach_struct_ops() for each struct_ops map.
		 * This should trigger bpf_struct_ops->reg callback on kernel side.
		 */
		bpf_object__for_each_map(map, tobj) {
			if (!bpf_map__autocreate(map) ||
			    bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
				continue;
			if (links_cnt >= ARRAY_SIZE(links)) {
				PRINT_FAIL("too many struct_ops maps");
				goto tobj_cleanup;
			}
			link = bpf_map__attach_struct_ops(map);
			if (!link) {
				PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n",
					   bpf_map__name(map), -errno);
				goto tobj_cleanup;
			}
			links[links_cnt++] = link;
		}

		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}

		err = do_prog_test_run(bpf_program__fd(tprog), &retval,
				       bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false,
				       spec->linear_sz);
		if (!err && retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}

		if (subspec->stderr.cnt) {
			err = get_stream(2, bpf_program__fd(tprog),
					 tester->log_buf, tester->log_buf_sz);
			if (err <= 0) {
				PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
					   err, errno);
				goto tobj_cleanup;
			}
			emit_stderr(tester->log_buf, false /*force*/);
			validate_msgs(tester->log_buf, &subspec->stderr, emit_stderr);
		}

		if (subspec->stdout.cnt) {
			err = get_stream(1, bpf_program__fd(tprog),
					 tester->log_buf, tester->log_buf_sz);
			if (err <= 0) {
				PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
					   err, errno);
				goto tobj_cleanup;
			}
			emit_stdout(tester->log_buf, false /*force*/);
			validate_msgs(tester->log_buf, &subspec->stdout, emit_stdout);
		}

		/* redo bpf_map__attach_struct_ops for each test */
		while (links_cnt > 0)
			bpf_link__destroy(links[--links_cnt]);
	}

tobj_cleanup:
	while (links_cnt > 0)
		bpf_link__destroy(links[--links_cnt]);
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}

static void process_subtest(struct test_loader *tester,
			    const char *skel_name,
			    skel_elf_bytes_fn elf_bytes_factory)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
	struct test_spec *specs = NULL;
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	const void *obj_bytes;
	int err, i, nr_progs;
	size_t obj_byte_cnt;

	if (tester_init(tester) < 0)
		return; /* failed to initialize tester */

	obj_bytes = elf_bytes_factory(&obj_byte_cnt);
	obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
	if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
		return;

	nr_progs = 0;
	bpf_object__for_each_program(prog, obj)
		++nr_progs;

	specs = calloc(nr_progs, sizeof(struct test_spec));
	if (!ASSERT_OK_PTR(specs, "specs_alloc"))
		return;

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		/* ignore tests for which we can't derive test specification */
		err = parse_test_spec(tester, obj, prog, &specs[i++]);
		if (err)
			PRINT_FAIL("Can't parse test spec for program '%s'\n",
				   bpf_program__name(prog));
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		struct test_spec *spec = &specs[i++];

		if (!spec->valid || spec->auxiliary)
			continue;

		if (spec->mode_mask & PRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, false);
		if (spec->mode_mask & UNPRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, true);
	}

	for (i = 0; i < nr_progs; ++i)
		free_test_spec(&specs[i]);
	free(specs);
	bpf_object__close(obj);
}

void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}
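
/* Example usage (illustrative sketch, not part of this file): callers in
 * prog_tests/ typically drive the loader through the RUN_TESTS() helper from
 * test_loader.h, which is roughly equivalent to the open-coded version below.
 * The skeleton name "verifier_and" is only an example.
 */
#if 0
#include "verifier_and.skel.h"

void test_verifier_and(void)
{
	struct test_loader tester = {};

	/* runs one subtest per annotated program in the skeleton */
	test_loader__run_subtests(&tester, "verifier_and", verifier_and__elf_bytes);
	test_loader_fini(&tester);
}
#endif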