// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

static int duration;

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do not check contents, but ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}
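/* Attach a map iterator, close both the link fd and the map fd before
 * draining the iterator fd, and verify that read() still succeeds. This
 * exercises the iterator holding its own reference on the map.
 */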
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Try to let the map-free work run first if the map is freed */
	usleep(100);
	/* Memory used by both sock map and sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for it
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both map fd and link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_obj_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

static pthread_mutex_t do_nothing_mutex;

static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}
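/* Run the task iterator while one extra thread is parked on
 * do_nothing_mutex, so there is always at least one tid besides the
 * main thread, and report how many known/unknown tids were visited.
 */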
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_task *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = getpid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_task__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

static void test_task_tid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	test_task_common(&opts, 1, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);

	close(pidfd);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	bpf_iter_task_stack__destroy(skel);
}
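/* Dump task files twice: once filtered to this tid (expect exactly one
 * unique tgid and a zero mismatch count), then unfiltered (expect more
 * than one unique tgid).
 */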
static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ 32768

static char taskbuf[TASKBUFSZ];
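/* Read the BTF-typed dump of task_struct into taskbuf. Returns nonzero
 * (and marks the subtest skipped) when the BPF program reports that
 * __builtin_btf_type_id is unavailable.
 */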
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
			  "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to work fine. Let us do a link
	 * update of the underlying link and `cat` the iterator again;
	 * its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}
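/* Exercise seq_file buffer handling in three modes: output sized to
 * overflow the 8-page buffer so that read() fails with E2BIG, output
 * sized to just fit after a bpf_seq_read restart, and the program
 * returning 1, in which case reads may need to be retried (EAGAIN).
 */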
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is that the output for one map
	 * almost fills the seq_file buffer and the other then triggers
	 * an overflow and needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map1_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map2_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* bpf_seq_printf kernel buffer is 8 pages, so one map
	 * bpf_seq_write will mostly fill it, and the other map
	 * will partially fill and then trigger overflow and need
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		       "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || len == -EAGAIN);

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}
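/* Verify the hash map element iterator: attaching the dump program to
 * hashmap2 and hashmap3 must fail, sleepable programs must be rejected,
 * and hashmap1 dumps key/value sums that match what user space wrote.
 */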
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 expected_key = 0, res_first_key;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	int err, i, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  " first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup"))
			goto out;
		if (!ASSERT_EQ(i, val, "invalid_val"))
			goto out;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}
static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}
/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket, this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
		  "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
		  getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}
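/* Create three sockets with distinct local-storage values, check that a
 * program writing out of bounds fails to attach with -EACCES, then run
 * the read-write iterator and verify the socket count, the value sum,
 * and that to_add_val was added to every stored value.
 */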
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}
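/* Dump this process's VMAs through a task_vma iterator, reading in tiny
 * chunks to hit seq_file corner cases, then compare the first line
 * against /proc/pid/maps after stripping whitespace.
 */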
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}
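/* Compute page_shift from the page size, let the iterator program
 * report an offset for trigger_func's address within its VMA, and check
 * that it matches get_uprobe_offset() for the same symbol.
 */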
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}

static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}