// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_tasks.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vmas.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* don't check contents, just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read");

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}

static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close the link and map fds prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Give the map-free work a chance to run first if the map was freed */
	usleep(100);
	/* Memory used by both the sock map and the sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for both
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both the map fd and the link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}
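
/* Note: the final read() above is still expected to succeed because the
 * iterator fd returned by bpf_iter_create() holds its own reference on the
 * link, which in turn pins the program and (for map iterators) the map.
 * Closing the link and map fds therefore must not invalidate an
 * already-created iterator; that is exactly what this helper exercises.
 */
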
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_link_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

static pthread_mutex_t do_nothing_mutex;

static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}

static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_tasks *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = sys_gettid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_tasks__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}
static void *run_test_task_tid(void *arg)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = sys_gettid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	/* This includes the parent thread, this thread, the watchdog timer
	 * thread, and the do_nothing_wait thread
	 */
	test_task_common(&opts, 3, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");

	return NULL;
}

static void test_task_tid(void)
{
	pthread_t thread_id;

	/* Create a new thread so pid and tid aren't the same */
	ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL),
		  "pthread_create");
	ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 2, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 2, 1);

	close(pidfd);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_tasks *skel;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_tasks__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");

	bpf_iter_task_stack__destroy(skel);
}

static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ 32768

static char taskbuf[TASKBUFSZ];

static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (!ASSERT_GE(err, 0, "read"))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
			  "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}
static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (!ASSERT_LT(start, 16, "read"))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (!ASSERT_GE(len, 0, "read"))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (!ASSERT_GE(iter_fd, 0, "open"))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}
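
/* Same check as the anonymous-iterator test, but going through a bpf_link
 * pinned in bpffs: each open() of the pinned path is expected to create a
 * fresh seq_file, so the iterator program is re-run for every do_read().
 */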
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (!ASSERT_OK(err, "pin_iter"))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to work fine. Let's do a link update
	 * of the underlying link and `cat` the iterator again; its content
	 * should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* Create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is that one map's output almost
	 * fills the seq_file buffer and the other then triggers an
	 * overflow and needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map1_fd, 0, "bpf_map_create"))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map2_fd, 0, "bpf_map_create"))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so the bpf_seq_write
	 * for one map will mostly fill it, and the other map will partially
	 * fill it, trigger an overflow, and need a bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
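	/* A worked example, assuming 4KB pages (iter_size == 32768):
	 * - e2big case: print_len = 4097, so each map emits 4097 * 8 = 32776
	 *   bytes, which can never fit into the 32768-byte buffer and makes
	 *   the read fail with E2BIG;
	 * - !ret1 case: print_len = 4095, so each map emits 32760 bytes and
	 *   fits after a restart;
	 * - ret1 case: each map emits a single u64 (8 bytes).
	 */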
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		       "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!ASSERT_OK_PTR(buf, "malloc"))
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		ASSERT_EQ(len, -1, "read");
		ASSERT_EQ(errno, E2BIG, "read");
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || (len < 0 && errno == EAGAIN));

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

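		/* Per-CPU values are exchanged with the kernel as one array
		 * slot per possible CPU, each slot rounded up to 8 bytes;
		 * hence the j * 8 stride below even though the map's
		 * value_size is only 4 bytes.
		 */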
		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}
static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 key, expected_key = 0, res_first_key;
	int err, i, map_fd, hash_fd, iter_fd;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
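	/* The iterator program is expected to bpf_seq_write() the first
	 * key/value pair as raw binary (a 4-byte key followed by an 8-byte
	 * value), so the pair can be recovered by casting at fixed offsets.
	 */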
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") ||
	    !ASSERT_EQ(res_first_val, first_val, "bpf_seq_write"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	hash_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup arraymap1"))
			goto close_iter;
		if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
			goto close_iter;

		val = i + 4;
		err = bpf_map_lookup_elem(hash_fd, &val, &key);
		if (!ASSERT_OK(err, "map_lookup hashmap1"))
			goto close_iter;
		if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);

	/* The lookup must fail with ENOENT: that is how we verify the
	 * iterator program actually deleted the socket's local storage.
	 */
	ASSERT_ERR(err, "bpf_map_lookup_elem");
	ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket: this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage, and the test verifies that the
 * resulting value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
	    !ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	ASSERT_OK(err, "bpf_map_lookup_elem");
	ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem");

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];
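
/* Example of the stripping done below: a /proc/<pid>/maps line such as
 *   "00400000-0040b000 r-xp 00000000 fd:01 42 /usr/bin/cat"
 * becomes "00400000-0040b000r-xp00000000fd:0142/usr/bin/cat", which makes
 * the bpf_iter output and the procfs output byte-comparable even when
 * their column padding differs.
 */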
/* remove blanks (' ') and tabs from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vmas *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}

static void test_task_vma_dead_task(void)
{
	struct bpf_iter_task_vmas *skel;
	int wstatus, child_pid = -1;
	time_t start_tm, cur_tm;
	int err, iter_fd = -1;
	int wait_sec = 3;

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	start_tm = time(NULL);
	cur_tm = start_tm;

	child_pid = fork();
	if (child_pid == 0) {
		/* Fork short-lived processes in the background. */
		while (cur_tm < start_tm + wait_sec) {
			system("echo > /dev/null");
			cur_tm = time(NULL);
		}
		exit(0);
	}

	if (!ASSERT_GE(child_pid, 0, "fork_child"))
		goto out;

	while (cur_tm < start_tm + wait_sec) {
		iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
		if (!ASSERT_GE(iter_fd, 0, "create_iter"))
			goto out;

		/* Drain all data from iter_fd. */
		while (cur_tm < start_tm + wait_sec) {
			err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
			if (!ASSERT_GE(err, 0, "read_iter_fd"))
				goto out;

			cur_tm = time(NULL);

			if (err == 0)
				break;
		}

		close(iter_fd);
		iter_fd = -1;
	}

	check_bpf_link_info(skel->progs.proc_maps);

out:
	waitpid(child_pid, &wstatus, 0);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}
void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}

static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
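	/* Compute page_shift = log2(page size); e.g. a 4096-byte page
	 * yields shift == 12 (this assumes a power-of-two page size).
	 */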
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}

static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_vma_dead_task"))
		test_task_vma_dead_task();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}