// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>

#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/sizes.h>

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

enum dump_mode {
	DUMP_JITED,
	DUMP_XLATED,
};

static const char * const attach_type_strings[] = {
	[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
	[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
	[BPF_SK_MSG_VERDICT] = "msg_verdict",
	[BPF_FLOW_DISSECTOR] = "flow_dissector",
	[__MAX_BPF_ATTACH_TYPE] = NULL,
};

static enum bpf_attach_type parse_attach_type(const char *str)
{
	enum bpf_attach_type type;

	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
		if (attach_type_strings[type] &&
		    is_prefix(str, attach_type_strings[type]))
			return type;
	}

	return __MAX_BPF_ATTACH_TYPE;
}

static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
	struct timespec real_time_ts, boot_time_ts;
	time_t wallclock_secs;
	struct tm load_tm;

	buf[--size] = '\0';

	if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
	    clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
		perror("Can't read clocks");
		snprintf(buf, size, "%llu", nsecs / 1000000000);
		return;
	}

	wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
		(real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
		1000000000;

	if (!localtime_r(&wallclock_secs, &load_tm)) {
		snprintf(buf, size, "%llu", nsecs / 1000000000);
		return;
	}

	if (json_output)
		strftime(buf, size, "%s", &load_tm);
	else
		strftime(buf, size, "%FT%T%z", &load_tm);
}

static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
{
	unsigned int id = 0;
	int fd, nb_fds = 0;
	void *tmp;
	int err;

	while (true) {
		struct bpf_prog_info info = {};
		__u32 len = sizeof(info);

		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno != ENOENT) {
				p_err("%s", strerror(errno));
				goto err_close_fds;
			}
			return nb_fds;
		}

		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			p_err("can't get prog by id (%u): %s",
			      id, strerror(errno));
			goto err_close_fds;
		}

		err = bpf_obj_get_info_by_fd(fd, &info, &len);
		if (err) {
			p_err("can't get prog info (%u): %s",
			      id, strerror(errno));
			goto err_close_fd;
		}

		if ((tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) ||
		    (!tag && strncmp(nametag, info.name, BPF_OBJ_NAME_LEN))) {
			close(fd);
			continue;
		}

		if (nb_fds > 0) {
			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
			if (!tmp) {
				p_err("failed to realloc");
				goto err_close_fd;
			}
			*fds = tmp;
		}
		(*fds)[nb_fds++] = fd;
	}

err_close_fd:
	close(fd);
err_close_fds:
	while (--nb_fds >= 0)
		close((*fds)[nb_fds]);
	return -1;
}

static int prog_parse_fds(int *argc, char ***argv, int **fds)
{
	if (is_prefix(**argv, "id")) {
		unsigned int id;
		char *endptr;
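		/* "id" expects a numeric program ID as the next argument; it
		 * is parsed with strtoul() and resolved to a single fd below.
		 */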
		NEXT_ARGP();

		id = strtoul(**argv, &endptr, 0);
		if (*endptr) {
			p_err("can't parse %s as ID", **argv);
			return -1;
		}
		NEXT_ARGP();

		(*fds)[0] = bpf_prog_get_fd_by_id(id);
		if ((*fds)[0] < 0) {
			p_err("get by id (%u): %s", id, strerror(errno));
			return -1;
		}
		return 1;
	} else if (is_prefix(**argv, "tag")) {
		unsigned char tag[BPF_TAG_SIZE];

		NEXT_ARGP();

		if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
			   tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
		    != BPF_TAG_SIZE) {
			p_err("can't parse tag");
			return -1;
		}
		NEXT_ARGP();

		return prog_fd_by_nametag(tag, fds, true);
	} else if (is_prefix(**argv, "name")) {
		char *name;

		NEXT_ARGP();

		name = **argv;
		if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
			p_err("can't parse name");
			return -1;
		}
		NEXT_ARGP();

		return prog_fd_by_nametag(name, fds, false);
	} else if (is_prefix(**argv, "pinned")) {
		char *path;

		NEXT_ARGP();

		path = **argv;
		NEXT_ARGP();

		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
		if ((*fds)[0] < 0)
			return -1;
		return 1;
	}

	p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
	return -1;
}

int prog_parse_fd(int *argc, char ***argv)
{
	int *fds = NULL;
	int nb_fds, fd;

	fds = malloc(sizeof(int));
	if (!fds) {
		p_err("mem alloc failed");
		return -1;
	}
	nb_fds = prog_parse_fds(argc, argv, &fds);
	if (nb_fds != 1) {
		if (nb_fds > 1) {
			p_err("several programs match this handle");
			while (nb_fds--)
				close(fds[nb_fds]);
		}
		fd = -1;
		goto exit_free;
	}

	fd = fds[0];
exit_free:
	free(fds);
	return fd;
}

static void show_prog_maps(int fd, u32 num_maps)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	__u32 map_ids[num_maps];
	unsigned int i;
	int err;

	info.nr_map_ids = num_maps;
	info.map_ids = ptr_to_u64(map_ids);

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err || !info.nr_map_ids)
		return;

	if (json_output) {
		jsonw_name(json_wtr, "map_ids");
		jsonw_start_array(json_wtr);
		for (i = 0; i < info.nr_map_ids; i++)
			jsonw_uint(json_wtr, map_ids[i]);
		jsonw_end_array(json_wtr);
	} else {
		printf("  map_ids ");
		for (i = 0; i < info.nr_map_ids; i++)
			printf("%u%s", map_ids[i],
			       i == info.nr_map_ids - 1 ? "" : ",");
	}
}

static void print_prog_header_json(struct bpf_prog_info *info)
{
	jsonw_uint_field(json_wtr, "id", info->id);
	if (info->type < ARRAY_SIZE(prog_type_name))
		jsonw_string_field(json_wtr, "type",
				   prog_type_name[info->type]);
	else
		jsonw_uint_field(json_wtr, "type", info->type);

	if (*info->name)
		jsonw_string_field(json_wtr, "name", info->name);

	jsonw_name(json_wtr, "tag");
	jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);

	jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
	if (info->run_time_ns) {
		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
	}
}

static void print_prog_json(struct bpf_prog_info *info, int fd)
{
	char *memlock;

	jsonw_start_object(json_wtr);
	print_prog_header_json(info);
	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);

	if (info->load_time) {
		char buf[32];

		print_boot_time(info->load_time, buf, sizeof(buf));

		/* Piggy back on load_time, since 0 uid is a valid one */
		jsonw_name(json_wtr, "loaded_at");
		jsonw_printf(json_wtr, "%s", buf);
		jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
	}

	jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);

	if (info->jited_prog_len) {
		jsonw_bool_field(json_wtr, "jited", true);
		jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
	} else {
		jsonw_bool_field(json_wtr, "jited", false);
	}

	memlock = get_fdinfo(fd, "memlock");
	if (memlock)
		jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
	free(memlock);

	if (info->nr_map_ids)
		show_prog_maps(fd, info->nr_map_ids);

	if (info->btf_id)
		jsonw_int_field(json_wtr, "btf_id", info->btf_id);

	if (!hash_empty(prog_table.table)) {
		struct pinned_obj *obj;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				jsonw_string(json_wtr, obj->path);
		}
		jsonw_end_array(json_wtr);
	}

	jsonw_end_object(json_wtr);
}

static void print_prog_header_plain(struct bpf_prog_info *info)
{
	printf("%u: ", info->id);
	if (info->type < ARRAY_SIZE(prog_type_name))
		printf("%s  ", prog_type_name[info->type]);
	else
		printf("type %u  ", info->type);

	if (*info->name)
		printf("name %s  ", info->name);

	printf("tag ");
	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
	printf("%s", info->gpl_compatible ? "  gpl" : "");
	if (info->run_time_ns)
		printf(" run_time_ns %lld run_cnt %lld",
		       info->run_time_ns, info->run_cnt);
	printf("\n");
}

static void print_prog_plain(struct bpf_prog_info *info, int fd)
{
	char *memlock;

	print_prog_header_plain(info);

	if (info->load_time) {
		char buf[32];

		print_boot_time(info->load_time, buf, sizeof(buf));

		/* Piggy back on load_time, since 0 uid is a valid one */
		printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
	}

	printf("\txlated %uB", info->xlated_prog_len);

	if (info->jited_prog_len)
		printf("  jited %uB", info->jited_prog_len);
	else
		printf("  not jited");

	memlock = get_fdinfo(fd, "memlock");
	if (memlock)
		printf("  memlock %sB", memlock);
	free(memlock);

	if (info->nr_map_ids)
		show_prog_maps(fd, info->nr_map_ids);

	if (!hash_empty(prog_table.table)) {
		struct pinned_obj *obj;

		hash_for_each_possible(prog_table.table, obj, hash, info->id) {
			if (obj->id == info->id)
				printf("\n\tpinned %s", obj->path);
		}
	}

	if (info->btf_id)
		printf("\n\tbtf_id %d", info->btf_id);

	printf("\n");
}

static int show_prog(int fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	int err;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err) {
		p_err("can't get prog info: %s", strerror(errno));
		return -1;
	}

	if (json_output)
		print_prog_json(&info, fd);
	else
		print_prog_plain(&info, fd);

	return 0;
}

static int do_show_subset(int argc, char **argv)
{
	int *fds = NULL;
	int nb_fds, i;
	int err = -1;

	fds = malloc(sizeof(int));
	if (!fds) {
		p_err("mem alloc failed");
		return -1;
	}
	nb_fds = prog_parse_fds(&argc, &argv, &fds);
	if (nb_fds < 1)
		goto exit_free;

	if (json_output && nb_fds > 1)
		jsonw_start_array(json_wtr);	/* root array */
	for (i = 0; i < nb_fds; i++) {
		err = show_prog(fds[i]);
		if (err) {
			for (; i < nb_fds; i++)
				close(fds[i]);
			break;
		}
		close(fds[i]);
	}
	if (json_output && nb_fds > 1)
		jsonw_end_array(json_wtr);	/* root array */

exit_free:
	free(fds);
	return err;
}

static int do_show(int argc, char **argv)
{
	__u32 id = 0;
	int err;
	int fd;

	if (show_pinned)
		build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);

	if (argc == 2)
		return do_show_subset(argc, argv);

	if (argc)
		return BAD_ARG();

	if (json_output)
		jsonw_start_array(json_wtr);
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			p_err("can't get next program: %s%s", strerror(errno),
			      errno == EINVAL ? " -- kernel too old?" : "");
			err = -1;
			break;
		}

		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			p_err("can't get prog by id (%u): %s",
			      id, strerror(errno));
			err = -1;
			break;
		}

		err = show_prog(fd);
		close(fd);
		if (err)
			break;
	}

	if (json_output)
		jsonw_end_array(json_wtr);

	return err;
}

static int
prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
	  char *filepath, bool opcodes, bool visual, bool linum)
{
	struct bpf_prog_linfo *prog_linfo = NULL;
	const char *disasm_opt = NULL;
	struct dump_data dd = {};
	void *func_info = NULL;
	struct btf *btf = NULL;
	char func_sig[1024];
	unsigned char *buf;
	__u32 member_len;
	ssize_t n;
	int fd;

	if (mode == DUMP_JITED) {
		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
			p_info("no instructions returned");
			return -1;
		}
		buf = (unsigned char *)(info->jited_prog_insns);
		member_len = info->jited_prog_len;
	} else {	/* DUMP_XLATED */
		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
			return -1;
		}
		buf = (unsigned char *)info->xlated_prog_insns;
		member_len = info->xlated_prog_len;
	}

	if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
		p_err("failed to get btf");
		return -1;
	}

	func_info = (void *)info->func_info;

	if (info->nr_line_info) {
		prog_linfo = bpf_prog_linfo__new(info);
		if (!prog_linfo)
			p_info("error in processing bpf_line_info. continue without it.");
	}

	if (filepath) {
		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
		if (fd < 0) {
			p_err("can't open file %s: %s", filepath,
			      strerror(errno));
			return -1;
		}

		n = write(fd, buf, member_len);
		close(fd);
		if (n != member_len) {
			p_err("error writing output file: %s",
			      n < 0 ? strerror(errno) : "short write");
			return -1;
		}

		if (json_output)
			jsonw_null(json_wtr);
	} else if (mode == DUMP_JITED) {
		const char *name = NULL;

		if (info->ifindex) {
			name = ifindex_to_bfd_params(info->ifindex,
						     info->netns_dev,
						     info->netns_ino,
						     &disasm_opt);
			if (!name)
				return -1;
		}

		if (info->nr_jited_func_lens && info->jited_func_lens) {
			struct kernel_sym *sym = NULL;
			struct bpf_func_info *record;
			char sym_name[SYM_MAX_NAME];
			unsigned char *img = buf;
			__u64 *ksyms = NULL;
			__u32 *lens;
			__u32 i;
			if (info->nr_jited_ksyms) {
				kernel_syms_load(&dd);
				ksyms = (__u64 *) info->jited_ksyms;
			}

			if (json_output)
				jsonw_start_array(json_wtr);

			lens = (__u32 *) info->jited_func_lens;
			for (i = 0; i < info->nr_jited_func_lens; i++) {
				if (ksyms) {
					sym = kernel_syms_search(&dd, ksyms[i]);
					if (sym)
						sprintf(sym_name, "%s", sym->name);
					else
						sprintf(sym_name, "0x%016llx", ksyms[i]);
				} else {
					strcpy(sym_name, "unknown");
				}

				if (func_info) {
					record = func_info + i * info->func_info_rec_size;
					btf_dumper_type_only(btf, record->type_id,
							     func_sig,
							     sizeof(func_sig));
				}

				if (json_output) {
					jsonw_start_object(json_wtr);
					if (func_info && func_sig[0] != '\0') {
						jsonw_name(json_wtr, "proto");
						jsonw_string(json_wtr, func_sig);
					}
					jsonw_name(json_wtr, "name");
					jsonw_string(json_wtr, sym_name);
					jsonw_name(json_wtr, "insns");
				} else {
					if (func_info && func_sig[0] != '\0')
						printf("%s:\n", func_sig);
					printf("%s:\n", sym_name);
				}

				disasm_print_insn(img, lens[i], opcodes,
						  name, disasm_opt, btf,
						  prog_linfo, ksyms[i], i,
						  linum);

				img += lens[i];

				if (json_output)
					jsonw_end_object(json_wtr);
				else
					printf("\n");
			}

			if (json_output)
				jsonw_end_array(json_wtr);
		} else {
			disasm_print_insn(buf, member_len, opcodes, name,
					  disasm_opt, btf, NULL, 0, 0, false);
		}
	} else if (visual) {
		if (json_output)
			jsonw_null(json_wtr);
		else
			dump_xlated_cfg(buf, member_len);
	} else {
		kernel_syms_load(&dd);
		dd.nr_jited_ksyms = info->nr_jited_ksyms;
		dd.jited_ksyms = (__u64 *) info->jited_ksyms;
		dd.btf = btf;
		dd.func_info = func_info;
		dd.finfo_rec_size = info->func_info_rec_size;
		dd.prog_linfo = prog_linfo;

		if (json_output)
			dump_xlated_json(&dd, buf, member_len, opcodes,
					 linum);
		else
			dump_xlated_plain(&dd, buf, member_len, opcodes,
					  linum);
		kernel_syms_destroy(&dd);
	}

	return 0;
}

static int do_dump(int argc, char **argv)
{
	struct bpf_prog_info_linear *info_linear;
	char *filepath = NULL;
	bool opcodes = false;
	bool visual = false;
	enum dump_mode mode;
	bool linum = false;
	int *fds = NULL;
	int nb_fds, i = 0;
	int err = -1;
	__u64 arrays;

	if (is_prefix(*argv, "jited")) {
		if (disasm_init())
			return -1;
		mode = DUMP_JITED;
	} else if (is_prefix(*argv, "xlated")) {
		mode = DUMP_XLATED;
	} else {
		p_err("expected 'xlated' or 'jited', got: %s", *argv);
		return -1;
	}
	NEXT_ARG();

	if (argc < 2)
		usage();

	fds = malloc(sizeof(int));
	if (!fds) {
		p_err("mem alloc failed");
		return -1;
	}
	nb_fds = prog_parse_fds(&argc, &argv, &fds);
	if (nb_fds < 1)
		goto exit_free;

	if (is_prefix(*argv, "file")) {
		NEXT_ARG();
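		/* "file" writes the raw instruction image to a file instead
		 * of disassembling it; it needs a path and exactly one
		 * matched program.
		 */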
		if (!argc) {
			p_err("expected file path");
			goto exit_close;
		}
		if (nb_fds > 1) {
			p_err("several programs matched");
			goto exit_close;
		}

		filepath = *argv;
		NEXT_ARG();
	} else if (is_prefix(*argv, "opcodes")) {
		opcodes = true;
		NEXT_ARG();
	} else if (is_prefix(*argv, "visual")) {
		if (nb_fds > 1) {
			p_err("several programs matched");
			goto exit_close;
		}

		visual = true;
		NEXT_ARG();
	} else if (is_prefix(*argv, "linum")) {
		linum = true;
		NEXT_ARG();
	}

	if (argc) {
		usage();
		goto exit_close;
	}

	if (mode == DUMP_JITED)
		arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
	else
		arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;

	arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	if (json_output && nb_fds > 1)
		jsonw_start_array(json_wtr);	/* root array */
	for (i = 0; i < nb_fds; i++) {
		info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
		if (IS_ERR_OR_NULL(info_linear)) {
			p_err("can't get prog info: %s", strerror(errno));
			break;
		}

		if (json_output && nb_fds > 1) {
			jsonw_start_object(json_wtr);	/* prog object */
			print_prog_header_json(&info_linear->info);
			jsonw_name(json_wtr, "insns");
		} else if (nb_fds > 1) {
			print_prog_header_plain(&info_linear->info);
		}

		err = prog_dump(&info_linear->info, mode, filepath, opcodes,
				visual, linum);

		if (json_output && nb_fds > 1)
			jsonw_end_object(json_wtr);	/* prog object */
		else if (i != nb_fds - 1 && nb_fds > 1)
			printf("\n");

		free(info_linear);
		if (err)
			break;
		close(fds[i]);
	}
	if (json_output && nb_fds > 1)
		jsonw_end_array(json_wtr);	/* root array */

exit_close:
	for (; i < nb_fds; i++)
		close(fds[i]);
exit_free:
	free(fds);
	return err;
}

static int do_pin(int argc, char **argv)
{
	int err;

	err = do_pin_any(argc, argv, prog_parse_fd);
	if (!err && json_output)
		jsonw_null(json_wtr);
	return err;
}

struct map_replace {
	int idx;
	int fd;
	char *name;
};

static int map_replace_compar(const void *p1, const void *p2)
{
	const struct map_replace *a = p1, *b = p2;

	return a->idx - b->idx;
}

static int parse_attach_detach_args(int argc, char **argv, int *progfd,
				    enum bpf_attach_type *attach_type,
				    int *mapfd)
{
	if (!REQ_ARGS(3))
		return -EINVAL;

	*progfd = prog_parse_fd(&argc, &argv);
	if (*progfd < 0)
		return *progfd;

	*attach_type = parse_attach_type(*argv);
	if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
		p_err("invalid attach/detach type");
		return -EINVAL;
	}

	if (*attach_type == BPF_FLOW_DISSECTOR) {
		*mapfd = -1;
		return 0;
	}

	NEXT_ARG();
	if (!REQ_ARGS(2))
		return -EINVAL;

	*mapfd = map_parse_fd(&argc, &argv);
	if (*mapfd < 0)
		return *mapfd;

	return 0;
}

static int do_attach(int argc, char **argv)
{
	enum bpf_attach_type attach_type;
	int err, progfd;
	int mapfd;

	err = parse_attach_detach_args(argc, argv,
				       &progfd, &attach_type, &mapfd);
	if (err)
		return err;

	err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
	if (err) {
		p_err("failed prog attach to map");
		return -EINVAL;
	}

	if (json_output)
		jsonw_null(json_wtr);
	return 0;
}

static int do_detach(int argc, char **argv)
{
	enum bpf_attach_type attach_type;
	int err, progfd;
	int mapfd;

	err = parse_attach_detach_args(argc, argv,
				       &progfd, &attach_type, &mapfd);
	if (err)
		return err;

	err = bpf_prog_detach2(progfd, mapfd, attach_type);
	if (err) {
		p_err("failed prog detach from map");
		return -EINVAL;
	}

	if (json_output)
		jsonw_null(json_wtr);
	return 0;
}

static int check_single_stdin(char *file_data_in, char *file_ctx_in)
{
	if (file_data_in && file_ctx_in &&
	    !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
		p_err("cannot use standard input for both data_in and ctx_in");
		return -1;
	}

	return 0;
}

static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
{
	size_t block_size = 256;
	size_t buf_size = block_size;
	size_t nb_read = 0;
	void *tmp;
	FILE *f;

	if (!fname) {
		*data_ptr = NULL;
		*size = 0;
		return 0;
	}

	if (!strcmp(fname, "-"))
		f = stdin;
	else
		f = fopen(fname, "r");
	if (!f) {
		p_err("failed to open %s: %s", fname, strerror(errno));
		return -1;
	}

	*data_ptr = malloc(block_size);
	if (!*data_ptr) {
		p_err("failed to allocate memory for data_in/ctx_in: %s",
		      strerror(errno));
		goto err_fclose;
	}

	while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
		if (feof(f))
			break;
		if (ferror(f)) {
			p_err("failed to read data_in/ctx_in from %s: %s",
			      fname, strerror(errno));
			goto err_free;
		}
		if (nb_read > buf_size - block_size) {
			if (buf_size == UINT32_MAX) {
				p_err("data_in/ctx_in is too long (max: %d)",
				      UINT32_MAX);
				goto err_free;
			}
			/* No space for fread()-ing next chunk; realloc() */
			buf_size *= 2;
			tmp = realloc(*data_ptr, buf_size);
			if (!tmp) {
				p_err("failed to reallocate data_in/ctx_in: %s",
				      strerror(errno));
				goto err_free;
			}
			*data_ptr = tmp;
		}
	}
	if (f != stdin)
		fclose(f);

	*size = nb_read;
	return 0;

err_free:
	free(*data_ptr);
	*data_ptr = NULL;
err_fclose:
	if (f != stdin)
		fclose(f);
	return -1;
}

static void hex_print(void *data, unsigned int size, FILE *f)
{
	size_t i, j;
	char c;

	for (i = 0; i < size; i += 16) {
		/* Row offset */
		fprintf(f, "%07zx\t", i);

		/* Hexadecimal values */
		for (j = i; j < i + 16 && j < size; j++)
			fprintf(f, "%02x%s", *(uint8_t *)(data + j),
				j % 2 ? " " : "");
		for (; j < i + 16; j++)
			fprintf(f, "  %s", j % 2 ? " " : "");

		/* ASCII values (if relevant), '.' otherwise */
		fprintf(f, "| ");
		for (j = i; j < i + 16 && j < size; j++) {
			c = *(char *)(data + j);
			if (c < ' ' || c > '~')
				c = '.';
			fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
		}

		fprintf(f, "\n");
	}
}

static int
print_run_output(void *data, unsigned int size, const char *fname,
		 const char *json_key)
{
	size_t nb_written;
	FILE *f;

	if (!fname)
		return 0;

	if (!strcmp(fname, "-")) {
		f = stdout;
		if (json_output) {
			jsonw_name(json_wtr, json_key);
			print_data_json(data, size);
		} else {
			hex_print(data, size, f);
		}
		return 0;
	}

	f = fopen(fname, "w");
	if (!f) {
		p_err("failed to open %s: %s", fname, strerror(errno));
		return -1;
	}

	nb_written = fwrite(data, 1, size, f);
	fclose(f);
	if (nb_written != size) {
		p_err("failed to write output data/ctx: %s", strerror(errno));
		return -1;
	}

	return 0;
}

static int alloc_run_data(void **data_ptr, unsigned int size_out)
{
	*data_ptr = calloc(size_out, 1);
	if (!*data_ptr) {
		p_err("failed to allocate memory for output data/ctx: %s",
		      strerror(errno));
		return -1;
	}

	return 0;
}

static int do_run(int argc, char **argv)
{
	char *data_fname_in = NULL, *data_fname_out = NULL;
	char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
	struct bpf_prog_test_run_attr test_attr = {0};
	const unsigned int default_size = SZ_32K;
	void *data_in = NULL, *data_out = NULL;
	void *ctx_in = NULL, *ctx_out = NULL;
	unsigned int repeat = 1;
	int fd, err;

	if (!REQ_ARGS(4))
		return -1;

	fd = prog_parse_fd(&argc, &argv);
	if (fd < 0)
		return -1;

	while (argc) {
		if (detect_common_prefix(*argv, "data_in", "data_out",
					 "data_size_out", NULL))
			return -1;
		if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
					 "ctx_size_out", NULL))
			return -1;

		if (is_prefix(*argv, "data_in")) {
			NEXT_ARG();
			if (!REQ_ARGS(1))
				return -1;

			data_fname_in = GET_ARG();
			if (check_single_stdin(data_fname_in, ctx_fname_in))
				return -1;
		} else if (is_prefix(*argv, "data_out")) {
			NEXT_ARG();
			if (!REQ_ARGS(1))
				return -1;

			data_fname_out = GET_ARG();
		} else if (is_prefix(*argv, "data_size_out")) {
			char *endptr;

			NEXT_ARG();
			if (!REQ_ARGS(1))
				return -1;

			test_attr.data_size_out = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as output data size",
				      *argv);
				return -1;
			}
			NEXT_ARG();
		} else if (is_prefix(*argv, "ctx_in")) {
			NEXT_ARG();
			if (!REQ_ARGS(1))
				return -1;

			ctx_fname_in = GET_ARG();
			if (check_single_stdin(data_fname_in, ctx_fname_in))
				return -1;
		} else if (is_prefix(*argv, "ctx_out")) {
			NEXT_ARG();
			if (!REQ_ARGS(1))
				return -1;

			ctx_fname_out = GET_ARG();
		} else if (is_prefix(*argv, "ctx_size_out")) {
			char *endptr;

			NEXT_ARG();
			if (!REQ_ARGS(1))
				return -1;

			test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as output context size",
				      *argv);
				return -1;
			}
			NEXT_ARG();
		} else if (is_prefix(*argv, "repeat")) {
			char *endptr;

			NEXT_ARG();
			if (!REQ_ARGS(1))
				return -1;

			repeat = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as repeat number",
				      *argv);
				return -1;
			}
			NEXT_ARG();
		} else {
			p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
			      *argv);
			return -1;
		}
	}

	err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
	if (err)
		return -1;

	if (data_in) {
		if (!test_attr.data_size_out)
			test_attr.data_size_out = default_size;
		err = alloc_run_data(&data_out, test_attr.data_size_out);
		if (err)
			goto free_data_in;
	}

	err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
	if (err)
		goto free_data_out;

	if (ctx_in) {
		if (!test_attr.ctx_size_out)
			test_attr.ctx_size_out = default_size;
		err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
		if (err)
			goto free_ctx_in;
	}

	test_attr.prog_fd = fd;
	test_attr.repeat = repeat;
	test_attr.data_in = data_in;
	test_attr.data_out = data_out;
	test_attr.ctx_in = ctx_in;
	test_attr.ctx_out = ctx_out;

	err = bpf_prog_test_run_xattr(&test_attr);
	if (err) {
		p_err("failed to run program: %s", strerror(errno));
		goto free_ctx_out;
	}

	err = 0;

	if (json_output)
		jsonw_start_object(json_wtr);	/* root */

	/* Do not exit on errors occurring when printing output data/context,
	 * we still want to print return value and duration for program run.
	 */
	if (test_attr.data_size_out)
		err += print_run_output(test_attr.data_out,
					test_attr.data_size_out,
					data_fname_out, "data_out");
	if (test_attr.ctx_size_out)
		err += print_run_output(test_attr.ctx_out,
					test_attr.ctx_size_out,
					ctx_fname_out, "ctx_out");

	if (json_output) {
		jsonw_uint_field(json_wtr, "retval", test_attr.retval);
		jsonw_uint_field(json_wtr, "duration", test_attr.duration);
		jsonw_end_object(json_wtr);	/* root */
	} else {
		fprintf(stdout, "Return value: %u, duration%s: %uns\n",
			test_attr.retval,
			repeat > 1 ? " (average)" : "", test_attr.duration);
	}

free_ctx_out:
	free(ctx_out);
free_ctx_in:
	free(ctx_in);
free_data_out:
	free(data_out);
free_data_in:
	free(data_in);

	return err;
}

static int load_with_options(int argc, char **argv, bool first_prog_only)
{
	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
		.relaxed_maps = relaxed_maps,
	);
	struct bpf_object_load_attr load_attr = { 0 };
	enum bpf_attach_type expected_attach_type;
	struct map_replace *map_replace = NULL;
	struct bpf_program *prog = NULL, *pos;
	unsigned int old_map_fds = 0;
	const char *pinmaps = NULL;
	struct bpf_object *obj;
	struct bpf_map *map;
	const char *pinfile;
	unsigned int i, j;
	__u32 ifindex = 0;
	const char *file;
	int idx, err;

	if (!REQ_ARGS(2))
		return -1;
	file = GET_ARG();
	pinfile = GET_ARG();

	while (argc) {
		if (is_prefix(*argv, "type")) {
			char *type;

			NEXT_ARG();

			if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
				p_err("program type already specified");
				goto err_free_reuse_maps;
			}
			if (!REQ_ARGS(1))
				goto err_free_reuse_maps;

			/* Put a '/' at the end of type to appease libbpf */
			type = malloc(strlen(*argv) + 2);
			if (!type) {
				p_err("mem alloc failed");
				goto err_free_reuse_maps;
			}
			*type = 0;
			strcat(type, *argv);
			strcat(type, "/");

			err = libbpf_prog_type_by_name(type, &common_prog_type,
						       &expected_attach_type);
			free(type);
			if (err < 0)
				goto err_free_reuse_maps;

			NEXT_ARG();
		} else if (is_prefix(*argv, "map")) {
			void *new_map_replace;
			char *endptr, *name;
			int fd;

			NEXT_ARG();

			if (!REQ_ARGS(4))
				goto err_free_reuse_maps;

			if (is_prefix(*argv, "idx")) {
				NEXT_ARG();

				idx = strtoul(*argv, &endptr, 0);
				if (*endptr) {
					p_err("can't parse %s as IDX", *argv);
					goto err_free_reuse_maps;
				}
				name = NULL;
			} else if (is_prefix(*argv, "name")) {
				NEXT_ARG();

				name = *argv;
				idx = -1;
			} else {
				p_err("expected 'idx' or 'name', got: '%s'?",
				      *argv);
				goto err_free_reuse_maps;
			}
			NEXT_ARG();

			fd = map_parse_fd(&argc, &argv);
			if (fd < 0)
				goto err_free_reuse_maps;

			new_map_replace = reallocarray(map_replace,
						       old_map_fds + 1,
						       sizeof(*map_replace));
			if (!new_map_replace) {
				p_err("mem alloc failed");
				goto err_free_reuse_maps;
			}
			map_replace = new_map_replace;

			map_replace[old_map_fds].idx = idx;
			map_replace[old_map_fds].name = name;
			map_replace[old_map_fds].fd = fd;
			old_map_fds++;
		} else if (is_prefix(*argv, "dev")) {
			NEXT_ARG();

			if (ifindex) {
				p_err("offload device already specified");
				goto err_free_reuse_maps;
			}
			if (!REQ_ARGS(1))
				goto err_free_reuse_maps;

			ifindex = if_nametoindex(*argv);
			if (!ifindex) {
				p_err("unrecognized netdevice '%s': %s",
				      *argv, strerror(errno));
				goto err_free_reuse_maps;
			}
			NEXT_ARG();
		} else if (is_prefix(*argv, "pinmaps")) {
			NEXT_ARG();

			if (!REQ_ARGS(1))
				goto err_free_reuse_maps;

			pinmaps = GET_ARG();
		} else {
			p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
			      *argv);
			goto err_free_reuse_maps;
		}
	}

	set_max_rlimit();

	obj = bpf_object__open_file(file, &open_opts);
	if (IS_ERR_OR_NULL(obj)) {
		p_err("failed to open object file");
		goto err_free_reuse_maps;
	}

	bpf_object__for_each_program(pos, obj) {
		enum bpf_prog_type prog_type = common_prog_type;

		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			const char *sec_name = bpf_program__title(pos, false);

			err = libbpf_prog_type_by_name(sec_name, &prog_type,
						       &expected_attach_type);
			if (err < 0)
				goto err_close_obj;
		}

		bpf_program__set_ifindex(pos, ifindex);
		bpf_program__set_type(pos, prog_type);
		bpf_program__set_expected_attach_type(pos, expected_attach_type);
	}

	qsort(map_replace, old_map_fds, sizeof(*map_replace),
	      map_replace_compar);

	/* After the sort maps by name will be first on the list, because they
	 * have idx == -1. Resolve them.
	 */
	j = 0;
	while (j < old_map_fds && map_replace[j].name) {
		i = 0;
		bpf_object__for_each_map(map, obj) {
			if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
				map_replace[j].idx = i;
				break;
			}
			i++;
		}
		if (map_replace[j].idx == -1) {
			p_err("unable to find map '%s'", map_replace[j].name);
			goto err_close_obj;
		}
		j++;
	}
	/* Resort if any names were resolved */
	if (j)
		qsort(map_replace, old_map_fds, sizeof(*map_replace),
		      map_replace_compar);

	/* Set ifindex and name reuse */
	j = 0;
	idx = 0;
	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			bpf_map__set_ifindex(map, ifindex);

		if (j < old_map_fds && idx == map_replace[j].idx) {
			err = bpf_map__reuse_fd(map, map_replace[j++].fd);
			if (err) {
				p_err("unable to set up map reuse: %d", err);
				goto err_close_obj;
			}

			/* Next reuse wants to apply to the same map */
			if (j < old_map_fds && map_replace[j].idx == idx) {
				p_err("replacement for map idx %d specified more than once",
				      idx);
				goto err_close_obj;
			}
		}

		idx++;
	}
	if (j < old_map_fds) {
		p_err("map idx '%d' not used", map_replace[j].idx);
		goto err_close_obj;
	}

	load_attr.obj = obj;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		load_attr.log_level = 1 + 2 + 4;

	err = bpf_object__load_xattr(&load_attr);
	if (err) {
		p_err("failed to load object file");
		goto err_close_obj;
	}

	err = mount_bpffs_for_pin(pinfile);
	if (err)
		goto err_close_obj;

	if (first_prog_only) {
		prog = bpf_program__next(NULL, obj);
		if (!prog) {
			p_err("object file doesn't contain any bpf program");
			goto err_close_obj;
		}

		err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
		if (err) {
			p_err("failed to pin program %s",
			      bpf_program__title(prog, false));
			goto err_close_obj;
		}
	} else {
		err = bpf_object__pin_programs(obj, pinfile);
		if (err) {
			p_err("failed to pin all programs");
			goto err_close_obj;
		}
	}

	if (pinmaps) {
		err = bpf_object__pin_maps(obj, pinmaps);
		if (err) {
			p_err("failed to pin all maps");
			goto err_unpin;
		}
	}

	if (json_output)
		jsonw_null(json_wtr);

	bpf_object__close(obj);
	for (i = 0; i < old_map_fds; i++)
		close(map_replace[i].fd);
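	/* Success path: the fds taken for "map idx|name ... " replacements
	 * are no longer needed once the object has been loaded and pinned.
	 */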
	free(map_replace);

	return 0;

err_unpin:
	if (first_prog_only)
		unlink(pinfile);
	else
		bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
	bpf_object__close(obj);
err_free_reuse_maps:
	for (i = 0; i < old_map_fds; i++)
		close(map_replace[i].fd);
	free(map_replace);
	return -1;
}

static int do_load(int argc, char **argv)
{
	return load_with_options(argc, argv, true);
}

static int do_loadall(int argc, char **argv)
{
	return load_with_options(argc, argv, false);
}

#ifdef BPFTOOL_WITHOUT_SKELETONS

static int do_profile(int argc, char **argv)
{
	p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
	return 0;
}

#else /* BPFTOOL_WITHOUT_SKELETONS */

#include "profiler.skel.h"

struct profile_metric {
	const char *name;
	struct bpf_perf_event_value val;
	struct perf_event_attr attr;
	bool selected;

	/* calculate ratios like instructions per cycle */
	const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
	const char *ratio_desc;
	const float ratio_mul;
} metrics[] = {
	{
		.name = "cycles",
		.attr = {
			.type = PERF_TYPE_HARDWARE,
			.config = PERF_COUNT_HW_CPU_CYCLES,
			.exclude_user = 1,
		},
	},
	{
		.name = "instructions",
		.attr = {
			.type = PERF_TYPE_HARDWARE,
			.config = PERF_COUNT_HW_INSTRUCTIONS,
			.exclude_user = 1,
		},
		.ratio_metric = 1,
		.ratio_desc = "insns per cycle",
		.ratio_mul = 1.0,
	},
	{
		.name = "l1d_loads",
		.attr = {
			.type = PERF_TYPE_HW_CACHE,
			.config =
				PERF_COUNT_HW_CACHE_L1D |
				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
				(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
			.exclude_user = 1,
		},
	},
	{
		.name = "llc_misses",
		.attr = {
			.type = PERF_TYPE_HW_CACHE,
			.config =
				PERF_COUNT_HW_CACHE_LL |
				(PERF_COUNT_HW_CACHE_OP_READ << 8) |
				(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
			.exclude_user = 1
		},
		.ratio_metric = 2,
		.ratio_desc = "LLC misses per million insns",
		.ratio_mul = 1e6,
	},
};

static __u64 profile_total_count;

#define MAX_NUM_PROFILE_METRICS 4

static int profile_parse_metrics(int argc, char **argv)
{
	unsigned int metric_cnt;
	int selected_cnt = 0;
	unsigned int i;

	metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);

	while (argc > 0) {
		for (i = 0; i < metric_cnt; i++) {
			if (is_prefix(argv[0], metrics[i].name)) {
				if (!metrics[i].selected)
					selected_cnt++;
				metrics[i].selected = true;
				break;
			}
		}
		if (i == metric_cnt) {
			p_err("unknown metric %s", argv[0]);
			return -1;
		}
		NEXT_ARG();
	}
	if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
		p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
		      selected_cnt, MAX_NUM_PROFILE_METRICS);
		return -1;
	}
	return selected_cnt;
}

static void profile_read_values(struct profiler_bpf *obj)
{
	__u32 m, cpu, num_cpu = obj->rodata->num_cpu;
	int reading_map_fd, count_map_fd;
	__u64 counts[num_cpu];
	__u32 key = 0;
	int err;

	reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
	count_map_fd = bpf_map__fd(obj->maps.counts);
	if (reading_map_fd < 0 || count_map_fd < 0) {
		p_err("failed to get fd for map");
		return;
	}

	err = bpf_map_lookup_elem(count_map_fd, &key, counts);
	if (err) {
		p_err("failed to read count_map: %s", strerror(errno));
		return;
	}

	profile_total_count = 0;
	for (cpu = 0; cpu < num_cpu; cpu++)
		profile_total_count += counts[cpu];

	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
		struct bpf_perf_event_value values[num_cpu];

		if (!metrics[m].selected)
			continue;

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			p_err("failed to read reading_map: %s",
			      strerror(errno));
			return;
		}
		for (cpu = 0; cpu < num_cpu; cpu++) {
			metrics[m].val.counter += values[cpu].counter;
			metrics[m].val.enabled += values[cpu].enabled;
			metrics[m].val.running += values[cpu].running;
		}
		key++;
	}
}

static void profile_print_readings_json(void)
{
	__u32 m;

	jsonw_start_array(json_wtr);
	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
		if (!metrics[m].selected)
			continue;
		jsonw_start_object(json_wtr);
		jsonw_string_field(json_wtr, "metric", metrics[m].name);
		jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
		jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
		jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
		jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);

		jsonw_end_object(json_wtr);
	}
	jsonw_end_array(json_wtr);
}

static void profile_print_readings_plain(void)
{
	__u32 m;

	printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
		struct bpf_perf_event_value *val = &metrics[m].val;
		int r;

		if (!metrics[m].selected)
			continue;
		printf("%18llu %-20s", val->counter, metrics[m].name);

		r = metrics[m].ratio_metric - 1;
		if (r >= 0 && metrics[r].selected &&
		    metrics[r].val.counter > 0) {
			printf("# %8.2f %-30s",
			       val->counter * metrics[m].ratio_mul /
			       metrics[r].val.counter,
			       metrics[m].ratio_desc);
		} else {
			printf("%-41s", "");
		}

		if (val->enabled > val->running)
			printf("(%4.2f%%)",
			       val->running * 100.0 / val->enabled);
		printf("\n");
	}
}

static void profile_print_readings(void)
{
	if (json_output)
		profile_print_readings_json();
	else
		profile_print_readings_plain();
}

static char *profile_target_name(int tgt_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_func_info *func_info;
	const struct btf_type *t;
	char *name = NULL;
	struct btf *btf;

	info_linear = bpf_program__get_prog_info_linear(
		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		p_err("failed to get info_linear for prog FD %d", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0 ||
	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
		p_err("prog FD %d doesn't have valid btf", tgt_fd);
		goto out;
	}

	func_info = (struct bpf_func_info *)(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		p_err("btf %d doesn't have type %d",
		      info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	free(info_linear);
	return name;
}

static struct profiler_bpf *profile_obj;
static int profile_tgt_fd = -1;
static char *profile_tgt_name;
static int *profile_perf_events;
static int profile_perf_event_cnt;

static void profile_close_perf_events(struct profiler_bpf *obj)
{
	int i;

	for (i = profile_perf_event_cnt - 1; i >= 0; i--)
		close(profile_perf_events[i]);

	free(profile_perf_events);
	profile_perf_event_cnt = 0;
}

static int profile_open_perf_events(struct profiler_bpf *obj)
{
	unsigned int cpu, m;
	int map_fd, pmu_fd;

	profile_perf_events = calloc(
		sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
	if (!profile_perf_events) {
		p_err("failed to allocate memory for perf_event array: %s",
		      strerror(errno));
		return -1;
	}
	map_fd = bpf_map__fd(obj->maps.events);
	if (map_fd < 0) {
		p_err("failed to get fd for events map");
		return -1;
	}

	for (m = 0; m < ARRAY_SIZE(metrics); m++) {
		if (!metrics[m].selected)
			continue;
		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
			if (pmu_fd < 0 ||
			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
						&pmu_fd, BPF_ANY) ||
			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
				p_err("failed to create event %s on cpu %d",
				      metrics[m].name, cpu);
				return -1;
			}
			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
		}
	}
	return 0;
}

static void profile_print_and_cleanup(void)
{
	profile_close_perf_events(profile_obj);
	profile_read_values(profile_obj);
	profile_print_readings();
	profiler_bpf__destroy(profile_obj);

	close(profile_tgt_fd);
	free(profile_tgt_name);
}

static void int_exit(int signo)
{
	profile_print_and_cleanup();
	exit(0);
}

static int do_profile(int argc, char **argv)
{
	int num_metric, num_cpu, err = -1;
	struct bpf_program *prog;
	unsigned long duration;
	char *endptr;

	/* we at least need two args for the prog and one metric */
	if (!REQ_ARGS(3))
		return -EINVAL;

	/* parse target fd */
	profile_tgt_fd = prog_parse_fd(&argc, &argv);
	if (profile_tgt_fd < 0) {
		p_err("failed to parse fd");
		return -1;
	}

	/* parse profiling optional duration */
	if (argc > 2 && is_prefix(argv[0], "duration")) {
		NEXT_ARG();
		duration = strtoul(*argv, &endptr, 0);
		if (*endptr)
			usage();
		NEXT_ARG();
	} else {
		duration = UINT_MAX;
	}

	num_metric = profile_parse_metrics(argc, argv);
	if (num_metric <= 0)
		goto out;

	num_cpu = libbpf_num_possible_cpus();
	if (num_cpu <= 0) {
		p_err("failed to identify number of CPUs");
		goto out;
	}

	profile_obj = profiler_bpf__open();
	if (!profile_obj) {
		p_err("failed to open and/or load BPF object");
		goto out;
	}

	profile_obj->rodata->num_cpu = num_cpu;
	profile_obj->rodata->num_metric = num_metric;

	/* adjust map sizes */
	bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
	bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
	bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
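	/* a single slot is enough for the run counter; it is read back
	 * per-CPU in profile_read_values()
	 */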
	bpf_map__resize(profile_obj->maps.counts, 1);

	/* change target name */
	profile_tgt_name = profile_target_name(profile_tgt_fd);
	if (!profile_tgt_name)
		goto out;

	bpf_object__for_each_program(prog, profile_obj->obj) {
		err = bpf_program__set_attach_target(prog, profile_tgt_fd,
						     profile_tgt_name);
		if (err) {
			p_err("failed to set attach target\n");
			goto out;
		}
	}

	set_max_rlimit();
	err = profiler_bpf__load(profile_obj);
	if (err) {
		p_err("failed to load profile_obj");
		goto out;
	}

	err = profile_open_perf_events(profile_obj);
	if (err)
		goto out;

	err = profiler_bpf__attach(profile_obj);
	if (err) {
		p_err("failed to attach profile_obj");
		goto out;
	}
	signal(SIGINT, int_exit);

	sleep(duration);
	profile_print_and_cleanup();
	return 0;

out:
	profile_close_perf_events(profile_obj);
	if (profile_obj)
		profiler_bpf__destroy(profile_obj);
	close(profile_tgt_fd);
	free(profile_tgt_name);
	return err;
}

#endif /* BPFTOOL_WITHOUT_SKELETONS */

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %s %s { show | list } [PROG]\n"
		"       %s %s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
		"       %s %s dump jited PROG [{ file FILE | opcodes | linum }]\n"
		"       %s %s pin PROG FILE\n"
		"       %s %s { load | loadall } OBJ PATH \\\n"
		"                         [type TYPE] [dev NAME] \\\n"
		"                         [map { idx IDX | name NAME } MAP]\\\n"
		"                         [pinmaps MAP_DIR]\n"
		"       %s %s attach PROG ATTACH_TYPE [MAP]\n"
		"       %s %s detach PROG ATTACH_TYPE [MAP]\n"
		"       %s %s run PROG \\\n"
		"                         data_in FILE \\\n"
		"                         [data_out FILE [data_size_out L]] \\\n"
		"                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
		"                         [repeat N]\n"
		"       %s %s profile PROG [duration DURATION] METRICs\n"
		"       %s %s tracelog\n"
		"       %s %s help\n"
		"\n"
		"       " HELP_SPEC_MAP "\n"
		"       " HELP_SPEC_PROGRAM "\n"
		"       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
		"                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
		"                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
		"                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
		"                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
		"                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
		"                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
		"                 cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
		"                 cgroup/recvmsg6 | cgroup/getsockopt | cgroup/setsockopt |\n"
		"                 struct_ops | fentry | fexit | freplace }\n"
		"       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
		"                        flow_dissector }\n"
		"       METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
		"       " HELP_SPEC_OPTIONS "\n"
		"",
		bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
		bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
		bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
		bin_name, argv[-2], bin_name, argv[-2]);

	return 0;
}

static const struct cmd cmds[] = {
	{ "show",	do_show },
	{ "list",	do_show },
	{ "help",	do_help },
	{ "dump",	do_dump },
	{ "pin",	do_pin },
	{ "load",	do_load },
	{ "loadall",	do_loadall },
	{ "attach",	do_attach },
	{ "detach",	do_detach },
	{ "tracelog",	do_tracelog },
	{ "run",	do_run },
	{ "profile",	do_profile },
	{ 0 }
};

int do_prog(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}
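/* Illustrative invocations of the subcommands wired up above (program IDs,
 * object names and paths below are examples only, not part of this file):
 *
 *   # bpftool prog show
 *   # bpftool prog dump xlated id 10 opcodes
 *   # bpftool prog load sample.o /sys/fs/bpf/sample type xdp
 *   # bpftool prog profile id 10 duration 10 cycles instructions
 */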