// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */

#include <errno.h>
#include <linux/err.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/perf_event.h>
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"

#define PERF_HW_CACHE_LEN 128

static struct hashmap *link_table;
static struct dump_data dd;

static const char *perf_type_name[PERF_TYPE_MAX] = {
	[PERF_TYPE_HARDWARE] = "hardware",
	[PERF_TYPE_SOFTWARE] = "software",
	[PERF_TYPE_TRACEPOINT] = "tracepoint",
	[PERF_TYPE_HW_CACHE] = "hw-cache",
	[PERF_TYPE_RAW] = "raw",
	[PERF_TYPE_BREAKPOINT] = "breakpoint",
};

const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = "cpu-cycles",
	[PERF_COUNT_HW_INSTRUCTIONS] = "instructions",
	[PERF_COUNT_HW_CACHE_REFERENCES] = "cache-references",
	[PERF_COUNT_HW_CACHE_MISSES] = "cache-misses",
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "branch-instructions",
	[PERF_COUNT_HW_BRANCH_MISSES] = "branch-misses",
	[PERF_COUNT_HW_BUS_CYCLES] = "bus-cycles",
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "stalled-cycles-frontend",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "stalled-cycles-backend",
	[PERF_COUNT_HW_REF_CPU_CYCLES] = "ref-cycles",
};

const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = "cpu-clock",
	[PERF_COUNT_SW_TASK_CLOCK] = "task-clock",
	[PERF_COUNT_SW_PAGE_FAULTS] = "page-faults",
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = "context-switches",
	[PERF_COUNT_SW_CPU_MIGRATIONS] = "cpu-migrations",
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = "minor-faults",
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = "major-faults",
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = "alignment-faults",
	[PERF_COUNT_SW_EMULATION_FAULTS] = "emulation-faults",
	[PERF_COUNT_SW_DUMMY] = "dummy",
	[PERF_COUNT_SW_BPF_OUTPUT] = "bpf-output",
	[PERF_COUNT_SW_CGROUP_SWITCHES] = "cgroup-switches",
};

const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
	[PERF_COUNT_HW_CACHE_L1D] = "L1-dcache",
	[PERF_COUNT_HW_CACHE_L1I] = "L1-icache",
	[PERF_COUNT_HW_CACHE_LL] = "LLC",
	[PERF_COUNT_HW_CACHE_DTLB] = "dTLB",
	[PERF_COUNT_HW_CACHE_ITLB] = "iTLB",
	[PERF_COUNT_HW_CACHE_BPU] = "branch",
	[PERF_COUNT_HW_CACHE_NODE] = "node",
};

const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
	[PERF_COUNT_HW_CACHE_OP_READ] = "load",
	[PERF_COUNT_HW_CACHE_OP_WRITE] = "store",
	[PERF_COUNT_HW_CACHE_OP_PREFETCH] = "prefetch",
};

const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[PERF_COUNT_HW_CACHE_RESULT_ACCESS] = "refs",
	[PERF_COUNT_HW_CACHE_RESULT_MISS] = "misses",
};

#define perf_event_name(array, id) ({			\
	const char *event_str = NULL;			\
							\
	if ((id) < ARRAY_SIZE(array))			\
		event_str = array[id];			\
	event_str;					\
})
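
/*
 * Parse a link specifier from the command line, either "id ID" or
 * "pinned PATH", and return a file descriptor for that link (negative
 * value on error).
 */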
static int link_parse_fd(int *argc, char ***argv)
{
	int fd;

	if (is_prefix(**argv, "id")) {
		unsigned int id;
		char *endptr;

		NEXT_ARGP();

		id = strtoul(**argv, &endptr, 0);
		if (*endptr) {
			p_err("can't parse %s as ID", **argv);
			return -1;
		}
		NEXT_ARGP();

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0)
			p_err("failed to get link with ID %d: %s", id, strerror(errno));
		return fd;
	} else if (is_prefix(**argv, "pinned")) {
		char *path;

		NEXT_ARGP();

		path = **argv;
		NEXT_ARGP();

		return open_obj_pinned_any(path, BPF_OBJ_LINK);
	}

	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
	return -1;
}

static void
show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	const char *link_type_str;

	jsonw_uint_field(wtr, "id", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
	if (link_type_str)
		jsonw_string_field(wtr, "type", link_type_str);
	else
		jsonw_uint_field(wtr, "type", info->type);

	jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
}

static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
{
	const char *attach_type_str;

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
	if (attach_type_str)
		jsonw_string_field(wtr, "attach_type", attach_type_str);
	else
		jsonw_uint_field(wtr, "attach_type", attach_type);
}

static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
{
	char devname[IF_NAMESIZE] = "(unknown)";

	if (ifindex)
		if_indextoname(ifindex, devname);
	else
		snprintf(devname, sizeof(devname), "(detached)");
	jsonw_string_field(wtr, "devname", devname);
	jsonw_uint_field(wtr, "ifindex", ifindex);
}

static bool is_iter_map_target(const char *target_name)
{
	return strcmp(target_name, "bpf_map_elem") == 0 ||
	       strcmp(target_name, "bpf_sk_storage_map") == 0;
}

static bool is_iter_cgroup_target(const char *target_name)
{
	return strcmp(target_name, "cgroup") == 0;
}

static const char *cgroup_order_string(__u32 order)
{
	switch (order) {
	case BPF_CGROUP_ITER_ORDER_UNSPEC:
		return "order_unspec";
	case BPF_CGROUP_ITER_SELF_ONLY:
		return "self_only";
	case BPF_CGROUP_ITER_DESCENDANTS_PRE:
		return "descendants_pre";
	case BPF_CGROUP_ITER_DESCENDANTS_POST:
		return "descendants_post";
	case BPF_CGROUP_ITER_ANCESTORS_UP:
		return "ancestors_up";
	default: /* won't happen */
		return "unknown";
	}
}

static bool is_iter_task_target(const char *target_name)
{
	return strcmp(target_name, "task") == 0 ||
	       strcmp(target_name, "task_file") == 0 ||
	       strcmp(target_name, "task_vma") == 0;
}

static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	const char *target_name = u64_to_ptr(info->iter.target_name);

	jsonw_string_field(wtr, "target_name", target_name);

	if (is_iter_map_target(target_name))
		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		if (info->iter.task.tid)
			jsonw_uint_field(wtr, "tid", info->iter.task.tid);
		else if (info->iter.task.pid)
			jsonw_uint_field(wtr, "pid", info->iter.task.pid);
	}

	if (is_iter_cgroup_target(target_name)) {
		jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
		jsonw_string_field(wtr, "order",
				   cgroup_order_string(info->iter.cgroup.order));
	}
}

void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_uint_field(json_wtr, "pf",
			 info->netfilter.pf);
	jsonw_uint_field(json_wtr, "hook",
			 info->netfilter.hooknum);
	jsonw_int_field(json_wtr, "prio",
			info->netfilter.priority);
	jsonw_uint_field(json_wtr, "flags",
			 info->netfilter.flags);
}

static int get_prog_info(int prog_id, struct bpf_prog_info *info)
{
	__u32 len = sizeof(*info);
	int err, prog_fd;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0)
		return prog_fd;

	memset(info, 0, sizeof(*info));
	err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
	if (err)
		p_err("can't get prog info: %s", strerror(errno));
	close(prog_fd);
	return err;
}

static int cmp_u64(const void *A, const void *B)
{
	const __u64 *a = A, *b = B;

	return *a - *b;
}
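
/*
 * Dump a kprobe_multi link as JSON: sort the addresses reported by the
 * kernel, then walk the loaded kallsyms mapping once to resolve each
 * address to a function name and module (null for vmlinux symbols).
 */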
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u32 i, j = 0;
	__u64 *addrs;

	jsonw_bool_field(json_wtr, "retprobe",
			 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
	jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
	jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);
	addrs = u64_to_ptr(info->kprobe_multi.addrs);
	qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);

	/* Load it once for all. */
	if (!dd.sym_count)
		kernel_syms_load(&dd);
	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != addrs[j])
			continue;
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
		jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
		/* Print null if it is vmlinux */
		if (dd.sym_mapping[i].module[0] == '\0') {
			jsonw_name(json_wtr, "module");
			jsonw_null(json_wtr);
		} else {
			jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
		}
		jsonw_end_object(json_wtr);
		if (j++ == info->kprobe_multi.count)
			break;
	}
	jsonw_end_array(json_wtr);
}

static void
show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
	jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
	jsonw_string_field(wtr, "func",
			   u64_to_ptr(info->perf_event.kprobe.func_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
	jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
}

static void
show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
	jsonw_string_field(wtr, "file",
			   u64_to_ptr(info->perf_event.uprobe.file_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
}

static void
show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_string_field(wtr, "tracepoint",
			   u64_to_ptr(info->perf_event.tracepoint.tp_name));
}

static char *perf_config_hw_cache_str(__u64 config)
{
	const char *hw_cache, *result, *op;
	char *str = malloc(PERF_HW_CACHE_LEN);

	if (!str) {
		p_err("mem alloc failed");
		return NULL;
	}

	hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
	if (hw_cache)
		snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
	else
		snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff);

	op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
	if (op)
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%s-", op);
	else
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld-", (config >> 8) & 0xff);

	result = perf_event_name(evsel__hw_cache_result, config >> 16);
	if (result)
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%s", result);
	else
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld", config >> 16);
	return str;
}

static const char *perf_config_str(__u32 type, __u64 config)
{
	const char *perf_config;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		perf_config = perf_event_name(event_symbols_hw, config);
		break;
	case PERF_TYPE_SOFTWARE:
		perf_config = perf_event_name(event_symbols_sw, config);
		break;
	case PERF_TYPE_HW_CACHE:
		perf_config = perf_config_hw_cache_str(config);
		break;
	default:
		perf_config = NULL;
		break;
	}
	return perf_config;
}

static void
show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	perf_type = perf_event_name(perf_type_name, type);
	if (perf_type)
		jsonw_string_field(wtr, "event_type", perf_type);
	else
		jsonw_uint_field(wtr, "event_type", type);

	perf_config = perf_config_str(type, config);
	if (perf_config)
		jsonw_string_field(wtr, "event_config", perf_config);
	else
		jsonw_uint_field(wtr, "event_config", config);

	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
}

static int show_link_close_json(int fd, struct bpf_link_info *info)
{
	struct bpf_prog_info prog_info;
	const char *prog_type_str;
	int err;

	jsonw_start_object(json_wtr);

	show_link_header_json(info, json_wtr);

	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		jsonw_string_field(json_wtr, "tp_name",
				   u64_to_ptr(info->raw_tracepoint.tp_name));
		break;
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);
		if (err)
			return err;

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
		if (prog_type_str)
			jsonw_string_field(json_wtr, "prog_type", prog_type_str);
		else
			jsonw_uint_field(json_wtr, "prog_type", prog_info.type);

		show_link_attach_type_json(info->tracing.attach_type,
					   json_wtr);
		jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
		jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
		break;
	case BPF_LINK_TYPE_CGROUP:
		jsonw_lluint_field(json_wtr, "cgroup_id",
				   info->cgroup.cgroup_id);
		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_ITER:
		show_iter_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_NETNS:
		jsonw_uint_field(json_wtr, "netns_ino",
				 info->netns.netns_ino);
		show_link_attach_type_json(info->netns.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_TCX:
		show_link_ifindex_json(info->tcx.ifindex, json_wtr);
		show_link_attach_type_json(info->tcx.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_NETKIT:
		show_link_ifindex_json(info->netkit.ifindex, json_wtr);
		show_link_attach_type_json(info->netkit.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_XDP:
		show_link_ifindex_json(info->xdp.ifindex, json_wtr);
		break;
	case BPF_LINK_TYPE_STRUCT_OPS:
		jsonw_uint_field(json_wtr, "map_id",
				 info->struct_ops.map_id);
		break;
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_PERF_EVENT:
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_json(info, json_wtr);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hashmap__for_each_key_entry(link_table, entry, info->id)
			jsonw_string(json_wtr, entry->pvalue);
		jsonw_end_array(json_wtr);
	}

	emit_obj_refs_json(refs_table, info->id, json_wtr);

	jsonw_end_object(json_wtr);

	return 0;
}

static void show_link_header_plain(struct bpf_link_info *info)
{
	const char *link_type_str;

	printf("%u: ", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
	if (link_type_str)
		printf("%s ", link_type_str);
	else
		printf("type %u ", info->type);

	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
		printf("map %u ", info->struct_ops.map_id);
	else
		printf("prog %u ", info->prog_id);
}

static void show_link_attach_type_plain(__u32 attach_type)
{
	const char *attach_type_str;

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
	if (attach_type_str)
		printf("attach_type %s ", attach_type_str);
	else
		printf("attach_type %u ", attach_type);
}

static void show_link_ifindex_plain(__u32 ifindex)
{
	char devname[IF_NAMESIZE * 2] = "(unknown)";
	char tmpname[IF_NAMESIZE];
	char *ret = NULL;

	if (ifindex)
		ret = if_indextoname(ifindex, tmpname);
	else
		snprintf(devname, sizeof(devname), "(detached)");
	if (ret)
		snprintf(devname, sizeof(devname), "%s(%d)",
			 tmpname, ifindex);
	printf("ifindex %s ", devname);
}

static void show_iter_plain(struct bpf_link_info *info)
{
	const char *target_name = u64_to_ptr(info->iter.target_name);

	printf("target_name %s ", target_name);

	if (is_iter_map_target(target_name))
		printf("map_id %u ", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		if (info->iter.task.tid)
			printf("tid %u ", info->iter.task.tid);
		else if (info->iter.task.pid)
			printf("pid %u ", info->iter.task.pid);
	}

	if (is_iter_cgroup_target(target_name)) {
		printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
		printf("order %s ",
		       cgroup_order_string(info->iter.cgroup.order));
	}
}

static const char * const pf2name[] = {
	[NFPROTO_INET] = "inet",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_NETDEV] = "netdev",
	[NFPROTO_BRIDGE] = "bridge",
	[NFPROTO_IPV6] = "ip6",
};

static const char * const inethook2name[] = {
	[NF_INET_PRE_ROUTING] = "prerouting",
	[NF_INET_LOCAL_IN] = "input",
	[NF_INET_FORWARD] = "forward",
	[NF_INET_LOCAL_OUT] = "output",
	[NF_INET_POST_ROUTING] = "postrouting",
};

static const char * const arphook2name[] = {
	[NF_ARP_IN] = "input",
	[NF_ARP_OUT] = "output",
};

void netfilter_dump_plain(const struct bpf_link_info *info)
{
	const char *hookname = NULL, *pfname = NULL;
	unsigned int hook = info->netfilter.hooknum;
	unsigned int pf = info->netfilter.pf;

	if (pf < ARRAY_SIZE(pf2name))
		pfname = pf2name[pf];

	switch (pf) {
	case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
	case NFPROTO_IPV4:
	case NFPROTO_IPV6:
	case NFPROTO_INET:
		if (hook < ARRAY_SIZE(inethook2name))
			hookname = inethook2name[hook];
		break;
	case NFPROTO_ARP:
		if (hook < ARRAY_SIZE(arphook2name))
			hookname = arphook2name[hook];
	default:
		break;
	}

	if (pfname)
		printf("\n\t%s", pfname);
	else
		printf("\n\tpf: %d", pf);

	if (hookname)
		printf(" %s", hookname);
	else
		printf(", hook %u,", hook);

	printf(" prio %d", info->netfilter.priority);

	if (info->netfilter.flags)
		printf(" flags 0x%x", info->netfilter.flags);
}
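
/*
 * Plain-text counterpart of show_kprobe_multi_json(): resolve the sorted
 * addresses against the kallsyms dump and print one "addr func [module]"
 * row per attached function.
 */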
static void show_kprobe_multi_plain(struct bpf_link_info *info)
{
	__u32 i, j = 0;
	__u64 *addrs;

	if (!info->kprobe_multi.count)
		return;

	if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
		printf("\n\tkretprobe.multi ");
	else
		printf("\n\tkprobe.multi ");
	printf("func_cnt %u ", info->kprobe_multi.count);
	if (info->kprobe_multi.missed)
		printf("missed %llu ", info->kprobe_multi.missed);
	addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
	qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);

	/* Load it once for all. */
	if (!dd.sym_count)
		kernel_syms_load(&dd);
	if (!dd.sym_count)
		return;

	printf("\n\t%-16s %s", "addr", "func [module]");
	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != addrs[j])
			continue;
		printf("\n\t%016lx %s",
		       dd.sym_mapping[i].address, dd.sym_mapping[i].name);
		if (dd.sym_mapping[i].module[0] != '\0')
			printf(" [%s] ", dd.sym_mapping[i].module);
		else
			printf(" ");

		if (j++ == info->kprobe_multi.count)
			break;
	}
}

static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.kprobe.func_name);
	if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
		return;

	if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
		printf("\n\tkretprobe ");
	else
		printf("\n\tkprobe ");
	if (info->perf_event.kprobe.addr)
		printf("%llx ", info->perf_event.kprobe.addr);
	printf("%s", buf);
	if (info->perf_event.kprobe.offset)
		printf("+%#x", info->perf_event.kprobe.offset);
	if (info->perf_event.kprobe.missed)
		printf(" missed %llu", info->perf_event.kprobe.missed);
	printf(" ");
}

static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.uprobe.file_name);
	if (buf[0] == '\0')
		return;

	if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
		printf("\n\turetprobe ");
	else
		printf("\n\tuprobe ");
	printf("%s+%#x ", buf, info->perf_event.uprobe.offset);
}

static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
	if (buf[0] == '\0')
		return;

	printf("\n\ttracepoint %s ", buf);
}

static void show_perf_event_event_plain(struct bpf_link_info *info)
{
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	printf("\n\tevent ");
	perf_type = perf_event_name(perf_type_name, type);
	if (perf_type)
		printf("%s:", perf_type);
	else
		printf("%u :", type);

	perf_config = perf_config_str(type, config);
	if (perf_config)
		printf("%s ", perf_config);
	else
		printf("%llu ", config);

	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
}

static int show_link_close_plain(int fd, struct bpf_link_info *info)
{
	struct bpf_prog_info prog_info;
	const char *prog_type_str;
	int err;

	show_link_header_plain(info);

	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		printf("\n\ttp '%s' ",
		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
		break;
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);
		if (err)
			return err;

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
		if (prog_type_str)
			printf("\n\tprog_type %s ", prog_type_str);
		else
			printf("\n\tprog_type %u ", prog_info.type);

		show_link_attach_type_plain(info->tracing.attach_type);
		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
			printf("\n\ttarget_obj_id %u target_btf_id %u ",
			       info->tracing.target_obj_id,
			       info->tracing.target_btf_id);
		break;
	case BPF_LINK_TYPE_CGROUP:
		printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
		show_link_attach_type_plain(info->cgroup.attach_type);
		break;
	case BPF_LINK_TYPE_ITER:
		show_iter_plain(info);
		break;
	case BPF_LINK_TYPE_NETNS:
		printf("\n\tnetns_ino %u ", info->netns.netns_ino);
		show_link_attach_type_plain(info->netns.attach_type);
		break;
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_plain(info);
		break;
	case BPF_LINK_TYPE_TCX:
		printf("\n\t");
		show_link_ifindex_plain(info->tcx.ifindex);
		show_link_attach_type_plain(info->tcx.attach_type);
		break;
	case BPF_LINK_TYPE_NETKIT:
		printf("\n\t");
		show_link_ifindex_plain(info->netkit.ifindex);
		show_link_attach_type_plain(info->netkit.attach_type);
		break;
	case BPF_LINK_TYPE_XDP:
		printf("\n\t");
		show_link_ifindex_plain(info->xdp.ifindex);
		break;
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_plain(info);
		break;
	case BPF_LINK_TYPE_PERF_EVENT:
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_plain(info);
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_plain(info);
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_plain(info);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_plain(info);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		hashmap__for_each_key_entry(link_table, entry, info->id)
			printf("\n\tpinned %s", (char *)entry->pvalue);
	}
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");

	return 0;
}
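
/*
 * Query the link info and print it. The kernel fills variable-sized fields
 * (names, address arrays) only when user space supplies a buffer, so the
 * info is fetched once to learn the link type and counts, the relevant
 * pointers are set up, and the query is retried via the "again" label.
 */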
static int do_show_link(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 *addrs = NULL;
	char buf[PATH_MAX];
	int count;
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';
again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (err) {
		p_err("can't get link info: %s",
		      strerror(errno));
		close(fd);
		return err;
	}
	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
	    !info.raw_tracepoint.tp_name) {
		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
		info.raw_tracepoint.tp_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_ITER &&
	    !info.iter.target_name) {
		info.iter.target_name = ptr_to_u64(&buf);
		info.iter.target_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
	    !info.kprobe_multi.addrs) {
		count = info.kprobe_multi.count;
		if (count) {
			addrs = calloc(count, sizeof(__u64));
			if (!addrs) {
				p_err("mem alloc failed");
				close(fd);
				return -ENOMEM;
			}
			info.kprobe_multi.addrs = ptr_to_u64(addrs);
			goto again;
		}
	}
	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
		switch (info.perf_event.type) {
		case BPF_PERF_EVENT_TRACEPOINT:
			if (!info.perf_event.tracepoint.tp_name) {
				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
				info.perf_event.tracepoint.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			if (!info.perf_event.kprobe.func_name) {
				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
				info.perf_event.kprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			if (!info.perf_event.uprobe.file_name) {
				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
				info.perf_event.uprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		default:
			break;
		}
	}

	if (json_output)
		show_link_close_json(fd, &info);
	else
		show_link_close_plain(fd, &info);

	if (addrs)
		free(addrs);
	close(fd);
	return 0;
}

static int do_show(int argc, char **argv)
{
	__u32 id = 0;
	int err, fd;

	if (show_pinned) {
		link_table = hashmap__new(hash_fn_for_key_as_id,
					  equal_fn_for_key_as_id, NULL);
		if (IS_ERR(link_table)) {
			p_err("failed to create hashmap for pinned paths");
			return -1;
		}
		build_pinned_obj_table(link_table, BPF_OBJ_LINK);
	}
	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);

	if (argc == 2) {
		fd = link_parse_fd(&argc, &argv);
		if (fd < 0)
			return fd;
		do_show_link(fd);
		goto out;
	}

	if (argc)
		return BAD_ARG();

	if (json_output)
		jsonw_start_array(json_wtr);
	while (true) {
		err = bpf_link_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT)
				break;
			p_err("can't get next link: %s%s", strerror(errno),
			      errno == EINVAL ? " -- kernel too old?" : "");
			break;
		}

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			p_err("can't get link by id (%u): %s",
			      id, strerror(errno));
			break;
		}

		err = do_show_link(fd);
		if (err)
			break;
	}
	if (json_output)
		jsonw_end_array(json_wtr);

	delete_obj_refs_table(refs_table);

	if (show_pinned)
		delete_pinned_obj_table(link_table);

out:
	if (dd.sym_count)
		kernel_syms_destroy(&dd);
	return errno == ENOENT ? 0 : -1;
}

static int do_pin(int argc, char **argv)
{
	int err;

	err = do_pin_any(argc, argv, link_parse_fd);
	if (!err && json_output)
		jsonw_null(json_wtr);
	return err;
}

static int do_detach(int argc, char **argv)
{
	int err, fd;

	if (argc != 2) {
		p_err("link specifier is invalid or missing\n");
		return 1;
	}

	fd = link_parse_fd(&argc, &argv);
	if (fd < 0)
		return 1;

	err = bpf_link_detach(fd);
	if (err)
		err = -errno;
	close(fd);
	if (err) {
		p_err("failed link detach: %s", strerror(-err));
		return 1;
	}

	if (json_output)
		jsonw_null(json_wtr);

	return 0;
}

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s { show | list } [LINK]\n"
		"       %1$s %2$s pin LINK FILE\n"
		"       %1$s %2$s detach LINK\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_LINK "\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-f|--bpffs} | {-n|--nomount} }\n"
		"",
		bin_name, argv[-2]);

	return 0;
}

static const struct cmd cmds[] = {
	{ "show",	do_show },
	{ "list",	do_show },
	{ "help",	do_help },
	{ "pin",	do_pin },
	{ "detach",	do_detach },
	{ 0 }
};

int do_link(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}