// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */

#include <errno.h>
#include <linux/err.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/perf_event.h>
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"

#define PERF_HW_CACHE_LEN 128

static struct hashmap *link_table;
static struct dump_data dd;

static const char *perf_type_name[PERF_TYPE_MAX] = {
	[PERF_TYPE_HARDWARE] = "hardware",
	[PERF_TYPE_SOFTWARE] = "software",
	[PERF_TYPE_TRACEPOINT] = "tracepoint",
	[PERF_TYPE_HW_CACHE] = "hw-cache",
	[PERF_TYPE_RAW] = "raw",
	[PERF_TYPE_BREAKPOINT] = "breakpoint",
};

const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = "cpu-cycles",
	[PERF_COUNT_HW_INSTRUCTIONS] = "instructions",
	[PERF_COUNT_HW_CACHE_REFERENCES] = "cache-references",
	[PERF_COUNT_HW_CACHE_MISSES] = "cache-misses",
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "branch-instructions",
	[PERF_COUNT_HW_BRANCH_MISSES] = "branch-misses",
	[PERF_COUNT_HW_BUS_CYCLES] = "bus-cycles",
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "stalled-cycles-frontend",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "stalled-cycles-backend",
	[PERF_COUNT_HW_REF_CPU_CYCLES] = "ref-cycles",
};

const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = "cpu-clock",
	[PERF_COUNT_SW_TASK_CLOCK] = "task-clock",
	[PERF_COUNT_SW_PAGE_FAULTS] = "page-faults",
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = "context-switches",
	[PERF_COUNT_SW_CPU_MIGRATIONS] = "cpu-migrations",
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = "minor-faults",
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = "major-faults",
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = "alignment-faults",
	[PERF_COUNT_SW_EMULATION_FAULTS] = "emulation-faults",
	[PERF_COUNT_SW_DUMMY] = "dummy",
	[PERF_COUNT_SW_BPF_OUTPUT] = "bpf-output",
	[PERF_COUNT_SW_CGROUP_SWITCHES] = "cgroup-switches",
};

const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
	[PERF_COUNT_HW_CACHE_L1D] = "L1-dcache",
	[PERF_COUNT_HW_CACHE_L1I] = "L1-icache",
	[PERF_COUNT_HW_CACHE_LL] = "LLC",
	[PERF_COUNT_HW_CACHE_DTLB] = "dTLB",
	[PERF_COUNT_HW_CACHE_ITLB] = "iTLB",
	[PERF_COUNT_HW_CACHE_BPU] = "branch",
	[PERF_COUNT_HW_CACHE_NODE] = "node",
};

const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
	[PERF_COUNT_HW_CACHE_OP_READ] = "load",
	[PERF_COUNT_HW_CACHE_OP_WRITE] = "store",
	[PERF_COUNT_HW_CACHE_OP_PREFETCH] = "prefetch",
};

const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[PERF_COUNT_HW_CACHE_RESULT_ACCESS] = "refs",
	[PERF_COUNT_HW_CACHE_RESULT_MISS] = "misses",
};

#define perf_event_name(array, id) ({			\
	const char *event_str = NULL;			\
							\
	if ((id) < ARRAY_SIZE(array))			\
		event_str = array[id];			\
	event_str;					\
})
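
/*
 * Parse a link specifier from the command line: either "id ID" or
 * "pinned PATH". On success, return an open link FD that the caller
 * must close; on failure, print an error and return a negative value.
 */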
static int link_parse_fd(int *argc, char ***argv)
{
	int fd;

	if (is_prefix(**argv, "id")) {
		unsigned int id;
		char *endptr;

		NEXT_ARGP();

		id = strtoul(**argv, &endptr, 0);
		if (*endptr) {
			p_err("can't parse %s as ID", **argv);
			return -1;
		}
		NEXT_ARGP();

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0)
			p_err("failed to get link with ID %d: %s", id, strerror(errno));
		return fd;
	} else if (is_prefix(**argv, "pinned")) {
		char *path;

		NEXT_ARGP();

		path = **argv;
		NEXT_ARGP();

		return open_obj_pinned_any(path, BPF_OBJ_LINK);
	}

	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
	return -1;
}

static void
show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	const char *link_type_str;

	jsonw_uint_field(wtr, "id", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
	if (link_type_str)
		jsonw_string_field(wtr, "type", link_type_str);
	else
		jsonw_uint_field(wtr, "type", info->type);

	jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
}

static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
{
	const char *attach_type_str;

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
	if (attach_type_str)
		jsonw_string_field(wtr, "attach_type", attach_type_str);
	else
		jsonw_uint_field(wtr, "attach_type", attach_type);
}

static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
{
	char devname[IF_NAMESIZE] = "(unknown)";

	if (ifindex)
		if_indextoname(ifindex, devname);
	else
		snprintf(devname, sizeof(devname), "(detached)");
	jsonw_string_field(wtr, "devname", devname);
	jsonw_uint_field(wtr, "ifindex", ifindex);
}

static bool is_iter_map_target(const char *target_name)
{
	return strcmp(target_name, "bpf_map_elem") == 0 ||
	       strcmp(target_name, "bpf_sk_storage_map") == 0;
}

static bool is_iter_cgroup_target(const char *target_name)
{
	return strcmp(target_name, "cgroup") == 0;
}

static const char *cgroup_order_string(__u32 order)
{
	switch (order) {
	case BPF_CGROUP_ITER_ORDER_UNSPEC:
		return "order_unspec";
	case BPF_CGROUP_ITER_SELF_ONLY:
		return "self_only";
	case BPF_CGROUP_ITER_DESCENDANTS_PRE:
		return "descendants_pre";
	case BPF_CGROUP_ITER_DESCENDANTS_POST:
		return "descendants_post";
	case BPF_CGROUP_ITER_ANCESTORS_UP:
		return "ancestors_up";
	default: /* won't happen */
		return "unknown";
	}
}

static bool is_iter_task_target(const char *target_name)
{
	return strcmp(target_name, "task") == 0 ||
	       strcmp(target_name, "task_file") == 0 ||
	       strcmp(target_name, "task_vma") == 0;
}

static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	const char *target_name = u64_to_ptr(info->iter.target_name);

	jsonw_string_field(wtr, "target_name", target_name);

	if (is_iter_map_target(target_name))
		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		if (info->iter.task.tid)
			jsonw_uint_field(wtr, "tid", info->iter.task.tid);
		else if (info->iter.task.pid)
			jsonw_uint_field(wtr, "pid", info->iter.task.pid);
	}

	if (is_iter_cgroup_target(target_name)) {
		jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
		jsonw_string_field(wtr, "order",
				   cgroup_order_string(info->iter.cgroup.order));
	}
}

void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_uint_field(json_wtr, "pf",
			 info->netfilter.pf);
	jsonw_uint_field(json_wtr, "hook",
			 info->netfilter.hooknum);
	jsonw_int_field(json_wtr, "prio",
			info->netfilter.priority);
	jsonw_uint_field(json_wtr, "flags",
			 info->netfilter.flags);
}

static int get_prog_info(int prog_id, struct bpf_prog_info *info)
{
	__u32 len = sizeof(*info);
	int err, prog_fd;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0)
		return prog_fd;

	memset(info, 0, sizeof(*info));
	err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
	if (err)
		p_err("can't get prog info: %s", strerror(errno));
	close(prog_fd);
	return err;
}

static int cmp_u64(const void *A, const void *B)
{
	const __u64 *a = A, *b = B;

	/* Three-way compare; subtracting and truncating to int could
	 * misorder addresses that differ only in the high bits.
	 */
	return *a < *b ? -1 : *a > *b;
}
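
/*
 * Dump a kprobe.multi link. The attached addresses are sorted and then
 * matched against the kernel symbol table loaded via kernel_syms_load(),
 * so each address can be printed with its function name and, when it
 * lives in a module, the module name.
 */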
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u32 i, j = 0;
	__u64 *addrs;

	jsonw_bool_field(json_wtr, "retprobe",
			 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
	jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
	jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);
	addrs = u64_to_ptr(info->kprobe_multi.addrs);
	qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);

	/* Load it once for all. */
	if (!dd.sym_count)
		kernel_syms_load(&dd);
	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != addrs[j])
			continue;
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
		jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
		/* Print null if it is vmlinux */
		if (dd.sym_mapping[i].module[0] == '\0') {
			jsonw_name(json_wtr, "module");
			jsonw_null(json_wtr);
		} else {
			jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
		}
		jsonw_end_object(json_wtr);
		/* Stop once every attached address has been resolved. */
		if (++j == info->kprobe_multi.count)
			break;
	}
	jsonw_end_array(json_wtr);
}

static __u64 *u64_to_arr(__u64 val)
{
	return (__u64 *) u64_to_ptr(val);
}

static void
show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u32 i;

	jsonw_bool_field(json_wtr, "retprobe",
			 info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN);
	jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path));
	jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count);
	jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);

	for (i = 0; i < info->uprobe_multi.count; i++) {
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "offset",
				 u64_to_arr(info->uprobe_multi.offsets)[i]);
		jsonw_uint_field(json_wtr, "ref_ctr_offset",
				 u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i]);
		jsonw_uint_field(json_wtr, "cookie",
				 u64_to_arr(info->uprobe_multi.cookies)[i]);
		jsonw_end_object(json_wtr);
	}
	jsonw_end_array(json_wtr);
}

static void
show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
	jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
	jsonw_string_field(wtr, "func",
			   u64_to_ptr(info->perf_event.kprobe.func_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
	jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
}

static void
show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
	jsonw_string_field(wtr, "file",
			   u64_to_ptr(info->perf_event.uprobe.file_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
}

static void
show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_string_field(wtr, "tracepoint",
			   u64_to_ptr(info->perf_event.tracepoint.tp_name));
}
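
/*
 * Decode a PERF_TYPE_HW_CACHE config into a "cache-op-result" string,
 * following the perf ABI layout: bits 0-7 select the cache, bits 8-15
 * the operation and bits 16-23 the result. For example, config 0x10000
 * (L1D, READ, MISS) decodes to "L1-dcache-load-misses". The returned
 * string is heap-allocated and must be freed by the caller.
 */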
static char *perf_config_hw_cache_str(__u64 config)
{
	const char *hw_cache, *result, *op;
	char *str = malloc(PERF_HW_CACHE_LEN);

	if (!str) {
		p_err("mem alloc failed");
		return NULL;
	}

	hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
	if (hw_cache)
		snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
	else
		snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff);

	op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
	if (op)
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%s-", op);
	else
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld-", (config >> 8) & 0xff);

	result = perf_event_name(evsel__hw_cache_result, config >> 16);
	if (result)
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%s", result);
	else
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld", config >> 16);
	return str;
}

static const char *perf_config_str(__u32 type, __u64 config)
{
	const char *perf_config;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		perf_config = perf_event_name(event_symbols_hw, config);
		break;
	case PERF_TYPE_SOFTWARE:
		perf_config = perf_event_name(event_symbols_sw, config);
		break;
	case PERF_TYPE_HW_CACHE:
		perf_config = perf_config_hw_cache_str(config);
		break;
	default:
		perf_config = NULL;
		break;
	}
	return perf_config;
}

static void
show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	perf_type = perf_event_name(perf_type_name, type);
	if (perf_type)
		jsonw_string_field(wtr, "event_type", perf_type);
	else
		jsonw_uint_field(wtr, "event_type", type);

	perf_config = perf_config_str(type, config);
	if (perf_config)
		jsonw_string_field(wtr, "event_config", perf_config);
	else
		jsonw_uint_field(wtr, "event_config", config);

	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
}
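
/*
 * Render a single link as a JSON object: the common header (id, type,
 * prog_id), link-type specific fields, any bpffs paths the link is
 * pinned at, and the PIDs holding a reference to it.
 */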
static int show_link_close_json(int fd, struct bpf_link_info *info)
{
	struct bpf_prog_info prog_info;
	const char *prog_type_str;
	int err;

	jsonw_start_object(json_wtr);

	show_link_header_json(info, json_wtr);

	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		jsonw_string_field(json_wtr, "tp_name",
				   u64_to_ptr(info->raw_tracepoint.tp_name));
		break;
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);
		if (err)
			return err;

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
		if (prog_type_str)
			jsonw_string_field(json_wtr, "prog_type", prog_type_str);
		else
			jsonw_uint_field(json_wtr, "prog_type", prog_info.type);

		show_link_attach_type_json(info->tracing.attach_type,
					   json_wtr);
		jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
		jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
		break;
	case BPF_LINK_TYPE_CGROUP:
		jsonw_lluint_field(json_wtr, "cgroup_id",
				   info->cgroup.cgroup_id);
		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_ITER:
		show_iter_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_NETNS:
		jsonw_uint_field(json_wtr, "netns_ino",
				 info->netns.netns_ino);
		show_link_attach_type_json(info->netns.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_TCX:
		show_link_ifindex_json(info->tcx.ifindex, json_wtr);
		show_link_attach_type_json(info->tcx.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_NETKIT:
		show_link_ifindex_json(info->netkit.ifindex, json_wtr);
		show_link_attach_type_json(info->netkit.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_XDP:
		show_link_ifindex_json(info->xdp.ifindex, json_wtr);
		break;
	case BPF_LINK_TYPE_STRUCT_OPS:
		jsonw_uint_field(json_wtr, "map_id",
				 info->struct_ops.map_id);
		break;
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_UPROBE_MULTI:
		show_uprobe_multi_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_PERF_EVENT:
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_json(info, json_wtr);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hashmap__for_each_key_entry(link_table, entry, info->id)
			jsonw_string(json_wtr, entry->pvalue);
		jsonw_end_array(json_wtr);
	}

	emit_obj_refs_json(refs_table, info->id, json_wtr);

	jsonw_end_object(json_wtr);

	return 0;
}

static void show_link_header_plain(struct bpf_link_info *info)
{
	const char *link_type_str;

	printf("%u: ", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
	if (link_type_str)
		printf("%s ", link_type_str);
	else
		printf("type %u ", info->type);

	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
		printf("map %u ", info->struct_ops.map_id);
	else
		printf("prog %u ", info->prog_id);
}

static void show_link_attach_type_plain(__u32 attach_type)
{
	const char *attach_type_str;

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
	if (attach_type_str)
		printf("attach_type %s ", attach_type_str);
	else
		printf("attach_type %u ", attach_type);
}
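
/*
 * Print the target network interface as "ifindex <name>(<index>) ".
 * An ifindex of 0 is shown as "(detached)", and an index that cannot
 * be resolved to a name falls back to "(unknown)".
 */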
static void show_link_ifindex_plain(__u32 ifindex)
{
	char devname[IF_NAMESIZE * 2] = "(unknown)";
	char tmpname[IF_NAMESIZE];
	char *ret = NULL;

	if (ifindex)
		ret = if_indextoname(ifindex, tmpname);
	else
		snprintf(devname, sizeof(devname), "(detached)");
	if (ret)
		snprintf(devname, sizeof(devname), "%s(%d)",
			 tmpname, ifindex);
	printf("ifindex %s ", devname);
}

static void show_iter_plain(struct bpf_link_info *info)
{
	const char *target_name = u64_to_ptr(info->iter.target_name);

	printf("target_name %s ", target_name);

	if (is_iter_map_target(target_name))
		printf("map_id %u ", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		if (info->iter.task.tid)
			printf("tid %u ", info->iter.task.tid);
		else if (info->iter.task.pid)
			printf("pid %u ", info->iter.task.pid);
	}

	if (is_iter_cgroup_target(target_name)) {
		printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
		printf("order %s ",
		       cgroup_order_string(info->iter.cgroup.order));
	}
}

static const char * const pf2name[] = {
	[NFPROTO_INET] = "inet",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_NETDEV] = "netdev",
	[NFPROTO_BRIDGE] = "bridge",
	[NFPROTO_IPV6] = "ip6",
};

static const char * const inethook2name[] = {
	[NF_INET_PRE_ROUTING] = "prerouting",
	[NF_INET_LOCAL_IN] = "input",
	[NF_INET_FORWARD] = "forward",
	[NF_INET_LOCAL_OUT] = "output",
	[NF_INET_POST_ROUTING] = "postrouting",
};

static const char * const arphook2name[] = {
	[NF_ARP_IN] = "input",
	[NF_ARP_OUT] = "output",
};
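
/*
 * Print a netfilter link using the protocol family and hook names from
 * the tables above (e.g. "ip prerouting"); families or hooks without a
 * known name are printed numerically instead.
 */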
void netfilter_dump_plain(const struct bpf_link_info *info)
{
	const char *hookname = NULL, *pfname = NULL;
	unsigned int hook = info->netfilter.hooknum;
	unsigned int pf = info->netfilter.pf;

	if (pf < ARRAY_SIZE(pf2name))
		pfname = pf2name[pf];

	switch (pf) {
	case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
	case NFPROTO_IPV4:
	case NFPROTO_IPV6:
	case NFPROTO_INET:
		if (hook < ARRAY_SIZE(inethook2name))
			hookname = inethook2name[hook];
		break;
	case NFPROTO_ARP:
		if (hook < ARRAY_SIZE(arphook2name))
			hookname = arphook2name[hook];
	default:
		break;
	}

	if (pfname)
		printf("\n\t%s", pfname);
	else
		printf("\n\tpf: %d", pf);

	if (hookname)
		printf(" %s", hookname);
	else
		printf(", hook %u,", hook);

	printf(" prio %d", info->netfilter.priority);

	if (info->netfilter.flags)
		printf(" flags 0x%x", info->netfilter.flags);
}

static void show_kprobe_multi_plain(struct bpf_link_info *info)
{
	__u32 i, j = 0;
	__u64 *addrs;

	if (!info->kprobe_multi.count)
		return;

	if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
		printf("\n\tkretprobe.multi ");
	else
		printf("\n\tkprobe.multi ");
	printf("func_cnt %u ", info->kprobe_multi.count);
	if (info->kprobe_multi.missed)
		printf("missed %llu ", info->kprobe_multi.missed);
	addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
	qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);

	/* Load it once for all. */
	if (!dd.sym_count)
		kernel_syms_load(&dd);
	if (!dd.sym_count)
		return;

	printf("\n\t%-16s %s", "addr", "func [module]");
	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != addrs[j])
			continue;
		printf("\n\t%016lx %s",
		       dd.sym_mapping[i].address, dd.sym_mapping[i].name);
		if (dd.sym_mapping[i].module[0] != '\0')
			printf(" [%s] ", dd.sym_mapping[i].module);
		else
			printf(" ");

		/* Stop once every attached address has been resolved. */
		if (++j == info->kprobe_multi.count)
			break;
	}
}

static void show_uprobe_multi_plain(struct bpf_link_info *info)
{
	__u32 i;

	if (!info->uprobe_multi.count)
		return;

	if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN)
		printf("\n\turetprobe.multi ");
	else
		printf("\n\tuprobe.multi ");

	printf("path %s ", (char *) u64_to_ptr(info->uprobe_multi.path));
	printf("func_cnt %u ", info->uprobe_multi.count);

	if (info->uprobe_multi.pid)
		printf("pid %d ", info->uprobe_multi.pid);

	printf("\n\t%-16s %-16s %-16s", "offset", "ref_ctr_offset", "cookies");
	for (i = 0; i < info->uprobe_multi.count; i++) {
		printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
		       u64_to_arr(info->uprobe_multi.offsets)[i],
		       u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i],
		       u64_to_arr(info->uprobe_multi.cookies)[i]);
	}
}

static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.kprobe.func_name);
	if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
		return;

	if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
		printf("\n\tkretprobe ");
	else
		printf("\n\tkprobe ");
	if (info->perf_event.kprobe.addr)
		printf("%llx ", info->perf_event.kprobe.addr);
	printf("%s", buf);
	if (info->perf_event.kprobe.offset)
		printf("+%#x", info->perf_event.kprobe.offset);
	if (info->perf_event.kprobe.missed)
		printf(" missed %llu", info->perf_event.kprobe.missed);
	printf(" ");
}

static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.uprobe.file_name);
	if (buf[0] == '\0')
		return;

	if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
		printf("\n\turetprobe ");
	else
		printf("\n\tuprobe ");
	printf("%s+%#x ", buf, info->perf_event.uprobe.offset);
}

static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
	if (buf[0] == '\0')
		return;

	printf("\n\ttracepoint %s ", buf);
}

static void show_perf_event_event_plain(struct bpf_link_info *info)
{
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	printf("\n\tevent ");
	perf_type = perf_event_name(perf_type_name, type);
	if (perf_type)
		printf("%s:", perf_type);
	else
		printf("%u :", type);

	perf_config = perf_config_str(type, config);
	if (perf_config)
		printf("%s ", perf_config);
	else
		printf("%llu ", config);

	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
}
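
/*
 * Plain-text counterpart of show_link_close_json(): one header line
 * followed by indented, link-type specific detail lines, e.g.
 * (illustrative):
 *
 *   42: tracing  prog 157
 *           prog_type tracing attach_type trace_fentry
 *           target_obj_id 1 target_btf_id 13964
 */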
static int show_link_close_plain(int fd, struct bpf_link_info *info)
{
	struct bpf_prog_info prog_info;
	const char *prog_type_str;
	int err;

	show_link_header_plain(info);

	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		printf("\n\ttp '%s' ",
		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
		break;
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);
		if (err)
			return err;

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
		if (prog_type_str)
			printf("\n\tprog_type %s ", prog_type_str);
		else
			printf("\n\tprog_type %u ", prog_info.type);

		show_link_attach_type_plain(info->tracing.attach_type);
		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
			printf("\n\ttarget_obj_id %u target_btf_id %u ",
			       info->tracing.target_obj_id,
			       info->tracing.target_btf_id);
		break;
	case BPF_LINK_TYPE_CGROUP:
		printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
		show_link_attach_type_plain(info->cgroup.attach_type);
		break;
	case BPF_LINK_TYPE_ITER:
		show_iter_plain(info);
		break;
	case BPF_LINK_TYPE_NETNS:
		printf("\n\tnetns_ino %u ", info->netns.netns_ino);
		show_link_attach_type_plain(info->netns.attach_type);
		break;
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_plain(info);
		break;
	case BPF_LINK_TYPE_TCX:
		printf("\n\t");
		show_link_ifindex_plain(info->tcx.ifindex);
		show_link_attach_type_plain(info->tcx.attach_type);
		break;
	case BPF_LINK_TYPE_NETKIT:
		printf("\n\t");
		show_link_ifindex_plain(info->netkit.ifindex);
		show_link_attach_type_plain(info->netkit.attach_type);
		break;
	case BPF_LINK_TYPE_XDP:
		printf("\n\t");
		show_link_ifindex_plain(info->xdp.ifindex);
		break;
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_plain(info);
		break;
	case BPF_LINK_TYPE_UPROBE_MULTI:
		show_uprobe_multi_plain(info);
		break;
	case BPF_LINK_TYPE_PERF_EVENT:
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_plain(info);
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_plain(info);
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_plain(info);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_plain(info);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		hashmap__for_each_key_entry(link_table, entry, info->id)
			printf("\n\tpinned %s", (char *)entry->pvalue);
	}
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");

	return 0;
}
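
/*
 * Show one link and close its FD. bpf_link_get_info_by_fd() only fills
 * in variable-length data (names, paths, address/offset arrays) when
 * the caller provides buffers, so the first query just learns the link
 * type and element counts; buffers are then allocated, their pointers
 * and sizes stored in 'info', and the query is retried ("goto again")
 * to fetch the full data.
 */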
static int do_show_link(int fd)
{
	__u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL;
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char path_buf[PATH_MAX];
	__u64 *addrs = NULL;
	char buf[PATH_MAX];
	int count;
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';
again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (err) {
		p_err("can't get link info: %s",
		      strerror(errno));
		close(fd);
		return err;
	}
	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
	    !info.raw_tracepoint.tp_name) {
		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
		info.raw_tracepoint.tp_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_ITER &&
	    !info.iter.target_name) {
		info.iter.target_name = ptr_to_u64(&buf);
		info.iter.target_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
	    !info.kprobe_multi.addrs) {
		count = info.kprobe_multi.count;
		if (count) {
			addrs = calloc(count, sizeof(__u64));
			if (!addrs) {
				p_err("mem alloc failed");
				close(fd);
				return -ENOMEM;
			}
			info.kprobe_multi.addrs = ptr_to_u64(addrs);
			goto again;
		}
	}
	if (info.type == BPF_LINK_TYPE_UPROBE_MULTI &&
	    !info.uprobe_multi.offsets) {
		count = info.uprobe_multi.count;
		if (count) {
			offsets = calloc(count, sizeof(__u64));
			if (!offsets) {
				p_err("mem alloc failed");
				close(fd);
				return -ENOMEM;
			}
			info.uprobe_multi.offsets = ptr_to_u64(offsets);
			ref_ctr_offsets = calloc(count, sizeof(__u64));
			if (!ref_ctr_offsets) {
				p_err("mem alloc failed");
				free(offsets);
				close(fd);
				return -ENOMEM;
			}
			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets);
			cookies = calloc(count, sizeof(__u64));
			if (!cookies) {
				p_err("mem alloc failed");
				free(ref_ctr_offsets);
				free(offsets);
				close(fd);
				return -ENOMEM;
			}
			info.uprobe_multi.cookies = ptr_to_u64(cookies);
			info.uprobe_multi.path = ptr_to_u64(path_buf);
			info.uprobe_multi.path_size = sizeof(path_buf);
			goto again;
		}
	}
	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
		switch (info.perf_event.type) {
		case BPF_PERF_EVENT_TRACEPOINT:
			if (!info.perf_event.tracepoint.tp_name) {
				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
				info.perf_event.tracepoint.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			if (!info.perf_event.kprobe.func_name) {
				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
				info.perf_event.kprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			if (!info.perf_event.uprobe.file_name) {
				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
				info.perf_event.uprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		default:
			break;
		}
	}

	if (json_output)
		show_link_close_json(fd, &info);
	else
		show_link_close_plain(fd, &info);

	free(ref_ctr_offsets);
	free(cookies);
	free(offsets);
	free(addrs);
	close(fd);
	return 0;
}

static int do_show(int argc, char **argv)
{
	__u32 id = 0;
	int err, fd;

	if (show_pinned) {
		link_table = hashmap__new(hash_fn_for_key_as_id,
					  equal_fn_for_key_as_id, NULL);
		if (IS_ERR(link_table)) {
			p_err("failed to create hashmap for pinned paths");
			return -1;
		}
		build_pinned_obj_table(link_table, BPF_OBJ_LINK);
	}
	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);

	if (argc == 2) {
		fd = link_parse_fd(&argc, &argv);
		if (fd < 0)
			return fd;
		do_show_link(fd);
		goto out;
	}

	if (argc)
		return BAD_ARG();

	if (json_output)
		jsonw_start_array(json_wtr);
	while (true) {
		err = bpf_link_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT)
				break;
			p_err("can't get next link: %s%s", strerror(errno),
			      errno == EINVAL ? " -- kernel too old?" : "");
			break;
		}

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			p_err("can't get link by id (%u): %s",
			      id, strerror(errno));
			break;
		}

		err = do_show_link(fd);
		if (err)
			break;
	}
	if (json_output)
		jsonw_end_array(json_wtr);

	delete_obj_refs_table(refs_table);

	if (show_pinned)
		delete_pinned_obj_table(link_table);

out:
	if (dd.sym_count)
		kernel_syms_destroy(&dd);
	return errno == ENOENT ? 0 : -1;
}

static int do_pin(int argc, char **argv)
{
	int err;

	err = do_pin_any(argc, argv, link_parse_fd);
	if (!err && json_output)
		jsonw_null(json_wtr);
	return err;
}
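
/*
 * Detach the given link from its hook via BPF_LINK_DETACH. The link
 * object itself stays around until its last FD and pin are gone; this
 * only severs the attachment.
 */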
static int do_detach(int argc, char **argv)
{
	int err, fd;

	if (argc != 2) {
		p_err("link specifier is invalid or missing\n");
		return 1;
	}

	fd = link_parse_fd(&argc, &argv);
	if (fd < 0)
		return 1;

	err = bpf_link_detach(fd);
	if (err)
		err = -errno;
	close(fd);
	if (err) {
		p_err("failed link detach: %s", strerror(-err));
		return 1;
	}

	if (json_output)
		jsonw_null(json_wtr);

	return 0;
}

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s { show | list } [LINK]\n"
		"       %1$s %2$s pin LINK FILE\n"
		"       %1$s %2$s detach LINK\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_LINK "\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-f|--bpffs} | {-n|--nomount} }\n"
		"",
		bin_name, argv[-2]);

	return 0;
}

static const struct cmd cmds[] = {
	{ "show", do_show },
	{ "list", do_show },
	{ "help", do_help },
	{ "pin", do_pin },
	{ "detach", do_detach },
	{ 0 }
};

int do_link(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}