// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <internal/lib.h>
#include <perf/event.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

static int machine__process_bpf_event_load(struct machine *machine,
					   union perf_event *event,
					   struct perf_sample *sample __maybe_unused)
{
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = machine->env;
	struct perf_bpil *info_linear;
	int id = event->bpf.id;
	unsigned int i;

	/* perf-record, no need to handle bpf-event */
	if (env == NULL)
		return 0;

	info_node = perf_env__find_bpf_prog_info(env, id);
	if (!info_node)
		return 0;
	info_linear = info_node->info_linear;

	for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
		u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
		u64 addr = addrs[i];
		struct map *map = maps__find(machine__kernel_maps(machine), addr);

		if (map) {
			struct dso *dso = map__dso(map);

			dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_PROG_INFO);
			dso__bpf_prog(dso)->id = id;
			dso__bpf_prog(dso)->sub_id = i;
			dso__bpf_prog(dso)->env = env;
			map__put(map);
		}
	}
	return 0;
}

int machine__process_bpf(struct machine *machine, union perf_event *event,
			 struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_bpf(event, stdout);

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		return machine__process_bpf_event_load(machine, event, sample);

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}
	return 0;
}

static int perf_env__fetch_btf(struct perf_env *env,
			       u32 btf_id,
			       struct btf *btf)
{
	struct btf_node *node;
	u32 data_size;
	const void *data;

	data = btf__raw_data(btf, &data_size);

	node = malloc(data_size + sizeof(struct btf_node));
	if (!node)
		return -1;

	node->id = btf_id;
	node->data_size = data_size;
	memcpy(node->data, data, data_size);

	if (!perf_env__insert_btf(env, node)) {
		/* Insertion failed because of a duplicate. */
		free(node);
		return -1;
	}
	return 0;
}
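
/*
 * Build the ksymbol-style display name for sub program @sub_id:
 * "bpf_prog_<tag-in-hex>" plus, when available, "_<short name>". The
 * short name comes from BTF func_info when BTF is loaded; a single
 * main program without BTF falls back to info->name, and unnamed sub
 * programs get the placeholder "F".
 */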
static int synthesize_bpf_prog_name(char *buf, int size,
				    struct bpf_prog_info *info,
				    struct btf *btf,
				    u32 sub_id)
{
	u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
	void *func_infos = (void *)(uintptr_t)(info->func_info);
	u32 sub_prog_cnt = info->nr_jited_ksyms;
	const struct bpf_func_info *finfo;
	const char *short_name = NULL;
	const struct btf_type *t;
	int name_len;

	name_len = snprintf(buf, size, "bpf_prog_");
	name_len += snprintf_hex(buf + name_len, size - name_len,
				 prog_tags[sub_id], BPF_TAG_SIZE);
	if (btf) {
		finfo = func_infos + sub_id * info->func_info_rec_size;
		t = btf__type_by_id(btf, finfo->type_id);
		short_name = btf__name_by_offset(btf, t->name_off);
	} else if (sub_id == 0 && sub_prog_cnt == 1) {
		/* no subprog */
		if (info->name[0])
			short_name = info->name;
	} else
		short_name = "F";
	if (short_name)
		name_len += snprintf(buf + name_len, size - name_len,
				     "_%s", short_name);
	return name_len;
}

#ifdef HAVE_LIBBPF_STRINGS_SUPPORT

#define BPF_METADATA_PREFIX "bpf_metadata_"
#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)

static bool name_has_bpf_metadata_prefix(const char **s)
{
	if (strncmp(*s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) != 0)
		return false;
	*s += BPF_METADATA_PREFIX_LEN;
	return true;
}

struct bpf_metadata_map {
	struct btf *btf;
	const struct btf_type *datasec;
	void *rodata;
	size_t rodata_size;
	unsigned int num_vars;
};

static int bpf_metadata_read_map_data(__u32 map_id, struct bpf_metadata_map *map)
{
	int map_fd;
	struct bpf_map_info map_info;
	__u32 map_info_len;
	int key;
	struct btf *btf;
	const struct btf_type *datasec;
	struct btf_var_secinfo *vsi;
	unsigned int vlen, vars;
	void *rodata;

	map_fd = bpf_map_get_fd_by_id(map_id);
	if (map_fd < 0)
		return -1;

	memset(&map_info, 0, sizeof(map_info));
	map_info_len = sizeof(map_info);
	if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len) < 0)
		goto out_close;

	/* If it's not an .rodata map, don't bother. */
	if (map_info.type != BPF_MAP_TYPE_ARRAY ||
	    map_info.key_size != sizeof(int) ||
	    map_info.max_entries != 1 ||
	    !map_info.btf_value_type_id ||
	    !strstr(map_info.name, ".rodata")) {
		goto out_close;
	}

	btf = btf__load_from_kernel_by_id(map_info.btf_id);
	if (!btf)
		goto out_close;
	datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
	if (!btf_is_datasec(datasec))
		goto out_free_btf;

	/*
	 * If there aren't any variables with the "bpf_metadata_" prefix,
	 * don't bother.
	 */
	vlen = btf_vlen(datasec);
	vsi = btf_var_secinfos(datasec);
	vars = 0;
	for (unsigned int i = 0; i < vlen; i++, vsi++) {
		const struct btf_type *t_var = btf__type_by_id(btf, vsi->type);
		const char *name = btf__name_by_offset(btf, t_var->name_off);

		if (name_has_bpf_metadata_prefix(&name))
			vars++;
	}
	if (vars == 0)
		goto out_free_btf;

	rodata = zalloc(map_info.value_size);
	if (!rodata)
		goto out_free_btf;
	key = 0;
	if (bpf_map_lookup_elem(map_fd, &key, rodata)) {
		free(rodata);
		goto out_free_btf;
	}
	close(map_fd);

	map->btf = btf;
	map->datasec = datasec;
	map->rodata = rodata;
	map->rodata_size = map_info.value_size;
	map->num_vars = vars;
	return 0;

out_free_btf:
	btf__free(btf);
out_close:
	close(map_fd);
	return -1;
}
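
/*
 * btf_dump__new() reports output through a vprintf-style callback.
 * format_btf_cb() collects that output into one flat buffer so a
 * metadata variable's value can be rendered as a single string;
 * buf_idx is clamped at buf_size because vsnprintf() returns the
 * length that would have been written, not what actually fit.
 */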
struct format_btf_ctx {
	char *buf;
	size_t buf_size;
	size_t buf_idx;
};

static void format_btf_cb(void *arg, const char *fmt, va_list ap)
{
	int n;
	struct format_btf_ctx *ctx = (struct format_btf_ctx *)arg;

	n = vsnprintf(ctx->buf + ctx->buf_idx, ctx->buf_size - ctx->buf_idx,
		      fmt, ap);
	ctx->buf_idx += n;
	if (ctx->buf_idx >= ctx->buf_size)
		ctx->buf_idx = ctx->buf_size;
}

static void format_btf_variable(struct btf *btf, char *buf, size_t buf_size,
				const struct btf_type *t, const void *btf_data)
{
	struct format_btf_ctx ctx = {
		.buf = buf,
		.buf_idx = 0,
		.buf_size = buf_size,
	};
	const struct btf_dump_type_data_opts opts = {
		.sz = sizeof(struct btf_dump_type_data_opts),
		.skip_names = 1,
		.compact = 1,
		.emit_strings = 1,
	};
	struct btf_dump *d;
	size_t btf_size;

	d = btf_dump__new(btf, format_btf_cb, &ctx, NULL);
	btf_size = btf__resolve_size(btf, t->type);
	btf_dump__dump_type_data(d, t->type, btf_data, btf_size, &opts);
	btf_dump__free(d);
}

static void bpf_metadata_fill_event(struct bpf_metadata_map *map,
				    struct perf_record_bpf_metadata *bpf_metadata_event)
{
	struct btf_var_secinfo *vsi;
	unsigned int i, vlen;

	memset(bpf_metadata_event->prog_name, 0, BPF_PROG_NAME_LEN);
	vlen = btf_vlen(map->datasec);
	vsi = btf_var_secinfos(map->datasec);

	for (i = 0; i < vlen; i++, vsi++) {
		const struct btf_type *t_var = btf__type_by_id(map->btf,
							       vsi->type);
		const char *name = btf__name_by_offset(map->btf,
						       t_var->name_off);
		const __u64 nr_entries = bpf_metadata_event->nr_entries;
		struct perf_record_bpf_metadata_entry *entry;

		if (!name_has_bpf_metadata_prefix(&name))
			continue;

		if (nr_entries >= (__u64)map->num_vars)
			break;

		entry = &bpf_metadata_event->entries[nr_entries];
		memset(entry, 0, sizeof(*entry));
		snprintf(entry->key, BPF_METADATA_KEY_LEN, "%s", name);
		format_btf_variable(map->btf, entry->value,
				    BPF_METADATA_VALUE_LEN, t_var,
				    map->rodata + vsi->offset);
		bpf_metadata_event->nr_entries++;
	}
}

static void bpf_metadata_free_map_data(struct bpf_metadata_map *map)
{
	btf__free(map->btf);
	free(map->rodata);
}

static struct bpf_metadata *bpf_metadata_alloc(__u32 nr_prog_tags,
					       __u32 nr_variables)
{
	struct bpf_metadata *metadata;
	size_t event_size;

	metadata = zalloc(sizeof(struct bpf_metadata));
	if (!metadata)
		return NULL;

	metadata->prog_names = zalloc(nr_prog_tags * sizeof(char *));
	if (!metadata->prog_names) {
		bpf_metadata_free(metadata);
		return NULL;
	}
	for (__u32 prog_index = 0; prog_index < nr_prog_tags; prog_index++) {
		metadata->prog_names[prog_index] = zalloc(BPF_PROG_NAME_LEN);
		if (!metadata->prog_names[prog_index]) {
			bpf_metadata_free(metadata);
			return NULL;
		}
		metadata->nr_prog_names++;
	}

	event_size = sizeof(metadata->event->bpf_metadata) +
		     nr_variables * sizeof(metadata->event->bpf_metadata.entries[0]);
	metadata->event = zalloc(event_size);
	if (!metadata->event) {
		bpf_metadata_free(metadata);
		return NULL;
	}
	metadata->event->bpf_metadata = (struct perf_record_bpf_metadata) {
		.header = {
			.type = PERF_RECORD_BPF_METADATA,
			.size = event_size,
		},
		.nr_entries = 0,
	};

	return metadata;
}
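
/*
 * Walk the program's maps looking for an .rodata map that carries
 * "bpf_metadata_"-prefixed variables. The first matching map is
 * converted into a single PERF_RECORD_BPF_METADATA event, paired with
 * one synthesized program name per program tag.
 */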
static struct bpf_metadata *bpf_metadata_create(struct bpf_prog_info *info)
{
	struct bpf_metadata *metadata;
	const __u32 *map_ids = (__u32 *)(uintptr_t)info->map_ids;

	for (__u32 map_index = 0; map_index < info->nr_map_ids; map_index++) {
		struct bpf_metadata_map map;

		if (bpf_metadata_read_map_data(map_ids[map_index], &map) != 0)
			continue;

		metadata = bpf_metadata_alloc(info->nr_prog_tags, map.num_vars);
		if (!metadata)
			continue;

		bpf_metadata_fill_event(&map, &metadata->event->bpf_metadata);

		for (__u32 index = 0; index < info->nr_prog_tags; index++) {
			synthesize_bpf_prog_name(metadata->prog_names[index],
						 BPF_PROG_NAME_LEN, info,
						 map.btf, index);
		}

		bpf_metadata_free_map_data(&map);

		return metadata;
	}

	return NULL;
}

static int synthesize_perf_record_bpf_metadata(const struct bpf_metadata *metadata,
					       const struct perf_tool *tool,
					       perf_event__handler_t process,
					       struct machine *machine)
{
	const size_t event_size = metadata->event->header.size;
	union perf_event *event;
	int err = 0;

	event = zalloc(event_size + machine->id_hdr_size);
	if (!event)
		return -1;
	memcpy(event, metadata->event, event_size);
	memset((void *)event + event->header.size, 0, machine->id_hdr_size);
	event->header.size += machine->id_hdr_size;
	for (__u32 index = 0; index < metadata->nr_prog_names; index++) {
		memcpy(event->bpf_metadata.prog_name,
		       metadata->prog_names[index], BPF_PROG_NAME_LEN);
		err = perf_tool__process_synth_event(tool, event, machine,
						     process);
		if (err != 0)
			break;
	}

	free(event);
	return err;
}

void bpf_metadata_free(struct bpf_metadata *metadata)
{
	if (metadata == NULL)
		return;
	for (__u32 index = 0; index < metadata->nr_prog_names; index++)
		free(metadata->prog_names[index]);
	free(metadata->prog_names);
	free(metadata->event);
	free(metadata);
}

#else /* HAVE_LIBBPF_STRINGS_SUPPORT */

static struct bpf_metadata *bpf_metadata_create(struct bpf_prog_info *info __maybe_unused)
{
	return NULL;
}

static int synthesize_perf_record_bpf_metadata(const struct bpf_metadata *metadata __maybe_unused,
					       const struct perf_tool *tool __maybe_unused,
					       perf_event__handler_t process __maybe_unused,
					       struct machine *machine __maybe_unused)
{
	return 0;
}

void bpf_metadata_free(struct bpf_metadata *metadata __maybe_unused)
{
}

#endif /* HAVE_LIBBPF_STRINGS_SUPPORT */
/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
	struct perf_record_bpf_event *bpf_event = &event->bpf;
	const struct perf_tool *tool = session->tool;
	struct bpf_prog_info_node *info_node;
	struct perf_bpil *info_linear;
	struct bpf_metadata *metadata;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	struct perf_env *env;
	u32 sub_prog_cnt, i;
	int err = 0;
	u64 arrays;

	/*
	 * for perf-record and perf-report use header.env;
	 * otherwise, use global perf_env.
	 */
	env = session->data ? &session->header.env : &perf_env;

	arrays = 1UL << PERF_BPIL_JITED_KSYMS;
	arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
	arrays |= 1UL << PERF_BPIL_FUNC_INFO;
	arrays |= 1UL << PERF_BPIL_PROG_TAGS;
	arrays |= 1UL << PERF_BPIL_JITED_INSNS;
	arrays |= 1UL << PERF_BPIL_LINE_INFO;
	arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;
	arrays |= 1UL << PERF_BPIL_MAP_IDS;

	info_linear = get_bpf_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		info_linear = NULL;
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		return -1;
	}

	if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		free(info_linear);
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		return -2;
	}

	info = &info_linear->info;
	if (!info->jited_ksyms) {
		free(info_linear);
		return -1;
	}

	/* number of ksyms, func_lengths, and tags should match */
	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens) {
		free(info_linear);
		return -1;
	}

	/* check BTF func info support */
	if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
		/* btf func info number should be same as sub_prog_cnt */
		if (sub_prog_cnt != info->nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			free(info_linear);
			return -1;
		}
		btf = btf__load_from_kernel_by_id(info->btf_id);
		if (libbpf_get_error(btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
			err = -1;
			goto out;
		}
		perf_env__fetch_btf(env, info->btf_id, btf);
	}

	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
		__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
		int name_len;

		*ksymbol_event = (struct perf_record_ksymbol) {
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = offsetof(struct perf_record_ksymbol, name),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};

		name_len = synthesize_bpf_prog_name(ksymbol_event->name,
						    KSYM_NAME_LEN, info, btf, i);
		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));

		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

	if (!opts->no_bpf_event) {
		/* Synthesize PERF_RECORD_BPF_EVENT */
		*bpf_event = (struct perf_record_bpf_event) {
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct perf_record_bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info->id,
		};
		memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;

		/* save bpf_prog_info to env */
		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node) {
			err = -1;
			goto out;
		}

		info_node->info_linear = info_linear;
		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
			free(info_linear);
			free(info_node);
		}
		info_linear = NULL;

		/*
		 * process after saving bpf_prog_info to env, so that
		 * required information is ready for look up
		 */
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);

		/* Synthesize PERF_RECORD_BPF_METADATA */
		metadata = bpf_metadata_create(info);
		if (metadata != NULL) {
			err = synthesize_perf_record_bpf_metadata(metadata,
								  tool, process,
								  machine);
			bpf_metadata_free(metadata);
		}
	}

out:
	free(info_linear);
	btf__free(btf);
	return err ? -1 : 0;
}
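
/*
 * BPF trampolines and dispatchers are not BPF programs and so do not
 * generate PERF_BPF_EVENT_PROG_LOAD events; their symbols are instead
 * recovered from kallsyms and reported as page-sized
 * PERF_RECORD_KSYMBOL records.
 */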
struct kallsyms_parse {
	union perf_event *event;
	perf_event__handler_t process;
	struct machine *machine;
	const struct perf_tool *tool;
};

static int
process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
{
	struct machine *machine = data->machine;
	union perf_event *event = data->event;
	struct perf_record_ksymbol *ksymbol;
	int len;

	ksymbol = &event->ksymbol;

	*ksymbol = (struct perf_record_ksymbol) {
		.header = {
			.type = PERF_RECORD_KSYMBOL,
			.size = offsetof(struct perf_record_ksymbol, name),
		},
		.addr = addr,
		.len = page_size,
		.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
		.flags = 0,
	};

	len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
	ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
	memset((void *)event + event->header.size, 0, machine->id_hdr_size);
	event->header.size += machine->id_hdr_size;

	return perf_tool__process_synth_event(data->tool, event, machine,
					      data->process);
}

static int
kallsyms_process_symbol(void *data, const char *_name,
			char type __maybe_unused, u64 start)
{
	char disp[KSYM_NAME_LEN];
	char *module, *name;
	unsigned long id;
	int err = 0;

	module = strchr(_name, '\t');
	if (!module)
		return 0;

	/* We are going after [bpf] module ... */
	if (strcmp(module + 1, "[bpf]"))
		return 0;

	name = memdup(_name, (module - _name) + 1);
	if (!name)
		return -ENOMEM;

	name[module - _name] = 0;

	/* .. and only for trampolines and dispatchers */
	if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
	    (sscanf(name, "bpf_dispatcher_%s", disp) == 1))
		err = process_bpf_image(name, start, data);

	free(name);
	return err;
}
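
/*
 * Iterate over all programs loaded in the kernel via
 * bpf_prog_get_next_id() and synthesize the records for each, then
 * walk kallsyms for trampoline/dispatcher images. ENOENT simply ends
 * the id walk; EINVAL and EPERM are treated as "kernel too old" or
 * "insufficient privileges" and are not reported as errors.
 */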
int perf_event__synthesize_bpf_events(struct perf_session *session,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	const char *kallsyms_filename = "/proc/kallsyms";
	struct kallsyms_parse arg;
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	if (opts->no_bpf_event)
		return 0;

	event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;

	/* Synthesize all the bpf programs in system. */
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(session, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}

	/* Synthesize all the bpf images - trampolines/dispatchers. */
	if (symbol_conf.kallsyms_name != NULL)
		kallsyms_filename = symbol_conf.kallsyms_name;

	arg = (struct kallsyms_parse) {
		.event   = event,
		.process = process,
		.machine = machine,
		.tool    = session->tool,
	};

	if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
		pr_err("%s: failed to synthesize bpf images: %s\n",
		       __func__, strerror(errno));
	}

	free(event);
	return err;
}
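
/*
 * Side-band counterpart of perf_event__synthesize_one_bpf_prog():
 * when the side-band thread sees a PERF_BPF_EVENT_PROG_LOAD, stash
 * the program's bpf_prog_info and BTF in the perf_env so annotation
 * can look them up after the program has been unloaded.
 */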
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
	struct bpf_prog_info_node *info_node;
	struct perf_bpil *info_linear;
	struct btf *btf = NULL;
	u64 arrays;
	u32 btf_id;
	int fd;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return;

	arrays = 1UL << PERF_BPIL_JITED_KSYMS;
	arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
	arrays |= 1UL << PERF_BPIL_FUNC_INFO;
	arrays |= 1UL << PERF_BPIL_PROG_TAGS;
	arrays |= 1UL << PERF_BPIL_JITED_INSNS;
	arrays |= 1UL << PERF_BPIL_LINE_INFO;
	arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;

	info_linear = get_bpf_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		goto out;
	}

	btf_id = info_linear->info.btf_id;

	info_node = malloc(sizeof(struct bpf_prog_info_node));
	if (info_node) {
		info_node->info_linear = info_linear;
		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
			free(info_linear);
			free(info_node);
		}
	} else
		free(info_linear);

	if (btf_id == 0)
		goto out;

	btf = btf__load_from_kernel_by_id(btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("%s: failed to get BTF of id %u, aborting\n",
			 __func__, btf_id);
		goto out;
	}
	perf_env__fetch_btf(env, btf_id, btf);

out:
	btf__free(btf);
	close(fd);
}

static int bpf_event__sb_cb(union perf_event *event, void *data)
{
	struct perf_env *env = data;

	if (event->header.type != PERF_RECORD_BPF_EVENT)
		return -1;

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		perf_env__add_bpf_info(env, event->bpf.id);

	/* fall through */
	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}

	return 0;
}

int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
	struct perf_event_attr attr = {
		.type          = PERF_TYPE_SOFTWARE,
		.config        = PERF_COUNT_SW_DUMMY,
		.sample_id_all = 1,
		.watermark     = 1,
		.bpf_event     = 1,
		.size          = sizeof(attr), /* to capture ABI version */
	};

	/*
	 * Older gcc versions don't support designated initializers, like above,
	 * for unnamed union members, such as the following:
	 */
	attr.wakeup_watermark = 1;

	return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
				      struct perf_env *env,
				      FILE *fp)
{
	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
	char name[KSYM_NAME_LEN];
	struct btf *btf = NULL;
	u32 sub_prog_cnt, i;

	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens)
		return;

	if (info->btf_id) {
		struct btf_node *node;

		node = __perf_env__find_btf(env, info->btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	if (sub_prog_cnt == 1) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
		fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
			info->id, name, prog_addrs[0], prog_lens[0]);
		goto out;
	}

	fprintf(fp, "# bpf_prog_info %u:\n", info->id);
	for (i = 0; i < sub_prog_cnt; i++) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

		fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
			i, name, prog_addrs[i], prog_lens[i]);
	}
out:
	btf__free(btf);
}