1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 /* Copyright (c) 2018 Facebook */ 3 4 #include <endian.h> 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <string.h> 8 #include <fcntl.h> 9 #include <unistd.h> 10 #include <errno.h> 11 #include <linux/err.h> 12 #include <linux/btf.h> 13 #include <gelf.h> 14 #include "btf.h" 15 #include "bpf.h" 16 #include "libbpf.h" 17 #include "libbpf_internal.h" 18 #include "hashmap.h" 19 20 #define BTF_MAX_NR_TYPES 0x7fffffff 21 #define BTF_MAX_STR_OFFSET 0x7fffffff 22 23 static struct btf_type btf_void; 24 25 struct btf { 26 union { 27 struct btf_header *hdr; 28 void *data; 29 }; 30 struct btf_type **types; 31 const char *strings; 32 void *nohdr_data; 33 __u32 nr_types; 34 __u32 types_size; 35 __u32 data_size; 36 int fd; 37 }; 38 39 static inline __u64 ptr_to_u64(const void *ptr) 40 { 41 return (__u64) (unsigned long) ptr; 42 } 43 44 static int btf_add_type(struct btf *btf, struct btf_type *t) 45 { 46 if (btf->types_size - btf->nr_types < 2) { 47 struct btf_type **new_types; 48 __u32 expand_by, new_size; 49 50 if (btf->types_size == BTF_MAX_NR_TYPES) 51 return -E2BIG; 52 53 expand_by = max(btf->types_size >> 2, 16); 54 new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by); 55 56 new_types = realloc(btf->types, sizeof(*new_types) * new_size); 57 if (!new_types) 58 return -ENOMEM; 59 60 if (btf->nr_types == 0) 61 new_types[0] = &btf_void; 62 63 btf->types = new_types; 64 btf->types_size = new_size; 65 } 66 67 btf->types[++(btf->nr_types)] = t; 68 69 return 0; 70 } 71 72 static int btf_parse_hdr(struct btf *btf) 73 { 74 const struct btf_header *hdr = btf->hdr; 75 __u32 meta_left; 76 77 if (btf->data_size < sizeof(struct btf_header)) { 78 pr_debug("BTF header not found\n"); 79 return -EINVAL; 80 } 81 82 if (hdr->magic != BTF_MAGIC) { 83 pr_debug("Invalid BTF magic:%x\n", hdr->magic); 84 return -EINVAL; 85 } 86 87 if (hdr->version != BTF_VERSION) { 88 pr_debug("Unsupported BTF version:%u\n", hdr->version); 89 return -ENOTSUP; 90 } 91 92 if (hdr->flags) { 93 pr_debug("Unsupported BTF flags:%x\n", hdr->flags); 94 return -ENOTSUP; 95 } 96 97 meta_left = btf->data_size - sizeof(*hdr); 98 if (!meta_left) { 99 pr_debug("BTF has no data\n"); 100 return -EINVAL; 101 } 102 103 if (meta_left < hdr->type_off) { 104 pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off); 105 return -EINVAL; 106 } 107 108 if (meta_left < hdr->str_off) { 109 pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off); 110 return -EINVAL; 111 } 112 113 if (hdr->type_off >= hdr->str_off) { 114 pr_debug("BTF type section offset >= string section offset. No type?\n"); 115 return -EINVAL; 116 } 117 118 if (hdr->type_off & 0x02) { 119 pr_debug("BTF type section is not aligned to 4 bytes\n"); 120 return -EINVAL; 121 } 122 123 btf->nohdr_data = btf->hdr + 1; 124 125 return 0; 126 } 127 128 static int btf_parse_str_sec(struct btf *btf) 129 { 130 const struct btf_header *hdr = btf->hdr; 131 const char *start = btf->nohdr_data + hdr->str_off; 132 const char *end = start + btf->hdr->str_len; 133 134 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || 135 start[0] || end[-1]) { 136 pr_debug("Invalid BTF string section\n"); 137 return -EINVAL; 138 } 139 140 btf->strings = start; 141 142 return 0; 143 } 144 145 static int btf_type_size(struct btf_type *t) 146 { 147 int base_size = sizeof(struct btf_type); 148 __u16 vlen = btf_vlen(t); 149 150 switch (btf_kind(t)) { 151 case BTF_KIND_FWD: 152 case BTF_KIND_CONST: 153 case BTF_KIND_VOLATILE: 154 case BTF_KIND_RESTRICT: 155 case BTF_KIND_PTR: 156 case BTF_KIND_TYPEDEF: 157 case BTF_KIND_FUNC: 158 return base_size; 159 case BTF_KIND_INT: 160 return base_size + sizeof(__u32); 161 case BTF_KIND_ENUM: 162 return base_size + vlen * sizeof(struct btf_enum); 163 case BTF_KIND_ARRAY: 164 return base_size + sizeof(struct btf_array); 165 case BTF_KIND_STRUCT: 166 case BTF_KIND_UNION: 167 return base_size + vlen * sizeof(struct btf_member); 168 case BTF_KIND_FUNC_PROTO: 169 return base_size + vlen * sizeof(struct btf_param); 170 case BTF_KIND_VAR: 171 return base_size + sizeof(struct btf_var); 172 case BTF_KIND_DATASEC: 173 return base_size + vlen * sizeof(struct btf_var_secinfo); 174 default: 175 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); 176 return -EINVAL; 177 } 178 } 179 180 static int btf_parse_type_sec(struct btf *btf) 181 { 182 struct btf_header *hdr = btf->hdr; 183 void *nohdr_data = btf->nohdr_data; 184 void *next_type = nohdr_data + hdr->type_off; 185 void *end_type = nohdr_data + hdr->str_off; 186 187 while (next_type < end_type) { 188 struct btf_type *t = next_type; 189 int type_size; 190 int err; 191 192 type_size = btf_type_size(t); 193 if (type_size < 0) 194 return type_size; 195 next_type += type_size; 196 err = btf_add_type(btf, t); 197 if (err) 198 return err; 199 } 200 201 return 0; 202 } 203 204 __u32 btf__get_nr_types(const struct btf *btf) 205 { 206 return btf->nr_types; 207 } 208 209 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 210 { 211 if (type_id > btf->nr_types) 212 return NULL; 213 214 return btf->types[type_id]; 215 } 216 217 static bool btf_type_is_void(const struct btf_type *t) 218 { 219 return t == &btf_void || btf_is_fwd(t); 220 } 221 222 static bool btf_type_is_void_or_null(const struct btf_type *t) 223 { 224 return !t || btf_type_is_void(t); 225 } 226 227 #define MAX_RESOLVE_DEPTH 32 228 229 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) 230 { 231 const struct btf_array *array; 232 const struct btf_type *t; 233 __u32 nelems = 1; 234 __s64 size = -1; 235 int i; 236 237 t = btf__type_by_id(btf, type_id); 238 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); 239 i++) { 240 switch (btf_kind(t)) { 241 case BTF_KIND_INT: 242 case BTF_KIND_STRUCT: 243 case BTF_KIND_UNION: 244 case BTF_KIND_ENUM: 245 case BTF_KIND_DATASEC: 246 size = t->size; 247 goto done; 248 case BTF_KIND_PTR: 249 size = sizeof(void *); 250 goto done; 251 case BTF_KIND_TYPEDEF: 252 case BTF_KIND_VOLATILE: 253 case BTF_KIND_CONST: 254 case BTF_KIND_RESTRICT: 255 case BTF_KIND_VAR: 256 type_id = t->type; 257 break; 258 case BTF_KIND_ARRAY: 259 array = btf_array(t); 260 if (nelems && array->nelems > UINT32_MAX / nelems) 261 return -E2BIG; 262 nelems *= array->nelems; 263 type_id = array->type; 264 break; 265 default: 266 return -EINVAL; 267 } 268 269 t = btf__type_by_id(btf, type_id); 270 } 271 272 done: 273 if (size < 0) 274 return -EINVAL; 275 if (nelems && size > UINT32_MAX / nelems) 276 return -E2BIG; 277 278 return nelems * size; 279 } 280 281 int btf__resolve_type(const struct btf *btf, __u32 type_id) 282 { 283 const struct btf_type *t; 284 int depth = 0; 285 286 t = btf__type_by_id(btf, type_id); 287 while (depth < MAX_RESOLVE_DEPTH && 288 !btf_type_is_void_or_null(t) && 289 (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) { 290 type_id = t->type; 291 t = btf__type_by_id(btf, type_id); 292 depth++; 293 } 294 295 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) 296 return -EINVAL; 297 298 return type_id; 299 } 300 301 __s32 btf__find_by_name(const struct btf *btf, const char *type_name) 302 { 303 __u32 i; 304 305 if (!strcmp(type_name, "void")) 306 return 0; 307 308 for (i = 1; i <= btf->nr_types; i++) { 309 const struct btf_type *t = btf->types[i]; 310 const char *name = btf__name_by_offset(btf, t->name_off); 311 312 if (name && !strcmp(type_name, name)) 313 return i; 314 } 315 316 return -ENOENT; 317 } 318 319 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, 320 __u32 kind) 321 { 322 __u32 i; 323 324 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) 325 return 0; 326 327 for (i = 1; i <= btf->nr_types; i++) { 328 const struct btf_type *t = btf->types[i]; 329 const char *name; 330 331 if (btf_kind(t) != kind) 332 continue; 333 name = btf__name_by_offset(btf, t->name_off); 334 if (name && !strcmp(type_name, name)) 335 return i; 336 } 337 338 return -ENOENT; 339 } 340 341 void btf__free(struct btf *btf) 342 { 343 if (!btf) 344 return; 345 346 if (btf->fd != -1) 347 close(btf->fd); 348 349 free(btf->data); 350 free(btf->types); 351 free(btf); 352 } 353 354 struct btf *btf__new(__u8 *data, __u32 size) 355 { 356 struct btf *btf; 357 int err; 358 359 btf = calloc(1, sizeof(struct btf)); 360 if (!btf) 361 return ERR_PTR(-ENOMEM); 362 363 btf->fd = -1; 364 365 btf->data = malloc(size); 366 if (!btf->data) { 367 err = -ENOMEM; 368 goto done; 369 } 370 371 memcpy(btf->data, data, size); 372 btf->data_size = size; 373 374 err = btf_parse_hdr(btf); 375 if (err) 376 goto done; 377 378 err = btf_parse_str_sec(btf); 379 if (err) 380 goto done; 381 382 err = btf_parse_type_sec(btf); 383 384 done: 385 if (err) { 386 btf__free(btf); 387 return ERR_PTR(err); 388 } 389 390 return btf; 391 } 392 393 static bool btf_check_endianness(const GElf_Ehdr *ehdr) 394 { 395 #if __BYTE_ORDER == __LITTLE_ENDIAN 396 return ehdr->e_ident[EI_DATA] == ELFDATA2LSB; 397 #elif __BYTE_ORDER == __BIG_ENDIAN 398 return ehdr->e_ident[EI_DATA] == ELFDATA2MSB; 399 #else 400 # error "Unrecognized __BYTE_ORDER__" 401 #endif 402 } 403 404 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) 405 { 406 Elf_Data *btf_data = NULL, *btf_ext_data = NULL; 407 int err = 0, fd = -1, idx = 0; 408 struct btf *btf = NULL; 409 Elf_Scn *scn = NULL; 410 Elf *elf = NULL; 411 GElf_Ehdr ehdr; 412 413 if (elf_version(EV_CURRENT) == EV_NONE) { 414 pr_warn("failed to init libelf for %s\n", path); 415 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 416 } 417 418 fd = open(path, O_RDONLY); 419 if (fd < 0) { 420 err = -errno; 421 pr_warn("failed to open %s: %s\n", path, strerror(errno)); 422 return ERR_PTR(err); 423 } 424 425 err = -LIBBPF_ERRNO__FORMAT; 426 427 elf = elf_begin(fd, ELF_C_READ, NULL); 428 if (!elf) { 429 pr_warn("failed to open %s as ELF file\n", path); 430 goto done; 431 } 432 if (!gelf_getehdr(elf, &ehdr)) { 433 pr_warn("failed to get EHDR from %s\n", path); 434 goto done; 435 } 436 if (!btf_check_endianness(&ehdr)) { 437 pr_warn("non-native ELF endianness is not supported\n"); 438 goto done; 439 } 440 if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) { 441 pr_warn("failed to get e_shstrndx from %s\n", path); 442 goto done; 443 } 444 445 while ((scn = elf_nextscn(elf, scn)) != NULL) { 446 GElf_Shdr sh; 447 char *name; 448 449 idx++; 450 if (gelf_getshdr(scn, &sh) != &sh) { 451 pr_warn("failed to get section(%d) header from %s\n", 452 idx, path); 453 goto done; 454 } 455 name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); 456 if (!name) { 457 pr_warn("failed to get section(%d) name from %s\n", 458 idx, path); 459 goto done; 460 } 461 if (strcmp(name, BTF_ELF_SEC) == 0) { 462 btf_data = elf_getdata(scn, 0); 463 if (!btf_data) { 464 pr_warn("failed to get section(%d, %s) data from %s\n", 465 idx, name, path); 466 goto done; 467 } 468 continue; 469 } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { 470 btf_ext_data = elf_getdata(scn, 0); 471 if (!btf_ext_data) { 472 pr_warn("failed to get section(%d, %s) data from %s\n", 473 idx, name, path); 474 goto done; 475 } 476 continue; 477 } 478 } 479 480 err = 0; 481 482 if (!btf_data) { 483 err = -ENOENT; 484 goto done; 485 } 486 btf = btf__new(btf_data->d_buf, btf_data->d_size); 487 if (IS_ERR(btf)) 488 goto done; 489 490 if (btf_ext && btf_ext_data) { 491 *btf_ext = btf_ext__new(btf_ext_data->d_buf, 492 btf_ext_data->d_size); 493 if (IS_ERR(*btf_ext)) 494 goto done; 495 } else if (btf_ext) { 496 *btf_ext = NULL; 497 } 498 done: 499 if (elf) 500 elf_end(elf); 501 close(fd); 502 503 if (err) 504 return ERR_PTR(err); 505 /* 506 * btf is always parsed before btf_ext, so no need to clean up 507 * btf_ext, if btf loading failed 508 */ 509 if (IS_ERR(btf)) 510 return btf; 511 if (btf_ext && IS_ERR(*btf_ext)) { 512 btf__free(btf); 513 err = PTR_ERR(*btf_ext); 514 return ERR_PTR(err); 515 } 516 return btf; 517 } 518 519 static int compare_vsi_off(const void *_a, const void *_b) 520 { 521 const struct btf_var_secinfo *a = _a; 522 const struct btf_var_secinfo *b = _b; 523 524 return a->offset - b->offset; 525 } 526 527 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, 528 struct btf_type *t) 529 { 530 __u32 size = 0, off = 0, i, vars = btf_vlen(t); 531 const char *name = btf__name_by_offset(btf, t->name_off); 532 const struct btf_type *t_var; 533 struct btf_var_secinfo *vsi; 534 const struct btf_var *var; 535 int ret; 536 537 if (!name) { 538 pr_debug("No name found in string section for DATASEC kind.\n"); 539 return -ENOENT; 540 } 541 542 ret = bpf_object__section_size(obj, name, &size); 543 if (ret || !size || (t->size && t->size != size)) { 544 pr_debug("Invalid size for section %s: %u bytes\n", name, size); 545 return -ENOENT; 546 } 547 548 t->size = size; 549 550 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { 551 t_var = btf__type_by_id(btf, vsi->type); 552 var = btf_var(t_var); 553 554 if (!btf_is_var(t_var)) { 555 pr_debug("Non-VAR type seen in section %s\n", name); 556 return -EINVAL; 557 } 558 559 if (var->linkage == BTF_VAR_STATIC) 560 continue; 561 562 name = btf__name_by_offset(btf, t_var->name_off); 563 if (!name) { 564 pr_debug("No name found in string section for VAR kind\n"); 565 return -ENOENT; 566 } 567 568 ret = bpf_object__variable_offset(obj, name, &off); 569 if (ret) { 570 pr_debug("No offset found in symbol table for VAR %s\n", 571 name); 572 return -ENOENT; 573 } 574 575 vsi->offset = off; 576 } 577 578 qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off); 579 return 0; 580 } 581 582 int btf__finalize_data(struct bpf_object *obj, struct btf *btf) 583 { 584 int err = 0; 585 __u32 i; 586 587 for (i = 1; i <= btf->nr_types; i++) { 588 struct btf_type *t = btf->types[i]; 589 590 /* Loader needs to fix up some of the things compiler 591 * couldn't get its hands on while emitting BTF. This 592 * is section size and global variable offset. We use 593 * the info from the ELF itself for this purpose. 594 */ 595 if (btf_is_datasec(t)) { 596 err = btf_fixup_datasec(obj, btf, t); 597 if (err) 598 break; 599 } 600 } 601 602 return err; 603 } 604 605 int btf__load(struct btf *btf) 606 { 607 __u32 log_buf_size = BPF_LOG_BUF_SIZE; 608 char *log_buf = NULL; 609 int err = 0; 610 611 if (btf->fd >= 0) 612 return -EEXIST; 613 614 log_buf = malloc(log_buf_size); 615 if (!log_buf) 616 return -ENOMEM; 617 618 *log_buf = 0; 619 620 btf->fd = bpf_load_btf(btf->data, btf->data_size, 621 log_buf, log_buf_size, false); 622 if (btf->fd < 0) { 623 err = -errno; 624 pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno); 625 if (*log_buf) 626 pr_warn("%s\n", log_buf); 627 goto done; 628 } 629 630 done: 631 free(log_buf); 632 return err; 633 } 634 635 int btf__fd(const struct btf *btf) 636 { 637 return btf->fd; 638 } 639 640 const void *btf__get_raw_data(const struct btf *btf, __u32 *size) 641 { 642 *size = btf->data_size; 643 return btf->data; 644 } 645 646 const char *btf__name_by_offset(const struct btf *btf, __u32 offset) 647 { 648 if (offset < btf->hdr->str_len) 649 return &btf->strings[offset]; 650 else 651 return NULL; 652 } 653 654 int btf__get_from_id(__u32 id, struct btf **btf) 655 { 656 struct bpf_btf_info btf_info = { 0 }; 657 __u32 len = sizeof(btf_info); 658 __u32 last_size; 659 int btf_fd; 660 void *ptr; 661 int err; 662 663 err = 0; 664 *btf = NULL; 665 btf_fd = bpf_btf_get_fd_by_id(id); 666 if (btf_fd < 0) 667 return 0; 668 669 /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so 670 * let's start with a sane default - 4KiB here - and resize it only if 671 * bpf_obj_get_info_by_fd() needs a bigger buffer. 672 */ 673 btf_info.btf_size = 4096; 674 last_size = btf_info.btf_size; 675 ptr = malloc(last_size); 676 if (!ptr) { 677 err = -ENOMEM; 678 goto exit_free; 679 } 680 681 memset(ptr, 0, last_size); 682 btf_info.btf = ptr_to_u64(ptr); 683 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); 684 685 if (!err && btf_info.btf_size > last_size) { 686 void *temp_ptr; 687 688 last_size = btf_info.btf_size; 689 temp_ptr = realloc(ptr, last_size); 690 if (!temp_ptr) { 691 err = -ENOMEM; 692 goto exit_free; 693 } 694 ptr = temp_ptr; 695 memset(ptr, 0, last_size); 696 btf_info.btf = ptr_to_u64(ptr); 697 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); 698 } 699 700 if (err || btf_info.btf_size > last_size) { 701 err = errno; 702 goto exit_free; 703 } 704 705 *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size); 706 if (IS_ERR(*btf)) { 707 err = PTR_ERR(*btf); 708 *btf = NULL; 709 } 710 711 exit_free: 712 close(btf_fd); 713 free(ptr); 714 715 return err; 716 } 717 718 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, 719 __u32 expected_key_size, __u32 expected_value_size, 720 __u32 *key_type_id, __u32 *value_type_id) 721 { 722 const struct btf_type *container_type; 723 const struct btf_member *key, *value; 724 const size_t max_name = 256; 725 char container_name[max_name]; 726 __s64 key_size, value_size; 727 __s32 container_id; 728 729 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == 730 max_name) { 731 pr_warn("map:%s length of '____btf_map_%s' is too long\n", 732 map_name, map_name); 733 return -EINVAL; 734 } 735 736 container_id = btf__find_by_name(btf, container_name); 737 if (container_id < 0) { 738 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n", 739 map_name, container_name); 740 return container_id; 741 } 742 743 container_type = btf__type_by_id(btf, container_id); 744 if (!container_type) { 745 pr_warn("map:%s cannot find BTF type for container_id:%u\n", 746 map_name, container_id); 747 return -EINVAL; 748 } 749 750 if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) { 751 pr_warn("map:%s container_name:%s is an invalid container struct\n", 752 map_name, container_name); 753 return -EINVAL; 754 } 755 756 key = btf_members(container_type); 757 value = key + 1; 758 759 key_size = btf__resolve_size(btf, key->type); 760 if (key_size < 0) { 761 pr_warn("map:%s invalid BTF key_type_size\n", map_name); 762 return key_size; 763 } 764 765 if (expected_key_size != key_size) { 766 pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", 767 map_name, (__u32)key_size, expected_key_size); 768 return -EINVAL; 769 } 770 771 value_size = btf__resolve_size(btf, value->type); 772 if (value_size < 0) { 773 pr_warn("map:%s invalid BTF value_type_size\n", map_name); 774 return value_size; 775 } 776 777 if (expected_value_size != value_size) { 778 pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", 779 map_name, (__u32)value_size, expected_value_size); 780 return -EINVAL; 781 } 782 783 *key_type_id = key->type; 784 *value_type_id = value->type; 785 786 return 0; 787 } 788 789 struct btf_ext_sec_setup_param { 790 __u32 off; 791 __u32 len; 792 __u32 min_rec_size; 793 struct btf_ext_info *ext_info; 794 const char *desc; 795 }; 796 797 static int btf_ext_setup_info(struct btf_ext *btf_ext, 798 struct btf_ext_sec_setup_param *ext_sec) 799 { 800 const struct btf_ext_info_sec *sinfo; 801 struct btf_ext_info *ext_info; 802 __u32 info_left, record_size; 803 /* The start of the info sec (including the __u32 record_size). */ 804 void *info; 805 806 if (ext_sec->len == 0) 807 return 0; 808 809 if (ext_sec->off & 0x03) { 810 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", 811 ext_sec->desc); 812 return -EINVAL; 813 } 814 815 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; 816 info_left = ext_sec->len; 817 818 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) { 819 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", 820 ext_sec->desc, ext_sec->off, ext_sec->len); 821 return -EINVAL; 822 } 823 824 /* At least a record size */ 825 if (info_left < sizeof(__u32)) { 826 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); 827 return -EINVAL; 828 } 829 830 /* The record size needs to meet the minimum standard */ 831 record_size = *(__u32 *)info; 832 if (record_size < ext_sec->min_rec_size || 833 record_size & 0x03) { 834 pr_debug("%s section in .BTF.ext has invalid record size %u\n", 835 ext_sec->desc, record_size); 836 return -EINVAL; 837 } 838 839 sinfo = info + sizeof(__u32); 840 info_left -= sizeof(__u32); 841 842 /* If no records, return failure now so .BTF.ext won't be used. */ 843 if (!info_left) { 844 pr_debug("%s section in .BTF.ext has no records", ext_sec->desc); 845 return -EINVAL; 846 } 847 848 while (info_left) { 849 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); 850 __u64 total_record_size; 851 __u32 num_records; 852 853 if (info_left < sec_hdrlen) { 854 pr_debug("%s section header is not found in .BTF.ext\n", 855 ext_sec->desc); 856 return -EINVAL; 857 } 858 859 num_records = sinfo->num_info; 860 if (num_records == 0) { 861 pr_debug("%s section has incorrect num_records in .BTF.ext\n", 862 ext_sec->desc); 863 return -EINVAL; 864 } 865 866 total_record_size = sec_hdrlen + 867 (__u64)num_records * record_size; 868 if (info_left < total_record_size) { 869 pr_debug("%s section has incorrect num_records in .BTF.ext\n", 870 ext_sec->desc); 871 return -EINVAL; 872 } 873 874 info_left -= total_record_size; 875 sinfo = (void *)sinfo + total_record_size; 876 } 877 878 ext_info = ext_sec->ext_info; 879 ext_info->len = ext_sec->len - sizeof(__u32); 880 ext_info->rec_size = record_size; 881 ext_info->info = info + sizeof(__u32); 882 883 return 0; 884 } 885 886 static int btf_ext_setup_func_info(struct btf_ext *btf_ext) 887 { 888 struct btf_ext_sec_setup_param param = { 889 .off = btf_ext->hdr->func_info_off, 890 .len = btf_ext->hdr->func_info_len, 891 .min_rec_size = sizeof(struct bpf_func_info_min), 892 .ext_info = &btf_ext->func_info, 893 .desc = "func_info" 894 }; 895 896 return btf_ext_setup_info(btf_ext, ¶m); 897 } 898 899 static int btf_ext_setup_line_info(struct btf_ext *btf_ext) 900 { 901 struct btf_ext_sec_setup_param param = { 902 .off = btf_ext->hdr->line_info_off, 903 .len = btf_ext->hdr->line_info_len, 904 .min_rec_size = sizeof(struct bpf_line_info_min), 905 .ext_info = &btf_ext->line_info, 906 .desc = "line_info", 907 }; 908 909 return btf_ext_setup_info(btf_ext, ¶m); 910 } 911 912 static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext) 913 { 914 struct btf_ext_sec_setup_param param = { 915 .off = btf_ext->hdr->field_reloc_off, 916 .len = btf_ext->hdr->field_reloc_len, 917 .min_rec_size = sizeof(struct bpf_field_reloc), 918 .ext_info = &btf_ext->field_reloc_info, 919 .desc = "field_reloc", 920 }; 921 922 return btf_ext_setup_info(btf_ext, ¶m); 923 } 924 925 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) 926 { 927 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; 928 929 if (data_size < offsetofend(struct btf_ext_header, hdr_len) || 930 data_size < hdr->hdr_len) { 931 pr_debug("BTF.ext header not found"); 932 return -EINVAL; 933 } 934 935 if (hdr->magic != BTF_MAGIC) { 936 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); 937 return -EINVAL; 938 } 939 940 if (hdr->version != BTF_VERSION) { 941 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); 942 return -ENOTSUP; 943 } 944 945 if (hdr->flags) { 946 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); 947 return -ENOTSUP; 948 } 949 950 if (data_size == hdr->hdr_len) { 951 pr_debug("BTF.ext has no data\n"); 952 return -EINVAL; 953 } 954 955 return 0; 956 } 957 958 void btf_ext__free(struct btf_ext *btf_ext) 959 { 960 if (!btf_ext) 961 return; 962 free(btf_ext->data); 963 free(btf_ext); 964 } 965 966 struct btf_ext *btf_ext__new(__u8 *data, __u32 size) 967 { 968 struct btf_ext *btf_ext; 969 int err; 970 971 err = btf_ext_parse_hdr(data, size); 972 if (err) 973 return ERR_PTR(err); 974 975 btf_ext = calloc(1, sizeof(struct btf_ext)); 976 if (!btf_ext) 977 return ERR_PTR(-ENOMEM); 978 979 btf_ext->data_size = size; 980 btf_ext->data = malloc(size); 981 if (!btf_ext->data) { 982 err = -ENOMEM; 983 goto done; 984 } 985 memcpy(btf_ext->data, data, size); 986 987 if (btf_ext->hdr->hdr_len < 988 offsetofend(struct btf_ext_header, line_info_len)) 989 goto done; 990 err = btf_ext_setup_func_info(btf_ext); 991 if (err) 992 goto done; 993 994 err = btf_ext_setup_line_info(btf_ext); 995 if (err) 996 goto done; 997 998 if (btf_ext->hdr->hdr_len < 999 offsetofend(struct btf_ext_header, field_reloc_len)) 1000 goto done; 1001 err = btf_ext_setup_field_reloc(btf_ext); 1002 if (err) 1003 goto done; 1004 1005 done: 1006 if (err) { 1007 btf_ext__free(btf_ext); 1008 return ERR_PTR(err); 1009 } 1010 1011 return btf_ext; 1012 } 1013 1014 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size) 1015 { 1016 *size = btf_ext->data_size; 1017 return btf_ext->data; 1018 } 1019 1020 static int btf_ext_reloc_info(const struct btf *btf, 1021 const struct btf_ext_info *ext_info, 1022 const char *sec_name, __u32 insns_cnt, 1023 void **info, __u32 *cnt) 1024 { 1025 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec); 1026 __u32 i, record_size, existing_len, records_len; 1027 struct btf_ext_info_sec *sinfo; 1028 const char *info_sec_name; 1029 __u64 remain_len; 1030 void *data; 1031 1032 record_size = ext_info->rec_size; 1033 sinfo = ext_info->info; 1034 remain_len = ext_info->len; 1035 while (remain_len > 0) { 1036 records_len = sinfo->num_info * record_size; 1037 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off); 1038 if (strcmp(info_sec_name, sec_name)) { 1039 remain_len -= sec_hdrlen + records_len; 1040 sinfo = (void *)sinfo + sec_hdrlen + records_len; 1041 continue; 1042 } 1043 1044 existing_len = (*cnt) * record_size; 1045 data = realloc(*info, existing_len + records_len); 1046 if (!data) 1047 return -ENOMEM; 1048 1049 memcpy(data + existing_len, sinfo->data, records_len); 1050 /* adjust insn_off only, the rest data will be passed 1051 * to the kernel. 1052 */ 1053 for (i = 0; i < sinfo->num_info; i++) { 1054 __u32 *insn_off; 1055 1056 insn_off = data + existing_len + (i * record_size); 1057 *insn_off = *insn_off / sizeof(struct bpf_insn) + 1058 insns_cnt; 1059 } 1060 *info = data; 1061 *cnt += sinfo->num_info; 1062 return 0; 1063 } 1064 1065 return -ENOENT; 1066 } 1067 1068 int btf_ext__reloc_func_info(const struct btf *btf, 1069 const struct btf_ext *btf_ext, 1070 const char *sec_name, __u32 insns_cnt, 1071 void **func_info, __u32 *cnt) 1072 { 1073 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name, 1074 insns_cnt, func_info, cnt); 1075 } 1076 1077 int btf_ext__reloc_line_info(const struct btf *btf, 1078 const struct btf_ext *btf_ext, 1079 const char *sec_name, __u32 insns_cnt, 1080 void **line_info, __u32 *cnt) 1081 { 1082 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name, 1083 insns_cnt, line_info, cnt); 1084 } 1085 1086 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext) 1087 { 1088 return btf_ext->func_info.rec_size; 1089 } 1090 1091 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext) 1092 { 1093 return btf_ext->line_info.rec_size; 1094 } 1095 1096 struct btf_dedup; 1097 1098 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, 1099 const struct btf_dedup_opts *opts); 1100 static void btf_dedup_free(struct btf_dedup *d); 1101 static int btf_dedup_strings(struct btf_dedup *d); 1102 static int btf_dedup_prim_types(struct btf_dedup *d); 1103 static int btf_dedup_struct_types(struct btf_dedup *d); 1104 static int btf_dedup_ref_types(struct btf_dedup *d); 1105 static int btf_dedup_compact_types(struct btf_dedup *d); 1106 static int btf_dedup_remap_types(struct btf_dedup *d); 1107 1108 /* 1109 * Deduplicate BTF types and strings. 1110 * 1111 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF 1112 * section with all BTF type descriptors and string data. It overwrites that 1113 * memory in-place with deduplicated types and strings without any loss of 1114 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section 1115 * is provided, all the strings referenced from .BTF.ext section are honored 1116 * and updated to point to the right offsets after deduplication. 1117 * 1118 * If function returns with error, type/string data might be garbled and should 1119 * be discarded. 1120 * 1121 * More verbose and detailed description of both problem btf_dedup is solving, 1122 * as well as solution could be found at: 1123 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html 1124 * 1125 * Problem description and justification 1126 * ===================================== 1127 * 1128 * BTF type information is typically emitted either as a result of conversion 1129 * from DWARF to BTF or directly by compiler. In both cases, each compilation 1130 * unit contains information about a subset of all the types that are used 1131 * in an application. These subsets are frequently overlapping and contain a lot 1132 * of duplicated information when later concatenated together into a single 1133 * binary. This algorithm ensures that each unique type is represented by single 1134 * BTF type descriptor, greatly reducing resulting size of BTF data. 1135 * 1136 * Compilation unit isolation and subsequent duplication of data is not the only 1137 * problem. The same type hierarchy (e.g., struct and all the type that struct 1138 * references) in different compilation units can be represented in BTF to 1139 * various degrees of completeness (or, rather, incompleteness) due to 1140 * struct/union forward declarations. 1141 * 1142 * Let's take a look at an example, that we'll use to better understand the 1143 * problem (and solution). Suppose we have two compilation units, each using 1144 * same `struct S`, but each of them having incomplete type information about 1145 * struct's fields: 1146 * 1147 * // CU #1: 1148 * struct S; 1149 * struct A { 1150 * int a; 1151 * struct A* self; 1152 * struct S* parent; 1153 * }; 1154 * struct B; 1155 * struct S { 1156 * struct A* a_ptr; 1157 * struct B* b_ptr; 1158 * }; 1159 * 1160 * // CU #2: 1161 * struct S; 1162 * struct A; 1163 * struct B { 1164 * int b; 1165 * struct B* self; 1166 * struct S* parent; 1167 * }; 1168 * struct S { 1169 * struct A* a_ptr; 1170 * struct B* b_ptr; 1171 * }; 1172 * 1173 * In case of CU #1, BTF data will know only that `struct B` exist (but no 1174 * more), but will know the complete type information about `struct A`. While 1175 * for CU #2, it will know full type information about `struct B`, but will 1176 * only know about forward declaration of `struct A` (in BTF terms, it will 1177 * have `BTF_KIND_FWD` type descriptor with name `B`). 1178 * 1179 * This compilation unit isolation means that it's possible that there is no 1180 * single CU with complete type information describing structs `S`, `A`, and 1181 * `B`. Also, we might get tons of duplicated and redundant type information. 1182 * 1183 * Additional complication we need to keep in mind comes from the fact that 1184 * types, in general, can form graphs containing cycles, not just DAGs. 1185 * 1186 * While algorithm does deduplication, it also merges and resolves type 1187 * information (unless disabled throught `struct btf_opts`), whenever possible. 1188 * E.g., in the example above with two compilation units having partial type 1189 * information for structs `A` and `B`, the output of algorithm will emit 1190 * a single copy of each BTF type that describes structs `A`, `B`, and `S` 1191 * (as well as type information for `int` and pointers), as if they were defined 1192 * in a single compilation unit as: 1193 * 1194 * struct A { 1195 * int a; 1196 * struct A* self; 1197 * struct S* parent; 1198 * }; 1199 * struct B { 1200 * int b; 1201 * struct B* self; 1202 * struct S* parent; 1203 * }; 1204 * struct S { 1205 * struct A* a_ptr; 1206 * struct B* b_ptr; 1207 * }; 1208 * 1209 * Algorithm summary 1210 * ================= 1211 * 1212 * Algorithm completes its work in 6 separate passes: 1213 * 1214 * 1. Strings deduplication. 1215 * 2. Primitive types deduplication (int, enum, fwd). 1216 * 3. Struct/union types deduplication. 1217 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func 1218 * protos, and const/volatile/restrict modifiers). 1219 * 5. Types compaction. 1220 * 6. Types remapping. 1221 * 1222 * Algorithm determines canonical type descriptor, which is a single 1223 * representative type for each truly unique type. This canonical type is the 1224 * one that will go into final deduplicated BTF type information. For 1225 * struct/unions, it is also the type that algorithm will merge additional type 1226 * information into (while resolving FWDs), as it discovers it from data in 1227 * other CUs. Each input BTF type eventually gets either mapped to itself, if 1228 * that type is canonical, or to some other type, if that type is equivalent 1229 * and was chosen as canonical representative. This mapping is stored in 1230 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that 1231 * FWD type got resolved to. 1232 * 1233 * To facilitate fast discovery of canonical types, we also maintain canonical 1234 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash 1235 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types 1236 * that match that signature. With sufficiently good choice of type signature 1237 * hashing function, we can limit number of canonical types for each unique type 1238 * signature to a very small number, allowing to find canonical type for any 1239 * duplicated type very quickly. 1240 * 1241 * Struct/union deduplication is the most critical part and algorithm for 1242 * deduplicating structs/unions is described in greater details in comments for 1243 * `btf_dedup_is_equiv` function. 1244 */ 1245 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, 1246 const struct btf_dedup_opts *opts) 1247 { 1248 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts); 1249 int err; 1250 1251 if (IS_ERR(d)) { 1252 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); 1253 return -EINVAL; 1254 } 1255 1256 err = btf_dedup_strings(d); 1257 if (err < 0) { 1258 pr_debug("btf_dedup_strings failed:%d\n", err); 1259 goto done; 1260 } 1261 err = btf_dedup_prim_types(d); 1262 if (err < 0) { 1263 pr_debug("btf_dedup_prim_types failed:%d\n", err); 1264 goto done; 1265 } 1266 err = btf_dedup_struct_types(d); 1267 if (err < 0) { 1268 pr_debug("btf_dedup_struct_types failed:%d\n", err); 1269 goto done; 1270 } 1271 err = btf_dedup_ref_types(d); 1272 if (err < 0) { 1273 pr_debug("btf_dedup_ref_types failed:%d\n", err); 1274 goto done; 1275 } 1276 err = btf_dedup_compact_types(d); 1277 if (err < 0) { 1278 pr_debug("btf_dedup_compact_types failed:%d\n", err); 1279 goto done; 1280 } 1281 err = btf_dedup_remap_types(d); 1282 if (err < 0) { 1283 pr_debug("btf_dedup_remap_types failed:%d\n", err); 1284 goto done; 1285 } 1286 1287 done: 1288 btf_dedup_free(d); 1289 return err; 1290 } 1291 1292 #define BTF_UNPROCESSED_ID ((__u32)-1) 1293 #define BTF_IN_PROGRESS_ID ((__u32)-2) 1294 1295 struct btf_dedup { 1296 /* .BTF section to be deduped in-place */ 1297 struct btf *btf; 1298 /* 1299 * Optional .BTF.ext section. When provided, any strings referenced 1300 * from it will be taken into account when deduping strings 1301 */ 1302 struct btf_ext *btf_ext; 1303 /* 1304 * This is a map from any type's signature hash to a list of possible 1305 * canonical representative type candidates. Hash collisions are 1306 * ignored, so even types of various kinds can share same list of 1307 * candidates, which is fine because we rely on subsequent 1308 * btf_xxx_equal() checks to authoritatively verify type equality. 1309 */ 1310 struct hashmap *dedup_table; 1311 /* Canonical types map */ 1312 __u32 *map; 1313 /* Hypothetical mapping, used during type graph equivalence checks */ 1314 __u32 *hypot_map; 1315 __u32 *hypot_list; 1316 size_t hypot_cnt; 1317 size_t hypot_cap; 1318 /* Various option modifying behavior of algorithm */ 1319 struct btf_dedup_opts opts; 1320 }; 1321 1322 struct btf_str_ptr { 1323 const char *str; 1324 __u32 new_off; 1325 bool used; 1326 }; 1327 1328 struct btf_str_ptrs { 1329 struct btf_str_ptr *ptrs; 1330 const char *data; 1331 __u32 cnt; 1332 __u32 cap; 1333 }; 1334 1335 static long hash_combine(long h, long value) 1336 { 1337 return h * 31 + value; 1338 } 1339 1340 #define for_each_dedup_cand(d, node, hash) \ 1341 hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash) 1342 1343 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id) 1344 { 1345 return hashmap__append(d->dedup_table, 1346 (void *)hash, (void *)(long)type_id); 1347 } 1348 1349 static int btf_dedup_hypot_map_add(struct btf_dedup *d, 1350 __u32 from_id, __u32 to_id) 1351 { 1352 if (d->hypot_cnt == d->hypot_cap) { 1353 __u32 *new_list; 1354 1355 d->hypot_cap += max(16, d->hypot_cap / 2); 1356 new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap); 1357 if (!new_list) 1358 return -ENOMEM; 1359 d->hypot_list = new_list; 1360 } 1361 d->hypot_list[d->hypot_cnt++] = from_id; 1362 d->hypot_map[from_id] = to_id; 1363 return 0; 1364 } 1365 1366 static void btf_dedup_clear_hypot_map(struct btf_dedup *d) 1367 { 1368 int i; 1369 1370 for (i = 0; i < d->hypot_cnt; i++) 1371 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; 1372 d->hypot_cnt = 0; 1373 } 1374 1375 static void btf_dedup_free(struct btf_dedup *d) 1376 { 1377 hashmap__free(d->dedup_table); 1378 d->dedup_table = NULL; 1379 1380 free(d->map); 1381 d->map = NULL; 1382 1383 free(d->hypot_map); 1384 d->hypot_map = NULL; 1385 1386 free(d->hypot_list); 1387 d->hypot_list = NULL; 1388 1389 free(d); 1390 } 1391 1392 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx) 1393 { 1394 return (size_t)key; 1395 } 1396 1397 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx) 1398 { 1399 return 0; 1400 } 1401 1402 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx) 1403 { 1404 return k1 == k2; 1405 } 1406 1407 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, 1408 const struct btf_dedup_opts *opts) 1409 { 1410 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); 1411 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; 1412 int i, err = 0; 1413 1414 if (!d) 1415 return ERR_PTR(-ENOMEM); 1416 1417 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; 1418 /* dedup_table_size is now used only to force collisions in tests */ 1419 if (opts && opts->dedup_table_size == 1) 1420 hash_fn = btf_dedup_collision_hash_fn; 1421 1422 d->btf = btf; 1423 d->btf_ext = btf_ext; 1424 1425 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL); 1426 if (IS_ERR(d->dedup_table)) { 1427 err = PTR_ERR(d->dedup_table); 1428 d->dedup_table = NULL; 1429 goto done; 1430 } 1431 1432 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 1433 if (!d->map) { 1434 err = -ENOMEM; 1435 goto done; 1436 } 1437 /* special BTF "void" type is made canonical immediately */ 1438 d->map[0] = 0; 1439 for (i = 1; i <= btf->nr_types; i++) { 1440 struct btf_type *t = d->btf->types[i]; 1441 1442 /* VAR and DATASEC are never deduped and are self-canonical */ 1443 if (btf_is_var(t) || btf_is_datasec(t)) 1444 d->map[i] = i; 1445 else 1446 d->map[i] = BTF_UNPROCESSED_ID; 1447 } 1448 1449 d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types)); 1450 if (!d->hypot_map) { 1451 err = -ENOMEM; 1452 goto done; 1453 } 1454 for (i = 0; i <= btf->nr_types; i++) 1455 d->hypot_map[i] = BTF_UNPROCESSED_ID; 1456 1457 done: 1458 if (err) { 1459 btf_dedup_free(d); 1460 return ERR_PTR(err); 1461 } 1462 1463 return d; 1464 } 1465 1466 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx); 1467 1468 /* 1469 * Iterate over all possible places in .BTF and .BTF.ext that can reference 1470 * string and pass pointer to it to a provided callback `fn`. 1471 */ 1472 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) 1473 { 1474 void *line_data_cur, *line_data_end; 1475 int i, j, r, rec_size; 1476 struct btf_type *t; 1477 1478 for (i = 1; i <= d->btf->nr_types; i++) { 1479 t = d->btf->types[i]; 1480 r = fn(&t->name_off, ctx); 1481 if (r) 1482 return r; 1483 1484 switch (btf_kind(t)) { 1485 case BTF_KIND_STRUCT: 1486 case BTF_KIND_UNION: { 1487 struct btf_member *m = btf_members(t); 1488 __u16 vlen = btf_vlen(t); 1489 1490 for (j = 0; j < vlen; j++) { 1491 r = fn(&m->name_off, ctx); 1492 if (r) 1493 return r; 1494 m++; 1495 } 1496 break; 1497 } 1498 case BTF_KIND_ENUM: { 1499 struct btf_enum *m = btf_enum(t); 1500 __u16 vlen = btf_vlen(t); 1501 1502 for (j = 0; j < vlen; j++) { 1503 r = fn(&m->name_off, ctx); 1504 if (r) 1505 return r; 1506 m++; 1507 } 1508 break; 1509 } 1510 case BTF_KIND_FUNC_PROTO: { 1511 struct btf_param *m = btf_params(t); 1512 __u16 vlen = btf_vlen(t); 1513 1514 for (j = 0; j < vlen; j++) { 1515 r = fn(&m->name_off, ctx); 1516 if (r) 1517 return r; 1518 m++; 1519 } 1520 break; 1521 } 1522 default: 1523 break; 1524 } 1525 } 1526 1527 if (!d->btf_ext) 1528 return 0; 1529 1530 line_data_cur = d->btf_ext->line_info.info; 1531 line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len; 1532 rec_size = d->btf_ext->line_info.rec_size; 1533 1534 while (line_data_cur < line_data_end) { 1535 struct btf_ext_info_sec *sec = line_data_cur; 1536 struct bpf_line_info_min *line_info; 1537 __u32 num_info = sec->num_info; 1538 1539 r = fn(&sec->sec_name_off, ctx); 1540 if (r) 1541 return r; 1542 1543 line_data_cur += sizeof(struct btf_ext_info_sec); 1544 for (i = 0; i < num_info; i++) { 1545 line_info = line_data_cur; 1546 r = fn(&line_info->file_name_off, ctx); 1547 if (r) 1548 return r; 1549 r = fn(&line_info->line_off, ctx); 1550 if (r) 1551 return r; 1552 line_data_cur += rec_size; 1553 } 1554 } 1555 1556 return 0; 1557 } 1558 1559 static int str_sort_by_content(const void *a1, const void *a2) 1560 { 1561 const struct btf_str_ptr *p1 = a1; 1562 const struct btf_str_ptr *p2 = a2; 1563 1564 return strcmp(p1->str, p2->str); 1565 } 1566 1567 static int str_sort_by_offset(const void *a1, const void *a2) 1568 { 1569 const struct btf_str_ptr *p1 = a1; 1570 const struct btf_str_ptr *p2 = a2; 1571 1572 if (p1->str != p2->str) 1573 return p1->str < p2->str ? -1 : 1; 1574 return 0; 1575 } 1576 1577 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem) 1578 { 1579 const struct btf_str_ptr *p = pelem; 1580 1581 if (str_ptr != p->str) 1582 return (const char *)str_ptr < p->str ? -1 : 1; 1583 return 0; 1584 } 1585 1586 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx) 1587 { 1588 struct btf_str_ptrs *strs; 1589 struct btf_str_ptr *s; 1590 1591 if (*str_off_ptr == 0) 1592 return 0; 1593 1594 strs = ctx; 1595 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, 1596 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); 1597 if (!s) 1598 return -EINVAL; 1599 s->used = true; 1600 return 0; 1601 } 1602 1603 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx) 1604 { 1605 struct btf_str_ptrs *strs; 1606 struct btf_str_ptr *s; 1607 1608 if (*str_off_ptr == 0) 1609 return 0; 1610 1611 strs = ctx; 1612 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, 1613 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); 1614 if (!s) 1615 return -EINVAL; 1616 *str_off_ptr = s->new_off; 1617 return 0; 1618 } 1619 1620 /* 1621 * Dedup string and filter out those that are not referenced from either .BTF 1622 * or .BTF.ext (if provided) sections. 1623 * 1624 * This is done by building index of all strings in BTF's string section, 1625 * then iterating over all entities that can reference strings (e.g., type 1626 * names, struct field names, .BTF.ext line info, etc) and marking corresponding 1627 * strings as used. After that all used strings are deduped and compacted into 1628 * sequential blob of memory and new offsets are calculated. Then all the string 1629 * references are iterated again and rewritten using new offsets. 1630 */ 1631 static int btf_dedup_strings(struct btf_dedup *d) 1632 { 1633 const struct btf_header *hdr = d->btf->hdr; 1634 char *start = (char *)d->btf->nohdr_data + hdr->str_off; 1635 char *end = start + d->btf->hdr->str_len; 1636 char *p = start, *tmp_strs = NULL; 1637 struct btf_str_ptrs strs = { 1638 .cnt = 0, 1639 .cap = 0, 1640 .ptrs = NULL, 1641 .data = start, 1642 }; 1643 int i, j, err = 0, grp_idx; 1644 bool grp_used; 1645 1646 /* build index of all strings */ 1647 while (p < end) { 1648 if (strs.cnt + 1 > strs.cap) { 1649 struct btf_str_ptr *new_ptrs; 1650 1651 strs.cap += max(strs.cnt / 2, 16); 1652 new_ptrs = realloc(strs.ptrs, 1653 sizeof(strs.ptrs[0]) * strs.cap); 1654 if (!new_ptrs) { 1655 err = -ENOMEM; 1656 goto done; 1657 } 1658 strs.ptrs = new_ptrs; 1659 } 1660 1661 strs.ptrs[strs.cnt].str = p; 1662 strs.ptrs[strs.cnt].used = false; 1663 1664 p += strlen(p) + 1; 1665 strs.cnt++; 1666 } 1667 1668 /* temporary storage for deduplicated strings */ 1669 tmp_strs = malloc(d->btf->hdr->str_len); 1670 if (!tmp_strs) { 1671 err = -ENOMEM; 1672 goto done; 1673 } 1674 1675 /* mark all used strings */ 1676 strs.ptrs[0].used = true; 1677 err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs); 1678 if (err) 1679 goto done; 1680 1681 /* sort strings by context, so that we can identify duplicates */ 1682 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content); 1683 1684 /* 1685 * iterate groups of equal strings and if any instance in a group was 1686 * referenced, emit single instance and remember new offset 1687 */ 1688 p = tmp_strs; 1689 grp_idx = 0; 1690 grp_used = strs.ptrs[0].used; 1691 /* iterate past end to avoid code duplication after loop */ 1692 for (i = 1; i <= strs.cnt; i++) { 1693 /* 1694 * when i == strs.cnt, we want to skip string comparison and go 1695 * straight to handling last group of strings (otherwise we'd 1696 * need to handle last group after the loop w/ duplicated code) 1697 */ 1698 if (i < strs.cnt && 1699 !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) { 1700 grp_used = grp_used || strs.ptrs[i].used; 1701 continue; 1702 } 1703 1704 /* 1705 * this check would have been required after the loop to handle 1706 * last group of strings, but due to <= condition in a loop 1707 * we avoid that duplication 1708 */ 1709 if (grp_used) { 1710 int new_off = p - tmp_strs; 1711 __u32 len = strlen(strs.ptrs[grp_idx].str); 1712 1713 memmove(p, strs.ptrs[grp_idx].str, len + 1); 1714 for (j = grp_idx; j < i; j++) 1715 strs.ptrs[j].new_off = new_off; 1716 p += len + 1; 1717 } 1718 1719 if (i < strs.cnt) { 1720 grp_idx = i; 1721 grp_used = strs.ptrs[i].used; 1722 } 1723 } 1724 1725 /* replace original strings with deduped ones */ 1726 d->btf->hdr->str_len = p - tmp_strs; 1727 memmove(start, tmp_strs, d->btf->hdr->str_len); 1728 end = start + d->btf->hdr->str_len; 1729 1730 /* restore original order for further binary search lookups */ 1731 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset); 1732 1733 /* remap string offsets */ 1734 err = btf_for_each_str_off(d, btf_str_remap_offset, &strs); 1735 if (err) 1736 goto done; 1737 1738 d->btf->hdr->str_len = end - start; 1739 1740 done: 1741 free(tmp_strs); 1742 free(strs.ptrs); 1743 return err; 1744 } 1745 1746 static long btf_hash_common(struct btf_type *t) 1747 { 1748 long h; 1749 1750 h = hash_combine(0, t->name_off); 1751 h = hash_combine(h, t->info); 1752 h = hash_combine(h, t->size); 1753 return h; 1754 } 1755 1756 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) 1757 { 1758 return t1->name_off == t2->name_off && 1759 t1->info == t2->info && 1760 t1->size == t2->size; 1761 } 1762 1763 /* Calculate type signature hash of INT. */ 1764 static long btf_hash_int(struct btf_type *t) 1765 { 1766 __u32 info = *(__u32 *)(t + 1); 1767 long h; 1768 1769 h = btf_hash_common(t); 1770 h = hash_combine(h, info); 1771 return h; 1772 } 1773 1774 /* Check structural equality of two INTs. */ 1775 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) 1776 { 1777 __u32 info1, info2; 1778 1779 if (!btf_equal_common(t1, t2)) 1780 return false; 1781 info1 = *(__u32 *)(t1 + 1); 1782 info2 = *(__u32 *)(t2 + 1); 1783 return info1 == info2; 1784 } 1785 1786 /* Calculate type signature hash of ENUM. */ 1787 static long btf_hash_enum(struct btf_type *t) 1788 { 1789 long h; 1790 1791 /* don't hash vlen and enum members to support enum fwd resolving */ 1792 h = hash_combine(0, t->name_off); 1793 h = hash_combine(h, t->info & ~0xffff); 1794 h = hash_combine(h, t->size); 1795 return h; 1796 } 1797 1798 /* Check structural equality of two ENUMs. */ 1799 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) 1800 { 1801 const struct btf_enum *m1, *m2; 1802 __u16 vlen; 1803 int i; 1804 1805 if (!btf_equal_common(t1, t2)) 1806 return false; 1807 1808 vlen = btf_vlen(t1); 1809 m1 = btf_enum(t1); 1810 m2 = btf_enum(t2); 1811 for (i = 0; i < vlen; i++) { 1812 if (m1->name_off != m2->name_off || m1->val != m2->val) 1813 return false; 1814 m1++; 1815 m2++; 1816 } 1817 return true; 1818 } 1819 1820 static inline bool btf_is_enum_fwd(struct btf_type *t) 1821 { 1822 return btf_is_enum(t) && btf_vlen(t) == 0; 1823 } 1824 1825 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) 1826 { 1827 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) 1828 return btf_equal_enum(t1, t2); 1829 /* ignore vlen when comparing */ 1830 return t1->name_off == t2->name_off && 1831 (t1->info & ~0xffff) == (t2->info & ~0xffff) && 1832 t1->size == t2->size; 1833 } 1834 1835 /* 1836 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, 1837 * as referenced type IDs equivalence is established separately during type 1838 * graph equivalence check algorithm. 1839 */ 1840 static long btf_hash_struct(struct btf_type *t) 1841 { 1842 const struct btf_member *member = btf_members(t); 1843 __u32 vlen = btf_vlen(t); 1844 long h = btf_hash_common(t); 1845 int i; 1846 1847 for (i = 0; i < vlen; i++) { 1848 h = hash_combine(h, member->name_off); 1849 h = hash_combine(h, member->offset); 1850 /* no hashing of referenced type ID, it can be unresolved yet */ 1851 member++; 1852 } 1853 return h; 1854 } 1855 1856 /* 1857 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type 1858 * IDs. This check is performed during type graph equivalence check and 1859 * referenced types equivalence is checked separately. 1860 */ 1861 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) 1862 { 1863 const struct btf_member *m1, *m2; 1864 __u16 vlen; 1865 int i; 1866 1867 if (!btf_equal_common(t1, t2)) 1868 return false; 1869 1870 vlen = btf_vlen(t1); 1871 m1 = btf_members(t1); 1872 m2 = btf_members(t2); 1873 for (i = 0; i < vlen; i++) { 1874 if (m1->name_off != m2->name_off || m1->offset != m2->offset) 1875 return false; 1876 m1++; 1877 m2++; 1878 } 1879 return true; 1880 } 1881 1882 /* 1883 * Calculate type signature hash of ARRAY, including referenced type IDs, 1884 * under assumption that they were already resolved to canonical type IDs and 1885 * are not going to change. 1886 */ 1887 static long btf_hash_array(struct btf_type *t) 1888 { 1889 const struct btf_array *info = btf_array(t); 1890 long h = btf_hash_common(t); 1891 1892 h = hash_combine(h, info->type); 1893 h = hash_combine(h, info->index_type); 1894 h = hash_combine(h, info->nelems); 1895 return h; 1896 } 1897 1898 /* 1899 * Check exact equality of two ARRAYs, taking into account referenced 1900 * type IDs, under assumption that they were already resolved to canonical 1901 * type IDs and are not going to change. 1902 * This function is called during reference types deduplication to compare 1903 * ARRAY to potential canonical representative. 1904 */ 1905 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) 1906 { 1907 const struct btf_array *info1, *info2; 1908 1909 if (!btf_equal_common(t1, t2)) 1910 return false; 1911 1912 info1 = btf_array(t1); 1913 info2 = btf_array(t2); 1914 return info1->type == info2->type && 1915 info1->index_type == info2->index_type && 1916 info1->nelems == info2->nelems; 1917 } 1918 1919 /* 1920 * Check structural compatibility of two ARRAYs, ignoring referenced type 1921 * IDs. This check is performed during type graph equivalence check and 1922 * referenced types equivalence is checked separately. 1923 */ 1924 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) 1925 { 1926 if (!btf_equal_common(t1, t2)) 1927 return false; 1928 1929 return btf_array(t1)->nelems == btf_array(t2)->nelems; 1930 } 1931 1932 /* 1933 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, 1934 * under assumption that they were already resolved to canonical type IDs and 1935 * are not going to change. 1936 */ 1937 static long btf_hash_fnproto(struct btf_type *t) 1938 { 1939 const struct btf_param *member = btf_params(t); 1940 __u16 vlen = btf_vlen(t); 1941 long h = btf_hash_common(t); 1942 int i; 1943 1944 for (i = 0; i < vlen; i++) { 1945 h = hash_combine(h, member->name_off); 1946 h = hash_combine(h, member->type); 1947 member++; 1948 } 1949 return h; 1950 } 1951 1952 /* 1953 * Check exact equality of two FUNC_PROTOs, taking into account referenced 1954 * type IDs, under assumption that they were already resolved to canonical 1955 * type IDs and are not going to change. 1956 * This function is called during reference types deduplication to compare 1957 * FUNC_PROTO to potential canonical representative. 1958 */ 1959 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) 1960 { 1961 const struct btf_param *m1, *m2; 1962 __u16 vlen; 1963 int i; 1964 1965 if (!btf_equal_common(t1, t2)) 1966 return false; 1967 1968 vlen = btf_vlen(t1); 1969 m1 = btf_params(t1); 1970 m2 = btf_params(t2); 1971 for (i = 0; i < vlen; i++) { 1972 if (m1->name_off != m2->name_off || m1->type != m2->type) 1973 return false; 1974 m1++; 1975 m2++; 1976 } 1977 return true; 1978 } 1979 1980 /* 1981 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type 1982 * IDs. This check is performed during type graph equivalence check and 1983 * referenced types equivalence is checked separately. 1984 */ 1985 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) 1986 { 1987 const struct btf_param *m1, *m2; 1988 __u16 vlen; 1989 int i; 1990 1991 /* skip return type ID */ 1992 if (t1->name_off != t2->name_off || t1->info != t2->info) 1993 return false; 1994 1995 vlen = btf_vlen(t1); 1996 m1 = btf_params(t1); 1997 m2 = btf_params(t2); 1998 for (i = 0; i < vlen; i++) { 1999 if (m1->name_off != m2->name_off) 2000 return false; 2001 m1++; 2002 m2++; 2003 } 2004 return true; 2005 } 2006 2007 /* 2008 * Deduplicate primitive types, that can't reference other types, by calculating 2009 * their type signature hash and comparing them with any possible canonical 2010 * candidate. If no canonical candidate matches, type itself is marked as 2011 * canonical and is added into `btf_dedup->dedup_table` as another candidate. 2012 */ 2013 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) 2014 { 2015 struct btf_type *t = d->btf->types[type_id]; 2016 struct hashmap_entry *hash_entry; 2017 struct btf_type *cand; 2018 /* if we don't find equivalent type, then we are canonical */ 2019 __u32 new_id = type_id; 2020 __u32 cand_id; 2021 long h; 2022 2023 switch (btf_kind(t)) { 2024 case BTF_KIND_CONST: 2025 case BTF_KIND_VOLATILE: 2026 case BTF_KIND_RESTRICT: 2027 case BTF_KIND_PTR: 2028 case BTF_KIND_TYPEDEF: 2029 case BTF_KIND_ARRAY: 2030 case BTF_KIND_STRUCT: 2031 case BTF_KIND_UNION: 2032 case BTF_KIND_FUNC: 2033 case BTF_KIND_FUNC_PROTO: 2034 case BTF_KIND_VAR: 2035 case BTF_KIND_DATASEC: 2036 return 0; 2037 2038 case BTF_KIND_INT: 2039 h = btf_hash_int(t); 2040 for_each_dedup_cand(d, hash_entry, h) { 2041 cand_id = (__u32)(long)hash_entry->value; 2042 cand = d->btf->types[cand_id]; 2043 if (btf_equal_int(t, cand)) { 2044 new_id = cand_id; 2045 break; 2046 } 2047 } 2048 break; 2049 2050 case BTF_KIND_ENUM: 2051 h = btf_hash_enum(t); 2052 for_each_dedup_cand(d, hash_entry, h) { 2053 cand_id = (__u32)(long)hash_entry->value; 2054 cand = d->btf->types[cand_id]; 2055 if (btf_equal_enum(t, cand)) { 2056 new_id = cand_id; 2057 break; 2058 } 2059 if (d->opts.dont_resolve_fwds) 2060 continue; 2061 if (btf_compat_enum(t, cand)) { 2062 if (btf_is_enum_fwd(t)) { 2063 /* resolve fwd to full enum */ 2064 new_id = cand_id; 2065 break; 2066 } 2067 /* resolve canonical enum fwd to full enum */ 2068 d->map[cand_id] = type_id; 2069 } 2070 } 2071 break; 2072 2073 case BTF_KIND_FWD: 2074 h = btf_hash_common(t); 2075 for_each_dedup_cand(d, hash_entry, h) { 2076 cand_id = (__u32)(long)hash_entry->value; 2077 cand = d->btf->types[cand_id]; 2078 if (btf_equal_common(t, cand)) { 2079 new_id = cand_id; 2080 break; 2081 } 2082 } 2083 break; 2084 2085 default: 2086 return -EINVAL; 2087 } 2088 2089 d->map[type_id] = new_id; 2090 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 2091 return -ENOMEM; 2092 2093 return 0; 2094 } 2095 2096 static int btf_dedup_prim_types(struct btf_dedup *d) 2097 { 2098 int i, err; 2099 2100 for (i = 1; i <= d->btf->nr_types; i++) { 2101 err = btf_dedup_prim_type(d, i); 2102 if (err) 2103 return err; 2104 } 2105 return 0; 2106 } 2107 2108 /* 2109 * Check whether type is already mapped into canonical one (could be to itself). 2110 */ 2111 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) 2112 { 2113 return d->map[type_id] <= BTF_MAX_NR_TYPES; 2114 } 2115 2116 /* 2117 * Resolve type ID into its canonical type ID, if any; otherwise return original 2118 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow 2119 * STRUCT/UNION link and resolve it into canonical type ID as well. 2120 */ 2121 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) 2122 { 2123 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 2124 type_id = d->map[type_id]; 2125 return type_id; 2126 } 2127 2128 /* 2129 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original 2130 * type ID. 2131 */ 2132 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) 2133 { 2134 __u32 orig_type_id = type_id; 2135 2136 if (!btf_is_fwd(d->btf->types[type_id])) 2137 return type_id; 2138 2139 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 2140 type_id = d->map[type_id]; 2141 2142 if (!btf_is_fwd(d->btf->types[type_id])) 2143 return type_id; 2144 2145 return orig_type_id; 2146 } 2147 2148 2149 static inline __u16 btf_fwd_kind(struct btf_type *t) 2150 { 2151 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT; 2152 } 2153 2154 /* 2155 * Check equivalence of BTF type graph formed by candidate struct/union (we'll 2156 * call it "candidate graph" in this description for brevity) to a type graph 2157 * formed by (potential) canonical struct/union ("canonical graph" for brevity 2158 * here, though keep in mind that not all types in canonical graph are 2159 * necessarily canonical representatives themselves, some of them might be 2160 * duplicates or its uniqueness might not have been established yet). 2161 * Returns: 2162 * - >0, if type graphs are equivalent; 2163 * - 0, if not equivalent; 2164 * - <0, on error. 2165 * 2166 * Algorithm performs side-by-side DFS traversal of both type graphs and checks 2167 * equivalence of BTF types at each step. If at any point BTF types in candidate 2168 * and canonical graphs are not compatible structurally, whole graphs are 2169 * incompatible. If types are structurally equivalent (i.e., all information 2170 * except referenced type IDs is exactly the same), a mapping from `canon_id` to 2171 * a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`). 2172 * If a type references other types, then those referenced types are checked 2173 * for equivalence recursively. 2174 * 2175 * During DFS traversal, if we find that for current `canon_id` type we 2176 * already have some mapping in hypothetical map, we check for two possible 2177 * situations: 2178 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will 2179 * happen when type graphs have cycles. In this case we assume those two 2180 * types are equivalent. 2181 * - `canon_id` is mapped to different type. This is contradiction in our 2182 * hypothetical mapping, because same graph in canonical graph corresponds 2183 * to two different types in candidate graph, which for equivalent type 2184 * graphs shouldn't happen. This condition terminates equivalence check 2185 * with negative result. 2186 * 2187 * If type graphs traversal exhausts types to check and find no contradiction, 2188 * then type graphs are equivalent. 2189 * 2190 * When checking types for equivalence, there is one special case: FWD types. 2191 * If FWD type resolution is allowed and one of the types (either from canonical 2192 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind 2193 * flag) and their names match, hypothetical mapping is updated to point from 2194 * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully, 2195 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently. 2196 * 2197 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution, 2198 * if there are two exactly named (or anonymous) structs/unions that are 2199 * compatible structurally, one of which has FWD field, while other is concrete 2200 * STRUCT/UNION, but according to C sources they are different structs/unions 2201 * that are referencing different types with the same name. This is extremely 2202 * unlikely to happen, but btf_dedup API allows to disable FWD resolution if 2203 * this logic is causing problems. 2204 * 2205 * Doing FWD resolution means that both candidate and/or canonical graphs can 2206 * consists of portions of the graph that come from multiple compilation units. 2207 * This is due to the fact that types within single compilation unit are always 2208 * deduplicated and FWDs are already resolved, if referenced struct/union 2209 * definiton is available. So, if we had unresolved FWD and found corresponding 2210 * STRUCT/UNION, they will be from different compilation units. This 2211 * consequently means that when we "link" FWD to corresponding STRUCT/UNION, 2212 * type graph will likely have at least two different BTF types that describe 2213 * same type (e.g., most probably there will be two different BTF types for the 2214 * same 'int' primitive type) and could even have "overlapping" parts of type 2215 * graph that describe same subset of types. 2216 * 2217 * This in turn means that our assumption that each type in canonical graph 2218 * must correspond to exactly one type in candidate graph might not hold 2219 * anymore and will make it harder to detect contradictions using hypothetical 2220 * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION 2221 * resolution only in canonical graph. FWDs in candidate graphs are never 2222 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs 2223 * that can occur: 2224 * - Both types in canonical and candidate graphs are FWDs. If they are 2225 * structurally equivalent, then they can either be both resolved to the 2226 * same STRUCT/UNION or not resolved at all. In both cases they are 2227 * equivalent and there is no need to resolve FWD on candidate side. 2228 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION, 2229 * so nothing to resolve as well, algorithm will check equivalence anyway. 2230 * - Type in canonical graph is FWD, while type in candidate is concrete 2231 * STRUCT/UNION. In this case candidate graph comes from single compilation 2232 * unit, so there is exactly one BTF type for each unique C type. After 2233 * resolving FWD into STRUCT/UNION, there might be more than one BTF type 2234 * in canonical graph mapping to single BTF type in candidate graph, but 2235 * because hypothetical mapping maps from canonical to candidate types, it's 2236 * alright, and we still maintain the property of having single `canon_id` 2237 * mapping to single `cand_id` (there could be two different `canon_id` 2238 * mapped to the same `cand_id`, but it's not contradictory). 2239 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate 2240 * graph is FWD. In this case we are just going to check compatibility of 2241 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll 2242 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to 2243 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs 2244 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from 2245 * canonical graph. 2246 */ 2247 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, 2248 __u32 canon_id) 2249 { 2250 struct btf_type *cand_type; 2251 struct btf_type *canon_type; 2252 __u32 hypot_type_id; 2253 __u16 cand_kind; 2254 __u16 canon_kind; 2255 int i, eq; 2256 2257 /* if both resolve to the same canonical, they must be equivalent */ 2258 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) 2259 return 1; 2260 2261 canon_id = resolve_fwd_id(d, canon_id); 2262 2263 hypot_type_id = d->hypot_map[canon_id]; 2264 if (hypot_type_id <= BTF_MAX_NR_TYPES) 2265 return hypot_type_id == cand_id; 2266 2267 if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) 2268 return -ENOMEM; 2269 2270 cand_type = d->btf->types[cand_id]; 2271 canon_type = d->btf->types[canon_id]; 2272 cand_kind = btf_kind(cand_type); 2273 canon_kind = btf_kind(canon_type); 2274 2275 if (cand_type->name_off != canon_type->name_off) 2276 return 0; 2277 2278 /* FWD <--> STRUCT/UNION equivalence check, if enabled */ 2279 if (!d->opts.dont_resolve_fwds 2280 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) 2281 && cand_kind != canon_kind) { 2282 __u16 real_kind; 2283 __u16 fwd_kind; 2284 2285 if (cand_kind == BTF_KIND_FWD) { 2286 real_kind = canon_kind; 2287 fwd_kind = btf_fwd_kind(cand_type); 2288 } else { 2289 real_kind = cand_kind; 2290 fwd_kind = btf_fwd_kind(canon_type); 2291 } 2292 return fwd_kind == real_kind; 2293 } 2294 2295 if (cand_kind != canon_kind) 2296 return 0; 2297 2298 switch (cand_kind) { 2299 case BTF_KIND_INT: 2300 return btf_equal_int(cand_type, canon_type); 2301 2302 case BTF_KIND_ENUM: 2303 if (d->opts.dont_resolve_fwds) 2304 return btf_equal_enum(cand_type, canon_type); 2305 else 2306 return btf_compat_enum(cand_type, canon_type); 2307 2308 case BTF_KIND_FWD: 2309 return btf_equal_common(cand_type, canon_type); 2310 2311 case BTF_KIND_CONST: 2312 case BTF_KIND_VOLATILE: 2313 case BTF_KIND_RESTRICT: 2314 case BTF_KIND_PTR: 2315 case BTF_KIND_TYPEDEF: 2316 case BTF_KIND_FUNC: 2317 if (cand_type->info != canon_type->info) 2318 return 0; 2319 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); 2320 2321 case BTF_KIND_ARRAY: { 2322 const struct btf_array *cand_arr, *canon_arr; 2323 2324 if (!btf_compat_array(cand_type, canon_type)) 2325 return 0; 2326 cand_arr = btf_array(cand_type); 2327 canon_arr = btf_array(canon_type); 2328 eq = btf_dedup_is_equiv(d, 2329 cand_arr->index_type, canon_arr->index_type); 2330 if (eq <= 0) 2331 return eq; 2332 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); 2333 } 2334 2335 case BTF_KIND_STRUCT: 2336 case BTF_KIND_UNION: { 2337 const struct btf_member *cand_m, *canon_m; 2338 __u16 vlen; 2339 2340 if (!btf_shallow_equal_struct(cand_type, canon_type)) 2341 return 0; 2342 vlen = btf_vlen(cand_type); 2343 cand_m = btf_members(cand_type); 2344 canon_m = btf_members(canon_type); 2345 for (i = 0; i < vlen; i++) { 2346 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); 2347 if (eq <= 0) 2348 return eq; 2349 cand_m++; 2350 canon_m++; 2351 } 2352 2353 return 1; 2354 } 2355 2356 case BTF_KIND_FUNC_PROTO: { 2357 const struct btf_param *cand_p, *canon_p; 2358 __u16 vlen; 2359 2360 if (!btf_compat_fnproto(cand_type, canon_type)) 2361 return 0; 2362 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); 2363 if (eq <= 0) 2364 return eq; 2365 vlen = btf_vlen(cand_type); 2366 cand_p = btf_params(cand_type); 2367 canon_p = btf_params(canon_type); 2368 for (i = 0; i < vlen; i++) { 2369 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type); 2370 if (eq <= 0) 2371 return eq; 2372 cand_p++; 2373 canon_p++; 2374 } 2375 return 1; 2376 } 2377 2378 default: 2379 return -EINVAL; 2380 } 2381 return 0; 2382 } 2383 2384 /* 2385 * Use hypothetical mapping, produced by successful type graph equivalence 2386 * check, to augment existing struct/union canonical mapping, where possible. 2387 * 2388 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record 2389 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional: 2390 * it doesn't matter if FWD type was part of canonical graph or candidate one, 2391 * we are recording the mapping anyway. As opposed to carefulness required 2392 * for struct/union correspondence mapping (described below), for FWD resolution 2393 * it's not important, as by the time that FWD type (reference type) will be 2394 * deduplicated all structs/unions will be deduped already anyway. 2395 * 2396 * Recording STRUCT/UNION mapping is purely a performance optimization and is 2397 * not required for correctness. It needs to be done carefully to ensure that 2398 * struct/union from candidate's type graph is not mapped into corresponding 2399 * struct/union from canonical type graph that itself hasn't been resolved into 2400 * canonical representative. The only guarantee we have is that canonical 2401 * struct/union was determined as canonical and that won't change. But any 2402 * types referenced through that struct/union fields could have been not yet 2403 * resolved, so in case like that it's too early to establish any kind of 2404 * correspondence between structs/unions. 2405 * 2406 * No canonical correspondence is derived for primitive types (they are already 2407 * deduplicated completely already anyway) or reference types (they rely on 2408 * stability of struct/union canonical relationship for equivalence checks). 2409 */ 2410 static void btf_dedup_merge_hypot_map(struct btf_dedup *d) 2411 { 2412 __u32 cand_type_id, targ_type_id; 2413 __u16 t_kind, c_kind; 2414 __u32 t_id, c_id; 2415 int i; 2416 2417 for (i = 0; i < d->hypot_cnt; i++) { 2418 cand_type_id = d->hypot_list[i]; 2419 targ_type_id = d->hypot_map[cand_type_id]; 2420 t_id = resolve_type_id(d, targ_type_id); 2421 c_id = resolve_type_id(d, cand_type_id); 2422 t_kind = btf_kind(d->btf->types[t_id]); 2423 c_kind = btf_kind(d->btf->types[c_id]); 2424 /* 2425 * Resolve FWD into STRUCT/UNION. 2426 * It's ok to resolve FWD into STRUCT/UNION that's not yet 2427 * mapped to canonical representative (as opposed to 2428 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because 2429 * eventually that struct is going to be mapped and all resolved 2430 * FWDs will automatically resolve to correct canonical 2431 * representative. This will happen before ref type deduping, 2432 * which critically depends on stability of these mapping. This 2433 * stability is not a requirement for STRUCT/UNION equivalence 2434 * checks, though. 2435 */ 2436 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) 2437 d->map[c_id] = t_id; 2438 else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) 2439 d->map[t_id] = c_id; 2440 2441 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && 2442 c_kind != BTF_KIND_FWD && 2443 is_type_mapped(d, c_id) && 2444 !is_type_mapped(d, t_id)) { 2445 /* 2446 * as a perf optimization, we can map struct/union 2447 * that's part of type graph we just verified for 2448 * equivalence. We can do that for struct/union that has 2449 * canonical representative only, though. 2450 */ 2451 d->map[t_id] = c_id; 2452 } 2453 } 2454 } 2455 2456 /* 2457 * Deduplicate struct/union types. 2458 * 2459 * For each struct/union type its type signature hash is calculated, taking 2460 * into account type's name, size, number, order and names of fields, but 2461 * ignoring type ID's referenced from fields, because they might not be deduped 2462 * completely until after reference types deduplication phase. This type hash 2463 * is used to iterate over all potential canonical types, sharing same hash. 2464 * For each canonical candidate we check whether type graphs that they form 2465 * (through referenced types in fields and so on) are equivalent using algorithm 2466 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and 2467 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping 2468 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence 2469 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to 2470 * potentially map other structs/unions to their canonical representatives, 2471 * if such relationship hasn't yet been established. This speeds up algorithm 2472 * by eliminating some of the duplicate work. 2473 * 2474 * If no matching canonical representative was found, struct/union is marked 2475 * as canonical for itself and is added into btf_dedup->dedup_table hash map 2476 * for further look ups. 2477 */ 2478 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) 2479 { 2480 struct btf_type *cand_type, *t; 2481 struct hashmap_entry *hash_entry; 2482 /* if we don't find equivalent type, then we are canonical */ 2483 __u32 new_id = type_id; 2484 __u16 kind; 2485 long h; 2486 2487 /* already deduped or is in process of deduping (loop detected) */ 2488 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2489 return 0; 2490 2491 t = d->btf->types[type_id]; 2492 kind = btf_kind(t); 2493 2494 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) 2495 return 0; 2496 2497 h = btf_hash_struct(t); 2498 for_each_dedup_cand(d, hash_entry, h) { 2499 __u32 cand_id = (__u32)(long)hash_entry->value; 2500 int eq; 2501 2502 /* 2503 * Even though btf_dedup_is_equiv() checks for 2504 * btf_shallow_equal_struct() internally when checking two 2505 * structs (unions) for equivalence, we need to guard here 2506 * from picking matching FWD type as a dedup candidate. 2507 * This can happen due to hash collision. In such case just 2508 * relying on btf_dedup_is_equiv() would lead to potentially 2509 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because 2510 * FWD and compatible STRUCT/UNION are considered equivalent. 2511 */ 2512 cand_type = d->btf->types[cand_id]; 2513 if (!btf_shallow_equal_struct(t, cand_type)) 2514 continue; 2515 2516 btf_dedup_clear_hypot_map(d); 2517 eq = btf_dedup_is_equiv(d, type_id, cand_id); 2518 if (eq < 0) 2519 return eq; 2520 if (!eq) 2521 continue; 2522 new_id = cand_id; 2523 btf_dedup_merge_hypot_map(d); 2524 break; 2525 } 2526 2527 d->map[type_id] = new_id; 2528 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 2529 return -ENOMEM; 2530 2531 return 0; 2532 } 2533 2534 static int btf_dedup_struct_types(struct btf_dedup *d) 2535 { 2536 int i, err; 2537 2538 for (i = 1; i <= d->btf->nr_types; i++) { 2539 err = btf_dedup_struct_type(d, i); 2540 if (err) 2541 return err; 2542 } 2543 return 0; 2544 } 2545 2546 /* 2547 * Deduplicate reference type. 2548 * 2549 * Once all primitive and struct/union types got deduplicated, we can easily 2550 * deduplicate all other (reference) BTF types. This is done in two steps: 2551 * 2552 * 1. Resolve all referenced type IDs into their canonical type IDs. This 2553 * resolution can be done either immediately for primitive or struct/union types 2554 * (because they were deduped in previous two phases) or recursively for 2555 * reference types. Recursion will always terminate at either primitive or 2556 * struct/union type, at which point we can "unwind" chain of reference types 2557 * one by one. There is no danger of encountering cycles because in C type 2558 * system the only way to form type cycle is through struct/union, so any chain 2559 * of reference types, even those taking part in a type cycle, will inevitably 2560 * reach struct/union at some point. 2561 * 2562 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type 2563 * becomes "stable", in the sense that no further deduplication will cause 2564 * any changes to it. With that, it's now possible to calculate type's signature 2565 * hash (this time taking into account referenced type IDs) and loop over all 2566 * potential canonical representatives. If no match was found, current type 2567 * will become canonical representative of itself and will be added into 2568 * btf_dedup->dedup_table as another possible canonical representative. 2569 */ 2570 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) 2571 { 2572 struct hashmap_entry *hash_entry; 2573 __u32 new_id = type_id, cand_id; 2574 struct btf_type *t, *cand; 2575 /* if we don't find equivalent type, then we are representative type */ 2576 int ref_type_id; 2577 long h; 2578 2579 if (d->map[type_id] == BTF_IN_PROGRESS_ID) 2580 return -ELOOP; 2581 if (d->map[type_id] <= BTF_MAX_NR_TYPES) 2582 return resolve_type_id(d, type_id); 2583 2584 t = d->btf->types[type_id]; 2585 d->map[type_id] = BTF_IN_PROGRESS_ID; 2586 2587 switch (btf_kind(t)) { 2588 case BTF_KIND_CONST: 2589 case BTF_KIND_VOLATILE: 2590 case BTF_KIND_RESTRICT: 2591 case BTF_KIND_PTR: 2592 case BTF_KIND_TYPEDEF: 2593 case BTF_KIND_FUNC: 2594 ref_type_id = btf_dedup_ref_type(d, t->type); 2595 if (ref_type_id < 0) 2596 return ref_type_id; 2597 t->type = ref_type_id; 2598 2599 h = btf_hash_common(t); 2600 for_each_dedup_cand(d, hash_entry, h) { 2601 cand_id = (__u32)(long)hash_entry->value; 2602 cand = d->btf->types[cand_id]; 2603 if (btf_equal_common(t, cand)) { 2604 new_id = cand_id; 2605 break; 2606 } 2607 } 2608 break; 2609 2610 case BTF_KIND_ARRAY: { 2611 struct btf_array *info = btf_array(t); 2612 2613 ref_type_id = btf_dedup_ref_type(d, info->type); 2614 if (ref_type_id < 0) 2615 return ref_type_id; 2616 info->type = ref_type_id; 2617 2618 ref_type_id = btf_dedup_ref_type(d, info->index_type); 2619 if (ref_type_id < 0) 2620 return ref_type_id; 2621 info->index_type = ref_type_id; 2622 2623 h = btf_hash_array(t); 2624 for_each_dedup_cand(d, hash_entry, h) { 2625 cand_id = (__u32)(long)hash_entry->value; 2626 cand = d->btf->types[cand_id]; 2627 if (btf_equal_array(t, cand)) { 2628 new_id = cand_id; 2629 break; 2630 } 2631 } 2632 break; 2633 } 2634 2635 case BTF_KIND_FUNC_PROTO: { 2636 struct btf_param *param; 2637 __u16 vlen; 2638 int i; 2639 2640 ref_type_id = btf_dedup_ref_type(d, t->type); 2641 if (ref_type_id < 0) 2642 return ref_type_id; 2643 t->type = ref_type_id; 2644 2645 vlen = btf_vlen(t); 2646 param = btf_params(t); 2647 for (i = 0; i < vlen; i++) { 2648 ref_type_id = btf_dedup_ref_type(d, param->type); 2649 if (ref_type_id < 0) 2650 return ref_type_id; 2651 param->type = ref_type_id; 2652 param++; 2653 } 2654 2655 h = btf_hash_fnproto(t); 2656 for_each_dedup_cand(d, hash_entry, h) { 2657 cand_id = (__u32)(long)hash_entry->value; 2658 cand = d->btf->types[cand_id]; 2659 if (btf_equal_fnproto(t, cand)) { 2660 new_id = cand_id; 2661 break; 2662 } 2663 } 2664 break; 2665 } 2666 2667 default: 2668 return -EINVAL; 2669 } 2670 2671 d->map[type_id] = new_id; 2672 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 2673 return -ENOMEM; 2674 2675 return new_id; 2676 } 2677 2678 static int btf_dedup_ref_types(struct btf_dedup *d) 2679 { 2680 int i, err; 2681 2682 for (i = 1; i <= d->btf->nr_types; i++) { 2683 err = btf_dedup_ref_type(d, i); 2684 if (err < 0) 2685 return err; 2686 } 2687 /* we won't need d->dedup_table anymore */ 2688 hashmap__free(d->dedup_table); 2689 d->dedup_table = NULL; 2690 return 0; 2691 } 2692 2693 /* 2694 * Compact types. 2695 * 2696 * After we established for each type its corresponding canonical representative 2697 * type, we now can eliminate types that are not canonical and leave only 2698 * canonical ones layed out sequentially in memory by copying them over 2699 * duplicates. During compaction btf_dedup->hypot_map array is reused to store 2700 * a map from original type ID to a new compacted type ID, which will be used 2701 * during next phase to "fix up" type IDs, referenced from struct/union and 2702 * reference types. 2703 */ 2704 static int btf_dedup_compact_types(struct btf_dedup *d) 2705 { 2706 struct btf_type **new_types; 2707 __u32 next_type_id = 1; 2708 char *types_start, *p; 2709 int i, len; 2710 2711 /* we are going to reuse hypot_map to store compaction remapping */ 2712 d->hypot_map[0] = 0; 2713 for (i = 1; i <= d->btf->nr_types; i++) 2714 d->hypot_map[i] = BTF_UNPROCESSED_ID; 2715 2716 types_start = d->btf->nohdr_data + d->btf->hdr->type_off; 2717 p = types_start; 2718 2719 for (i = 1; i <= d->btf->nr_types; i++) { 2720 if (d->map[i] != i) 2721 continue; 2722 2723 len = btf_type_size(d->btf->types[i]); 2724 if (len < 0) 2725 return len; 2726 2727 memmove(p, d->btf->types[i], len); 2728 d->hypot_map[i] = next_type_id; 2729 d->btf->types[next_type_id] = (struct btf_type *)p; 2730 p += len; 2731 next_type_id++; 2732 } 2733 2734 /* shrink struct btf's internal types index and update btf_header */ 2735 d->btf->nr_types = next_type_id - 1; 2736 d->btf->types_size = d->btf->nr_types; 2737 d->btf->hdr->type_len = p - types_start; 2738 new_types = realloc(d->btf->types, 2739 (1 + d->btf->nr_types) * sizeof(struct btf_type *)); 2740 if (!new_types) 2741 return -ENOMEM; 2742 d->btf->types = new_types; 2743 2744 /* make sure string section follows type information without gaps */ 2745 d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data; 2746 memmove(p, d->btf->strings, d->btf->hdr->str_len); 2747 d->btf->strings = p; 2748 p += d->btf->hdr->str_len; 2749 2750 d->btf->data_size = p - (char *)d->btf->data; 2751 return 0; 2752 } 2753 2754 /* 2755 * Figure out final (deduplicated and compacted) type ID for provided original 2756 * `type_id` by first resolving it into corresponding canonical type ID and 2757 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, 2758 * which is populated during compaction phase. 2759 */ 2760 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id) 2761 { 2762 __u32 resolved_type_id, new_type_id; 2763 2764 resolved_type_id = resolve_type_id(d, type_id); 2765 new_type_id = d->hypot_map[resolved_type_id]; 2766 if (new_type_id > BTF_MAX_NR_TYPES) 2767 return -EINVAL; 2768 return new_type_id; 2769 } 2770 2771 /* 2772 * Remap referenced type IDs into deduped type IDs. 2773 * 2774 * After BTF types are deduplicated and compacted, their final type IDs may 2775 * differ from original ones. The map from original to a corresponding 2776 * deduped type ID is stored in btf_dedup->hypot_map and is populated during 2777 * compaction phase. During remapping phase we are rewriting all type IDs 2778 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to 2779 * their final deduped type IDs. 2780 */ 2781 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) 2782 { 2783 struct btf_type *t = d->btf->types[type_id]; 2784 int i, r; 2785 2786 switch (btf_kind(t)) { 2787 case BTF_KIND_INT: 2788 case BTF_KIND_ENUM: 2789 break; 2790 2791 case BTF_KIND_FWD: 2792 case BTF_KIND_CONST: 2793 case BTF_KIND_VOLATILE: 2794 case BTF_KIND_RESTRICT: 2795 case BTF_KIND_PTR: 2796 case BTF_KIND_TYPEDEF: 2797 case BTF_KIND_FUNC: 2798 case BTF_KIND_VAR: 2799 r = btf_dedup_remap_type_id(d, t->type); 2800 if (r < 0) 2801 return r; 2802 t->type = r; 2803 break; 2804 2805 case BTF_KIND_ARRAY: { 2806 struct btf_array *arr_info = btf_array(t); 2807 2808 r = btf_dedup_remap_type_id(d, arr_info->type); 2809 if (r < 0) 2810 return r; 2811 arr_info->type = r; 2812 r = btf_dedup_remap_type_id(d, arr_info->index_type); 2813 if (r < 0) 2814 return r; 2815 arr_info->index_type = r; 2816 break; 2817 } 2818 2819 case BTF_KIND_STRUCT: 2820 case BTF_KIND_UNION: { 2821 struct btf_member *member = btf_members(t); 2822 __u16 vlen = btf_vlen(t); 2823 2824 for (i = 0; i < vlen; i++) { 2825 r = btf_dedup_remap_type_id(d, member->type); 2826 if (r < 0) 2827 return r; 2828 member->type = r; 2829 member++; 2830 } 2831 break; 2832 } 2833 2834 case BTF_KIND_FUNC_PROTO: { 2835 struct btf_param *param = btf_params(t); 2836 __u16 vlen = btf_vlen(t); 2837 2838 r = btf_dedup_remap_type_id(d, t->type); 2839 if (r < 0) 2840 return r; 2841 t->type = r; 2842 2843 for (i = 0; i < vlen; i++) { 2844 r = btf_dedup_remap_type_id(d, param->type); 2845 if (r < 0) 2846 return r; 2847 param->type = r; 2848 param++; 2849 } 2850 break; 2851 } 2852 2853 case BTF_KIND_DATASEC: { 2854 struct btf_var_secinfo *var = btf_var_secinfos(t); 2855 __u16 vlen = btf_vlen(t); 2856 2857 for (i = 0; i < vlen; i++) { 2858 r = btf_dedup_remap_type_id(d, var->type); 2859 if (r < 0) 2860 return r; 2861 var->type = r; 2862 var++; 2863 } 2864 break; 2865 } 2866 2867 default: 2868 return -EINVAL; 2869 } 2870 2871 return 0; 2872 } 2873 2874 static int btf_dedup_remap_types(struct btf_dedup *d) 2875 { 2876 int i, r; 2877 2878 for (i = 1; i <= d->btf->nr_types; i++) { 2879 r = btf_dedup_remap_type(d, i); 2880 if (r < 0) 2881 return r; 2882 } 2883 return 0; 2884 } 2885