// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <byteswap.h>
#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "strset.h"

#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU

static struct btf_type btf_void;

struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, types_data, and strs_data
	 * point inside that memory region to their respective parts of BTF
	 * representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions to be able to modify them independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__get_raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to the biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to the total size of base BTF's string section.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* BTF object FD, if loaded into kernel */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF */
	int ptr_sz;
};

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

/* Ensure given dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can ever be allocated.
 * If necessary, memory is reallocated and all existing data is copied over,
 * the new pointer to the memory region is stored at *data, and the new memory
 * region capacity (in number of elements) is stored in *cap_cnt.
 * On success, a pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit */
	if (cur_cnt + add_cnt > max_cnt)
		return NULL;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}

/* Ensure given dynamically allocated memory region has enough allocated space
 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
 */
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
{
	void *p;

	if (need_cnt <= *cap_cnt)
		return 0;

	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
	if (!p)
		return -ENOMEM;

	return 0;
}

static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
{
	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
}

static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
	__u32 *p;

	p = btf_add_type_offs_mem(btf, 1);
	if (!p)
		return -ENOMEM;

	*p = type_off;
	return 0;
}

static void btf_bswap_hdr(struct btf_header *h)
{
	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->type_off = bswap_32(h->type_off);
	h->type_len = bswap_32(h->type_len);
	h->str_off = bswap_32(h->str_off);
	h->str_len = bswap_32(h->str_len);
}
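
/* Editor's note: a minimal usage sketch (not part of libbpf) for the
 * libbpf_ensure_mem() growth pattern above. The caller-side names `vals`,
 * `vals_cap`, and `n` are hypothetical:
 *
 *	static __u32 *vals;
 *	static size_t vals_cap;
 *
 *	static int append_val(size_t n, __u32 v)
 *	{
 *		// grows capacity (zero-initialized) to at least n + 1 elements
 *		if (libbpf_ensure_mem((void **)&vals, &vals_cap, sizeof(*vals), n + 1))
 *			return -ENOMEM;
 *		vals[n] = v;
 *		return 0;
 *	}
 */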
static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
		return -EINVAL;
	}

	meta_left = btf->raw_size - sizeof(*hdr);
	if (meta_left < hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
		return -EINVAL;
	}

	if (hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}

static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr->str_len;

	if (btf->base_btf && hdr->str_len == 0)
		return 0;
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	if (!btf->base_btf && start[0]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	return 0;
}

static int btf_type_size(const struct btf_type *t)
{
	const int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	case BTF_KIND_TAG:
		return base_size + sizeof(struct btf_tag);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}

static void btf_bswap_type_base(struct btf_type *t)
{
	t->name_off = bswap_32(t->name_off);
	t->info = bswap_32(t->info);
	t->type = bswap_32(t->type);
}
static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
		return 0;
	case BTF_KIND_INT:
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_TAG:
		btf_tag(t)->component_idx = bswap_32(btf_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}

static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}

__u32 btf__get_nr_types(const struct btf *btf)
{
	return btf->start_id + btf->nr_types - 1;
}

const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}

/* internal helper returning non-const pointer to a type */
struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
{
	if (type_id == 0)
		return &btf_void;
	if (type_id < btf->start_id)
		return btf_type_by_id(btf->base_btf, type_id);
	return btf->types_data + btf->type_offs[type_id - btf->start_id];
}

const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id >= btf->start_id + btf->nr_types)
		return errno = EINVAL, NULL;
	return btf_type_by_id((struct btf *)btf, type_id);
}
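
/* Editor's note: a hedged sketch (not part of libbpf) of walking all types
 * using the lookup index maintained above; it works for base and split BTF
 * alike because btf__type_by_id() transparently recurses into base BTF:
 *
 *	const struct btf_type *t;
 *	__u32 id, n = btf__get_nr_types(btf);
 *
 *	for (id = 1; id <= n; id++) {
 *		t = btf__type_by_id(btf, id);
 *		printf("[%u] kind:%u name:%s\n", id, btf_kind(t),
 *		       btf__name_by_offset(btf, t->name_off));
 *	}
 */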
static int determine_ptr_size(const struct btf *btf)
{
	const struct btf_type *t;
	const char *name;
	int i, n;

	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
		return btf->base_btf->ptr_sz;

	n = btf__get_nr_types(btf);
	for (i = 1; i <= n; i++) {
		t = btf__type_by_id(btf, i);
		if (!btf_is_int(t))
			continue;

		name = btf__name_by_offset(btf, t->name_off);
		if (!name)
			continue;

		if (strcmp(name, "long int") == 0 ||
		    strcmp(name, "long unsigned int") == 0) {
			if (t->size != 4 && t->size != 8)
				continue;
			return t->size;
		}
	}

	return -1;
}

static size_t btf_ptr_sz(const struct btf *btf)
{
	if (!btf->ptr_sz)
		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
}

/* Return pointer size this BTF instance assumes. The size is heuristically
 * determined by looking for 'long' or 'unsigned long' integer type and
 * recording its size in bytes. If BTF type information doesn't have any such
 * type, this function returns 0. In the latter case, native architecture's
 * pointer size is assumed, so will be either 4 or 8, depending on the
 * architecture that libbpf was compiled for. It's possible to override the
 * guessed value by using the btf__set_pointer_size() API.
 */
size_t btf__pointer_size(const struct btf *btf)
{
	if (!btf->ptr_sz)
		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);

	if (btf->ptr_sz < 0)
		/* not enough BTF type info to guess */
		return 0;

	return btf->ptr_sz;
}

/* Override or set pointer size in bytes. Only values of 4 and 8 are
 * supported.
 */
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
	if (ptr_sz != 4 && ptr_sz != 8)
		return libbpf_err(-EINVAL);
	btf->ptr_sz = ptr_sz;
	return 0;
}

static bool is_host_big_endian(void)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	return false;
#elif __BYTE_ORDER == __BIG_ENDIAN
	return true;
#else
# error "Unrecognized __BYTE_ORDER"
#endif
}

enum btf_endianness btf__endianness(const struct btf *btf)
{
	if (is_host_big_endian())
		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
	else
		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
}
int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
{
	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
		return libbpf_err(-EINVAL);

	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
	if (!btf->swapped_endian) {
		free(btf->raw_data_swapped);
		btf->raw_data_swapped = NULL;
	}
	return 0;
}

static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void || btf_is_fwd(t);
}

static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}

#define MAX_RESOLVE_DEPTH 32

__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_TAG:
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}

int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_FLOAT:
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);
		}

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}

int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t;
	int depth = 0;

	t = btf__type_by_id(btf, type_id);
	while (depth < MAX_RESOLVE_DEPTH &&
	       !btf_type_is_void_or_null(t) &&
	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
		type_id = t->type;
		t = btf__type_by_id(btf, type_id);
		depth++;
	}

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
		return libbpf_err(-EINVAL);

	return type_id;
}
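
/* Editor's note: illustrative only (not part of libbpf). Given some `btf`
 * and a type ID `id`, the two helpers above compose naturally, e.g. to size
 * a variable of a possibly-typedef'ed array type:
 *
 *	__s64 sz = btf__resolve_size(btf, id);    // total byte size
 *	int align = btf__align_of(btf, id);       // alignment in bytes
 *
 *	if (sz < 0 || align <= 0)
 *		return -EINVAL;                   // unresolvable or malformed
 */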
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
	__u32 i, nr_types = btf__get_nr_types(btf);

	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name = btf__name_by_offset(btf, t->name_off);

		if (name && !strcmp(type_name, name))
			return i;
	}

	return libbpf_err(-ENOENT);
}

static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
				   const char *type_name, __u32 kind)
{
	__u32 i, nr_types = btf__get_nr_types(btf);

	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
		return 0;

	for (i = start_id; i <= nr_types; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (btf_kind(t) != kind)
			continue;
		name = btf__name_by_offset(btf, t->name_off);
		if (name && !strcmp(type_name, name))
			return i;
	}

	return libbpf_err(-ENOENT);
}

__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}

__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}

static bool btf_is_modifiable(const struct btf *btf)
{
	return (void *)btf->hdr != btf->raw_data;
}

void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}
	free(btf->raw_data);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	free(btf);
}
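
/* Editor's note: a small sketch (not part of libbpf) of the typical lookup
 * flow using btf__find_by_name_kind() above; "task_struct" is just an
 * example name one might search for in vmlinux BTF:
 *
 *	__s32 id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	if (id < 0)
 *		return id;                        // -ENOENT if not present
 *	const struct btf_type *t = btf__type_by_id(btf, id);
 */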
static struct btf *btf_new_empty(struct btf *base_btf)
{
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__get_nr_types(base_btf) + 1;
		btf->start_str_off = base_btf->hdr->str_len;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	btf->hdr = btf->raw_data;
	btf->hdr->hdr_len = sizeof(struct btf_header);
	btf->hdr->magic = BTF_MAGIC;
	btf->hdr->version = BTF_VERSION;

	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	return btf;
}

struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}

struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}

static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;

	if (base_btf) {
		btf->base_btf = base_btf;
		btf->start_id = btf__get_nr_types(base_btf) + 1;
		btf->start_str_off = base_btf->hdr->str_len;
	}

	btf->raw_data = malloc(size);
	if (!btf->raw_data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf->raw_data, data, size);
	btf->raw_size = size;

	btf->hdr = btf->raw_data;
	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	if (err)
		goto done;

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}

struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL));
}

static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
				 struct btf_ext **btf_ext)
{
	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
	int err = 0, fd = -1, idx = 0;
	struct btf *btf = NULL;
	Elf_Scn *scn = NULL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;
	size_t shstrndx;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("failed to open %s: %s\n", path, strerror(errno));
		return ERR_PTR(err);
	}

	err = -LIBBPF_ERRNO__FORMAT;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		pr_warn("failed to open %s as ELF file\n", path);
		goto done;
	}
	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);
		goto done;
	}

	if (elf_getshdrstrndx(elf, &shstrndx)) {
		pr_warn("failed to get section names section index for %s\n",
			path);
		goto done;
	}

	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);
		goto done;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",
				idx, path);
			goto done;
		}
		name = elf_strptr(elf, shstrndx, sh.sh_name);
		if (!name) {
			pr_warn("failed to get section(%d) name from %s\n",
				idx, path);
			goto done;
		}
		if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = elf_getdata(scn, 0);
			if (!btf_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = elf_getdata(scn, 0);
			if (!btf_ext_data) {
				pr_warn("failed to get section(%d, %s) data from %s\n",
					idx, name, path);
				goto done;
			}
			continue;
		}
	}
	err = 0;

	if (!btf_data) {
		err = -ENOENT;
		goto done;
	}
	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
	err = libbpf_get_error(btf);
	if (err)
		goto done;

	switch (gelf_getclass(elf)) {
	case ELFCLASS32:
		btf__set_pointer_size(btf, 4);
		break;
	case ELFCLASS64:
		btf__set_pointer_size(btf, 8);
		break;
	default:
		pr_warn("failed to get ELF class (bitness) for %s\n", path);
		break;
	}

	if (btf_ext && btf_ext_data) {
		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
		err = libbpf_get_error(*btf_ext);
		if (err)
			goto done;
	} else if (btf_ext) {
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (!err)
		return btf;

	if (btf_ext)
		btf_ext__free(*btf_ext);
	btf__free(btf);

	return ERR_PTR(err);
}

struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
}

struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}

static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
{
	struct btf *btf = NULL;
	void *data = NULL;
	FILE *f = NULL;
	__u16 magic;
	int err = 0;
	long sz;

	f = fopen(path, "rb");
	if (!f) {
		err = -errno;
		goto err_out;
	}

	/* check BTF magic */
	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
		err = -EIO;
		goto err_out;
	}
	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
		/* definitely not a raw BTF */
		err = -EPROTO;
		goto err_out;
	}

	/* get file size */
	if (fseek(f, 0, SEEK_END)) {
		err = -errno;
		goto err_out;
	}
	sz = ftell(f);
	if (sz < 0) {
		err = -errno;
		goto err_out;
	}
	/* rewind to the start */
	if (fseek(f, 0, SEEK_SET)) {
		err = -errno;
		goto err_out;
	}

	/* pre-alloc memory and read all of BTF data */
	data = malloc(sz);
	if (!data) {
		err = -ENOMEM;
		goto err_out;
	}
	if (fread(data, 1, sz, f) < sz) {
		err = -EIO;
		goto err_out;
	}

	/* finally parse BTF data */
	btf = btf_new(data, sz, base_btf);

err_out:
	free(data);
	if (f)
		fclose(f);
	return err ? ERR_PTR(err) : btf;
}
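
/* Editor's note: hedged usage sketch (not part of libbpf). On recent
 * kernels the canonical raw BTF blob lives at /sys/kernel/btf/vmlinux,
 * which btf__parse_raw() below handles directly:
 *
 *	struct btf *vmlinux_btf = btf__parse_raw("/sys/kernel/btf/vmlinux");
 *	long err = libbpf_get_error(vmlinux_btf);
 *
 *	if (err)
 *		return err;         // e.g. -EPROTO for a non-raw-BTF file
 *	// ... use vmlinux_btf ...
 *	btf__free(vmlinux_btf);
 */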
struct btf *btf__parse_raw(const char *path)
{
	return libbpf_ptr(btf_parse_raw(path, NULL));
}

struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_raw(path, base_btf));
}

static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
{
	struct btf *btf;
	int err;

	if (btf_ext)
		*btf_ext = NULL;

	btf = btf_parse_raw(path, base_btf);
	err = libbpf_get_error(btf);
	if (!err)
		return btf;
	if (err != -EPROTO)
		return ERR_PTR(err);
	return btf_parse_elf(path, base_btf, btf_ext);
}

struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
}

struct btf *btf__parse_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse(path, base_btf, NULL));
}

static int compare_vsi_off(const void *_a, const void *_b)
{
	const struct btf_var_secinfo *a = _a;
	const struct btf_var_secinfo *b = _b;

	return a->offset - b->offset;
}

static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
			     struct btf_type *t)
{
	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
	const char *name = btf__name_by_offset(btf, t->name_off);
	const struct btf_type *t_var;
	struct btf_var_secinfo *vsi;
	const struct btf_var *var;
	int ret;

	if (!name) {
		pr_debug("No name found in string section for DATASEC kind.\n");
		return -ENOENT;
	}

	/* .extern datasec size and var offsets were set correctly during
	 * extern collection step, so just skip straight to sorting variables
	 */
	if (t->size)
		goto sort_vars;

	ret = bpf_object__section_size(obj, name, &size);
	if (ret || !size || (t->size && t->size != size)) {
		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
		return -ENOENT;
	}

	t->size = size;

	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
		t_var = btf__type_by_id(btf, vsi->type);
		var = btf_var(t_var);

		if (!btf_is_var(t_var)) {
			pr_debug("Non-VAR type seen in section %s\n", name);
			return -EINVAL;
		}

		if (var->linkage == BTF_VAR_STATIC)
			continue;

		name = btf__name_by_offset(btf, t_var->name_off);
		if (!name) {
			pr_debug("No name found in string section for VAR kind\n");
			return -ENOENT;
		}

		ret = bpf_object__variable_offset(obj, name, &off);
		if (ret) {
			pr_debug("No offset found in symbol table for VAR %s\n",
				 name);
			return -ENOENT;
		}

		vsi->offset = off;
	}

sort_vars:
	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
	return 0;
}
int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
{
	int err = 0;
	__u32 i;

	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = btf_type_by_id(btf, i);

		/* Loader needs to fix up some of the things compiler
		 * couldn't get its hands on while emitting BTF. This
		 * is section size and global variable offset. We use
		 * the info from the ELF itself for this purpose.
		 */
		if (btf_is_datasec(t)) {
			err = btf_fixup_datasec(obj, btf, t);
			if (err)
				break;
		}
	}

	return libbpf_err(err);
}

static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);

int btf__load_into_kernel(struct btf *btf)
{
	__u32 log_buf_size = 0, raw_size;
	char *log_buf = NULL;
	void *raw_data;
	int err = 0;

	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);

retry_load:
	if (log_buf_size) {
		log_buf = malloc(log_buf_size);
		if (!log_buf)
			return libbpf_err(-ENOMEM);

		*log_buf = 0;
	}

	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	/* cache native raw data representation */
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

	btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false);
	if (btf->fd < 0) {
		if (!log_buf || errno == ENOSPC) {
			log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
					   log_buf_size << 1);
			free(log_buf);
			goto retry_load;
		}

		err = -errno;
		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
		if (*log_buf)
			pr_warn("%s\n", log_buf);
		goto done;
	}

done:
	free(log_buf);
	return libbpf_err(err);
}

int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));

int btf__fd(const struct btf *btf)
{
	return btf->fd;
}

void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}

static const void *btf_strs_data(const struct btf *btf)
{
	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
}
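
/* Editor's note: illustrative sketch (not part of libbpf) of loading BTF
 * into the kernel with btf__load_into_kernel() above and retrieving its FD:
 *
 *	int err = btf__load_into_kernel(btf);
 *
 *	if (err)
 *		return err;         // kernel verifier log is printed on failure
 *	int btf_fd = btf__fd(btf);  // valid until btf__free() closes it
 */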
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}

const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
{
	struct btf *btf = (struct btf *)btf_ro;
	__u32 data_sz;
	void *data;

	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
	if (!data)
		return errno = ENOMEM, NULL;

	btf->raw_size = data_sz;
	if (btf->swapped_endian)
		btf->raw_data_swapped = data;
	else
		btf->raw_data = data;
	*size = data_sz;
	return data;
}

const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
	if (offset < btf->start_str_off)
		return btf__str_by_offset(btf->base_btf, offset);
	else if (offset - btf->start_str_off < btf->hdr->str_len)
		return btf_strs_data(btf) + (offset - btf->start_str_off);
	else
		return errno = EINVAL, NULL;
}

const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}
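
/* Editor's note: a hedged sketch (not part of libbpf) of serializing BTF
 * back to a raw blob via btf__get_raw_data() above; the returned pointer is
 * owned and cached by the btf object, so the caller must not free it.
 * `out_file` is a hypothetical FILE*:
 *
 *	__u32 raw_sz;
 *	const void *raw = btf__get_raw_data(btf, &raw_sz);
 *
 *	if (!raw)
 *		return -errno;
 *	fwrite(raw, 1, raw_sz, out_file);
 */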
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	free(ptr);
	return btf;
}

struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
{
	struct btf *btf;
	int btf_fd;

	btf_fd = bpf_btf_get_fd_by_id(id);
	if (btf_fd < 0)
		return libbpf_err_ptr(-errno);

	btf = btf_get_from_fd(btf_fd, base_btf);
	close(btf_fd);

	return libbpf_ptr(btf);
}

struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}

int btf__get_from_id(__u32 id, struct btf **btf)
{
	struct btf *res;
	int err;

	*btf = NULL;
	res = btf__load_from_kernel_by_id(id);
	err = libbpf_get_error(res);

	if (err)
		return libbpf_err(err);

	*btf = res;
	return 0;
}

int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
			 __u32 expected_key_size, __u32 expected_value_size,
			 __u32 *key_type_id, __u32 *value_type_id)
{
	const struct btf_type *container_type;
	const struct btf_member *key, *value;
	const size_t max_name = 256;
	char container_name[max_name];
	__s64 key_size, value_size;
	__s32 container_id;

	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) {
		pr_warn("map:%s length of '____btf_map_%s' is too long\n",
			map_name, map_name);
		return libbpf_err(-EINVAL);
	}

	container_id = btf__find_by_name(btf, container_name);
	if (container_id < 0) {
		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
			 map_name, container_name);
		return libbpf_err(container_id);
	}

	container_type = btf__type_by_id(btf, container_id);
	if (!container_type) {
		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
			map_name, container_id);
		return libbpf_err(-EINVAL);
	}

	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
		pr_warn("map:%s container_name:%s is an invalid container struct\n",
			map_name, container_name);
		return libbpf_err(-EINVAL);
	}

	key = btf_members(container_type);
	value = key + 1;

	key_size = btf__resolve_size(btf, key->type);
	if (key_size < 0) {
		pr_warn("map:%s invalid BTF key_type_size\n", map_name);
		return libbpf_err(key_size);
	}

	if (expected_key_size != key_size) {
		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
			map_name, (__u32)key_size, expected_key_size);
		return libbpf_err(-EINVAL);
	}

	value_size = btf__resolve_size(btf, value->type);
	if (value_size < 0) {
		pr_warn("map:%s invalid BTF value_type_size\n", map_name);
		return libbpf_err(value_size);
	}

	if (expected_value_size != value_size) {
		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
			map_name, (__u32)value_size, expected_value_size);
		return libbpf_err(-EINVAL);
	}

	*key_type_id = key->type;
	*value_type_id = value->type;

	return 0;
}

static void btf_invalidate_raw_data(struct btf *btf)
{
	if (btf->raw_data) {
		free(btf->raw_data);
		btf->raw_data = NULL;
	}
	if (btf->raw_data_swapped) {
		free(btf->raw_data_swapped);
		btf->raw_data_swapped = NULL;
	}
}
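
/* Editor's note: illustrative sketch (not part of libbpf) of fetching BTF
 * of a loaded kernel object by its ID, as reported e.g. by `bpftool btf list`,
 * using btf__load_from_kernel_by_id() above:
 *
 *	struct btf *kbtf = btf__load_from_kernel_by_id(id);
 *	long err = libbpf_get_error(kbtf);
 *
 *	if (err)
 *		return err;
 *	// ... inspect kbtf ...
 *	btf__free(kbtf);
 */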
/* Ensure BTF is ready to be modified (by splitting into three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL;
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}

/* Find an offset in BTF string section that corresponds to a given string *s*.
 * Returns:
 *   - >0 offset into string section, if string is found;
 *   - -ENOENT, if string is not in the string section;
 *   - <0, on any other error.
 */
int btf__find_str(struct btf *btf, const char *s)
{
	int off;

	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	/* BTF needs to be in a modifiable state to build string lookup index */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__find_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	return btf->start_str_off + off;
}
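
/* Editor's note: a small sketch (not part of libbpf) contrasting the
 * lookup-only btf__find_str() above with the interning btf__add_str()
 * below; both return logical offsets that remain stable across later
 * modifications:
 *
 *	int off = btf__find_str(btf, "int");
 *
 *	if (off == -ENOENT)
 *		off = btf__add_str(btf, "int");   // appends and returns offset
 *	if (off < 0)
 *		return off;
 */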
1615 */ 1616 int btf__add_str(struct btf *btf, const char *s) 1617 { 1618 int off; 1619 1620 if (btf->base_btf) { 1621 off = btf__find_str(btf->base_btf, s); 1622 if (off != -ENOENT) 1623 return off; 1624 } 1625 1626 if (btf_ensure_modifiable(btf)) 1627 return libbpf_err(-ENOMEM); 1628 1629 off = strset__add_str(btf->strs_set, s); 1630 if (off < 0) 1631 return libbpf_err(off); 1632 1633 btf->hdr->str_len = strset__data_size(btf->strs_set); 1634 1635 return btf->start_str_off + off; 1636 } 1637 1638 static void *btf_add_type_mem(struct btf *btf, size_t add_sz) 1639 { 1640 return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1, 1641 btf->hdr->type_len, UINT_MAX, add_sz); 1642 } 1643 1644 static void btf_type_inc_vlen(struct btf_type *t) 1645 { 1646 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t)); 1647 } 1648 1649 static int btf_commit_type(struct btf *btf, int data_sz) 1650 { 1651 int err; 1652 1653 err = btf_add_type_idx_entry(btf, btf->hdr->type_len); 1654 if (err) 1655 return libbpf_err(err); 1656 1657 btf->hdr->type_len += data_sz; 1658 btf->hdr->str_off += data_sz; 1659 btf->nr_types++; 1660 return btf->start_id + btf->nr_types - 1; 1661 } 1662 1663 struct btf_pipe { 1664 const struct btf *src; 1665 struct btf *dst; 1666 }; 1667 1668 static int btf_rewrite_str(__u32 *str_off, void *ctx) 1669 { 1670 struct btf_pipe *p = ctx; 1671 int off; 1672 1673 if (!*str_off) /* nothing to do for empty strings */ 1674 return 0; 1675 1676 off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off)); 1677 if (off < 0) 1678 return off; 1679 1680 *str_off = off; 1681 return 0; 1682 } 1683 1684 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) 1685 { 1686 struct btf_pipe p = { .src = src_btf, .dst = btf }; 1687 struct btf_type *t; 1688 int sz, err; 1689 1690 sz = btf_type_size(src_type); 1691 if (sz < 0) 1692 return libbpf_err(sz); 1693 1694 /* deconstruct BTF, if necessary, and invalidate raw_data */ 1695 if (btf_ensure_modifiable(btf)) 1696 return libbpf_err(-ENOMEM); 1697 1698 t = btf_add_type_mem(btf, sz); 1699 if (!t) 1700 return libbpf_err(-ENOMEM); 1701 1702 memcpy(t, src_type, sz); 1703 1704 err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); 1705 if (err) 1706 return libbpf_err(err); 1707 1708 return btf_commit_type(btf, sz); 1709 } 1710 1711 static int btf_rewrite_type_ids(__u32 *type_id, void *ctx) 1712 { 1713 struct btf *btf = ctx; 1714 1715 if (!*type_id) /* nothing to do for VOID references */ 1716 return 0; 1717 1718 /* we haven't updated btf's type count yet, so 1719 * btf->start_id + btf->nr_types - 1 is the type ID offset we should 1720 * add to all newly added BTF types 1721 */ 1722 *type_id += btf->start_id + btf->nr_types - 1; 1723 return 0; 1724 } 1725 1726 int btf__add_btf(struct btf *btf, const struct btf *src_btf) 1727 { 1728 struct btf_pipe p = { .src = src_btf, .dst = btf }; 1729 int data_sz, sz, cnt, i, err, old_strs_len; 1730 __u32 *off; 1731 void *t; 1732 1733 /* appending split BTF isn't supported yet */ 1734 if (src_btf->base_btf) 1735 return libbpf_err(-ENOTSUP); 1736 1737 /* deconstruct BTF, if necessary, and invalidate raw_data */ 1738 if (btf_ensure_modifiable(btf)) 1739 return libbpf_err(-ENOMEM); 1740 1741 /* remember original strings section size if we have to roll back 1742 * partial strings section changes 1743 */ 1744 old_strs_len = btf->hdr->str_len; 1745 1746 data_sz = src_btf->hdr->type_len; 1747 cnt = btf__get_nr_types(src_btf); 1748 1749 /* pre-allocate enough memory for new 
static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
{
	struct btf *btf = ctx;

	if (!*type_id) /* nothing to do for VOID references */
		return 0;

	/* we haven't updated btf's type count yet, so
	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
	 * add to all newly added BTF types
	 */
	*type_id += btf->start_id + btf->nr_types - 1;
	return 0;
}

int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__get_nr_types(src_btf);

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
		if (err)
			goto err_out;

		/* remap all type IDs referenced from this BTF type */
		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
		if (err)
			goto err_out;

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying. But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and make them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;

err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	return libbpf_err(err);
}
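
/* Editor's note: illustrative sketch (not part of libbpf) of appending a
 * whole BTF object with btf__add_btf() above; all cross-type references
 * inside `src_btf` are remapped to the destination's ID space:
 *
 *	int first_id = btf__add_btf(dst_btf, src_btf);
 *
 *	if (first_id < 0)
 *		return first_id;
 *	// types [1..N] of src_btf are now [first_id..first_id + N - 1]
 */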
1824 */ 1825 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding) 1826 { 1827 struct btf_type *t; 1828 int sz, name_off; 1829 1830 /* non-empty name */ 1831 if (!name || !name[0]) 1832 return libbpf_err(-EINVAL); 1833 /* byte_sz must be power of 2 */ 1834 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16) 1835 return libbpf_err(-EINVAL); 1836 if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL)) 1837 return libbpf_err(-EINVAL); 1838 1839 /* deconstruct BTF, if necessary, and invalidate raw_data */ 1840 if (btf_ensure_modifiable(btf)) 1841 return libbpf_err(-ENOMEM); 1842 1843 sz = sizeof(struct btf_type) + sizeof(int); 1844 t = btf_add_type_mem(btf, sz); 1845 if (!t) 1846 return libbpf_err(-ENOMEM); 1847 1848 /* if something goes wrong later, we might end up with an extra string, 1849 * but that shouldn't be a problem, because BTF can't be constructed 1850 * completely anyway and will most probably be just discarded 1851 */ 1852 name_off = btf__add_str(btf, name); 1853 if (name_off < 0) 1854 return name_off; 1855 1856 t->name_off = name_off; 1857 t->info = btf_type_info(BTF_KIND_INT, 0, 0); 1858 t->size = byte_sz; 1859 /* set INT info, we don't allow setting legacy bit offset/size */ 1860 *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8); 1861 1862 return btf_commit_type(btf, sz); 1863 } 1864 1865 /* 1866 * Append new BTF_KIND_FLOAT type with: 1867 * - *name* - non-empty, non-NULL type name; 1868 * - *sz* - size of the type, in bytes; 1869 * Returns: 1870 * - >0, type ID of newly added BTF type; 1871 * - <0, on error. 1872 */ 1873 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz) 1874 { 1875 struct btf_type *t; 1876 int sz, name_off; 1877 1878 /* non-empty name */ 1879 if (!name || !name[0]) 1880 return libbpf_err(-EINVAL); 1881 1882 /* byte_sz must be one of the explicitly allowed values */ 1883 if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 && 1884 byte_sz != 16) 1885 return libbpf_err(-EINVAL); 1886 1887 if (btf_ensure_modifiable(btf)) 1888 return libbpf_err(-ENOMEM); 1889 1890 sz = sizeof(struct btf_type); 1891 t = btf_add_type_mem(btf, sz); 1892 if (!t) 1893 return libbpf_err(-ENOMEM); 1894 1895 name_off = btf__add_str(btf, name); 1896 if (name_off < 0) 1897 return name_off; 1898 1899 t->name_off = name_off; 1900 t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0); 1901 t->size = byte_sz; 1902 1903 return btf_commit_type(btf, sz); 1904 } 1905 1906 /* it's completely legal to append BTF types with type IDs pointing forward to 1907 * types that haven't been appended yet, so we only make sure that id looks 1908 * sane, we can't guarantee that ID will always be valid 1909 */ 1910 static int validate_type_id(int id) 1911 { 1912 if (id < 0 || id > BTF_MAX_NR_TYPES) 1913 return -EINVAL; 1914 return 0; 1915 } 1916 1917 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */ 1918 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id) 1919 { 1920 struct btf_type *t; 1921 int sz, name_off = 0; 1922 1923 if (validate_type_id(ref_type_id)) 1924 return libbpf_err(-EINVAL); 1925 1926 if (btf_ensure_modifiable(btf)) 1927 return libbpf_err(-ENOMEM); 1928 1929 sz = sizeof(struct btf_type); 1930 t = btf_add_type_mem(btf, sz); 1931 if (!t) 1932 return libbpf_err(-ENOMEM); 1933 1934 if (name && name[0]) { 1935 name_off = btf__add_str(btf, name); 1936 if (name_off < 0) 1937 return name_off; 1938 } 1939 1940 t->name_off = name_off; 1941 t->info = btf_type_info(kind, 0, 0); 
/* it's completely legal to append BTF types with type IDs pointing forward to
 * types that haven't been appended yet, so we only make sure that id looks
 * sane, we can't guarantee that ID will always be valid
 */
static int validate_type_id(int id)
{
	if (id < 0 || id > BTF_MAX_NR_TYPES)
		return -EINVAL;
	return 0;
}

/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->type = ref_type_id;

	return btf_commit_type(btf, sz);
}

/*
 * Append new BTF_KIND_PTR type with:
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_ptr(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
}

/*
 * Append new BTF_KIND_ARRAY type with:
 *   - *index_type_id* - type ID of the type describing array index;
 *   - *elem_type_id* - type ID of the type describing array element;
 *   - *nr_elems* - the size of the array;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
{
	struct btf_type *t;
	struct btf_array *a;
	int sz;

	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	t->name_off = 0;
	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
	t->size = 0;

	a = btf_array(t);
	a->type = elem_type_id;
	a->index_type = index_type_id;
	a->nelems = nr_elems;

	return btf_commit_type(btf, sz);
}

/* generic STRUCT/UNION append function */
static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0 and no kflag; this will be adjusted when
	 * adding each member
	 */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->size = bytes_sz;

	return btf_commit_type(btf, sz);
}

/*
 * Append new BTF_KIND_STRUCT type with:
 *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
 *   - *byte_sz* - size of the struct, in bytes;
 *
 * Struct initially has no fields in it. Fields can be added by
 * btf__add_field() right after btf__add_struct() succeeds.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
{
	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
}
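
/* Editor's note: hedged sketch (not part of libbpf) of building a struct
 * with btf__add_struct() above and btf__add_field() below; assumes `int_id`
 * is the ID of a 4-byte int added earlier:
 *
 *	// struct pair { int x; int y; };
 *	int sid = btf__add_struct(btf, "pair", 8);
 *
 *	if (sid < 0)
 *		return sid;
 *	btf__add_field(btf, "x", int_id, 0, 0);    // bit offset 0
 *	btf__add_field(btf, "y", int_id, 32, 0);   // bit offset 32
 */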
2056 */ 2057 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz) 2058 { 2059 return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz); 2060 } 2061 2062 static struct btf_type *btf_last_type(struct btf *btf) 2063 { 2064 return btf_type_by_id(btf, btf__get_nr_types(btf)); 2065 } 2066 2067 /* 2068 * Append new field for the current STRUCT/UNION type with: 2069 * - *name* - name of the field, can be NULL or empty for anonymous field; 2070 * - *type_id* - type ID for the type describing field type; 2071 * - *bit_offset* - bit offset of the start of the field within struct/union; 2072 * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields; 2073 * Returns: 2074 * - 0, on success; 2075 * - <0, on error. 2076 */ 2077 int btf__add_field(struct btf *btf, const char *name, int type_id, 2078 __u32 bit_offset, __u32 bit_size) 2079 { 2080 struct btf_type *t; 2081 struct btf_member *m; 2082 bool is_bitfield; 2083 int sz, name_off = 0; 2084 2085 /* last type should be union/struct */ 2086 if (btf->nr_types == 0) 2087 return libbpf_err(-EINVAL); 2088 t = btf_last_type(btf); 2089 if (!btf_is_composite(t)) 2090 return libbpf_err(-EINVAL); 2091 2092 if (validate_type_id(type_id)) 2093 return libbpf_err(-EINVAL); 2094 /* best-effort bit field offset/size enforcement */ 2095 is_bitfield = bit_size || (bit_offset % 8 != 0); 2096 if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff)) 2097 return libbpf_err(-EINVAL); 2098 2099 /* only offset 0 is allowed for unions */ 2100 if (btf_is_union(t) && bit_offset) 2101 return libbpf_err(-EINVAL); 2102 2103 /* decompose and invalidate raw data */ 2104 if (btf_ensure_modifiable(btf)) 2105 return libbpf_err(-ENOMEM); 2106 2107 sz = sizeof(struct btf_member); 2108 m = btf_add_type_mem(btf, sz); 2109 if (!m) 2110 return libbpf_err(-ENOMEM); 2111 2112 if (name && name[0]) { 2113 name_off = btf__add_str(btf, name); 2114 if (name_off < 0) 2115 return name_off; 2116 } 2117 2118 m->name_off = name_off; 2119 m->type = type_id; 2120 m->offset = bit_offset | (bit_size << 24); 2121 2122 /* btf_add_type_mem can invalidate t pointer */ 2123 t = btf_last_type(btf); 2124 /* update parent type's vlen and kflag */ 2125 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t)); 2126 2127 btf->hdr->type_len += sz; 2128 btf->hdr->str_off += sz; 2129 return 0; 2130 } 2131 2132 /* 2133 * Append new BTF_KIND_ENUM type with: 2134 * - *name* - name of the enum, can be NULL or empty for anonymous enums; 2135 * - *byte_sz* - size of the enum, in bytes. 2136 * 2137 * Enum initially has no enum values in it (and corresponds to enum forward 2138 * declaration). Enumerator values can be added by btf__add_enum_value() 2139 * immediately after btf__add_enum() succeeds. 2140 * 2141 * Returns: 2142 * - >0, type ID of newly added BTF type; 2143 * - <0, on error. 
2144 */ 2145 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz) 2146 { 2147 struct btf_type *t; 2148 int sz, name_off = 0; 2149 2150 /* byte_sz must be a power of 2 */ 2151 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) 2152 return libbpf_err(-EINVAL); 2153 2154 if (btf_ensure_modifiable(btf)) 2155 return libbpf_err(-ENOMEM); 2156 2157 sz = sizeof(struct btf_type); 2158 t = btf_add_type_mem(btf, sz); 2159 if (!t) 2160 return libbpf_err(-ENOMEM); 2161 2162 if (name && name[0]) { 2163 name_off = btf__add_str(btf, name); 2164 if (name_off < 0) 2165 return name_off; 2166 } 2167 2168 /* start out with vlen=0; it will be adjusted when adding enum values */ 2169 t->name_off = name_off; 2170 t->info = btf_type_info(BTF_KIND_ENUM, 0, 0); 2171 t->size = byte_sz; 2172 2173 return btf_commit_type(btf, sz); 2174 } 2175 2176 /* 2177 * Append new enum value for the current ENUM type with: 2178 * - *name* - name of the enumerator value, can't be NULL or empty; 2179 * - *value* - integer value corresponding to enum value *name*; 2180 * Returns: 2181 * - 0, on success; 2182 * - <0, on error. 2183 */ 2184 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value) 2185 { 2186 struct btf_type *t; 2187 struct btf_enum *v; 2188 int sz, name_off; 2189 2190 /* last type should be BTF_KIND_ENUM */ 2191 if (btf->nr_types == 0) 2192 return libbpf_err(-EINVAL); 2193 t = btf_last_type(btf); 2194 if (!btf_is_enum(t)) 2195 return libbpf_err(-EINVAL); 2196 2197 /* non-empty name */ 2198 if (!name || !name[0]) 2199 return libbpf_err(-EINVAL); 2200 if (value < INT_MIN || value > UINT_MAX) 2201 return libbpf_err(-E2BIG); 2202 2203 /* decompose and invalidate raw data */ 2204 if (btf_ensure_modifiable(btf)) 2205 return libbpf_err(-ENOMEM); 2206 2207 sz = sizeof(struct btf_enum); 2208 v = btf_add_type_mem(btf, sz); 2209 if (!v) 2210 return libbpf_err(-ENOMEM); 2211 2212 name_off = btf__add_str(btf, name); 2213 if (name_off < 0) 2214 return name_off; 2215 2216 v->name_off = name_off; 2217 v->val = value; 2218 2219 /* update parent type's vlen */ 2220 t = btf_last_type(btf); 2221 btf_type_inc_vlen(t); 2222 2223 btf->hdr->type_len += sz; 2224 btf->hdr->str_off += sz; 2225 return 0; 2226 } 2227 2228 /* 2229 * Append new BTF_KIND_FWD type with: 2230 * - *name*, non-empty/non-NULL name; 2231 * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT, 2232 * BTF_FWD_UNION, or BTF_FWD_ENUM; 2233 * Returns: 2234 * - >0, type ID of newly added BTF type; 2235 * - <0, on error. 2236 */ 2237 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind) 2238 { 2239 if (!name || !name[0]) 2240 return libbpf_err(-EINVAL); 2241 2242 switch (fwd_kind) { 2243 case BTF_FWD_STRUCT: 2244 case BTF_FWD_UNION: { 2245 struct btf_type *t; 2246 int id; 2247 2248 id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0); 2249 if (id <= 0) 2250 return id; 2251 t = btf_type_by_id(btf, id); 2252 t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION); 2253 return id; 2254 } 2255 case BTF_FWD_ENUM: 2256 /* enum forward in BTF currently is just an enum with no enum 2257 * values; we also assume a standard 4-byte size for it 2258 */ 2259 return btf__add_enum(btf, name, sizeof(int)); 2260 default: 2261 return libbpf_err(-EINVAL); 2262 } 2263 } 2264 2265 /* 2266 * Append new BTF_KIND_TYPEDEF type with: 2267 * - *name*, non-empty/non-NULL name; 2268 * - *ref_type_id* - referenced type ID, it might not exist yet; 2269 * Returns: 2270 * - >0, type ID of newly added BTF type; 2271 * - <0, on error.
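 *
 * E.g., `typedef int my_int;` could be sketched as (assuming *int_id* is a
 * previously added BTF_KIND_INT type):
 *
 *	int my_int_id = btf__add_typedef(btf, "my_int", int_id);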
2272 */ 2273 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id) 2274 { 2275 if (!name || !name[0]) 2276 return libbpf_err(-EINVAL); 2277 2278 return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id); 2279 } 2280 2281 /* 2282 * Append new BTF_KIND_VOLATILE type with: 2283 * - *ref_type_id* - referenced type ID, it might not exist yet; 2284 * Returns: 2285 * - >0, type ID of newly added BTF type; 2286 * - <0, on error. 2287 */ 2288 int btf__add_volatile(struct btf *btf, int ref_type_id) 2289 { 2290 return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id); 2291 } 2292 2293 /* 2294 * Append new BTF_KIND_CONST type with: 2295 * - *ref_type_id* - referenced type ID, it might not exist yet; 2296 * Returns: 2297 * - >0, type ID of newly added BTF type; 2298 * - <0, on error. 2299 */ 2300 int btf__add_const(struct btf *btf, int ref_type_id) 2301 { 2302 return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id); 2303 } 2304 2305 /* 2306 * Append new BTF_KIND_RESTRICT type with: 2307 * - *ref_type_id* - referenced type ID, it might not exist yet; 2308 * Returns: 2309 * - >0, type ID of newly added BTF type; 2310 * - <0, on error. 2311 */ 2312 int btf__add_restrict(struct btf *btf, int ref_type_id) 2313 { 2314 return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id); 2315 } 2316 2317 /* 2318 * Append new BTF_KIND_FUNC type with: 2319 * - *name*, non-empty/non-NULL name; 2320 * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet; 2321 * Returns: 2322 * - >0, type ID of newly added BTF type; 2323 * - <0, on error. 2324 */ 2325 int btf__add_func(struct btf *btf, const char *name, 2326 enum btf_func_linkage linkage, int proto_type_id) 2327 { 2328 int id; 2329 2330 if (!name || !name[0]) 2331 return libbpf_err(-EINVAL); 2332 if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL && 2333 linkage != BTF_FUNC_EXTERN) 2334 return libbpf_err(-EINVAL); 2335 2336 id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id); 2337 if (id > 0) { 2338 struct btf_type *t = btf_type_by_id(btf, id); 2339 2340 t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0); 2341 } 2342 return libbpf_err(id); 2343 } 2344 2345 /* 2346 * Append new BTF_KIND_FUNC_PROTO with: 2347 * - *ret_type_id* - type ID for return result of a function. 2348 * 2349 * Function prototype initially has no arguments, but they can be added by 2350 * btf__add_func_param() one by one, immediately after 2351 * btf__add_func_proto() succeeds. 2352 * 2353 * Returns: 2354 * - >0, type ID of newly added BTF type; 2355 * - <0, on error. 2356 */ 2357 int btf__add_func_proto(struct btf *btf, int ret_type_id) 2358 { 2359 struct btf_type *t; 2360 int sz; 2361 2362 if (validate_type_id(ret_type_id)) 2363 return libbpf_err(-EINVAL); 2364 2365 if (btf_ensure_modifiable(btf)) 2366 return libbpf_err(-ENOMEM); 2367 2368 sz = sizeof(struct btf_type); 2369 t = btf_add_type_mem(btf, sz); 2370 if (!t) 2371 return libbpf_err(-ENOMEM); 2372 2373 /* start out with vlen=0; this will be adjusted when adding function 2374 * parameters, if necessary 2375 */ 2376 t->name_off = 0; 2377 t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0); 2378 t->type = ret_type_id; 2379 2380 return btf_commit_type(btf, sz); 2381 } 2382 2383 /* 2384 * Append new function parameter for current FUNC_PROTO type with: 2385 * - *name* - parameter name, can be NULL or empty; 2386 * - *type_id* - type ID describing the type of the parameter. 2387 * Returns: 2388 * - 0, on success; 2389 * - <0, on error.
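 *
 * Putting FUNC_PROTO, params, and FUNC together, `int sum(int a, int b)`
 * could be sketched as (assuming *int_id* is a previously added
 * BTF_KIND_INT type and error handling is elided):
 *
 *	int proto_id = btf__add_func_proto(btf, int_id);
 *
 *	btf__add_func_param(btf, "a", int_id);
 *	btf__add_func_param(btf, "b", int_id);
 *	btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);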
2390 */ 2391 int btf__add_func_param(struct btf *btf, const char *name, int type_id) 2392 { 2393 struct btf_type *t; 2394 struct btf_param *p; 2395 int sz, name_off = 0; 2396 2397 if (validate_type_id(type_id)) 2398 return libbpf_err(-EINVAL); 2399 2400 /* last type should be BTF_KIND_FUNC_PROTO */ 2401 if (btf->nr_types == 0) 2402 return libbpf_err(-EINVAL); 2403 t = btf_last_type(btf); 2404 if (!btf_is_func_proto(t)) 2405 return libbpf_err(-EINVAL); 2406 2407 /* decompose and invalidate raw data */ 2408 if (btf_ensure_modifiable(btf)) 2409 return libbpf_err(-ENOMEM); 2410 2411 sz = sizeof(struct btf_param); 2412 p = btf_add_type_mem(btf, sz); 2413 if (!p) 2414 return libbpf_err(-ENOMEM); 2415 2416 if (name && name[0]) { 2417 name_off = btf__add_str(btf, name); 2418 if (name_off < 0) 2419 return name_off; 2420 } 2421 2422 p->name_off = name_off; 2423 p->type = type_id; 2424 2425 /* update parent type's vlen */ 2426 t = btf_last_type(btf); 2427 btf_type_inc_vlen(t); 2428 2429 btf->hdr->type_len += sz; 2430 btf->hdr->str_off += sz; 2431 return 0; 2432 } 2433 2434 /* 2435 * Append new BTF_KIND_VAR type with: 2436 * - *name* - non-empty/non-NULL name; 2437 * - *linkage* - variable linkage, one of BTF_VAR_STATIC, 2438 * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN; 2439 * - *type_id* - type ID of the type describing the type of the variable. 2440 * Returns: 2441 * - >0, type ID of newly added BTF type; 2442 * - <0, on error. 2443 */ 2444 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id) 2445 { 2446 struct btf_type *t; 2447 struct btf_var *v; 2448 int sz, name_off; 2449 2450 /* non-empty name */ 2451 if (!name || !name[0]) 2452 return libbpf_err(-EINVAL); 2453 if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED && 2454 linkage != BTF_VAR_GLOBAL_EXTERN) 2455 return libbpf_err(-EINVAL); 2456 if (validate_type_id(type_id)) 2457 return libbpf_err(-EINVAL); 2458 2459 /* deconstruct BTF, if necessary, and invalidate raw_data */ 2460 if (btf_ensure_modifiable(btf)) 2461 return libbpf_err(-ENOMEM); 2462 2463 sz = sizeof(struct btf_type) + sizeof(struct btf_var); 2464 t = btf_add_type_mem(btf, sz); 2465 if (!t) 2466 return libbpf_err(-ENOMEM); 2467 2468 name_off = btf__add_str(btf, name); 2469 if (name_off < 0) 2470 return name_off; 2471 2472 t->name_off = name_off; 2473 t->info = btf_type_info(BTF_KIND_VAR, 0, 0); 2474 t->type = type_id; 2475 2476 v = btf_var(t); 2477 v->linkage = linkage; 2478 2479 return btf_commit_type(btf, sz); 2480 } 2481 2482 /* 2483 * Append new BTF_KIND_DATASEC type with: 2484 * - *name* - non-empty/non-NULL name; 2485 * - *byte_sz* - data section size, in bytes. 2486 * 2487 * Data section is initially empty. Variables info can be added with 2488 * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds. 2489 * 2490 * Returns: 2491 * - >0, type ID of newly added BTF type; 2492 * - <0, on error. 
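 *
 * As an illustrative sketch, a 4-byte ".data" section holding one variable
 * (assuming *var_id* is a previously added BTF_KIND_VAR type; error
 * handling elided):
 *
 *	int sec_id = btf__add_datasec(btf, ".data", 4);
 *
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);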
2493 */ 2494 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz) 2495 { 2496 struct btf_type *t; 2497 int sz, name_off; 2498 2499 /* non-empty name */ 2500 if (!name || !name[0]) 2501 return libbpf_err(-EINVAL); 2502 2503 if (btf_ensure_modifiable(btf)) 2504 return libbpf_err(-ENOMEM); 2505 2506 sz = sizeof(struct btf_type); 2507 t = btf_add_type_mem(btf, sz); 2508 if (!t) 2509 return libbpf_err(-ENOMEM); 2510 2511 name_off = btf__add_str(btf, name); 2512 if (name_off < 0) 2513 return name_off; 2514 2515 /* start with vlen=0, which will be updated as var_secinfos are added */ 2516 t->name_off = name_off; 2517 t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0); 2518 t->size = byte_sz; 2519 2520 return btf_commit_type(btf, sz); 2521 } 2522 2523 /* 2524 * Append new data section variable information entry for current DATASEC type: 2525 * - *var_type_id* - type ID, describing type of the variable; 2526 * - *offset* - variable offset within data section, in bytes; 2527 * - *byte_sz* - variable size, in bytes. 2528 * 2529 * Returns: 2530 * - 0, on success; 2531 * - <0, on error. 2532 */ 2533 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz) 2534 { 2535 struct btf_type *t; 2536 struct btf_var_secinfo *v; 2537 int sz; 2538 2539 /* last type should be BTF_KIND_DATASEC */ 2540 if (btf->nr_types == 0) 2541 return libbpf_err(-EINVAL); 2542 t = btf_last_type(btf); 2543 if (!btf_is_datasec(t)) 2544 return libbpf_err(-EINVAL); 2545 2546 if (validate_type_id(var_type_id)) 2547 return libbpf_err(-EINVAL); 2548 2549 /* decompose and invalidate raw data */ 2550 if (btf_ensure_modifiable(btf)) 2551 return libbpf_err(-ENOMEM); 2552 2553 sz = sizeof(struct btf_var_secinfo); 2554 v = btf_add_type_mem(btf, sz); 2555 if (!v) 2556 return libbpf_err(-ENOMEM); 2557 2558 v->type = var_type_id; 2559 v->offset = offset; 2560 v->size = byte_sz; 2561 2562 /* update parent type's vlen */ 2563 t = btf_last_type(btf); 2564 btf_type_inc_vlen(t); 2565 2566 btf->hdr->type_len += sz; 2567 btf->hdr->str_off += sz; 2568 return 0; 2569 } 2570 2571 /* 2572 * Append new BTF_KIND_TAG type with: 2573 * - *value* - non-empty/non-NULL string; 2574 * - *ref_type_id* - referenced type ID, it might not exist yet; 2575 * - *component_idx* - -1 for tagging reference type, otherwise struct/union 2576 * member or function argument index; 2577 * Returns: 2578 * - >0, type ID of newly added BTF type; 2579 * - <0, on error.
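 *
 * E.g., a sketch of tagging the second member (component index 1) of some
 * struct with type ID *struct_id* (the tag string itself is arbitrary):
 *
 *	int tag_id = btf__add_tag(btf, "user", struct_id, 1);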
2580 */ 2581 int btf__add_tag(struct btf *btf, const char *value, int ref_type_id, 2582 int component_idx) 2583 { 2584 struct btf_type *t; 2585 int sz, value_off; 2586 2587 if (!value || !value[0] || component_idx < -1) 2588 return libbpf_err(-EINVAL); 2589 2590 if (validate_type_id(ref_type_id)) 2591 return libbpf_err(-EINVAL); 2592 2593 if (btf_ensure_modifiable(btf)) 2594 return libbpf_err(-ENOMEM); 2595 2596 sz = sizeof(struct btf_type) + sizeof(struct btf_tag); 2597 t = btf_add_type_mem(btf, sz); 2598 if (!t) 2599 return libbpf_err(-ENOMEM); 2600 2601 value_off = btf__add_str(btf, value); 2602 if (value_off < 0) 2603 return value_off; 2604 2605 t->name_off = value_off; 2606 t->info = btf_type_info(BTF_KIND_TAG, 0, false); 2607 t->type = ref_type_id; 2608 btf_tag(t)->component_idx = component_idx; 2609 2610 return btf_commit_type(btf, sz); 2611 } 2612 2613 struct btf_ext_sec_setup_param { 2614 __u32 off; 2615 __u32 len; 2616 __u32 min_rec_size; 2617 struct btf_ext_info *ext_info; 2618 const char *desc; 2619 }; 2620 2621 static int btf_ext_setup_info(struct btf_ext *btf_ext, 2622 struct btf_ext_sec_setup_param *ext_sec) 2623 { 2624 const struct btf_ext_info_sec *sinfo; 2625 struct btf_ext_info *ext_info; 2626 __u32 info_left, record_size; 2627 /* The start of the info sec (including the __u32 record_size). */ 2628 void *info; 2629 2630 if (ext_sec->len == 0) 2631 return 0; 2632 2633 if (ext_sec->off & 0x03) { 2634 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", 2635 ext_sec->desc); 2636 return -EINVAL; 2637 } 2638 2639 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; 2640 info_left = ext_sec->len; 2641 2642 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) { 2643 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", 2644 ext_sec->desc, ext_sec->off, ext_sec->len); 2645 return -EINVAL; 2646 } 2647 2648 /* At least a record size */ 2649 if (info_left < sizeof(__u32)) { 2650 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); 2651 return -EINVAL; 2652 } 2653 2654 /* The record size needs to meet the minimum standard */ 2655 record_size = *(__u32 *)info; 2656 if (record_size < ext_sec->min_rec_size || 2657 record_size & 0x03) { 2658 pr_debug("%s section in .BTF.ext has invalid record size %u\n", 2659 ext_sec->desc, record_size); 2660 return -EINVAL; 2661 } 2662 2663 sinfo = info + sizeof(__u32); 2664 info_left -= sizeof(__u32); 2665 2666 /* If no records, return failure now so .BTF.ext won't be used. 
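 * Otherwise, the validation loop below walks the remaining data, which is
 * laid out roughly as follows (a sketch; see struct btf_ext_info_sec), with
 * record_size itself already consumed above:
 *
 *	btf_ext_info_sec #1: { sec_name_off, num_info,
 *	                       num_info records of record_size bytes each }
 *	btf_ext_info_sec #2: ...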
*/ 2667 if (!info_left) { 2668 pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc); 2669 return -EINVAL; 2670 } 2671 2672 while (info_left) { 2673 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); 2674 __u64 total_record_size; 2675 __u32 num_records; 2676 2677 if (info_left < sec_hdrlen) { 2678 pr_debug("%s section header is not found in .BTF.ext\n", 2679 ext_sec->desc); 2680 return -EINVAL; 2681 } 2682 2683 num_records = sinfo->num_info; 2684 if (num_records == 0) { 2685 pr_debug("%s section has incorrect num_records in .BTF.ext\n", 2686 ext_sec->desc); 2687 return -EINVAL; 2688 } 2689 2690 total_record_size = sec_hdrlen + 2691 (__u64)num_records * record_size; 2692 if (info_left < total_record_size) { 2693 pr_debug("%s section has incorrect num_records in .BTF.ext\n", 2694 ext_sec->desc); 2695 return -EINVAL; 2696 } 2697 2698 info_left -= total_record_size; 2699 sinfo = (void *)sinfo + total_record_size; 2700 } 2701 2702 ext_info = ext_sec->ext_info; 2703 ext_info->len = ext_sec->len - sizeof(__u32); 2704 ext_info->rec_size = record_size; 2705 ext_info->info = info + sizeof(__u32); 2706 2707 return 0; 2708 } 2709 2710 static int btf_ext_setup_func_info(struct btf_ext *btf_ext) 2711 { 2712 struct btf_ext_sec_setup_param param = { 2713 .off = btf_ext->hdr->func_info_off, 2714 .len = btf_ext->hdr->func_info_len, 2715 .min_rec_size = sizeof(struct bpf_func_info_min), 2716 .ext_info = &btf_ext->func_info, 2717 .desc = "func_info" 2718 }; 2719 2720 return btf_ext_setup_info(btf_ext, &param); 2721 } 2722 2723 static int btf_ext_setup_line_info(struct btf_ext *btf_ext) 2724 { 2725 struct btf_ext_sec_setup_param param = { 2726 .off = btf_ext->hdr->line_info_off, 2727 .len = btf_ext->hdr->line_info_len, 2728 .min_rec_size = sizeof(struct bpf_line_info_min), 2729 .ext_info = &btf_ext->line_info, 2730 .desc = "line_info", 2731 }; 2732 2733 return btf_ext_setup_info(btf_ext, &param); 2734 } 2735 2736 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext) 2737 { 2738 struct btf_ext_sec_setup_param param = { 2739 .off = btf_ext->hdr->core_relo_off, 2740 .len = btf_ext->hdr->core_relo_len, 2741 .min_rec_size = sizeof(struct bpf_core_relo), 2742 .ext_info = &btf_ext->core_relo_info, 2743 .desc = "core_relo", 2744 }; 2745 2746 return btf_ext_setup_info(btf_ext, &param); 2747 } 2748 2749 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) 2750 { 2751 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; 2752 2753 if (data_size < offsetofend(struct btf_ext_header, hdr_len) || 2754 data_size < hdr->hdr_len) { 2755 pr_debug("BTF.ext header not found\n"); 2756 return -EINVAL; 2757 } 2758 2759 if (hdr->magic == bswap_16(BTF_MAGIC)) { 2760 pr_warn("BTF.ext in non-native endianness is not supported\n"); 2761 return -ENOTSUP; 2762 } else if (hdr->magic != BTF_MAGIC) { 2763 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); 2764 return -EINVAL; 2765 } 2766 2767 if (hdr->version != BTF_VERSION) { 2768 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); 2769 return -ENOTSUP; 2770 } 2771 2772 if (hdr->flags) { 2773 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); 2774 return -ENOTSUP; 2775 } 2776 2777 if (data_size == hdr->hdr_len) { 2778 pr_debug("BTF.ext has no data\n"); 2779 return -EINVAL; 2780 } 2781 2782 return 0; 2783 } 2784 2785 void btf_ext__free(struct btf_ext *btf_ext) 2786 { 2787 if (IS_ERR_OR_NULL(btf_ext)) 2788 return; 2789 free(btf_ext->data); 2790 free(btf_ext); 2791 } 2792 2793 struct btf_ext *btf_ext__new(__u8 *data, __u32 size) 2794 { 2795 struct btf_ext *btf_ext; 2796 int err; 2797 2798 err = btf_ext_parse_hdr(data, size); 2799 if (err) 2800 return libbpf_err_ptr(err); 2801 2802 btf_ext = calloc(1, sizeof(struct btf_ext)); 2803 if (!btf_ext) 2804 return libbpf_err_ptr(-ENOMEM); 2805 2806 btf_ext->data_size = size; 2807 btf_ext->data = malloc(size); 2808 if (!btf_ext->data) { 2809 err = -ENOMEM; 2810 goto done; 2811 } 2812 memcpy(btf_ext->data, data, size); 2813 2814 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { 2815 err = -EINVAL; 2816 goto done; 2817 } 2818 2819 err = btf_ext_setup_func_info(btf_ext); 2820 if (err) 2821 goto done; 2822 2823 err = btf_ext_setup_line_info(btf_ext); 2824 if (err) 2825 goto done; 2826 2827 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) { 2828 err = -EINVAL; 2829 goto done; 2830 } 2831 2832 err = btf_ext_setup_core_relos(btf_ext); 2833 if (err) 2834 goto done; 2835 2836 done: 2837 if (err) { 2838 btf_ext__free(btf_ext); 2839 return libbpf_err_ptr(err); 2840 } 2841 2842 return btf_ext; 2843 } 2844 2845 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size) 2846 { 2847 *size = btf_ext->data_size; 2848 return btf_ext->data; 2849 } 2850 2851 static int btf_ext_reloc_info(const struct btf *btf, 2852 const struct btf_ext_info *ext_info, 2853 const char *sec_name, __u32 insns_cnt, 2854 void **info, __u32 *cnt) 2855 { 2856 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec); 2857 __u32 i, record_size, existing_len, records_len; 2858 struct btf_ext_info_sec *sinfo; 2859 const char *info_sec_name; 2860 __u64 remain_len; 2861 void *data; 2862 2863 record_size = ext_info->rec_size; 2864 sinfo = ext_info->info; 2865 remain_len = ext_info->len; 2866 while (remain_len > 0) { 2867 records_len = sinfo->num_info * record_size; 2868 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off); 2869 if (strcmp(info_sec_name, sec_name)) { 2870 remain_len -= sec_hdrlen + records_len; 2871 sinfo = (void *)sinfo + sec_hdrlen + records_len; 2872 continue; 2873 } 2874 2875 existing_len = (*cnt) * record_size; 2876 data = realloc(*info, existing_len + records_len); 2877 if (!data) 2878 return libbpf_err(-ENOMEM); 2879 2880 memcpy(data + existing_len, sinfo->data, records_len); 2881 /* adjust insn_off only, the rest of the data will be passed 2882 * to the kernel.
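 * E.g., with sizeof(struct bpf_insn) == 8, a record whose byte-based
 * insn_off is 16, appended after insns_cnt == 100 previously copied
 * instructions, is rewritten to 16 / 8 + 100 == 102.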
2883 */ 2884 for (i = 0; i < sinfo->num_info; i++) { 2885 __u32 *insn_off; 2886 2887 insn_off = data + existing_len + (i * record_size); 2888 *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt; 2889 } 2890 *info = data; 2891 *cnt += sinfo->num_info; 2892 return 0; 2893 } 2894 2895 return libbpf_err(-ENOENT); 2896 } 2897 2898 int btf_ext__reloc_func_info(const struct btf *btf, 2899 const struct btf_ext *btf_ext, 2900 const char *sec_name, __u32 insns_cnt, 2901 void **func_info, __u32 *cnt) 2902 { 2903 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name, 2904 insns_cnt, func_info, cnt); 2905 } 2906 2907 int btf_ext__reloc_line_info(const struct btf *btf, 2908 const struct btf_ext *btf_ext, 2909 const char *sec_name, __u32 insns_cnt, 2910 void **line_info, __u32 *cnt) 2911 { 2912 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name, 2913 insns_cnt, line_info, cnt); 2914 } 2915 2916 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext) 2917 { 2918 return btf_ext->func_info.rec_size; 2919 } 2920 2921 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext) 2922 { 2923 return btf_ext->line_info.rec_size; 2924 } 2925 2926 struct btf_dedup; 2927 2928 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, 2929 const struct btf_dedup_opts *opts); 2930 static void btf_dedup_free(struct btf_dedup *d); 2931 static int btf_dedup_prep(struct btf_dedup *d); 2932 static int btf_dedup_strings(struct btf_dedup *d); 2933 static int btf_dedup_prim_types(struct btf_dedup *d); 2934 static int btf_dedup_struct_types(struct btf_dedup *d); 2935 static int btf_dedup_ref_types(struct btf_dedup *d); 2936 static int btf_dedup_compact_types(struct btf_dedup *d); 2937 static int btf_dedup_remap_types(struct btf_dedup *d); 2938 2939 /* 2940 * Deduplicate BTF types and strings. 2941 * 2942 * BTF dedup algorithm takes as input a `struct btf` representing the `.BTF` ELF 2943 * section with all BTF type descriptors and string data. It overwrites that 2944 * memory in-place with deduplicated types and strings without any loss of 2945 * information. If an optional `struct btf_ext` representing the '.BTF.ext' ELF section 2946 * is provided, all the strings referenced from the .BTF.ext section are honored 2947 * and updated to point to the right offsets after deduplication. 2948 * 2949 * If the function returns an error, type/string data might be garbled and should 2950 * be discarded. 2951 * 2952 * A more verbose and detailed description of both the problem btf_dedup is 2953 * solving and the solution itself can be found at: 2954 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html 2955 * 2956 * Problem description and justification 2957 * ===================================== 2958 * 2959 * BTF type information is typically emitted either as a result of conversion 2960 * from DWARF to BTF or directly by the compiler. In both cases, each compilation 2961 * unit contains information about a subset of all the types that are used 2962 * in an application. These subsets frequently overlap and contain a lot 2963 * of duplicated information when later concatenated together into a single 2964 * binary. This algorithm ensures that each unique type is represented by a single 2965 * BTF type descriptor, greatly reducing the resulting size of BTF data. 2966 * 2967 * Compilation unit isolation and subsequent duplication of data is not the only 2968 * problem.
The same type hierarchy (e.g., a struct and all the types that the struct 2969 * references) in different compilation units can be represented in BTF to 2970 * various degrees of completeness (or, rather, incompleteness) due to 2971 * struct/union forward declarations. 2972 * 2973 * Let's take a look at an example that we'll use to better understand the 2974 * problem (and solution). Suppose we have two compilation units, each using 2975 * the same `struct S`, but each of them having incomplete type information about 2976 * the struct's fields: 2977 * 2978 * // CU #1: 2979 * struct S; 2980 * struct A { 2981 * int a; 2982 * struct A* self; 2983 * struct S* parent; 2984 * }; 2985 * struct B; 2986 * struct S { 2987 * struct A* a_ptr; 2988 * struct B* b_ptr; 2989 * }; 2990 * 2991 * // CU #2: 2992 * struct S; 2993 * struct A; 2994 * struct B { 2995 * int b; 2996 * struct B* self; 2997 * struct S* parent; 2998 * }; 2999 * struct S { 3000 * struct A* a_ptr; 3001 * struct B* b_ptr; 3002 * }; 3003 * 3004 * In case of CU #1, BTF data will know only that `struct B` exists (but no 3005 * more), but will know the complete type information about `struct A`. While 3006 * for CU #2, it will know full type information about `struct B`, but will 3007 * only know about the forward declaration of `struct A` (in BTF terms, it will 3008 * have a `BTF_KIND_FWD` type descriptor with name `A`). 3009 * 3010 * This compilation unit isolation means that it's possible that there is no 3011 * single CU with complete type information describing structs `S`, `A`, and 3012 * `B`. Also, we might get tons of duplicated and redundant type information. 3013 * 3014 * An additional complication we need to keep in mind comes from the fact that 3015 * types, in general, can form graphs containing cycles, not just DAGs. 3016 * 3017 * While the algorithm does deduplication, it also merges and resolves type 3018 * information (unless disabled through `struct btf_dedup_opts`), whenever possible. 3019 * E.g., in the example above with two compilation units having partial type 3020 * information for structs `A` and `B`, the output of the algorithm will emit 3021 * a single copy of each BTF type that describes structs `A`, `B`, and `S` 3022 * (as well as type information for `int` and pointers), as if they were defined 3023 * in a single compilation unit as: 3024 * 3025 * struct A { 3026 * int a; 3027 * struct A* self; 3028 * struct S* parent; 3029 * }; 3030 * struct B { 3031 * int b; 3032 * struct B* self; 3033 * struct S* parent; 3034 * }; 3035 * struct S { 3036 * struct A* a_ptr; 3037 * struct B* b_ptr; 3038 * }; 3039 * 3040 * Algorithm summary 3041 * ================= 3042 * 3043 * The algorithm completes its work in 6 separate passes: 3044 * 3045 * 1. Strings deduplication. 3046 * 2. Primitive types deduplication (int, enum, fwd). 3047 * 3. Struct/union types deduplication. 3048 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func 3049 * protos, and const/volatile/restrict modifiers). 3050 * 5. Types compaction. 3051 * 6. Types remapping. 3052 * 3053 * The algorithm determines the canonical type descriptor, which is a single 3054 * representative type for each truly unique type. This canonical type is the 3055 * one that will go into final deduplicated BTF type information. For 3056 * struct/unions, it is also the type that the algorithm will merge additional type 3057 * information into (while resolving FWDs), as it discovers it from data in 3058 * other CUs.
Each input BTF type eventually gets either mapped to itself, if 3059 * that type is canonical, or to some other type, if that type is equivalent 3060 * and was chosen as canonical representative. This mapping is stored in 3061 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that 3062 * FWD type got resolved to. 3063 * 3064 * To facilitate fast discovery of canonical types, we also maintain canonical 3065 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash 3066 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types 3067 * that match that signature. With sufficiently good choice of type signature 3068 * hashing function, we can limit number of canonical types for each unique type 3069 * signature to a very small number, allowing us to find the canonical type for any 3070 * duplicated type very quickly. 3071 * 3072 * Struct/union deduplication is the most critical part and the algorithm for 3073 * deduplicating structs/unions is described in greater detail in comments for 3074 * the `btf_dedup_is_equiv` function. 3075 */ 3076 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, 3077 const struct btf_dedup_opts *opts) 3078 { 3079 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts); 3080 int err; 3081 3082 if (IS_ERR(d)) { 3083 pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d)); 3084 return libbpf_err(-EINVAL); 3085 } 3086 3087 if (btf_ensure_modifiable(btf)) { 3088 err = -ENOMEM; /* don't leak *d* on failure */ goto done; } 3089 3090 err = btf_dedup_prep(d); 3091 if (err) { 3092 pr_debug("btf_dedup_prep failed:%d\n", err); 3093 goto done; 3094 } 3095 err = btf_dedup_strings(d); 3096 if (err < 0) { 3097 pr_debug("btf_dedup_strings failed:%d\n", err); 3098 goto done; 3099 } 3100 err = btf_dedup_prim_types(d); 3101 if (err < 0) { 3102 pr_debug("btf_dedup_prim_types failed:%d\n", err); 3103 goto done; 3104 } 3105 err = btf_dedup_struct_types(d); 3106 if (err < 0) { 3107 pr_debug("btf_dedup_struct_types failed:%d\n", err); 3108 goto done; 3109 } 3110 err = btf_dedup_ref_types(d); 3111 if (err < 0) { 3112 pr_debug("btf_dedup_ref_types failed:%d\n", err); 3113 goto done; 3114 } 3115 err = btf_dedup_compact_types(d); 3116 if (err < 0) { 3117 pr_debug("btf_dedup_compact_types failed:%d\n", err); 3118 goto done; 3119 } 3120 err = btf_dedup_remap_types(d); 3121 if (err < 0) { 3122 pr_debug("btf_dedup_remap_types failed:%d\n", err); 3123 goto done; 3124 } 3125 3126 done: 3127 btf_dedup_free(d); 3128 return libbpf_err(err); 3129 } 3130 3131 #define BTF_UNPROCESSED_ID ((__u32)-1) 3132 #define BTF_IN_PROGRESS_ID ((__u32)-2) 3133 3134 struct btf_dedup { 3135 /* .BTF section to be deduped in-place */ 3136 struct btf *btf; 3137 /* 3138 * Optional .BTF.ext section. When provided, any strings referenced 3139 * from it will be taken into account when deduping strings 3140 */ 3141 struct btf_ext *btf_ext; 3142 /* 3143 * This is a map from any type's signature hash to a list of possible 3144 * canonical representative type candidates. Hash collisions are 3145 * ignored, so even types of various kinds can share the same list of 3146 * candidates, which is fine because we rely on subsequent 3147 * btf_xxx_equal() checks to authoritatively verify type equality.
3148 */ 3149 struct hashmap *dedup_table; 3150 /* Canonical types map */ 3151 __u32 *map; 3152 /* Hypothetical mapping, used during type graph equivalence checks */ 3153 __u32 *hypot_map; 3154 __u32 *hypot_list; 3155 size_t hypot_cnt; 3156 size_t hypot_cap; 3157 /* Whether hypothetical mapping, if successful, would need to adjust 3158 * already canonicalized types (due to a new forward declaration to 3159 * concrete type resolution). In such case, during split BTF dedup 3160 * candidate type would still be considered as different, because base 3161 * BTF is considered to be immutable. 3162 */ 3163 bool hypot_adjust_canon; 3164 /* Various option modifying behavior of algorithm */ 3165 struct btf_dedup_opts opts; 3166 /* temporary strings deduplication state */ 3167 struct strset *strs_set; 3168 }; 3169 3170 static long hash_combine(long h, long value) 3171 { 3172 return h * 31 + value; 3173 } 3174 3175 #define for_each_dedup_cand(d, node, hash) \ 3176 hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash) 3177 3178 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id) 3179 { 3180 return hashmap__append(d->dedup_table, 3181 (void *)hash, (void *)(long)type_id); 3182 } 3183 3184 static int btf_dedup_hypot_map_add(struct btf_dedup *d, 3185 __u32 from_id, __u32 to_id) 3186 { 3187 if (d->hypot_cnt == d->hypot_cap) { 3188 __u32 *new_list; 3189 3190 d->hypot_cap += max((size_t)16, d->hypot_cap / 2); 3191 new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32)); 3192 if (!new_list) 3193 return -ENOMEM; 3194 d->hypot_list = new_list; 3195 } 3196 d->hypot_list[d->hypot_cnt++] = from_id; 3197 d->hypot_map[from_id] = to_id; 3198 return 0; 3199 } 3200 3201 static void btf_dedup_clear_hypot_map(struct btf_dedup *d) 3202 { 3203 int i; 3204 3205 for (i = 0; i < d->hypot_cnt; i++) 3206 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; 3207 d->hypot_cnt = 0; 3208 d->hypot_adjust_canon = false; 3209 } 3210 3211 static void btf_dedup_free(struct btf_dedup *d) 3212 { 3213 hashmap__free(d->dedup_table); 3214 d->dedup_table = NULL; 3215 3216 free(d->map); 3217 d->map = NULL; 3218 3219 free(d->hypot_map); 3220 d->hypot_map = NULL; 3221 3222 free(d->hypot_list); 3223 d->hypot_list = NULL; 3224 3225 free(d); 3226 } 3227 3228 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx) 3229 { 3230 return (size_t)key; 3231 } 3232 3233 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx) 3234 { 3235 return 0; 3236 } 3237 3238 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx) 3239 { 3240 return k1 == k2; 3241 } 3242 3243 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, 3244 const struct btf_dedup_opts *opts) 3245 { 3246 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); 3247 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; 3248 int i, err = 0, type_cnt; 3249 3250 if (!d) 3251 return ERR_PTR(-ENOMEM); 3252 3253 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; 3254 /* dedup_table_size is now used only to force collisions in tests */ 3255 if (opts && opts->dedup_table_size == 1) 3256 hash_fn = btf_dedup_collision_hash_fn; 3257 3258 d->btf = btf; 3259 d->btf_ext = btf_ext; 3260 3261 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL); 3262 if (IS_ERR(d->dedup_table)) { 3263 err = PTR_ERR(d->dedup_table); 3264 d->dedup_table = NULL; 3265 goto done; 3266 } 3267 3268 type_cnt = btf__get_nr_types(btf) + 1; 3269 d->map = malloc(sizeof(__u32) * type_cnt); 3270 
if (!d->map) { 3271 err = -ENOMEM; 3272 goto done; 3273 } 3274 /* special BTF "void" type is made canonical immediately */ 3275 d->map[0] = 0; 3276 for (i = 1; i < type_cnt; i++) { 3277 struct btf_type *t = btf_type_by_id(d->btf, i); 3278 3279 /* VAR and DATASEC are never deduped and are self-canonical */ 3280 if (btf_is_var(t) || btf_is_datasec(t)) 3281 d->map[i] = i; 3282 else 3283 d->map[i] = BTF_UNPROCESSED_ID; 3284 } 3285 3286 d->hypot_map = malloc(sizeof(__u32) * type_cnt); 3287 if (!d->hypot_map) { 3288 err = -ENOMEM; 3289 goto done; 3290 } 3291 for (i = 0; i < type_cnt; i++) 3292 d->hypot_map[i] = BTF_UNPROCESSED_ID; 3293 3294 done: 3295 if (err) { 3296 btf_dedup_free(d); 3297 return ERR_PTR(err); 3298 } 3299 3300 return d; 3301 } 3302 3303 /* 3304 * Iterate over all possible places in .BTF and .BTF.ext that can reference 3305 * a string and pass a pointer to it to the provided callback `fn`. 3306 */ 3307 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx) 3308 { 3309 int i, r; 3310 3311 for (i = 0; i < d->btf->nr_types; i++) { 3312 struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); 3313 3314 r = btf_type_visit_str_offs(t, fn, ctx); 3315 if (r) 3316 return r; 3317 } 3318 3319 if (!d->btf_ext) 3320 return 0; 3321 3322 r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx); 3323 if (r) 3324 return r; 3325 3326 return 0; 3327 } 3328 3329 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx) 3330 { 3331 struct btf_dedup *d = ctx; 3332 __u32 str_off = *str_off_ptr; 3333 const char *s; 3334 int off, err; 3335 3336 /* don't touch empty string or string in main BTF */ 3337 if (str_off == 0 || str_off < d->btf->start_str_off) 3338 return 0; 3339 3340 s = btf__str_by_offset(d->btf, str_off); 3341 if (d->btf->base_btf) { 3342 err = btf__find_str(d->btf->base_btf, s); 3343 if (err >= 0) { 3344 *str_off_ptr = err; 3345 return 0; 3346 } 3347 if (err != -ENOENT) 3348 return err; 3349 } 3350 3351 off = strset__add_str(d->strs_set, s); 3352 if (off < 0) 3353 return off; 3354 3355 *str_off_ptr = d->btf->start_str_off + off; 3356 return 0; 3357 } 3358 3359 /* 3360 * Dedup strings and filter out those that are not referenced from either the .BTF 3361 * or .BTF.ext (if provided) sections. 3362 * 3363 * This is done by building an index of all strings in BTF's string section, 3364 * then iterating over all entities that can reference strings (e.g., type 3365 * names, struct field names, .BTF.ext line info, etc) and marking corresponding 3366 * strings as used. After that all used strings are deduped and compacted into 3367 * a sequential blob of memory and new offsets are calculated. Then all the string 3368 * references are iterated again and rewritten using new offsets.
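 *
 * For example, if two different types are both named "int", after this pass
 * both of their name_off fields point at the single deduped copy of the
 * "int" string (for split BTF, possibly at an offset within base BTF's
 * string section).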
3369 */ 3370 static int btf_dedup_strings(struct btf_dedup *d) 3371 { 3372 int err; 3373 3374 if (d->btf->strs_deduped) 3375 return 0; 3376 3377 d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0); 3378 if (IS_ERR(d->strs_set)) { 3379 err = PTR_ERR(d->strs_set); 3380 goto err_out; 3381 } 3382 3383 if (!d->btf->base_btf) { 3384 /* insert empty string; we won't be looking it up during strings 3385 * dedup, but it's good to have it for generic BTF string lookups 3386 */ 3387 err = strset__add_str(d->strs_set, ""); 3388 if (err < 0) 3389 goto err_out; 3390 } 3391 3392 /* remap string offsets */ 3393 err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d); 3394 if (err) 3395 goto err_out; 3396 3397 /* replace BTF string data and hash with deduped ones */ 3398 strset__free(d->btf->strs_set); 3399 d->btf->hdr->str_len = strset__data_size(d->strs_set); 3400 d->btf->strs_set = d->strs_set; 3401 d->strs_set = NULL; 3402 d->btf->strs_deduped = true; 3403 return 0; 3404 3405 err_out: 3406 strset__free(d->strs_set); 3407 d->strs_set = NULL; 3408 3409 return err; 3410 } 3411 3412 static long btf_hash_common(struct btf_type *t) 3413 { 3414 long h; 3415 3416 h = hash_combine(0, t->name_off); 3417 h = hash_combine(h, t->info); 3418 h = hash_combine(h, t->size); 3419 return h; 3420 } 3421 3422 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) 3423 { 3424 return t1->name_off == t2->name_off && 3425 t1->info == t2->info && 3426 t1->size == t2->size; 3427 } 3428 3429 /* Calculate type signature hash of INT or TAG. */ 3430 static long btf_hash_int_tag(struct btf_type *t) 3431 { 3432 __u32 info = *(__u32 *)(t + 1); 3433 long h; 3434 3435 h = btf_hash_common(t); 3436 h = hash_combine(h, info); 3437 return h; 3438 } 3439 3440 /* Check structural equality of two INTs or TAGs. */ 3441 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2) 3442 { 3443 __u32 info1, info2; 3444 3445 if (!btf_equal_common(t1, t2)) 3446 return false; 3447 info1 = *(__u32 *)(t1 + 1); 3448 info2 = *(__u32 *)(t2 + 1); 3449 return info1 == info2; 3450 } 3451 3452 /* Calculate type signature hash of ENUM. */ 3453 static long btf_hash_enum(struct btf_type *t) 3454 { 3455 long h; 3456 3457 /* don't hash vlen and enum members to support enum fwd resolving */ 3458 h = hash_combine(0, t->name_off); 3459 h = hash_combine(h, t->info & ~0xffff); 3460 h = hash_combine(h, t->size); 3461 return h; 3462 } 3463 3464 /* Check structural equality of two ENUMs. 
*/ 3465 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) 3466 { 3467 const struct btf_enum *m1, *m2; 3468 __u16 vlen; 3469 int i; 3470 3471 if (!btf_equal_common(t1, t2)) 3472 return false; 3473 3474 vlen = btf_vlen(t1); 3475 m1 = btf_enum(t1); 3476 m2 = btf_enum(t2); 3477 for (i = 0; i < vlen; i++) { 3478 if (m1->name_off != m2->name_off || m1->val != m2->val) 3479 return false; 3480 m1++; 3481 m2++; 3482 } 3483 return true; 3484 } 3485 3486 static inline bool btf_is_enum_fwd(struct btf_type *t) 3487 { 3488 return btf_is_enum(t) && btf_vlen(t) == 0; 3489 } 3490 3491 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) 3492 { 3493 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) 3494 return btf_equal_enum(t1, t2); 3495 /* ignore vlen when comparing */ 3496 return t1->name_off == t2->name_off && 3497 (t1->info & ~0xffff) == (t2->info & ~0xffff) && 3498 t1->size == t2->size; 3499 } 3500 3501 /* 3502 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, 3503 * as referenced type IDs equivalence is established separately during type 3504 * graph equivalence check algorithm. 3505 */ 3506 static long btf_hash_struct(struct btf_type *t) 3507 { 3508 const struct btf_member *member = btf_members(t); 3509 __u32 vlen = btf_vlen(t); 3510 long h = btf_hash_common(t); 3511 int i; 3512 3513 for (i = 0; i < vlen; i++) { 3514 h = hash_combine(h, member->name_off); 3515 h = hash_combine(h, member->offset); 3516 /* no hashing of referenced type ID, it can be unresolved yet */ 3517 member++; 3518 } 3519 return h; 3520 } 3521 3522 /* 3523 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type 3524 * IDs. This check is performed during type graph equivalence check and 3525 * referenced types equivalence is checked separately. 3526 */ 3527 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) 3528 { 3529 const struct btf_member *m1, *m2; 3530 __u16 vlen; 3531 int i; 3532 3533 if (!btf_equal_common(t1, t2)) 3534 return false; 3535 3536 vlen = btf_vlen(t1); 3537 m1 = btf_members(t1); 3538 m2 = btf_members(t2); 3539 for (i = 0; i < vlen; i++) { 3540 if (m1->name_off != m2->name_off || m1->offset != m2->offset) 3541 return false; 3542 m1++; 3543 m2++; 3544 } 3545 return true; 3546 } 3547 3548 /* 3549 * Calculate type signature hash of ARRAY, including referenced type IDs, 3550 * under assumption that they were already resolved to canonical type IDs and 3551 * are not going to change. 3552 */ 3553 static long btf_hash_array(struct btf_type *t) 3554 { 3555 const struct btf_array *info = btf_array(t); 3556 long h = btf_hash_common(t); 3557 3558 h = hash_combine(h, info->type); 3559 h = hash_combine(h, info->index_type); 3560 h = hash_combine(h, info->nelems); 3561 return h; 3562 } 3563 3564 /* 3565 * Check exact equality of two ARRAYs, taking into account referenced 3566 * type IDs, under assumption that they were already resolved to canonical 3567 * type IDs and are not going to change. 3568 * This function is called during reference types deduplication to compare 3569 * ARRAY to potential canonical representative.
3570 */ 3571 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) 3572 { 3573 const struct btf_array *info1, *info2; 3574 3575 if (!btf_equal_common(t1, t2)) 3576 return false; 3577 3578 info1 = btf_array(t1); 3579 info2 = btf_array(t2); 3580 return info1->type == info2->type && 3581 info1->index_type == info2->index_type && 3582 info1->nelems == info2->nelems; 3583 } 3584 3585 /* 3586 * Check structural compatibility of two ARRAYs, ignoring referenced type 3587 * IDs. This check is performed during type graph equivalence check and 3588 * referenced types equivalence is checked separately. 3589 */ 3590 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) 3591 { 3592 if (!btf_equal_common(t1, t2)) 3593 return false; 3594 3595 return btf_array(t1)->nelems == btf_array(t2)->nelems; 3596 } 3597 3598 /* 3599 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, 3600 * under assumption that they were already resolved to canonical type IDs and 3601 * are not going to change. 3602 */ 3603 static long btf_hash_fnproto(struct btf_type *t) 3604 { 3605 const struct btf_param *member = btf_params(t); 3606 __u16 vlen = btf_vlen(t); 3607 long h = btf_hash_common(t); 3608 int i; 3609 3610 for (i = 0; i < vlen; i++) { 3611 h = hash_combine(h, member->name_off); 3612 h = hash_combine(h, member->type); 3613 member++; 3614 } 3615 return h; 3616 } 3617 3618 /* 3619 * Check exact equality of two FUNC_PROTOs, taking into account referenced 3620 * type IDs, under assumption that they were already resolved to canonical 3621 * type IDs and are not going to change. 3622 * This function is called during reference types deduplication to compare 3623 * FUNC_PROTO to potential canonical representative. 3624 */ 3625 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) 3626 { 3627 const struct btf_param *m1, *m2; 3628 __u16 vlen; 3629 int i; 3630 3631 if (!btf_equal_common(t1, t2)) 3632 return false; 3633 3634 vlen = btf_vlen(t1); 3635 m1 = btf_params(t1); 3636 m2 = btf_params(t2); 3637 for (i = 0; i < vlen; i++) { 3638 if (m1->name_off != m2->name_off || m1->type != m2->type) 3639 return false; 3640 m1++; 3641 m2++; 3642 } 3643 return true; 3644 } 3645 3646 /* 3647 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type 3648 * IDs. This check is performed during type graph equivalence check and 3649 * referenced types equivalence is checked separately. 3650 */ 3651 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) 3652 { 3653 const struct btf_param *m1, *m2; 3654 __u16 vlen; 3655 int i; 3656 3657 /* skip return type ID */ 3658 if (t1->name_off != t2->name_off || t1->info != t2->info) 3659 return false; 3660 3661 vlen = btf_vlen(t1); 3662 m1 = btf_params(t1); 3663 m2 = btf_params(t2); 3664 for (i = 0; i < vlen; i++) { 3665 if (m1->name_off != m2->name_off) 3666 return false; 3667 m1++; 3668 m2++; 3669 } 3670 return true; 3671 } 3672 3673 /* Prepare split BTF for deduplication by calculating hashes of base BTF's 3674 * types and initializing the rest of the state (canonical type mapping) for 3675 * the fixed base BTF part. 
3676 */ 3677 static int btf_dedup_prep(struct btf_dedup *d) 3678 { 3679 struct btf_type *t; 3680 int type_id; 3681 long h; 3682 3683 if (!d->btf->base_btf) 3684 return 0; 3685 3686 for (type_id = 1; type_id < d->btf->start_id; type_id++) { 3687 t = btf_type_by_id(d->btf, type_id); 3688 3689 /* all base BTF types are self-canonical by definition */ 3690 d->map[type_id] = type_id; 3691 3692 switch (btf_kind(t)) { 3693 case BTF_KIND_VAR: 3694 case BTF_KIND_DATASEC: 3695 /* VAR and DATASEC are never hash/deduplicated */ 3696 continue; 3697 case BTF_KIND_CONST: 3698 case BTF_KIND_VOLATILE: 3699 case BTF_KIND_RESTRICT: 3700 case BTF_KIND_PTR: 3701 case BTF_KIND_FWD: 3702 case BTF_KIND_TYPEDEF: 3703 case BTF_KIND_FUNC: 3704 case BTF_KIND_FLOAT: 3705 h = btf_hash_common(t); 3706 break; 3707 case BTF_KIND_INT: 3708 case BTF_KIND_TAG: 3709 h = btf_hash_int_tag(t); 3710 break; 3711 case BTF_KIND_ENUM: 3712 h = btf_hash_enum(t); 3713 break; 3714 case BTF_KIND_STRUCT: 3715 case BTF_KIND_UNION: 3716 h = btf_hash_struct(t); 3717 break; 3718 case BTF_KIND_ARRAY: 3719 h = btf_hash_array(t); 3720 break; 3721 case BTF_KIND_FUNC_PROTO: 3722 h = btf_hash_fnproto(t); 3723 break; 3724 default: 3725 pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id); 3726 return -EINVAL; 3727 } 3728 if (btf_dedup_table_add(d, h, type_id)) 3729 return -ENOMEM; 3730 } 3731 3732 return 0; 3733 } 3734 3735 /* 3736 * Deduplicate primitive types, that can't reference other types, by calculating 3737 * their type signature hash and comparing them with any possible canonical 3738 * candidate. If no canonical candidate matches, type itself is marked as 3739 * canonical and is added into `btf_dedup->dedup_table` as another candidate. 3740 */ 3741 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) 3742 { 3743 struct btf_type *t = btf_type_by_id(d->btf, type_id); 3744 struct hashmap_entry *hash_entry; 3745 struct btf_type *cand; 3746 /* if we don't find equivalent type, then we are canonical */ 3747 __u32 new_id = type_id; 3748 __u32 cand_id; 3749 long h; 3750 3751 switch (btf_kind(t)) { 3752 case BTF_KIND_CONST: 3753 case BTF_KIND_VOLATILE: 3754 case BTF_KIND_RESTRICT: 3755 case BTF_KIND_PTR: 3756 case BTF_KIND_TYPEDEF: 3757 case BTF_KIND_ARRAY: 3758 case BTF_KIND_STRUCT: 3759 case BTF_KIND_UNION: 3760 case BTF_KIND_FUNC: 3761 case BTF_KIND_FUNC_PROTO: 3762 case BTF_KIND_VAR: 3763 case BTF_KIND_DATASEC: 3764 case BTF_KIND_TAG: 3765 return 0; 3766 3767 case BTF_KIND_INT: 3768 h = btf_hash_int_tag(t); 3769 for_each_dedup_cand(d, hash_entry, h) { 3770 cand_id = (__u32)(long)hash_entry->value; 3771 cand = btf_type_by_id(d->btf, cand_id); 3772 if (btf_equal_int_tag(t, cand)) { 3773 new_id = cand_id; 3774 break; 3775 } 3776 } 3777 break; 3778 3779 case BTF_KIND_ENUM: 3780 h = btf_hash_enum(t); 3781 for_each_dedup_cand(d, hash_entry, h) { 3782 cand_id = (__u32)(long)hash_entry->value; 3783 cand = btf_type_by_id(d->btf, cand_id); 3784 if (btf_equal_enum(t, cand)) { 3785 new_id = cand_id; 3786 break; 3787 } 3788 if (d->opts.dont_resolve_fwds) 3789 continue; 3790 if (btf_compat_enum(t, cand)) { 3791 if (btf_is_enum_fwd(t)) { 3792 /* resolve fwd to full enum */ 3793 new_id = cand_id; 3794 break; 3795 } 3796 /* resolve canonical enum fwd to full enum */ 3797 d->map[cand_id] = type_id; 3798 } 3799 } 3800 break; 3801 3802 case BTF_KIND_FWD: 3803 case BTF_KIND_FLOAT: 3804 h = btf_hash_common(t); 3805 for_each_dedup_cand(d, hash_entry, h) { 3806 cand_id = (__u32)(long)hash_entry->value; 3807 cand = btf_type_by_id(d->btf, 
cand_id); 3808 if (btf_equal_common(t, cand)) { 3809 new_id = cand_id; 3810 break; 3811 } 3812 } 3813 break; 3814 3815 default: 3816 return -EINVAL; 3817 } 3818 3819 d->map[type_id] = new_id; 3820 if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) 3821 return -ENOMEM; 3822 3823 return 0; 3824 } 3825 3826 static int btf_dedup_prim_types(struct btf_dedup *d) 3827 { 3828 int i, err; 3829 3830 for (i = 0; i < d->btf->nr_types; i++) { 3831 err = btf_dedup_prim_type(d, d->btf->start_id + i); 3832 if (err) 3833 return err; 3834 } 3835 return 0; 3836 } 3837 3838 /* 3839 * Check whether type is already mapped into canonical one (could be to itself). 3840 */ 3841 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) 3842 { 3843 return d->map[type_id] <= BTF_MAX_NR_TYPES; 3844 } 3845 3846 /* 3847 * Resolve type ID into its canonical type ID, if any; otherwise return original 3848 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow 3849 * STRUCT/UNION link and resolve it into canonical type ID as well. 3850 */ 3851 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) 3852 { 3853 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 3854 type_id = d->map[type_id]; 3855 return type_id; 3856 } 3857 3858 /* 3859 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original 3860 * type ID. 3861 */ 3862 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) 3863 { 3864 __u32 orig_type_id = type_id; 3865 3866 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) 3867 return type_id; 3868 3869 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) 3870 type_id = d->map[type_id]; 3871 3872 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) 3873 return type_id; 3874 3875 return orig_type_id; 3876 } 3877 3878 3879 static inline __u16 btf_fwd_kind(struct btf_type *t) 3880 { 3881 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT; 3882 } 3883 3884 /* Check if two given types are identical ARRAY definitions */ 3885 static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) 3886 { 3887 struct btf_type *t1, *t2; 3888 3889 t1 = btf_type_by_id(d->btf, id1); 3890 t2 = btf_type_by_id(d->btf, id2); 3891 if (!btf_is_array(t1) || !btf_is_array(t2)) 3892 return 0; 3893 3894 return btf_equal_array(t1, t2); 3895 } 3896 3897 /* 3898 * Check equivalence of BTF type graph formed by candidate struct/union (we'll 3899 * call it "candidate graph" in this description for brevity) to a type graph 3900 * formed by (potential) canonical struct/union ("canonical graph" for brevity 3901 * here, though keep in mind that not all types in canonical graph are 3902 * necessarily canonical representatives themselves, some of them might be 3903 * duplicates or their uniqueness might not have been established yet). 3904 * Returns: 3905 * - >0, if type graphs are equivalent; 3906 * - 0, if not equivalent; 3907 * - <0, on error. 3908 * 3909 * Algorithm performs side-by-side DFS traversal of both type graphs and checks 3910 * equivalence of BTF types at each step. If at any point BTF types in candidate 3911 * and canonical graphs are not compatible structurally, whole graphs are 3912 * incompatible. If types are structurally equivalent (i.e., all information 3913 * except referenced type IDs is exactly the same), a mapping from `canon_id` to 3914 * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
3915 * If a type references other types, then those referenced types are checked 3916 * for equivalence recursively. 3917 * 3918 * During DFS traversal, if we find that for current `canon_id` type we 3919 * already have some mapping in hypothetical map, we check for two possible 3920 * situations: 3921 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will 3922 * happen when type graphs have cycles. In this case we assume those two 3923 * types are equivalent. 3924 * - `canon_id` is mapped to a different type. This is a contradiction in our 3925 * hypothetical mapping, because the same type in the canonical graph would correspond 3926 * to two different types in the candidate graph, which for equivalent type 3927 * graphs shouldn't happen. This condition terminates equivalence check 3928 * with negative result. 3929 * 3930 * If the type graph traversal exhausts types to check and finds no contradiction, 3931 * then type graphs are equivalent. 3932 * 3933 * When checking types for equivalence, there is one special case: FWD types. 3934 * If FWD type resolution is allowed and one of the types (either from canonical 3935 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind 3936 * flag) and their names match, hypothetical mapping is updated to point from 3937 * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent, 3938 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently. 3939 * 3940 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution, 3941 * if there are two identically named (or anonymous) structs/unions that are 3942 * compatible structurally, one of which has a FWD field, while the other is a concrete 3943 * STRUCT/UNION, but according to C sources they are different structs/unions 3944 * that are referencing different types with the same name. This is extremely 3945 * unlikely to happen, but btf_dedup API allows disabling FWD resolution if 3946 * this logic is causing problems. 3947 * 3948 * Doing FWD resolution means that both candidate and/or canonical graphs can 3949 * consist of portions of the graph that come from multiple compilation units. 3950 * This is due to the fact that types within a single compilation unit are always 3951 * deduplicated and FWDs are already resolved, if the referenced struct/union 3952 * definition is available. So, if we had an unresolved FWD and found a corresponding 3953 * STRUCT/UNION, they will be from different compilation units. This 3954 * consequently means that when we "link" FWD to corresponding STRUCT/UNION, 3955 * type graph will likely have at least two different BTF types that describe 3956 * same type (e.g., most probably there will be two different BTF types for the 3957 * same 'int' primitive type) and could even have "overlapping" parts of type 3958 * graph that describe same subset of types. 3959 * 3960 * This in turn means that our assumption that each type in canonical graph 3961 * must correspond to exactly one type in candidate graph might not hold 3962 * anymore and will make it harder to detect contradictions using hypothetical 3963 * map. To handle this problem, we allow following FWD -> STRUCT/UNION 3964 * resolution only in the canonical graph. FWDs in candidate graphs are never 3965 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs 3966 * that can occur: 3967 * - Both types in canonical and candidate graphs are FWDs.
If they are 3968 * structurally equivalent, then they can either be both resolved to the 3969 * same STRUCT/UNION or not resolved at all. In both cases they are 3970 * equivalent and there is no need to resolve FWD on candidate side. 3971 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION, 3972 * so nothing to resolve as well, algorithm will check equivalence anyway. 3973 * - Type in canonical graph is FWD, while type in candidate is concrete 3974 * STRUCT/UNION. In this case candidate graph comes from single compilation 3975 * unit, so there is exactly one BTF type for each unique C type. After 3976 * resolving FWD into STRUCT/UNION, there might be more than one BTF type 3977 * in canonical graph mapping to single BTF type in candidate graph, but 3978 * because hypothetical mapping maps from canonical to candidate types, it's 3979 * alright, and we still maintain the property of having single `canon_id` 3980 * mapping to single `cand_id` (there could be two different `canon_id` 3981 * mapped to the same `cand_id`, but it's not contradictory). 3982 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate 3983 * graph is FWD. In this case we are just going to check compatibility of 3984 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll 3985 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to 3986 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs 3987 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from 3988 * canonical graph. 3989 */ 3990 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, 3991 __u32 canon_id) 3992 { 3993 struct btf_type *cand_type; 3994 struct btf_type *canon_type; 3995 __u32 hypot_type_id; 3996 __u16 cand_kind; 3997 __u16 canon_kind; 3998 int i, eq; 3999 4000 /* if both resolve to the same canonical, they must be equivalent */ 4001 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) 4002 return 1; 4003 4004 canon_id = resolve_fwd_id(d, canon_id); 4005 4006 hypot_type_id = d->hypot_map[canon_id]; 4007 if (hypot_type_id <= BTF_MAX_NR_TYPES) { 4008 /* In some cases compiler will generate different DWARF types 4009 * for *identical* array type definitions and use them for 4010 * different fields within the *same* struct. This breaks type 4011 * equivalence check, which makes an assumption that candidate 4012 * types sub-graph has a consistent and deduped-by-compiler 4013 * types within a single CU. So work around that by explicitly 4014 * allowing identical array types here. 
4015 		 */
4016 		return hypot_type_id == cand_id ||
4017 		       btf_dedup_identical_arrays(d, hypot_type_id, cand_id);
4018 	}
4019 
4020 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4021 		return -ENOMEM;
4022 
4023 	cand_type = btf_type_by_id(d->btf, cand_id);
4024 	canon_type = btf_type_by_id(d->btf, canon_id);
4025 	cand_kind = btf_kind(cand_type);
4026 	canon_kind = btf_kind(canon_type);
4027 
4028 	if (cand_type->name_off != canon_type->name_off)
4029 		return 0;
4030 
4031 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
4032 	if (!d->opts.dont_resolve_fwds
4033 	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4034 	    && cand_kind != canon_kind) {
4035 		__u16 real_kind;
4036 		__u16 fwd_kind;
4037 
4038 		if (cand_kind == BTF_KIND_FWD) {
4039 			real_kind = canon_kind;
4040 			fwd_kind = btf_fwd_kind(cand_type);
4041 		} else {
4042 			real_kind = cand_kind;
4043 			fwd_kind = btf_fwd_kind(canon_type);
4044 			/* we'd need to resolve base FWD to STRUCT/UNION */
4045 			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4046 				d->hypot_adjust_canon = true;
4047 		}
4048 		return fwd_kind == real_kind;
4049 	}
4050 
4051 	if (cand_kind != canon_kind)
4052 		return 0;
4053 
4054 	switch (cand_kind) {
4055 	case BTF_KIND_INT:
4056 		return btf_equal_int_tag(cand_type, canon_type);
4057 
4058 	case BTF_KIND_ENUM:
4059 		if (d->opts.dont_resolve_fwds)
4060 			return btf_equal_enum(cand_type, canon_type);
4061 		else
4062 			return btf_compat_enum(cand_type, canon_type);
4063 
4064 	case BTF_KIND_FWD:
4065 	case BTF_KIND_FLOAT:
4066 		return btf_equal_common(cand_type, canon_type);
4067 
4068 	case BTF_KIND_CONST:
4069 	case BTF_KIND_VOLATILE:
4070 	case BTF_KIND_RESTRICT:
4071 	case BTF_KIND_PTR:
4072 	case BTF_KIND_TYPEDEF:
4073 	case BTF_KIND_FUNC:
4074 		if (cand_type->info != canon_type->info)
4075 			return 0;
4076 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4077 
4078 	case BTF_KIND_ARRAY: {
4079 		const struct btf_array *cand_arr, *canon_arr;
4080 
4081 		if (!btf_compat_array(cand_type, canon_type))
4082 			return 0;
4083 		cand_arr = btf_array(cand_type);
4084 		canon_arr = btf_array(canon_type);
4085 		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4086 		if (eq <= 0)
4087 			return eq;
4088 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4089 	}
4090 
4091 	case BTF_KIND_STRUCT:
4092 	case BTF_KIND_UNION: {
4093 		const struct btf_member *cand_m, *canon_m;
4094 		__u16 vlen;
4095 
4096 		if (!btf_shallow_equal_struct(cand_type, canon_type))
4097 			return 0;
4098 		vlen = btf_vlen(cand_type);
4099 		cand_m = btf_members(cand_type);
4100 		canon_m = btf_members(canon_type);
4101 		for (i = 0; i < vlen; i++) {
4102 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4103 			if (eq <= 0)
4104 				return eq;
4105 			cand_m++;
4106 			canon_m++;
4107 		}
4108 
4109 		return 1;
4110 	}
4111 
4112 	case BTF_KIND_FUNC_PROTO: {
4113 		const struct btf_param *cand_p, *canon_p;
4114 		__u16 vlen;
4115 
4116 		if (!btf_compat_fnproto(cand_type, canon_type))
4117 			return 0;
4118 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4119 		if (eq <= 0)
4120 			return eq;
4121 		vlen = btf_vlen(cand_type);
4122 		cand_p = btf_params(cand_type);
4123 		canon_p = btf_params(canon_type);
4124 		for (i = 0; i < vlen; i++) {
4125 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4126 			if (eq <= 0)
4127 				return eq;
4128 			cand_p++;
4129 			canon_p++;
4130 		}
4131 		return 1;
4132 	}
4133 
4134 	default:
4135 		return -EINVAL;
4136 	}
4137 	return 0;
4138 }
4139 
4140 /*
4141  * Use hypothetical mapping, produced by successful type graph equivalence
4142  * check, to augment the existing struct/union canonical mapping, where possible.
4143  *
4144  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4145  * FWD -> STRUCT/UNION correspondence. FWD resolution is bidirectional:
4146  * it doesn't matter if the FWD type was part of the canonical graph or the
4147  * candidate one, we record the mapping anyway. As opposed to the carefulness
4148  * required for struct/union correspondence mapping (described below), for FWD
4149  * resolution it's not important: by the time a FWD type (a reference type) is
4150  * deduplicated, all structs/unions will have been deduped already anyway.
4151  *
4152  * Recording the STRUCT/UNION mapping is purely a performance optimization and
4153  * is not required for correctness. It needs to be done carefully to ensure that
4154  * a struct/union from the candidate's type graph is not mapped into a
4155  * corresponding struct/union from the canonical type graph that itself hasn't
4156  * been resolved into a canonical representative. The only guarantee we have is
4157  * that the canonical struct/union was determined as canonical and that won't
4158  * change. But any types referenced through that struct/union's fields could
4159  * still be unresolved, so in a case like that it's too early to establish any
4160  * kind of correspondence between structs/unions.
4161  *
4162  * No canonical correspondence is derived for primitive types (they are
4163  * completely deduplicated already anyway) or reference types (they rely on
4164  * stability of the struct/union canonical relationship for equivalence checks).
4165  */
4166 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4167 {
4168 	__u32 canon_type_id, targ_type_id;
4169 	__u16 t_kind, c_kind;
4170 	__u32 t_id, c_id;
4171 	int i;
4172 
4173 	for (i = 0; i < d->hypot_cnt; i++) {
4174 		canon_type_id = d->hypot_list[i];
4175 		targ_type_id = d->hypot_map[canon_type_id];
4176 		t_id = resolve_type_id(d, targ_type_id);
4177 		c_id = resolve_type_id(d, canon_type_id);
4178 		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4179 		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4180 		/*
4181 		 * Resolve FWD into STRUCT/UNION.
4182 		 * It's ok to resolve FWD into a STRUCT/UNION that's not yet
4183 		 * mapped to a canonical representative (as opposed to the
4184 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4185 		 * eventually that struct is going to be mapped and all resolved
4186 		 * FWDs will automatically resolve to the correct canonical
4187 		 * representative. This will happen before ref type deduping,
4188 		 * which critically depends on stability of these mappings. This
4189 		 * stability is not a requirement for STRUCT/UNION equivalence
4190 		 * checks, though.
4191 		 */
4192 
4193 		/* if it's the split BTF case, we still need to point base FWD
4194 		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4195 		 * will be resolved against base FWD. If we don't point base
4196 		 * canonical FWD to the resolved STRUCT/UNION, then all the
4197 		 * FWDs in split BTF won't be correctly resolved to a proper
4198 		 * STRUCT/UNION.
4199 		 */
4200 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4201 			d->map[c_id] = t_id;
4202 
4203 		/* if graph equivalence determined that we'd need to adjust
4204 		 * base canonical types, then we need to only point base FWDs
4205 		 * to STRUCTs/UNIONs and do no more modifications. For all
4206 		 * other purposes the type graphs were not equivalent.
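		 * I.e., when the canonical graph contained a base BTF FWD that
		 * was matched against a concrete STRUCT/UNION in split BTF, the
		 * match is used only to point that base FWD at the split BTF
		 * definition; the graphs are otherwise treated as
		 * non-equivalent for dedup purposes.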
4207 		 */
4208 		if (d->hypot_adjust_canon)
4209 			continue;
4210 
4211 		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4212 			d->map[t_id] = c_id;
4213 
4214 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4215 		    c_kind != BTF_KIND_FWD &&
4216 		    is_type_mapped(d, c_id) &&
4217 		    !is_type_mapped(d, t_id)) {
4218 			/*
4219 			 * as a perf optimization, we can map a struct/union
4220 			 * that's part of a type graph we just verified for
4221 			 * equivalence. We can do that only for a struct/union
4222 			 * that has a canonical representative, though.
4223 			 */
4224 			d->map[t_id] = c_id;
4225 		}
4226 	}
4227 }
4228 
4229 /*
4230  * Deduplicate struct/union types.
4231  *
4232  * For each struct/union type its type signature hash is calculated, taking
4233  * into account the type's name, size, and the number, order, and names of
4234  * fields, but ignoring type IDs referenced from fields, because they might
4235  * not be deduped completely until after the reference types deduplication
4236  * phase. This type hash is used to iterate over all potential canonical
4237  * types sharing the same hash. For each canonical candidate we check whether
4238  * the type graphs that they form (through referenced types in fields and so
4239  * on) are equivalent using the algorithm implemented in `btf_dedup_is_equiv`.
4240  * If such equivalence is found and BTF_KIND_FWD resolution is allowed, then
4241  * the hypothetical mapping (btf_dedup->hypot_map) produced by the
4242  * aforementioned type graph equivalence algorithm is used to record the
4243  * FWD -> STRUCT/UNION mapping. It's also used to potentially map other
4244  * structs/unions to their canonical representatives, if such a relationship
4245  * hasn't yet been established. This speeds up the algorithm by eliminating
4246  * some of the duplicate work.
4247  *
4248  * If no matching canonical representative was found, the struct/union is
4249  * marked as canonical for itself and is added into the btf_dedup->dedup_table
4250  * hash map for further lookups.
4251  */
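/* For example (an illustrative sketch), two CUs that each define
 *
 *	struct pair { int a; int b; };
 *
 * produce two BTF STRUCT types hashing identically (same name, size, and
 * member names/offsets). When the second one is processed, the first is found
 * through dedup_table, btf_dedup_is_equiv() confirms that their type graphs
 * match (both members reference equivalent 'int' types), and the second is
 * mapped to the first instead of becoming a new canonical type.
 */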
4252 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4253 {
4254 	struct btf_type *cand_type, *t;
4255 	struct hashmap_entry *hash_entry;
4256 	/* if we don't find equivalent type, then we are canonical */
4257 	__u32 new_id = type_id;
4258 	__u16 kind;
4259 	long h;
4260 
4261 	/* already deduped or is in process of deduping (loop detected) */
4262 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4263 		return 0;
4264 
4265 	t = btf_type_by_id(d->btf, type_id);
4266 	kind = btf_kind(t);
4267 
4268 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4269 		return 0;
4270 
4271 	h = btf_hash_struct(t);
4272 	for_each_dedup_cand(d, hash_entry, h) {
4273 		__u32 cand_id = (__u32)(long)hash_entry->value;
4274 		int eq;
4275 
4276 		/*
4277 		 * Even though btf_dedup_is_equiv() checks for
4278 		 * btf_shallow_equal_struct() internally when checking two
4279 		 * structs (unions) for equivalence, we need to guard here
4280 		 * against picking a matching FWD type as a dedup candidate.
4281 		 * This can happen due to a hash collision. In such a case,
4282 		 * just relying on btf_dedup_is_equiv() could lead to creating
4283 		 * a loop (FWD -> STRUCT and STRUCT -> FWD), because a FWD and
4284 		 * a compatible STRUCT/UNION are considered equivalent.
4285 		 */
4286 		cand_type = btf_type_by_id(d->btf, cand_id);
4287 		if (!btf_shallow_equal_struct(t, cand_type))
4288 			continue;
4289 
4290 		btf_dedup_clear_hypot_map(d);
4291 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
4292 		if (eq < 0)
4293 			return eq;
4294 		if (!eq)
4295 			continue;
4296 		btf_dedup_merge_hypot_map(d);
4297 		if (d->hypot_adjust_canon) /* not really equivalent */
4298 			continue;
4299 		new_id = cand_id;
4300 		break;
4301 	}
4302 
4303 	d->map[type_id] = new_id;
4304 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4305 		return -ENOMEM;
4306 
4307 	return 0;
4308 }
4309 
4310 static int btf_dedup_struct_types(struct btf_dedup *d)
4311 {
4312 	int i, err;
4313 
4314 	for (i = 0; i < d->btf->nr_types; i++) {
4315 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4316 		if (err)
4317 			return err;
4318 	}
4319 	return 0;
4320 }
4321 
4322 /*
4323  * Deduplicate reference type.
4324  *
4325  * Once all primitive and struct/union types are deduplicated, we can easily
4326  * deduplicate all other (reference) BTF types. This is done in two steps:
4327  *
4328  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4329  * resolution can be done either immediately for primitive or struct/union
4330  * types (because they were deduped in the previous two phases) or recursively
4331  * for reference types. Recursion will always terminate at either a primitive
4332  * or a struct/union type, at which point we can "unwind" the chain of
4333  * reference types one by one. There is no danger of encountering cycles
4334  * because in the C type system the only way to form a type cycle is through
4335  * a struct/union, so any chain of reference types, even those taking part in
4336  * a type cycle, will inevitably reach a struct/union at some point.
4337  *
4338  * 2. Once all referenced type IDs are resolved into canonical ones, the BTF
4339  * type becomes "stable", in the sense that no further deduplication will
4340  * cause any changes to it. With that, it's now possible to calculate the
4341  * type's signature hash (this time taking into account referenced type IDs)
4342  * and loop over all potential canonical representatives. If no match was
4343  * found, the current type will become the canonical representative of itself
4344  * and will be added into btf_dedup->dedup_table as another possible canonical
4345  * representative.
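 *
 * E.g., for a hypothetical chain CONST -> PTR -> STRUCT, step 1 first
 * recursively resolves the PTR's pointee STRUCT into its canonical ID
 * (already known from the struct/union phase) and rewrites the PTR's type
 * reference; the PTR itself is then looked up by its (now stable) hash in
 * step 2, and the same is finally done for the CONST.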
4346  */
4347 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4348 {
4349 	struct hashmap_entry *hash_entry;
4350 	__u32 new_id = type_id, cand_id;
4351 	struct btf_type *t, *cand;
4352 	/* if we don't find equivalent type, then we are representative type */
4353 	int ref_type_id;
4354 	long h;
4355 
4356 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4357 		return -ELOOP;
4358 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4359 		return resolve_type_id(d, type_id);
4360 
4361 	t = btf_type_by_id(d->btf, type_id);
4362 	d->map[type_id] = BTF_IN_PROGRESS_ID;
4363 
4364 	switch (btf_kind(t)) {
4365 	case BTF_KIND_CONST:
4366 	case BTF_KIND_VOLATILE:
4367 	case BTF_KIND_RESTRICT:
4368 	case BTF_KIND_PTR:
4369 	case BTF_KIND_TYPEDEF:
4370 	case BTF_KIND_FUNC:
4371 		ref_type_id = btf_dedup_ref_type(d, t->type);
4372 		if (ref_type_id < 0)
4373 			return ref_type_id;
4374 		t->type = ref_type_id;
4375 
4376 		h = btf_hash_common(t);
4377 		for_each_dedup_cand(d, hash_entry, h) {
4378 			cand_id = (__u32)(long)hash_entry->value;
4379 			cand = btf_type_by_id(d->btf, cand_id);
4380 			if (btf_equal_common(t, cand)) {
4381 				new_id = cand_id;
4382 				break;
4383 			}
4384 		}
4385 		break;
4386 
4387 	case BTF_KIND_TAG:
4388 		ref_type_id = btf_dedup_ref_type(d, t->type);
4389 		if (ref_type_id < 0)
4390 			return ref_type_id;
4391 		t->type = ref_type_id;
4392 
4393 		h = btf_hash_int_tag(t);
4394 		for_each_dedup_cand(d, hash_entry, h) {
4395 			cand_id = (__u32)(long)hash_entry->value;
4396 			cand = btf_type_by_id(d->btf, cand_id);
4397 			if (btf_equal_int_tag(t, cand)) {
4398 				new_id = cand_id;
4399 				break;
4400 			}
4401 		}
4402 		break;
4403 
4404 	case BTF_KIND_ARRAY: {
4405 		struct btf_array *info = btf_array(t);
4406 
4407 		ref_type_id = btf_dedup_ref_type(d, info->type);
4408 		if (ref_type_id < 0)
4409 			return ref_type_id;
4410 		info->type = ref_type_id;
4411 
4412 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
4413 		if (ref_type_id < 0)
4414 			return ref_type_id;
4415 		info->index_type = ref_type_id;
4416 
4417 		h = btf_hash_array(t);
4418 		for_each_dedup_cand(d, hash_entry, h) {
4419 			cand_id = (__u32)(long)hash_entry->value;
4420 			cand = btf_type_by_id(d->btf, cand_id);
4421 			if (btf_equal_array(t, cand)) {
4422 				new_id = cand_id;
4423 				break;
4424 			}
4425 		}
4426 		break;
4427 	}
4428 
4429 	case BTF_KIND_FUNC_PROTO: {
4430 		struct btf_param *param;
4431 		__u16 vlen;
4432 		int i;
4433 
4434 		ref_type_id = btf_dedup_ref_type(d, t->type);
4435 		if (ref_type_id < 0)
4436 			return ref_type_id;
4437 		t->type = ref_type_id;
4438 
4439 		vlen = btf_vlen(t);
4440 		param = btf_params(t);
4441 		for (i = 0; i < vlen; i++) {
4442 			ref_type_id = btf_dedup_ref_type(d, param->type);
4443 			if (ref_type_id < 0)
4444 				return ref_type_id;
4445 			param->type = ref_type_id;
4446 			param++;
4447 		}
4448 
4449 		h = btf_hash_fnproto(t);
4450 		for_each_dedup_cand(d, hash_entry, h) {
4451 			cand_id = (__u32)(long)hash_entry->value;
4452 			cand = btf_type_by_id(d->btf, cand_id);
4453 			if (btf_equal_fnproto(t, cand)) {
4454 				new_id = cand_id;
4455 				break;
4456 			}
4457 		}
4458 		break;
4459 	}
4460 
4461 	default:
4462 		return -EINVAL;
4463 	}
4464 
4465 	d->map[type_id] = new_id;
4466 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4467 		return -ENOMEM;
4468 
4469 	return new_id;
4470 }
4471 
4472 static int btf_dedup_ref_types(struct btf_dedup *d)
4473 {
4474 	int i, err;
4475 
4476 	for (i = 0; i < d->btf->nr_types; i++) {
4477 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4478 		if (err < 0)
4479 			return err;
4480 	}
4481 	/* we won't need d->dedup_table anymore */
4482 	hashmap__free(d->dedup_table);
4483 	d->dedup_table = NULL;
4484 	return 0;
4485 }
4486 
4487 /*
4488  * Compact types.
4489  *
4490  * After we have established a corresponding canonical representative for each
4491  * type, we can now eliminate types that are not canonical and leave only
4492  * canonical ones laid out sequentially in memory by copying them over
4493  * duplicates. During compaction the btf_dedup->hypot_map array is reused to
4494  * store a map from original type ID to a new compacted type ID, which will be
4495  * used during the next phase to "fix up" type IDs referenced from struct/union
4496  * and reference types.
4497  */
4498 static int btf_dedup_compact_types(struct btf_dedup *d)
4499 {
4500 	__u32 *new_offs;
4501 	__u32 next_type_id = d->btf->start_id;
4502 	const struct btf_type *t;
4503 	void *p;
4504 	int i, id, len;
4505 
4506 	/* we are going to reuse hypot_map to store compaction remapping */
4507 	d->hypot_map[0] = 0;
4508 	/* base BTF types are not renumbered */
4509 	for (id = 1; id < d->btf->start_id; id++)
4510 		d->hypot_map[id] = id;
4511 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
4512 		d->hypot_map[id] = BTF_UNPROCESSED_ID;
4513 
4514 	p = d->btf->types_data;
4515 
4516 	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
4517 		if (d->map[id] != id)
4518 			continue;
4519 
4520 		t = btf__type_by_id(d->btf, id);
4521 		len = btf_type_size(t);
4522 		if (len < 0)
4523 			return len;
4524 
4525 		memmove(p, t, len);
4526 		d->hypot_map[id] = next_type_id;
4527 		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
4528 		p += len;
4529 		next_type_id++;
4530 	}
4531 
4532 	/* shrink struct btf's internal types index and update btf_header */
4533 	d->btf->nr_types = next_type_id - d->btf->start_id;
4534 	d->btf->type_offs_cap = d->btf->nr_types;
4535 	d->btf->hdr->type_len = p - d->btf->types_data;
4536 	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
4537 				       sizeof(*new_offs));
4538 	if (d->btf->type_offs_cap && !new_offs)
4539 		return -ENOMEM;
4540 	d->btf->type_offs = new_offs;
4541 	d->btf->hdr->str_off = d->btf->hdr->type_len;
4542 	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
4543 	return 0;
4544 }
4545 
4546 /*
4547  * Figure out the final (deduplicated and compacted) type ID for the provided
4548  * original `type_id` by first resolving it into the corresponding canonical
4549  * type ID and then mapping it to the deduplicated type ID stored in
4550  * btf_dedup->hypot_map, which is populated during the compaction phase.
4551  */
4552 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4553 {
4554 	struct btf_dedup *d = ctx;
4555 	__u32 resolved_type_id, new_type_id;
4556 
4557 	resolved_type_id = resolve_type_id(d, *type_id);
4558 	new_type_id = d->hypot_map[resolved_type_id];
4559 	if (new_type_id > BTF_MAX_NR_TYPES)
4560 		return -EINVAL;
4561 
4562 	*type_id = new_type_id;
4563 	return 0;
4564 }
4565 
4566 /*
4567  * Remap referenced type IDs into deduped type IDs.
4568  *
4569  * After BTF types are deduplicated and compacted, their final type IDs may
4570  * differ from the original ones. The map from an original to a corresponding
4571  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4572  * the compaction phase. During the remapping phase we rewrite all type IDs
4573  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4574  * their final deduped type IDs.
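 *
 * E.g., if (hypothetically) original type [9] was deduped to canonical type
 * [5], and compaction moved [5] to ID [3] (d->hypot_map[5] == 3), then every
 * reference to type 9 is rewritten to reference type 3.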
4575  */
4576 static int btf_dedup_remap_types(struct btf_dedup *d)
4577 {
4578 	int i, r;
4579 
4580 	for (i = 0; i < d->btf->nr_types; i++) {
4581 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4582 
4583 		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
4584 		if (r)
4585 			return r;
4586 	}
4587 
4588 	if (!d->btf_ext)
4589 		return 0;
4590 
4591 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
4592 	if (r)
4593 		return r;
4594 
4595 	return 0;
4596 }
4597 
4598 /*
4599  * Probe a few well-known locations for the vmlinux kernel image and try to
4600  * load BTF data out of it to use for the target BTF.
4601  */
4602 struct btf *btf__load_vmlinux_btf(void)
4603 {
4604 	struct {
4605 		const char *path_fmt;
4606 		bool raw_btf;
4607 	} locations[] = {
4608 		/* try canonical vmlinux BTF through sysfs first */
4609 		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
4610 		/* fall back to trying to find vmlinux ELF on disk otherwise */
4611 		{ "/boot/vmlinux-%1$s" },
4612 		{ "/lib/modules/%1$s/vmlinux-%1$s" },
4613 		{ "/lib/modules/%1$s/build/vmlinux" },
4614 		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
4615 		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
4616 		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
4617 		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
4618 	};
4619 	char path[PATH_MAX + 1];
4620 	struct utsname buf;
4621 	struct btf *btf;
4622 	int i, err;
4623 
4624 	uname(&buf);
4625 
4626 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
4627 		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
4628 
4629 		if (access(path, R_OK))
4630 			continue;
4631 
4632 		if (locations[i].raw_btf)
4633 			btf = btf__parse_raw(path);
4634 		else
4635 			btf = btf__parse_elf(path, NULL);
4636 		err = libbpf_get_error(btf);
4637 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
4638 		if (err)
4639 			continue;
4640 
4641 		return btf;
4642 	}
4643 
4644 	pr_warn("failed to find valid kernel BTF\n");
4645 	return libbpf_err_ptr(-ESRCH);
4646 }
4647 
4648 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
4649 
4650 struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
4651 {
4652 	char path[80];
4653 
4654 	snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
4655 	return btf__parse_split(path, vmlinux_btf);
4656 }
4657 
4658 int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
4659 {
4660 	int i, n, err;
4661 
4662 	switch (btf_kind(t)) {
4663 	case BTF_KIND_INT:
4664 	case BTF_KIND_FLOAT:
4665 	case BTF_KIND_ENUM:
4666 		return 0;
4667 
4668 	case BTF_KIND_FWD:
4669 	case BTF_KIND_CONST:
4670 	case BTF_KIND_VOLATILE:
4671 	case BTF_KIND_RESTRICT:
4672 	case BTF_KIND_PTR:
4673 	case BTF_KIND_TYPEDEF:
4674 	case BTF_KIND_FUNC:
4675 	case BTF_KIND_VAR:
4676 	case BTF_KIND_TAG:
4677 		return visit(&t->type, ctx);
4678 
4679 	case BTF_KIND_ARRAY: {
4680 		struct btf_array *a = btf_array(t);
4681 
4682 		err = visit(&a->type, ctx);
4683 		err = err ?: visit(&a->index_type, ctx);
4684 		return err;
4685 	}
4686 
4687 	case BTF_KIND_STRUCT:
4688 	case BTF_KIND_UNION: {
4689 		struct btf_member *m = btf_members(t);
4690 
4691 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4692 			err = visit(&m->type, ctx);
4693 			if (err)
4694 				return err;
4695 		}
4696 		return 0;
4697 	}
4698 
4699 	case BTF_KIND_FUNC_PROTO: {
4700 		struct btf_param *m = btf_params(t);
4701 
4702 		err = visit(&t->type, ctx);
4703 		if (err)
4704 			return err;
4705 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4706 			err = visit(&m->type, ctx);
4707 			if (err)
4708 				return err;
4709 		}
4710 		return 0;
4711 	}
4712 
4713 	case BTF_KIND_DATASEC: {
4714 		struct btf_var_secinfo *m = btf_var_secinfos(t);
4715 
4716 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4717 			err = visit(&m->type, ctx);
4718 			if (err)
4719 				return err;
4720 		}
4721 		return 0;
4722 	}
4723 
4724 	default:
4725 		return -EINVAL;
4726 	}
4727 }
4728 
4729 int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
4730 {
4731 	int i, n, err;
4732 
4733 	err = visit(&t->name_off, ctx);
4734 	if (err)
4735 		return err;
4736 
4737 	switch (btf_kind(t)) {
4738 	case BTF_KIND_STRUCT:
4739 	case BTF_KIND_UNION: {
4740 		struct btf_member *m = btf_members(t);
4741 
4742 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4743 			err = visit(&m->name_off, ctx);
4744 			if (err)
4745 				return err;
4746 		}
4747 		break;
4748 	}
4749 	case BTF_KIND_ENUM: {
4750 		struct btf_enum *m = btf_enum(t);
4751 
4752 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4753 			err = visit(&m->name_off, ctx);
4754 			if (err)
4755 				return err;
4756 		}
4757 		break;
4758 	}
4759 	case BTF_KIND_FUNC_PROTO: {
4760 		struct btf_param *m = btf_params(t);
4761 
4762 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
4763 			err = visit(&m->name_off, ctx);
4764 			if (err)
4765 				return err;
4766 		}
4767 		break;
4768 	}
4769 	default:
4770 		break;
4771 	}
4772 
4773 	return 0;
4774 }
4775 
4776 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
4777 {
4778 	const struct btf_ext_info *seg;
4779 	struct btf_ext_info_sec *sec;
4780 	int i, err;
4781 
4782 	seg = &btf_ext->func_info;
4783 	for_each_btf_ext_sec(seg, sec) {
4784 		struct bpf_func_info_min *rec;
4785 
4786 		for_each_btf_ext_rec(seg, sec, i, rec) {
4787 			err = visit(&rec->type_id, ctx);
4788 			if (err < 0)
4789 				return err;
4790 		}
4791 	}
4792 
4793 	seg = &btf_ext->core_relo_info;
4794 	for_each_btf_ext_sec(seg, sec) {
4795 		struct bpf_core_relo *rec;
4796 
4797 		for_each_btf_ext_rec(seg, sec, i, rec) {
4798 			err = visit(&rec->type_id, ctx);
4799 			if (err < 0)
4800 				return err;
4801 		}
4802 	}
4803 
4804 	return 0;
4805 }
4806 
4807 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
4808 {
4809 	const struct btf_ext_info *seg;
4810 	struct btf_ext_info_sec *sec;
4811 	int i, err;
4812 
4813 	seg = &btf_ext->func_info;
4814 	for_each_btf_ext_sec(seg, sec) {
4815 		err = visit(&sec->sec_name_off, ctx);
4816 		if (err)
4817 			return err;
4818 	}
4819 
4820 	seg = &btf_ext->line_info;
4821 	for_each_btf_ext_sec(seg, sec) {
4822 		struct bpf_line_info_min *rec;
4823 
4824 		err = visit(&sec->sec_name_off, ctx);
4825 		if (err)
4826 			return err;
4827 
4828 		for_each_btf_ext_rec(seg, sec, i, rec) {
4829 			err = visit(&rec->file_name_off, ctx);
4830 			if (err)
4831 				return err;
4832 			err = visit(&rec->line_off, ctx);
4833 			if (err)
4834 				return err;
4835 		}
4836 	}
4837 
4838 	seg = &btf_ext->core_relo_info;
4839 	for_each_btf_ext_sec(seg, sec) {
4840 		struct bpf_core_relo *rec;
4841 
4842 		err = visit(&sec->sec_name_off, ctx);
4843 		if (err)
4844 			return err;
4845 
4846 		for_each_btf_ext_rec(seg, sec, i, rec) {
4847 			err = visit(&rec->access_str_off, ctx);
4848 			if (err)
4849 				return err;
4850 		}
4851 	}
4852 
4853 	return 0;
4854 }
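/* Example usage of the kernel/module BTF loaders above (an illustrative
 * sketch, not part of libbpf itself; the module name is hypothetical):
 *
 *	struct btf *vmlinux_btf, *module_btf;
 *	int err;
 *
 *	vmlinux_btf = btf__load_vmlinux_btf();
 *	err = libbpf_get_error(vmlinux_btf);
 *	if (err)
 *		return err;
 *
 *	// module BTF is split BTF on top of vmlinux base BTF
 *	module_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
 *	err = libbpf_get_error(module_btf);
 *	if (err) {
 *		btf__free(vmlinux_btf);
 *		return err;
 *	}
 *	...
 *	btf__free(module_btf);
 *	btf__free(vmlinux_btf);
 */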