// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

static void get_obj_name(char *name, const char *file)
{
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
	name[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}

static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
	int i;

	sprintf(guard, "__%s_%s__", obj_name, suffix);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("\tstruct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, its name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("\t} *%s;\n", sec_ident);
	return 0;
}
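/* For illustration: for a hypothetical object "test" whose .bss contains
 * "int sample_cnt;" followed by a pointer recorded at offset 8, the
 * function above emits roughly:
 *
 *	struct test__bss {
 *		int sample_cnt;
 *		char __pad0[4];
 *		void *ptr_var;
 *	} *bss;
 *
 * The __pad0 member is a product of the conservative 32-bit alignment
 * assumption described in the comment above: pointer alignment is capped
 * at 4 bytes, so the gap before the BTF-recorded offset 8 has to be
 * filled with explicit padding.
 */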
static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
	int n = btf__type_cnt(btf), i;
	char sec_ident[256];

	for (i = 1; i < n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (!btf_is_datasec(t))
			continue;

		name = btf__str_by_offset(btf, t->name_off);
		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
			continue;

		if (strcmp(sec_ident, map_ident) == 0)
			return t;
	}
	return NULL;
}

static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
		return false;

	if (!get_map_ident(map, buf, sz))
		return false;

	return true;
}

static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("\tstruct %s__%s {\n", obj_name, map_ident);
			printf("\t} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}

out:
	btf_dump__free(d);
	return err;
}

static bool btf_is_ptr_to_func_proto(const struct btf *btf,
				     const struct btf_type *v)
{
	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
}

static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf("\tstruct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("typeof(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf("\t} %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}

static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
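/* A minimal illustration of the template format codegen() expects. The
 * first template line (tabs followed by '\n') establishes the baseline
 * indentation; that many tabs are then stripped from every subsequent
 * line and trailing whitespace is trimmed before the result is handed to
 * vprintf(). For example (hypothetical format argument):
 *
 *	codegen("\
 *		\n\
 *		struct %1$s; \n\
 *		", "my_obj");
 *
 * prints exactly "struct my_obj;\n".
 */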
static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	codegen("\
		\n\
		__attribute__((unused)) static void \n\
		%1$s__assert(struct %1$s *s __attribute__((unused))) \n\
		{ \n\
		#ifdef __cplusplus \n\
		#define _Static_assert static_assert \n\
		#endif \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus \n\
		#undef _Static_assert \n\
		#endif \n\
		} \n\
		");
}
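/* For a hypothetical skeleton "test" with "int sample_cnt" in .bss,
 * codegen_asserts() above emits:
 *
 *	_Static_assert(sizeof(s->bss->sample_cnt) == 4, "unexpected size of 'sample_cnt'");
 *
 * so a stale pre-generated skeleton header fails to compile whenever the
 * BPF object's data section layout changes.
 */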
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int \n\
			%1$s__%2$s__attach(struct %1$s *skel) \n\
			{ \n\
				int prog_fd = skel->progs.%2$s.prog_fd; \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0) \n\
					skel->links.%1$s_fd = fd; \n\
				return fd; \n\
			} \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *skel) \n\
		{ \n\
			int ret = 0; \n\
			\n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0; \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *skel) \n\
		{ \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd); \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		} \n\
		");
}

static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			%1$s__detach(skel); \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd); \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd); \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel); \n\
		} \n\
		",
		obj_name);
}

static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		}; \n\
		", obj_name);

	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = skel_alloc(sizeof(*skel)); \n\
			if (!skel) \n\
				goto cleanup; \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
				skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
			", ident);
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
			\n\
			\", %1$zd, %2$zd); \n\
				if (!skel->%3$s) \n\
					goto cleanup; \n\
				skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
			", bpf_map_mmap_sz(map), mmap_size, ident);
	}
	codegen("\
		\n\
			return skel; \n\
		cleanup: \n\
			%1$s__destroy(skel); \n\
			return NULL; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *skel) \n\
		{ \n\
			struct bpf_load_and_run_opts opts = {}; \n\
			int err; \n\
			\n\
			opts.ctx = (struct bpf_loader_ctx *)skel; \n\
			opts.data_sz = %2$d; \n\
			opts.data = (void *)\"\\ \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\"; \n\
		");

	codegen("\
		\n\
			opts.insns_sz = %d; \n\
			opts.insns = (void *)\"\\ \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\"; \n\
			err = bpf_load_and_run(&opts); \n\
			if (err < 0) \n\
				return err; \n\
		", obj_name);
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
			\n\
				skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
						%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
				if (!skel->%1$s) \n\
					return -ENOMEM; \n\
			",
			ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = %1$s__open(); \n\
			if (!skel) \n\
				return NULL; \n\
			if (%1$s__load(skel)) { \n\
				%1$s__destroy(skel); \n\
				return NULL; \n\
			} \n\
			return skel; \n\
		} \n\
		\n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %s */ \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}
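/* Sketch of how the API generated above is consumed, assuming a
 * hypothetical object named "test":
 *
 *	struct test *skel;
 *
 *	skel = test__open_and_load();
 *	if (!skel)
 *		return -1;
 *	if (test__attach(skel))
 *		goto cleanup;
 *	...
 *	cleanup:
 *	test__destroy(skel);
 *
 * The same calling convention is produced by both the loader-mode code
 * above and the regular skeleton code generated in do_skeleton() below.
 */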
static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
{
	struct bpf_map *map;
	char ident[256];
	size_t i;

	if (!map_cnt)
		return;

	codegen("\
		\n\
		\n\
			/* maps */ \n\
			s->map_cnt = %zu; \n\
			s->map_skel_sz = sizeof(*s->maps); \n\
			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
			if (!s->maps) \n\
				goto err; \n\
		",
		map_cnt
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
			\n\
				s->maps[%zu].name = \"%s\"; \n\
				s->maps[%zu].map = &obj->maps.%s; \n\
			",
			i, bpf_map__name(map), i, ident);
		/* memory-mapped internal maps */
		if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
			       i, ident);
		}
		i++;
	}
}

static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
	struct bpf_program *prog;
	int i;

	if (!prog_cnt)
		return;

	codegen("\
		\n\
		\n\
			/* programs */ \n\
			s->prog_cnt = %zu; \n\
			s->prog_skel_sz = sizeof(*s->progs); \n\
			s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
			if (!s->progs) \n\
				goto err; \n\
		",
		prog_cnt
	);
	i = 0;
	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
			\n\
				s->progs[%1$zu].name = \"%2$s\"; \n\
				s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
			",
			i, bpf_program__name(prog));

		if (populate_links) {
			codegen("\
				\n\
					s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
		}
		i++;
	}
}

static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name, "SKEL_H");
	if (use_loader) {
		codegen("\
			\n\
			/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
			/* THIS FILE IS AUTOGENERATED! */ \n\
			#ifndef %2$s \n\
			#define %2$s \n\
			\n\
			#include <bpf/skel_internal.h> \n\
			\n\
			struct %1$s { \n\
				struct bpf_loader_ctx ctx; \n\
			",
			obj_name, header_guard
		);
	} else {
		codegen("\
			\n\
			/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
			\n\
			/* THIS FILE IS AUTOGENERATED! */ \n\
			#ifndef %2$s \n\
			#define %2$s \n\
			\n\
			#include <errno.h> \n\
			#include <stdlib.h> \n\
			#include <bpf/libbpf.h> \n\
			\n\
			struct %1$s { \n\
				struct bpf_object_skeleton *skeleton; \n\
				struct bpf_object *obj; \n\
			",
			obj_name, header_guard
		);
	}

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static inline struct %1$s *open_and_load(); \n\
			static inline int load(struct %1$s *skel); \n\
			static inline int attach(struct %1$s *skel); \n\
			static inline void detach(struct %1$s *skel); \n\
			static inline void destroy(struct %1$s *skel); \n\
			static inline const void *elf_bytes(size_t *sz); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *obj) \n\
		{ \n\
			if (!obj) \n\
				return; \n\
			if (obj->skeleton) \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj); \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				errno = ENOMEM; \n\
				return NULL; \n\
			} \n\
			\n\
			err = %1$s__create_skeleton(obj); \n\
			if (err) \n\
				goto err_out; \n\
			\n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err) \n\
				goto err_out; \n\
			\n\
			return obj; \n\
		err_out: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			return %1$s__open_opts(NULL); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__load_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = %1$s__open(); \n\
			if (!obj) \n\
				return NULL; \n\
			err = %1$s__load(obj); \n\
			if (err) { \n\
				%1$s__destroy(obj); \n\
				errno = -err; \n\
				return NULL; \n\
			} \n\
			return obj; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__attach_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__detach_skeleton(obj->skeleton); \n\
		} \n\
		",
		obj_name
	);

	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz); \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj) \n\
		{ \n\
			struct bpf_object_skeleton *s; \n\
			\n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s) \n\
				goto err; \n\
			\n\
			s->sz = sizeof(*s); \n\
			s->name = \"%1$s\"; \n\
			s->obj = &obj->obj; \n\
		",
		obj_name
	);

	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

	codegen("\
		\n\
		\n\
			s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
			\n\
			obj->skeleton = s; \n\
			return 0; \n\
		err: \n\
			bpf_object__destroy_skeleton(s); \n\
			return -ENOMEM; \n\
		} \n\
		\n\
		static inline const void *%2$s__elf_bytes(size_t *sz) \n\
		{ \n\
			*sz = %1$d; \n\
			return (const void *)\"\\ \n\
		"
		, file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\"; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
		#endif /* __cplusplus */ \n\
		\n\
		",
		obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %1$s */ \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
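/* Typical invocation, mirroring the help text in do_help() (file names
 * are hypothetical):
 *
 *	bpftool gen skeleton test.bpf.o name test > test.skel.h
 *
 * The "name" keyword is optional; without it, the object name is derived
 * from the input file name by get_obj_name().
 */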
/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
static int do_subskeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_object *obj; \n\
			struct bpf_object_subskeleton *subskel; \n\
		", obj_name, header_guard);

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
			       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object *src);\n\
			static inline void destroy(struct %1$s *skel); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static inline void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			if (skel->subskel) \n\
				bpf_object__destroy_subskeleton(skel->subskel);\n\
			free(skel); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(const struct bpf_object *src) \n\
		{ \n\
			struct %1$s *obj; \n\
			struct bpf_object_subskeleton *s; \n\
			int err; \n\
			\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				errno = ENOMEM; \n\
				goto err; \n\
			} \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
			if (!s) { \n\
				errno = ENOMEM; \n\
				goto err; \n\
			} \n\
			s->sz = sizeof(*s); \n\
			s->obj = src; \n\
			s->var_skel_sz = sizeof(*s->vars); \n\
			obj->subskel = s; \n\
			\n\
			/* vars */ \n\
			s->var_cnt = %2$d; \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
			if (!s->vars) { \n\
				errno = ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		obj_name, var_cnt
	);

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type */
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator i.e. maps%s becomes maps.data
			 */
			codegen("\
				\n\
				\n\
					s->vars[%3$d].name = \"%1$s\"; \n\
					s->vars[%3$d].map = &obj->maps.%2$s; \n\
					s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
				", var_name, ident, var_idx);

			var_idx++;
		}
	}

	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
		\n\
			err = bpf_object__open_subskeleton(s); \n\
			if (err) \n\
				goto err; \n\
			\n\
			return obj; \n\
		err: \n\
			%1$s__destroy(obj); \n\
			return NULL; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		#endif /* __cplusplus */ \n\
		\n\
		#endif /* %2$s */ \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}
static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}
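/* Example invocation (hypothetical file names), matching the help text in
 * do_help() below: statically link several BPF object files into one:
 *
 *	bpftool gen object linked.bpf.o foo.bpf.o bar.bpf.o
 */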
static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}

static int btf_save_raw(const struct btf *btf, const char *path)
{
	const void *data;
	FILE *f = NULL;
	__u32 data_sz;
	int err = 0;

	data = btf__raw_data(btf, &data_sz);
	if (!data)
		return -ENOMEM;

	f = fopen(path, "wb");
	if (!f)
		return -errno;

	if (fwrite(data, 1, data_sz, f) != data_sz)
		err = -errno;

	fclose(f);
	return err;
}

struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};

static size_t btfgen_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

static bool btfgen_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

static void *u32_as_hash_key(__u32 x)
{
	return (void *)(uintptr_t)x;
}

static void btfgen_free_info(struct btfgen_info *info)
{
	if (!info)
		return;

	btf__free(info->src_btf);
	btf__free(info->marked_btf);

	free(info);
}

static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
	struct btfgen_info *info;
	int err;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;

	info->src_btf = btf__parse(targ_btf_path, NULL);
	if (!info->src_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	info->marked_btf = btf__parse(targ_btf_path, NULL);
	if (!info->marked_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	return info;

err_out:
	btfgen_free_info(info);
	errno = -err;
	return NULL;
}

#define MARKED UINT32_MAX

static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
{
	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
	struct btf_member *m = btf_members(t) + idx;

	m->name_off = MARKED;
}

static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			p_err("unsupported kind: %s (%d)",
			      btf_kind_str(btf_type), btf_type->type);
			return -EINVAL;
		}
	}

	return 0;
}

static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, true);
}

static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, false);
}

static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{
	switch (res->relo_kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return btfgen_record_field_relo(info, res);
	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
		return 0;
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return btfgen_record_type_relo(info, res);
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return btfgen_record_enumval_relo(info, res);
	default:
		return -EINVAL;
	}
}

static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL;

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}

/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const void *type_key = u32_as_hash_key(relo->type_id);
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, type_key, (void **)&cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
		}
		hashmap__free(cand_cache);
	}

	return err;
}
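/* Illustration of the remapping callback below: if btfgen_get_btf() built
 * ids = { 0, 2, 0, 1 }, a reference to source type ID 3 is rewritten to
 * ID 1 in the minimized BTF, while references to types that were never
 * marked map to ID 0 (void); this is how pointers whose targets were
 * deliberately not followed degrade to plain void pointers.
 */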
static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
	unsigned int *ids = ctx;

	*type_id = ids[*type_id];

	return 0;
}

/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}

/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided into two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations empty struct / unions are
 * generated and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. This function takes care of only adding the structure
 * and union members that were marked as used and it also fixes up the
 * type IDs on the generated BTF object.
 */
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}
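/* Example invocation (hypothetical file names), matching the help text:
 *
 *	bpftool gen min_core_btf vmlinux.btf vmlinux_min.btf prog1.bpf.o prog2.bpf.o
 *
 * Here vmlinux.btf is the source BTF, vmlinux_min.btf the minimized
 * output, and the remaining arguments are the BPF objects whose CO-RE
 * relocations determine which types are kept.
 */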
static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	i = 0;
	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}

static const struct cmd cmds[] = {
	{ "object",		do_object },
	{ "skeleton",		do_skeleton },
	{ "subskeleton",	do_subskeleton },
	{ "min_core_btf",	do_min_core_btf},
	{ "help",		do_help },
	{ 0 }
};

int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}